| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| python | mahmoud__boltons | tests/test_iterutils.py | {"start": 1701, "end": 11672} |
class ____:
# TODO: test namedtuples and other immutable containers
def test_basic_clone(self):
orig = {"a": "b", "c": [1, 2]}
assert orig == remap(orig)
orig2 = [{1: 2}, {"a": "b", "c": [1, 2, {"cat": "dog"}]}]
assert orig2 == remap(orig2)
def test_empty(self):
assert [] == remap([])
assert {} == remap({})
assert set() == remap(set())
def test_unremappable(self):
obj = object()
with pytest.raises(TypeError):
remap(obj)
def test_basic_upper(self):
orig = {'a': 1, 'b': object(), 'c': {'d': set()}}
remapped = remap(orig, lambda p, k, v: (k.upper(), v))
assert orig['a'] == remapped['A']
assert orig['b'] == remapped['B']
assert orig['c']['d'] == remapped['C']['D']
def test_item_drop(self):
orig = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
even_items = remap(orig, lambda p, k, v: not (v % 2))
assert even_items == [0, 2, 4, 6, 8]
def test_noncallables(self):
with pytest.raises(TypeError):
remap([], visit='test')
with pytest.raises(TypeError):
remap([], enter='test')
with pytest.raises(TypeError):
remap([], exit='test')
def test_sub_selfref(self):
coll = [0, 1, 2, 3]
sub = []
sub.append(sub)
coll.append(sub)
with pytest.raises(RuntimeError):
# if equal, should recurse infinitely
assert coll == remap(coll)
def test_root_selfref(self):
selfref = [0, 1, 2, 3]
selfref.append(selfref)
with pytest.raises(RuntimeError):
assert selfref == remap(selfref)
selfref2 = {}
selfref2['self'] = selfref2
with pytest.raises(RuntimeError):
assert selfref2 == remap(selfref2)
def test_duperef(self):
val = ['hello']
duperef = [val, val]
remapped = remap(duperef)
assert remapped[0] is remapped[1]
assert remapped[0] is not duperef[0]
def test_namedtuple(self):
"""TODO: this fails right now because namedtuples' __new__ is
overridden to accept arguments. remap's default_enter tries
to create an empty namedtuple and gets a TypeError.
Could make it so that immutable types actually don't create a
blank new parent and instead use the old_parent as a
placeholder, creating a new one at exit-time from the value's
__class__ (how default_exit works now). But even then it would
have to *args in the values, as namedtuple constructors don't
take an iterable.
"""
Point = namedtuple('Point', 'x y')
point_map = {'origin': [Point(0, 0)]}
with pytest.raises(TypeError):
remapped = remap(point_map)
assert isinstance(remapped['origin'][0], Point)
def test_path(self):
path_map = {}
# test visit's path
target_str = 'test'
orig = [[[target_str]]]
ref_path = (0, 0, 0)
def visit(path, key, value):
if value is target_str:
path_map['target_str'] = path + (key,)
return key, value
remapped = remap(orig, visit=visit)
assert remapped == orig
assert path_map['target_str'] == ref_path
# test enter's path
target_obj = object()
orig = {'a': {'b': {'c': {'d': ['e', target_obj, 'f']}}}}
ref_path = ('a', 'b', 'c', 'd', 1)
def enter(path, key, value):
if value is target_obj:
path_map['target_obj'] = path + (key,)
return default_enter(path, key, value)
remapped = remap(orig, enter=enter)
assert remapped == orig
assert path_map['target_obj'] == ref_path
# test exit's path
target_set = frozenset([1, 7, 3, 8])
orig = [0, 1, 2, [3, 4, [5, target_set]]]
ref_path = (3, 2, 1)
def exit(path, key, old_parent, new_parent, new_items):
if old_parent is target_set:
path_map['target_set'] = path + (key,)
return default_exit(path, key, old_parent, new_parent, new_items)
remapped = remap(orig, exit=exit)
assert remapped == orig
assert path_map['target_set'] == ref_path
def test_reraise_visit(self):
root = {'A': 'b', 1: 2}
key_to_lower = lambda p, k, v: (k.lower(), v)
with pytest.raises(AttributeError):
remap(root, key_to_lower)
remapped = remap(root, key_to_lower, reraise_visit=False)
assert remapped['a'] == 'b'
assert remapped[1] == 2
def test_drop_nones(self):
orig = {'a': 1, 'b': None, 'c': [3, None, 4, None]}
ref = {'a': 1, 'c': [3, 4]}
drop_none = lambda p, k, v: v is not None
remapped = remap(orig, visit=drop_none)
assert remapped == ref
orig = [None] * 100
remapped = remap(orig, drop_none)
assert not remapped
def test_dict_to_omd(self):
def enter(path, key, value):
if isinstance(value, dict):
return OMD(), sorted(value.items())
return default_enter(path, key, value)
orig = [{'title': 'Wild Palms',
'ratings': {1: 1, 2: 3, 3: 5, 4: 6, 5: 3}},
{'title': 'Twin Peaks',
'ratings': {1: 3, 2: 2, 3: 8, 4: 12, 5: 15}}]
remapped = remap(orig, enter=enter)
assert remapped == orig
assert isinstance(remapped[0], OMD)
assert isinstance(remapped[0]['ratings'], OMD)
assert isinstance(remapped[1], OMD)
assert isinstance(remapped[1]['ratings'], OMD)
def test_sort_all_lists(self):
def exit(path, key, old_parent, new_parent, new_items):
# NB: in this case, I'd normally use *a, **kw
ret = default_exit(path, key, old_parent, new_parent, new_items)
if isinstance(ret, list):
ret.sort()
return ret
# NB: Airplane model numbers (Boeing and Airbus)
orig = [[[7, 0, 7],
[7, 2, 7],
[7, 7, 7],
[7, 3, 7]],
[[3, 8, 0],
[3, 2, 0],
[3, 1, 9],
[3, 5, 0]]]
ref = [[[0, 2, 3],
[0, 3, 5],
[0, 3, 8],
[1, 3, 9]],
[[0, 7, 7],
[2, 7, 7],
[3, 7, 7],
[7, 7, 7]]]
remapped = remap(orig, exit=exit)
assert remapped == ref
def test_collector_pattern(self):
all_interests = set()
def enter(path, key, value):
try:
all_interests.update(value['interests'])
except:
pass
return default_enter(path, key, value)
orig = [{'name': 'Kate',
'interests': ['theater', 'manga'],
'dads': [{'name': 'Chris',
'interests': ['biking', 'python']}]},
{'name': 'Avery',
'interests': ['museums', 'pears'],
'dads': [{'name': 'Kurt',
'interests': ['python', 'recursion']}]}]
ref = {'python', 'recursion', 'biking', 'museums',
'pears', 'theater', 'manga'}
remap(orig, enter=enter)
assert all_interests == ref
def test_add_length(self):
def exit(path, key, old_parent, new_parent, new_items):
ret = default_exit(path, key, old_parent, new_parent, new_items)
try:
ret['review_length'] = len(ret['review'])
except:
pass
return ret
orig = {'Star Trek':
{'TNG': {'stars': 10,
'review': "Episodic AND deep. <3 Data."},
'DS9': {'stars': 8.5,
'review': "Like TNG, but with a story and no Data."},
'ENT': {'stars': None,
'review': "Can't review what you can't watch."}},
'Babylon 5': {'stars': 6,
'review': "Sophomoric, like a bitter laugh."},
'Dr. Who': {'stars': None,
'review': "800 episodes is too many to review."}}
remapped = remap(orig, exit=exit)
assert (remapped['Star Trek']['TNG']['review_length']
< remapped['Star Trek']['DS9']['review_length'])
def test_prepop(self):
"""Demonstrating normalization and ID addition through prepopulating
the objects with an enter callback.
"""
base_obj = {'name': None,
'rank': None,
'id': 1}
def enter(path, key, value):
new_parent, new_items = default_enter(path, key, value)
try:
new_parent.update(base_obj)
base_obj['id'] += 1
except:
pass
return new_parent, new_items
orig = [{'name': 'Firefox', 'rank': 1},
{'name': 'Chrome', 'rank': 2},
{'name': 'IE'}]
ref = [{'name': 'Firefox', 'rank': 1, 'id': 1},
{'name': 'Chrome', 'rank': 2, 'id': 2},
{'name': 'IE', 'rank': None, 'id': 3}]
remapped = remap(orig, enter=enter)
assert remapped == ref
def test_remap_set(self):
# explicit test for sets to make sure #84 is covered
s = {1, 2, 3}
assert remap(s) == s
fs = frozenset([1, 2, 3])
assert remap(fs) == fs
def test_remap_file(self):
with open(CUR_PATH, 'rb') as f:
x = {'a': [1, 2, 3], 'f': [f]}
assert remap(x) == x
f.read()
assert remap(x) == x
f.close() # see #146
assert remap(x) == x
return
| TestRemap |
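The `TestRemap` sample above exercises `boltons.iterutils.remap`. For orientation, here is a minimal sketch of the drop-`None` pattern from `test_drop_nones`, assuming `boltons` is installed:

```python
# Minimal remap sketch, assuming boltons is installed (pip install boltons).
from boltons.iterutils import remap

orig = {"a": 1, "b": None, "c": [3, None, 4]}
# The visit callback keeps an item when it returns True and drops it otherwise.
cleaned = remap(orig, visit=lambda path, key, value: value is not None)
assert cleaned == {"a": 1, "c": [3, 4]}
```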
| python | ray-project__ray | python/ray/llm/_internal/batch/observability/usage_telemetry/usage.py | {"start": 1605, "end": 3871} |
class ____:
"""Named Actor to keep the state of all deployed models and record telemetry."""
def __init__(self):
self._tracking_telemetries: List[BatchModelTelemetry] = []
self._record_tag_func = record_extra_usage_tag
def _update_record_tag_func(self, record_tag_func: Callable) -> None:
self._record_tag_func = record_tag_func
def generate_report(self) -> Dict[str, str]:
return {
BatchTelemetryTags.LLM_BATCH_PROCESSOR_CONFIG_NAME: ",".join(
[t.processor_config_name for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_MODEL_ARCHITECTURE: ",".join(
[t.model_architecture for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_SIZE: ",".join(
[str(t.batch_size) for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_ACCELERATOR_TYPE: ",".join(
[t.accelerator_type for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_CONCURRENCY: ",".join(
[str(t.concurrency) for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_TASK_TYPE: ",".join(
[t.task_type for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_PIPELINE_PARALLEL_SIZE: ",".join(
[str(t.pipeline_parallel_size) for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_TENSOR_PARALLEL_SIZE: ",".join(
[str(t.tensor_parallel_size) for t in self._tracking_telemetries]
),
BatchTelemetryTags.LLM_BATCH_DATA_PARALLEL_SIZE: ",".join(
[str(t.data_parallel_size) for t in self._tracking_telemetries]
),
}
def record(self, telemetry: BatchModelTelemetry) -> None:
"""Append and record telemetries."""
from ray._common.usage.usage_lib import TagKey
self._tracking_telemetries.append(telemetry)
telemetry_dict = self.generate_report()
for key, value in telemetry_dict.items():
self._record_tag_func(TagKey.Value(key), value)
| _TelemetryAgent |
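For readers skimming `_TelemetryAgent`: the report is one comma-joined string per tag across all recorded entries. A self-contained sketch of that pattern (the `Telemetry` dataclass and the tag names are stand-ins, not Ray APIs):

```python
from dataclasses import dataclass
from typing import Dict, List

@dataclass
class Telemetry:  # stand-in for BatchModelTelemetry
    model_architecture: str
    batch_size: int

def generate_report(entries: List[Telemetry]) -> Dict[str, str]:
    # One comma-separated value per tag, in recording order.
    return {
        "model_architecture": ",".join(t.model_architecture for t in entries),
        "batch_size": ",".join(str(t.batch_size) for t in entries),
    }

report = generate_report([Telemetry("llama", 64), Telemetry("qwen", 128)])
assert report == {"model_architecture": "llama,qwen", "batch_size": "64,128"}
```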
| python | numba__numba | numba/cuda/tests/cudapy/test_ipc.py | {"start": 8029, "end": 10039} |
class ____(ContextResettingTestCase):
def test_staged(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# spawn new process for testing
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
if driver.USE_NV_BINDING:
self.assertEqual(ipch_recon.handle.reserved, ipch.handle.reserved)
else:
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# Test on every CUDA devices
for device_num in range(len(cuda.gpus)):
args = (ipch, device_num, result_queue)
proc = mpctx.Process(target=staged_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
def test_ipc_array(self):
for device_num in range(len(cuda.gpus)):
# prepare data for IPC
arr = np.random.random(10)
devarr = cuda.to_device(arr)
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, device_num, result_queue)
proc = ctx.Process(target=staged_ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
@windows_only
@skip_on_cudasim('Ipc not available in CUDASIM')
| TestIpcStaged |
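`TestIpcStaged` needs CUDA hardware, but the process-orchestration idiom it uses is generic: spawn a worker, read a `(success, payload)` tuple from a queue, and fail with the worker's error text when `success` is false. A CUDA-free sketch (the worker function is a stand-in):

```python
import multiprocessing as mp

def worker(value, result_queue):  # stand-in for staged_ipc_handle_test
    try:
        result_queue.put((True, value * 2))
    except Exception as exc:
        result_queue.put((False, str(exc)))

if __name__ == "__main__":
    mpctx = mp.get_context("spawn")
    result_queue = mpctx.Queue()
    proc = mpctx.Process(target=worker, args=(21, result_queue))
    proc.start()
    succ, out = result_queue.get()  # read before join to avoid queue deadlock
    proc.join(3)
    assert succ and out == 42
```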
| python | jazzband__django-polymorphic | src/polymorphic/tests/test_admin.py | {"start": 548, "end": 4678} |
class ____(AdminTestCase):
def test_admin_registration(self):
"""
Test how the registration works
"""
@self.register(Model2A)
class Model2Admin(PolymorphicParentModelAdmin):
base_model = Model2A
list_filter = (PolymorphicChildModelFilter,)
child_models = (Model2B, Model2C, Model2D)
@self.register(Model2B)
@self.register(Model2C)
@self.register(Model2D)
class Model2ChildAdmin(PolymorphicChildModelAdmin):
base_model = Model2A
base_fieldsets = (("Base fields", {"fields": ("field1",)}),)
# -- add page
ct_id = ContentType.objects.get_for_model(Model2D).pk
self.admin_get_add(Model2A) # shows type page
self.admin_get_add(Model2A, qs=f"?ct_id={ct_id}") # shows type page
self.admin_get_add(Model2A) # shows type page
self.admin_post_add(
Model2A,
{"field1": "A", "field2": "B", "field3": "C", "field4": "D"},
qs=f"?ct_id={ct_id}",
)
d_obj = Model2A.objects.all()[0]
assert d_obj.__class__ == Model2D
assert d_obj.field1 == "A"
assert d_obj.field2 == "B"
# -- list page
self.admin_get_changelist(Model2A) # asserts 200
# -- edit
response = self.admin_get_change(Model2A, d_obj.pk)
self.assertContains(response, "field4")
self.admin_post_change(
Model2A,
d_obj.pk,
{"field1": "A2", "field2": "B2", "field3": "C2", "field4": "D2"},
)
d_obj.refresh_from_db()
assert d_obj.field1 == "A2"
assert d_obj.field2 == "B2"
assert d_obj.field3 == "C2"
assert d_obj.field4 == "D2"
# -- history
self.admin_get_history(Model2A, d_obj.pk)
# -- delete
self.admin_get_delete(Model2A, d_obj.pk)
self.admin_post_delete(Model2A, d_obj.pk)
pytest.raises(Model2A.DoesNotExist, (lambda: d_obj.refresh_from_db()))
def test_admin_inlines(self):
"""
Test the registration of inline models.
"""
class InlineModelAChild(StackedPolymorphicInline.Child):
model = InlineModelA
class InlineModelBChild(StackedPolymorphicInline.Child):
model = InlineModelB
class Inline(StackedPolymorphicInline):
model = InlineModelA
child_inlines = (InlineModelAChild, InlineModelBChild)
@self.register(InlineParent)
class InlineParentAdmin(PolymorphicInlineSupportMixin, admin.ModelAdmin):
inlines = (Inline,)
parent = InlineParent.objects.create(title="FOO")
assert parent.inline_children.count() == 0
# -- get edit page
response = self.admin_get_change(InlineParent, parent.pk)
# Make sure the fieldset has the right data exposed in data-inline-formset
self.assertContains(response, "childTypes")
self.assertContains(response, escape('"type": "inlinemodela"'))
self.assertContains(response, escape('"type": "inlinemodelb"'))
# -- post edit page
self.admin_post_change(
InlineParent,
parent.pk,
{
"title": "FOO2",
"inline_children-INITIAL_FORMS": 0,
"inline_children-TOTAL_FORMS": 1,
"inline_children-MIN_NUM_FORMS": 0,
"inline_children-MAX_NUM_FORMS": 1000,
"inline_children-0-parent": parent.pk,
"inline_children-0-polymorphic_ctype": ContentType.objects.get_for_model(
InlineModelB
).pk,
"inline_children-0-field1": "A2",
"inline_children-0-field2": "B2",
},
)
parent.refresh_from_db()
assert parent.title == "FOO2"
assert parent.inline_children.count() == 1
child = parent.inline_children.all()[0]
assert child.__class__ == InlineModelB
assert child.field1 == "A2"
assert child.field2 == "B2"
| PolymorphicAdminTests |
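The admin sample registers several child models against one admin class by stacking a `register` decorator. A framework-free sketch of that stacked-decorator registration pattern (the registry and model classes here are stand-ins):

```python
registry = {}

def register(model):
    def decorator(admin_cls):
        registry[model] = admin_cls
        return admin_cls  # return the class unchanged so decorators can stack
    return decorator

class Model2B: ...
class Model2C: ...

@register(Model2B)
@register(Model2C)
class SharedChildAdmin: ...

assert registry == {Model2B: SharedChildAdmin, Model2C: SharedChildAdmin}
```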
| python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/environ/handler.py | {"start": 188, "end": 2130} |
class ____(JupyterHandler):
"""Handler for environment variables."""
auth_resource = "spyder-services"
def write_json(self, data, status=HTTPStatus.OK):
"""Write JSON response."""
self.set_status(status)
self.set_header("Content-Type", "application/json")
self.finish(orjson.dumps(data))
def write_value(self, value, status=HTTPStatus.OK):
"""Write a value response."""
self.set_status(status)
self.set_header("Content-Type", "text/plain")
self.finish(value)
def finish_with_status(self, status):
"""Finish the request with a specific status."""
self.set_status(status)
self.finish()
@web.authenticated
@authorized
def get(self, name=None):
"""Get the value of an environment variable."""
if name is None:
self.write_json(
os.environ.copy(),
)
return
value = os.environ.get(name)
if value is None:
raise web.HTTPError(
HTTPStatus.NOT_FOUND, f"Environment variable {name} not found",
)
self.write_value(value)
@web.authenticated
@authorized
def post(self, name):
"""Set the value of an environment variable."""
value = self.get_body_argument("value")
os.environ[name] = value
self.finish_with_status(HTTPStatus.CREATED)
@web.authenticated
@authorized
def delete(self, name):
"""Delete an environment variable."""
if name in os.environ:
del os.environ[name]
self.finish_with_status(HTTPStatus.NO_CONTENT)
else:
raise web.HTTPError(
HTTPStatus.NOT_FOUND, f"Environment variable {name} not found",
)
_name_regex = r"(?P<name>.+)"
handlers = [
(r"/environ", EnvVarsHandler),
(rf"/environ/{_name_regex}", EnvVarsHandler),
]
| EnvVarsHandler |
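Stripped of Tornado, `EnvVarsHandler` maps HTTP verbs onto `os.environ`: 404 for a missing name, 201 on create, 204 on delete. A framework-free sketch of those semantics:

```python
import os
from http import HTTPStatus

def get_env(name=None):
    if name is None:
        return HTTPStatus.OK, dict(os.environ)
    value = os.environ.get(name)
    if value is None:
        return HTTPStatus.NOT_FOUND, f"Environment variable {name} not found"
    return HTTPStatus.OK, value

def set_env(name, value):
    os.environ[name] = value
    return HTTPStatus.CREATED, None

def delete_env(name):
    if name not in os.environ:
        return HTTPStatus.NOT_FOUND, f"Environment variable {name} not found"
    del os.environ[name]
    return HTTPStatus.NO_CONTENT, None

assert set_env("DEMO_VAR", "1")[0] is HTTPStatus.CREATED
assert get_env("DEMO_VAR") == (HTTPStatus.OK, "1")
assert delete_env("DEMO_VAR")[0] is HTTPStatus.NO_CONTENT
```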
| python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {"start": 807274, "end": 808109} |
class ____(sgqlc.types.Type, Node, UniformResourceLocatable):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"actor",
"commit",
"created_at",
"merge_ref",
"merge_ref_name",
"pull_request",
)
actor = sgqlc.types.Field(Actor, graphql_name="actor")
commit = sgqlc.types.Field(Commit, graphql_name="commit")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
merge_ref = sgqlc.types.Field("Ref", graphql_name="mergeRef")
merge_ref_name = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="mergeRefName"
)
pull_request = sgqlc.types.Field(
sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest"
)
| MergedEvent |
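The sgqlc-generated class above binds snake_case Python attributes to camelCase GraphQL fields through `graphql_name`. A plain-function sketch of that naming convention (illustrative only; sgqlc performs this mapping internally):

```python
def to_graphql_name(field_name: str) -> str:
    # merge_ref_name -> mergeRefName
    head, *rest = field_name.split("_")
    return head + "".join(part.title() for part in rest)

assert to_graphql_name("merge_ref_name") == "mergeRefName"
assert to_graphql_name("created_at") == "createdAt"
assert to_graphql_name("actor") == "actor"
```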
| python | pyca__cryptography | tests/hazmat/primitives/test_aead.py | {"start": 1560, "end": 9601} |
class ____:
@pytest.mark.skipif(
sys.platform not in {"linux", "darwin"} or sys.maxsize < 2**31,
reason="mmap and 64-bit platform required",
)
def test_data_too_large(self):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = b"0" * 12
large_data = large_mmap()
with pytest.raises(OverflowError):
chacha.encrypt(nonce, large_data, b"")
with pytest.raises(OverflowError):
chacha.encrypt(nonce, b"", large_data)
def test_generate_key(self):
key = ChaCha20Poly1305.generate_key()
assert len(key) == 32
def test_bad_key(self, backend):
with pytest.raises(TypeError):
ChaCha20Poly1305(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
ChaCha20Poly1305(b"0" * 31)
@pytest.mark.parametrize(
("nonce", "data", "associated_data"),
[
[object(), b"data", b""],
[b"0" * 12, object(), b""],
[b"0" * 12, b"data", object()],
],
)
def test_params_not_bytes_encrypt(
self, nonce, data, associated_data, backend
):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
with pytest.raises(TypeError):
chacha.encrypt(nonce, data, associated_data)
with pytest.raises(TypeError):
chacha.decrypt(nonce, data, associated_data)
def test_nonce_not_12_bytes(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
with pytest.raises(ValueError):
chacha.encrypt(b"00", b"hello", b"")
with pytest.raises(ValueError):
buf = bytearray(16)
chacha.encrypt_into(b"00", b"hello", b"", buf)
with pytest.raises(ValueError):
chacha.decrypt(b"00", b"hello", b"")
with pytest.raises(ValueError):
buf = bytearray(1)
chacha.decrypt_into(b"00", b"hello", b"", buf)
def test_decrypt_data_too_short(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
with pytest.raises(InvalidTag):
chacha.decrypt(b"0" * 12, b"0", None)
with pytest.raises(InvalidTag):
buf = bytearray(16)
chacha.decrypt_into(b"0" * 12, b"0", None, buf)
def test_associated_data_none_equal_to_empty_bytestring(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
ct1 = chacha.encrypt(nonce, b"some_data", None)
ct2 = chacha.encrypt(nonce, b"some_data", b"")
assert ct1 == ct2
pt1 = chacha.decrypt(nonce, ct1, None)
pt2 = chacha.decrypt(nonce, ct2, b"")
assert pt1 == pt2
def test_openssl_vectors(self, subtests, backend):
vectors = load_vectors_from_file(
os.path.join("ciphers", "ChaCha20Poly1305", "openssl.txt"),
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
key = binascii.unhexlify(vector["key"])
nonce = binascii.unhexlify(vector["iv"])
aad = binascii.unhexlify(vector["aad"])
tag = binascii.unhexlify(vector["tag"])
pt = binascii.unhexlify(vector["plaintext"])
ct = binascii.unhexlify(vector["ciphertext"])
chacha = ChaCha20Poly1305(key)
if vector.get("result") == b"CIPHERFINAL_ERROR":
with pytest.raises(InvalidTag):
chacha.decrypt(nonce, ct + tag, aad)
else:
computed_pt = chacha.decrypt(nonce, ct + tag, aad)
assert computed_pt == pt
computed_ct = chacha.encrypt(nonce, pt, aad)
assert computed_ct == ct + tag
def test_boringssl_vectors(self, subtests, backend):
vectors = load_vectors_from_file(
os.path.join("ciphers", "ChaCha20Poly1305", "boringssl.txt"),
load_nist_vectors,
)
for vector in vectors:
with subtests.test():
key = binascii.unhexlify(vector["key"])
nonce = binascii.unhexlify(vector["nonce"])
if vector["ad"].startswith(b'"'):
aad = vector["ad"][1:-1]
else:
aad = binascii.unhexlify(vector["ad"])
tag = binascii.unhexlify(vector["tag"])
if vector["in"].startswith(b'"'):
pt = vector["in"][1:-1]
else:
pt = binascii.unhexlify(vector["in"])
ct = binascii.unhexlify(vector["ct"].strip(b'"'))
chacha = ChaCha20Poly1305(key)
computed_pt = chacha.decrypt(nonce, ct + tag, aad)
assert computed_pt == pt
computed_ct = chacha.encrypt(nonce, pt, aad)
assert computed_ct == ct + tag
def test_buffer_protocol(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
pt = b"encrypt me"
ad = b"additional"
nonce = os.urandom(12)
ct = chacha.encrypt(nonce, pt, ad)
computed_pt = chacha.decrypt(nonce, ct, ad)
assert computed_pt == pt
chacha2 = ChaCha20Poly1305(bytearray(key))
ct2 = chacha2.encrypt(bytearray(nonce), pt, ad)
assert ct2 == ct
computed_pt2 = chacha2.decrypt(bytearray(nonce), ct2, ad)
assert computed_pt2 == pt
def test_encrypt_into(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
pt = b"encrypt me"
ad = b"additional"
buf = bytearray(len(pt) + 16)
n = chacha.encrypt_into(nonce, pt, ad, buf)
assert n == len(pt) + 16
ct = chacha.encrypt(nonce, pt, ad)
assert buf == ct
@pytest.mark.parametrize(
("ptlen", "buflen"), [(10, 25), (10, 27), (15, 30), (20, 37)]
)
def test_encrypt_into_buffer_incorrect_size(self, ptlen, buflen, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
pt = b"x" * ptlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
chacha.encrypt_into(nonce, pt, None, buf)
def test_decrypt_into(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
pt = b"decrypt me"
ad = b"additional"
ct = chacha.encrypt(nonce, pt, ad)
buf = bytearray(len(pt))
n = chacha.decrypt_into(nonce, ct, ad, buf)
assert n == len(pt)
assert buf == pt
@pytest.mark.parametrize(
("ctlen", "buflen"), [(26, 9), (26, 11), (31, 14), (36, 21)]
)
def test_decrypt_into_buffer_incorrect_size(self, ctlen, buflen, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
ct = b"x" * ctlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
chacha.decrypt_into(nonce, ct, None, buf)
def test_decrypt_into_invalid_tag(self, backend):
key = ChaCha20Poly1305.generate_key()
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)
pt = b"some data"
ad = b"additional"
ct = chacha.encrypt(nonce, pt, ad)
# Corrupt the ciphertext
corrupted_ct = bytearray(ct)
corrupted_ct[0] ^= 1
buf = bytearray(len(pt))
with pytest.raises(InvalidTag):
chacha.decrypt_into(nonce, bytes(corrupted_ct), ad, buf)
@pytest.mark.skipif(
not _aead_supported(AESCCM),
reason="Does not support AESCCM",
)
| TestChaCha20Poly1305 |
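The AEAD test class above is dense; the happy path it guards reduces to a short round trip. A minimal sketch, assuming the `cryptography` package is installed:

```python
import os
from cryptography.hazmat.primitives.ciphers.aead import ChaCha20Poly1305

key = ChaCha20Poly1305.generate_key()  # always 32 bytes
chacha = ChaCha20Poly1305(key)
nonce = os.urandom(12)  # nonce must be exactly 12 bytes
ct = chacha.encrypt(nonce, b"encrypt me", b"additional")  # ciphertext || 16-byte tag
assert chacha.decrypt(nonce, ct, b"additional") == b"encrypt me"
```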
| python | getsentry__sentry | src/sentry/dynamic_sampling/rules/helpers/latest_releases.py | {"start": 2264, "end": 4442} |
class ____:
"""
Class that hides the complexity of extending boosted releases.
"""
boosted_releases: list[BoostedRelease] = field(default_factory=list)
def add_release(
self, cache_key: str, id: int, timestamp: float, environment: str | None
) -> None:
self.boosted_releases.append(
BoostedRelease(cache_key=cache_key, id=id, timestamp=timestamp, environment=environment)
)
def to_extended_boosted_releases(
self, project_id: int
) -> tuple[list[ExtendedBoostedRelease], list[str]]:
# We get release models in order to have all the information to extend the releases we get from the cache.
models = self._get_releases_models()
current_timestamp = datetime.now(timezone.utc).timestamp()
extended_boosted_releases = []
expired_boosted_releases = []
for boosted_release in self.boosted_releases:
release_model = models.get(boosted_release.id, None)
            # If we cannot find the release in the database, it has been deleted but is
            # still present in Redis, so we mark it as expired to have it removed during
            # cleanup.
if release_model is None:
expired_boosted_releases.append(boosted_release.cache_key)
continue
extended_boosted_release = boosted_release.extend(
release=release_model, project_id=project_id
)
if extended_boosted_release.is_active(current_timestamp):
extended_boosted_releases.append(extended_boosted_release)
else:
expired_boosted_releases.append(boosted_release.cache_key)
return extended_boosted_releases, expired_boosted_releases
def _get_last_release_ids(self) -> list[int]:
return [boosted_release.id for boosted_release in self.boosted_releases]
def _get_releases_models(self) -> dict[int, Release]:
return {
release.id: release
for release in Release.objects.filter(id__in=self._get_last_release_ids())
}
| BoostedReleases |
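`to_extended_boosted_releases` partitions cached boosts into active and expired entries by looking each release up in the database and checking a time window. A Django-free sketch of that partitioning (the dataclass, the window constant, and the `known_ids` lookup are illustrative stand-ins):

```python
from dataclasses import dataclass

BOOST_WINDOW_SECONDS = 3600  # assumed boost lifetime

@dataclass
class CachedRelease:
    id: int
    cache_key: str
    timestamp: float

def partition(releases, known_ids, now):
    active, expired = [], []
    for release in releases:
        # Deleted from the DB, or boosted too long ago -> expire the cache key.
        if release.id not in known_ids or now - release.timestamp > BOOST_WINDOW_SECONDS:
            expired.append(release.cache_key)
        else:
            active.append(release)
    return active, expired

releases = [CachedRelease(1, "k1", 0.0), CachedRelease(2, "k2", 10.0)]
active, expired = partition(releases, known_ids={2}, now=5.0)
assert [r.id for r in active] == [2] and expired == ["k1"]
```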
| python | streamlit__streamlit | lib/tests/streamlit/runtime/state/session_state_test.py | {"start": 13116, "end": 17890} |
class ____(DeltaGeneratorTestCase):
def test_widget_presence(self):
state = st.session_state
assert "foo" not in state
state.foo = "foo"
assert "foo" in state
assert state.foo == "foo"
def test_widget_outputs_dont_alias(self):
color = st.select_slider(
"Select a color of the rainbow",
options=[
["red", "orange"],
["yellow", "green"],
["blue", "indigo"],
["violet"],
],
key="color",
)
ctx = get_script_run_ctx()
assert ctx.session_state["color"] is not color
def test_callbacks_with_rerun():
"""Calling 'rerun' from within a widget callback
is disallowed and results in a warning.
"""
def script():
import streamlit as st
def callback():
st.session_state["message"] = "ran callback"
st.rerun()
st.checkbox("cb", on_change=callback)
at = AppTest.from_function(script).run()
at.checkbox[0].check().run()
assert at.session_state["message"] == "ran callback"
warning = at.warning[0]
assert "no-op" in warning.value
def test_fragment_callback_flag_resets_on_rerun_exception() -> None:
"""Ensure fragment callback context flag is cleared on RerunException.
This guards against leaving `ctx.in_fragment_callback` stuck to True if
a callback raises, which could contaminate subsequent runs.
"""
from streamlit.runtime.scriptrunner import RerunException
ss = SessionState()
wid = "w-frag"
# A callback that raises RerunException
def cb() -> None:
raise RerunException(None)
meta = WidgetMetadata(
id=wid,
deserializer=lambda v: v,
serializer=lambda v: v,
value_type="int_value",
callbacks={"change": cb},
fragment_id="frag-1",
)
ss._set_widget_metadata(meta)
ss._old_state[wid] = 1
ss._new_widget_state.set_from_value(wid, 2) # ensure _widget_changed is True
mock_ctx = MagicMock()
mock_ctx.in_fragment_callback = False
with patch(
"streamlit.runtime.state.session_state.get_script_run_ctx",
return_value=mock_ctx,
):
# Callbacks internally catch RerunException and log a warning.
ss._call_callbacks()
assert mock_ctx.in_fragment_callback is False
def test_updates():
at = AppTest.from_file("test_data/linked_sliders.py").run()
assert at.slider.values == [-100.0, -148.0]
assert at.markdown.values == ["Celsius `-100.0`", "Fahrenheit `-148.0`"]
# Both sliders update when first is changed
at.slider[0].set_value(0.0).run()
assert at.slider.values == [0.0, 32.0]
assert at.markdown.values == ["Celsius `0.0`", "Fahrenheit `32.0`"]
# Both sliders update when second is changed
at.slider[1].set_value(212.0).run()
assert at.slider.values == [100.0, 212.0]
assert at.markdown.values == ["Celsius `100.0`", "Fahrenheit `212.0`"]
# Sliders update when one is changed repeatedly
at.slider[0].set_value(0.0).run()
assert at.slider.values == [0.0, 32.0]
at.slider[0].set_value(100.0).run()
assert at.slider.values == [100.0, 212.0]
def test_serializable_check():
"""When the config option is on, adding unserializable data to session
state should result in an exception.
"""
with patch_config_options({"runner.enforceSerializableSessionState": True}):
def script():
import streamlit as st
def unserializable_data():
return lambda x: x
st.session_state.unserializable = unserializable_data()
at = AppTest.from_function(script).run()
assert at.exception
assert "pickle" in at.exception[0].value
def test_serializable_check_off():
"""When the config option is off, adding unserializable data to session
state should work without errors.
"""
with patch_config_options({"runner.enforceSerializableSessionState": False}):
def script():
import streamlit as st
def unserializable_data():
return lambda x: x
st.session_state.unserializable = unserializable_data()
at = AppTest.from_function(script).run()
assert not at.exception
def check_roundtrip(widget_id: str, value: Any) -> None:
session_state = _raw_session_state()
wid = session_state._get_widget_id(widget_id)
metadata = session_state._new_widget_state.widget_metadata[wid]
serializer = metadata.serializer
deserializer = metadata.deserializer
assert deserializer(serializer(value)) == value
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
| SessionStateTest |
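Several of the session-state tests drive a throwaway script through `AppTest`. A compact sketch of that pattern, assuming a recent `streamlit` with the `streamlit.testing.v1` API:

```python
from streamlit.testing.v1 import AppTest

def script():
    import streamlit as st
    if "count" not in st.session_state:
        st.session_state["count"] = 0
    if st.checkbox("bump"):
        st.session_state["count"] += 1

at = AppTest.from_function(script).run()
at.checkbox[0].check().run()  # simulate the user checking the box
assert at.session_state["count"] == 1
```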
| python | huggingface__transformers | src/transformers/data/datasets/glue.py | {"start": 1106, "end": 2164} |
class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
line.
"""
task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
data_dir: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
self.task_name = self.task_name.lower()
| GlueDataTrainingArguments |
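`GlueDataTrainingArguments` works because `HfArgumentParser` reads `field(metadata={"help": ...})` entries off a dataclass. The `__post_init__` normalization can be seen in isolation with plain dataclasses, no transformers install required:

```python
from dataclasses import dataclass, field

@dataclass
class DataArgs:  # stand-in for GlueDataTrainingArguments
    task_name: str = field(metadata={"help": "The name of the task to train on"})
    max_seq_length: int = field(default=128, metadata={"help": "Max input length"})

    def __post_init__(self):
        # Same normalization as the class above.
        self.task_name = self.task_name.lower()

args = DataArgs(task_name="MRPC")
assert args.task_name == "mrpc" and args.max_seq_length == 128
```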
| python | huggingface__transformers | src/transformers/models/olmo2/modular_olmo2.py | {"start": 12273, "end": 14002} |
class ____(OlmoDecoderLayer):
def __init__(self, config: Olmo2Config, layer_idx: int):
super().__init__(config, layer_idx=layer_idx)
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
del self.input_layernorm
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| Olmo2DecoderLayer |
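The distinctive part of `Olmo2DecoderLayer` is norm placement: each sublayer's output is normalized *before* the residual add, rather than normalizing the input as `OlmoDecoderLayer` does. A toy PyTorch sketch of just that ordering (`LayerNorm` stands in for `Olmo2RMSNorm`, and a single linear layer stands in for the MLP):

```python
import torch
import torch.nn as nn

class ToyBlock(nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.mlp = nn.Linear(dim, dim)
        self.post_feedforward_layernorm = nn.LayerNorm(dim)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        # Norm applied to the sublayer output, then the residual is added.
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        return residual + hidden_states

out = ToyBlock(8)(torch.randn(2, 4, 8))
assert out.shape == (2, 4, 8)
```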
| python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {"start": 1050394, "end": 1050867} |
class ____(sgqlc.types.Type):
"""Email attributes from External Identity"""
__schema__ = github_schema
__field_names__ = ("primary", "type", "value")
primary = sgqlc.types.Field(Boolean, graphql_name="primary")
"""Boolean to identify primary emails"""
type = sgqlc.types.Field(String, graphql_name="type")
"""Type of email"""
value = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="value")
"""Email id"""
| UserEmailMetadata |
| python | doocs__leetcode | lcci/17.25.Word Rectangle/Solution.py | {"start": 356, "end": 1697} |
class ____:
def maxRectangle(self, words: List[str]) -> List[str]:
def check(mat):
m, n = len(mat), len(mat[0])
ans = 1
for j in range(n):
node = trie
for i in range(m):
idx = ord(mat[i][j]) - ord("a")
if node.children[idx] is None:
return 0
node = node.children[idx]
if not node.is_end:
ans = 2
return ans
def dfs(ws):
nonlocal ans, max_s, max_l
if len(ws[0]) * max_l <= max_s or len(t) >= max_l:
return
for w in ws:
t.append(w)
st = check(t)
if st == 0:
t.pop()
continue
if st == 1 and max_s < len(t) * len(t[0]):
ans = t[:]
max_s = len(t) * len(t[0])
dfs(ws)
t.pop()
d = defaultdict(list)
trie = Trie()
max_l = 0
for w in words:
trie.insert(w)
max_l = max(max_l, len(w))
d[len(w)].append(w)
max_s = 0
ans = []
for ws in d.values():
t = []
dfs(ws)
return ans
| Solution |
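The `Solution` above leans on a `Trie` class that sits outside the extracted span: `check` walks `children` by letter index and reads `is_end`. A minimal sketch of the assumed trie:

```python
# Sketch of the Trie the solution assumes: a 26-way child array per node.
class Trie:
    def __init__(self):
        self.children = [None] * 26
        self.is_end = False

    def insert(self, word: str) -> None:
        node = self
        for ch in word:
            idx = ord(ch) - ord("a")
            if node.children[idx] is None:
                node.children[idx] = Trie()
            node = node.children[idx]
        node.is_end = True
```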
| python | streamlit__streamlit | lib/tests/streamlit/runtime/uploaded_file_manager_test.py | {"start": 1143, "end": 4279} |
class ____(unittest.TestCase):
def setUp(self):
self.mgr = MemoryUploadedFileManager("/mock/upload")
def test_added_file_id(self):
"""Presigned file URL should have a unique ID."""
info1, info2 = self.mgr.get_upload_urls("session", ["name1", "name1"])
assert info1.file_id != info2.file_id
def test_retrieve_added_file(self):
"""An added file should maintain all its source properties
except its ID."""
self.mgr.add_file("session", FILE_1)
self.mgr.add_file("session", FILE_2)
file1_from_storage, *rest_files = self.mgr.get_files("session", ["url1"])
assert len(rest_files) == 0
assert file1_from_storage.file_id == FILE_1.file_id
assert file1_from_storage.name == FILE_1.name
assert file1_from_storage.type == FILE_1.type
assert file1_from_storage.data == FILE_1.data
file2_from_storage, *other_files = self.mgr.get_files("session", ["url2"])
assert len(other_files) == 0
assert file2_from_storage.file_id == FILE_2.file_id
assert file2_from_storage.name == FILE_2.name
assert file2_from_storage.type == FILE_2.type
assert file2_from_storage.data == FILE_2.data
def test_remove_file(self):
# This should not error.
self.mgr.remove_file("non-session", "non-file-id")
self.mgr.add_file("session", FILE_1)
self.mgr.remove_file("session", FILE_1.file_id)
assert self.mgr.get_files("session", [FILE_1.file_id]) == []
# Remove the file again. It doesn't exist, but this isn't an error.
self.mgr.remove_file("session", FILE_1.file_id)
assert self.mgr.get_files("session", [FILE_1.file_id]) == []
self.mgr.add_file("session", FILE_1)
self.mgr.add_file("session", FILE_2)
self.mgr.remove_file("session", FILE_1.file_id)
assert self.mgr.get_files("session", [FILE_1.file_id, FILE_2.file_id]) == [
FILE_2
]
def test_remove_session_files(self):
# This should not error.
self.mgr.remove_session_files("non-report")
# Add two files with different session IDs, but the same widget ID.
self.mgr.add_file("session1", FILE_1)
self.mgr.add_file("session1", FILE_2)
self.mgr.add_file("session2", FILE_1)
self.mgr.remove_session_files("session1")
assert self.mgr.get_files("session1", [FILE_1.file_id, FILE_2.file_id]) == []
assert self.mgr.get_files("session2", [FILE_1.file_id]) == [FILE_1]
def test_cache_stats_provider(self):
"""Test CacheStatsProvider implementation."""
# Test empty manager
assert self.mgr.get_stats() == []
# Test manager with files
self.mgr.add_file("session1", FILE_1)
self.mgr.add_file("session1", FILE_2)
expected = [
CacheStat(
category_name="UploadedFileManager",
cache_name="",
byte_length=len(FILE_1.data) + len(FILE_2.data),
),
]
assert expected == self.mgr.get_stats()
| UploadedFileManagerTest |
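The test class relies on `FILE_1`/`FILE_2` fixtures and `MemoryUploadedFileManager` from outside the span. The per-session storage semantics under test are small enough to sketch standalone (this toy manager is illustrative, not Streamlit's implementation):

```python
from collections import defaultdict

class ToyUploadManager:
    def __init__(self):
        self._files = defaultdict(dict)  # session_id -> {file_id: record}

    def add_file(self, session_id, record):
        self._files[session_id][record["file_id"]] = record

    def get_files(self, session_id, file_ids):
        store = self._files.get(session_id, {})
        return [store[fid] for fid in file_ids if fid in store]

    def remove_file(self, session_id, file_id):
        # Removing a missing file is not an error, matching the tests above.
        self._files.get(session_id, {}).pop(file_id, None)

    def remove_session_files(self, session_id):
        self._files.pop(session_id, None)

mgr = ToyUploadManager()
mgr.add_file("session1", {"file_id": "url1", "data": b"x"})
mgr.remove_session_files("session1")
assert mgr.get_files("session1", ["url1"]) == []
```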
| python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_emr.py | {"start": 2059, "end": 2827} |
class ____:
def test_serialization(self):
job_flow_id = "test_job_flow_id"
waiter_delay = 30
waiter_max_attempts = 60
aws_conn_id = "aws_default"
trigger = EmrCreateJobFlowTrigger(
job_flow_id=job_flow_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.emr.EmrCreateJobFlowTrigger"
assert kwargs == {
"job_flow_id": "test_job_flow_id",
"waiter_delay": 30,
"waiter_max_attempts": 60,
"aws_conn_id": "aws_default",
}
| TestEmrCreateJobFlowTrigger |
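The trigger test checks Airflow's standard serialization contract: `serialize()` returns the trigger's import path plus the kwargs needed to rebuild it after a scheduler restart. A provider-free sketch of that contract (`ToyTrigger` is a stand-in):

```python
class ToyTrigger:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def serialize(self):
        cls = type(self)
        classpath = f"{cls.__module__}.{cls.__qualname__}"
        return classpath, dict(self.kwargs)

trigger = ToyTrigger(job_flow_id="test_job_flow_id", waiter_delay=30)
classpath, kwargs = trigger.serialize()
assert classpath.endswith("ToyTrigger")
assert kwargs == {"job_flow_id": "test_job_flow_id", "waiter_delay": 30}
```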
| python | django-guardian__django-guardian | guardian/backends.py | {"start": 1914, "end": 5792} |
class ____:
"""Django backend for checking object-level permissions."""
supports_object_permissions = True
supports_anonymous_user = True
supports_inactive_user = True
def authenticate(self, request: HttpRequest, username: Optional[str] = None, password: Optional[str] = None) -> Any:
return None
def has_perm(self, user_obj: Any, perm: str, obj: Optional[Model] = None) -> bool:
"""Check if a user has the permission for a given object.
Returns `True` if given `user_obj` has `perm` for `obj`.
If no `obj` is given, `False` is returned.
        The main difference from Django's `ModelBackend` is that we can pass an
        `obj` instance here, and `perm` doesn't have to contain `app_label`,
        as it can be retrieved from the given `obj`.
**Inactive user support**
If `user` is authenticated but inactive at the same time, all checks
always return `False`.
Note:
            Remember that if the user is not *active*, all checks return `False`.
Parameters:
user_obj (User): User instance.
perm (str): Permission string.
obj (Model): Model instance.
Returns:
`True` if `user_obj` has permission, `False` otherwise.
"""
# check if user_obj and object are supported
support, user_obj = check_support(user_obj, obj)
if not support:
return False
if "." in perm:
app_label, _ = perm.split(".", 1)
# TODO (David Graham): Check if obj is None or change the method signature
if app_label != obj._meta.app_label: # type: ignore[union-attr]
# Check the content_type app_label when permission
# and obj app labels don't match.
ctype = get_content_type(obj)
if app_label != ctype.app_label:
raise WrongAppError(
"Passed perm has app label of '%s' while "
"given obj has app label '%s' and given obj"
"content_type has app label '%s'" % (app_label, obj._meta.app_label, ctype.app_label) # type: ignore[union-attr]
)
check = ObjectPermissionChecker(user_obj)
return check.has_perm(perm, obj)
def get_group_permissions(self, user_obj: Any, obj: Optional[Model] = None) -> Iterable[str]:
"""Returns group permissions for a given object.
Parameters:
user_obj (User): User instance.
obj (Model): Django Model instance. If None, returns empty set
since this backend only handles object-level permissions.
Returns:
a set of permission strings that the given `user_obj` has for `obj`
through their group memberships.
"""
# This backend only handles object-level permissions
if obj is None:
return set()
# check if user_obj and object are supported
support, user_obj = check_support(user_obj, obj)
if not support:
return set()
check = ObjectPermissionChecker(user_obj)
return set(check.get_group_perms(obj))
def get_all_permissions(self, user_obj: Any, obj: Optional[Model] = None) -> Iterable[str]:
"""Returns all permissions for a given object.
Parameters:
user_obj (User): User instance.
obj (Model): Django Model instance.
Returns:
a set of permission strings that the given `user_obj` has for `obj`.
"""
# check if user_obj and object are supported
support, user_obj = check_support(user_obj, obj)
if not support:
return set()
check = ObjectPermissionChecker(user_obj)
return set(check.get_perms(obj))
| ObjectPermissionBackend |
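The subtle part of `has_perm` above is the app-label validation when `perm` is dotted. A Django-free sketch of just that check (`WrongAppError` and the label arguments are stand-ins for the real model metadata):

```python
class WrongAppError(Exception):
    pass

def check_app_label(perm: str, obj_app_label: str) -> str:
    """Return the bare permission codename, validating any 'app.perm' prefix."""
    if "." not in perm:
        return perm
    app_label, codename = perm.split(".", 1)
    if app_label != obj_app_label:
        raise WrongAppError(
            f"Passed perm has app label {app_label!r}, "
            f"but given obj has app label {obj_app_label!r}"
        )
    return codename

assert check_app_label("auth.change_user", "auth") == "change_user"
assert check_app_label("delete_user", "auth") == "delete_user"
```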
| python | ray-project__ray | rllib/policy/torch_policy_v2.py | {"start": 1890, "end": 49486} |
class ____(Policy):
"""PyTorch specific Policy class to use with RLlib."""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
*,
max_seq_len: int = 20,
):
"""Initializes a TorchPolicy instance.
Args:
observation_space: Observation space of the policy.
action_space: Action space of the policy.
config: The Policy's config dict.
max_seq_len: Max sequence length for LSTM training.
"""
self.framework = config["framework"] = "torch"
self._loss_initialized = False
super().__init__(observation_space, action_space, config)
# Create model.
model, dist_class = self._init_model_and_dist_class()
# Create multi-GPU model towers, if necessary.
# - The central main model will be stored under self.model, residing
# on self.device (normally, a CPU).
# - Each GPU will have a copy of that model under
# self.model_gpu_towers, matching the devices in self.devices.
# - Parallelization is done by splitting the train batch and passing
# it through the model copies in parallel, then averaging over the
# resulting gradients, applying these averages on the main model and
# updating all towers' weights from the main model.
# - In case of just one device (1 (fake or real) GPU or 1 CPU), no
# parallelization will be done.
# Get devices to build the graph on.
num_gpus = self._get_num_gpus_for_policy()
gpu_ids = list(range(torch.cuda.device_count()))
logger.info(f"Found {len(gpu_ids)} visible cuda devices.")
# Place on one or more CPU(s) when either:
# - Fake GPU mode.
# - num_gpus=0 (either set by user or we are in local_mode=True).
# - No GPUs available.
if config["_fake_gpus"] or num_gpus == 0 or not gpu_ids:
self.device = torch.device("cpu")
self.devices = [self.device for _ in range(int(math.ceil(num_gpus)) or 1)]
self.model_gpu_towers = [
model if i == 0 else copy.deepcopy(model)
for i in range(int(math.ceil(num_gpus)) or 1)
]
if hasattr(self, "target_model"):
self.target_models = {
m: self.target_model for m in self.model_gpu_towers
}
self.model = model
# Place on one or more actual GPU(s), when:
# - num_gpus > 0 (set by user) AND
# - local_mode=False AND
# - actual GPUs available AND
# - non-fake GPU mode.
else:
# We are a remote worker (WORKER_MODE=1):
# GPUs should be assigned to us by ray.
if ray._private.worker._mode() == ray._private.worker.WORKER_MODE:
gpu_ids = ray.get_gpu_ids()
if len(gpu_ids) < num_gpus:
raise ValueError(
"TorchPolicy was not able to find enough GPU IDs! Found "
f"{gpu_ids}, but num_gpus={num_gpus}."
)
self.devices = [
torch.device("cuda:{}".format(i))
for i, id_ in enumerate(gpu_ids)
if i < num_gpus
]
self.device = self.devices[0]
ids = [id_ for i, id_ in enumerate(gpu_ids) if i < num_gpus]
self.model_gpu_towers = []
for i, _ in enumerate(ids):
model_copy = copy.deepcopy(model)
self.model_gpu_towers.append(model_copy.to(self.devices[i]))
if hasattr(self, "target_model"):
self.target_models = {
m: copy.deepcopy(self.target_model).to(self.devices[i])
for i, m in enumerate(self.model_gpu_towers)
}
self.model = self.model_gpu_towers[0]
self.dist_class = dist_class
self.unwrapped_model = model # used to support DistributedDataParallel
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when calling the model
# first, then its value function (e.g. in a loss function), in
# between of which another model call is made (e.g. to compute an
# action).
self._lock = threading.RLock()
self._state_inputs = self.model.get_initial_state()
self._is_recurrent = len(tree.flatten(self._state_inputs)) > 0
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
self.exploration = self._create_exploration()
self._optimizers = force_list(self.optimizer())
# Backward compatibility workaround so Policy will call self.loss()
# directly.
# TODO (jungong): clean up after all policies are migrated to new sub-class
# implementation.
self._loss = None
# Store, which params (by index within the model's list of
# parameters) should be updated per optimizer.
# Maps optimizer idx to set or param indices.
self.multi_gpu_param_groups: List[Set[int]] = []
main_params = {p: i for i, p in enumerate(self.model.parameters())}
for o in self._optimizers:
param_indices = []
for pg_idx, pg in enumerate(o.param_groups):
for p in pg["params"]:
param_indices.append(main_params[p])
self.multi_gpu_param_groups.append(set(param_indices))
# Create n sample-batch buffers (num_multi_gpu_tower_stacks), each
# one with m towers (num_gpus).
num_buffers = self.config.get("num_multi_gpu_tower_stacks", 1)
self._loaded_batches = [[] for _ in range(num_buffers)]
# If set, means we are using distributed allreduce during learning.
self.distributed_world_size = None
self.batch_divisibility_req = self.get_batch_divisibility_req()
self.max_seq_len = max_seq_len
# If model is an RLModule it won't have tower_stats instead there will be a
# self.tower_state[model] -> dict for each tower.
self.tower_stats = {}
if not hasattr(self.model, "tower_stats"):
for model in self.model_gpu_towers:
self.tower_stats[model] = {}
def loss_initialized(self):
return self._loss_initialized
@OverrideToImplementCustomLogic
@override(Policy)
def loss(
self,
model: ModelV2,
dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch,
) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss function.
Args:
model: The Model to calculate the loss for.
dist_class: The action distr. class.
train_batch: The training data.
Returns:
Loss tensor given the input batch.
"""
raise NotImplementedError
@OverrideToImplementCustomLogic
def action_sampler_fn(
self,
model: ModelV2,
*,
obs_batch: TensorType,
state_batches: TensorType,
**kwargs,
) -> Tuple[TensorType, TensorType, TensorType, List[TensorType]]:
"""Custom function for sampling new actions given policy.
Args:
model: Underlying model.
obs_batch: Observation tensor batch.
state_batches: Action sampling state batch.
Returns:
Sampled action
Log-likelihood
Action distribution inputs
Updated state
"""
return None, None, None, None
@OverrideToImplementCustomLogic
def action_distribution_fn(
self,
model: ModelV2,
*,
obs_batch: TensorType,
state_batches: TensorType,
**kwargs,
) -> Tuple[TensorType, type, List[TensorType]]:
"""Action distribution function for this Policy.
Args:
model: Underlying model.
obs_batch: Observation tensor batch.
state_batches: Action sampling state batch.
Returns:
Distribution input.
ActionDistribution class.
State outs.
"""
return None, None, None
@OverrideToImplementCustomLogic
def make_model(self) -> ModelV2:
"""Create model.
Note: only one of make_model or make_model_and_action_dist
can be overridden.
Returns:
ModelV2 model.
"""
return None
@OverrideToImplementCustomLogic
def make_model_and_action_dist(
self,
) -> Tuple[ModelV2, Type[TorchDistributionWrapper]]:
"""Create model and action distribution function.
Returns:
ModelV2 model.
ActionDistribution class.
"""
return None, None
@OverrideToImplementCustomLogic
def get_batch_divisibility_req(self) -> int:
"""Get batch divisibility request.
Returns:
Size N. A sample batch must be of size K*N.
"""
# By default, any sized batch is ok, so simply return 1.
return 1
@OverrideToImplementCustomLogic
def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Stats function. Returns a dict of statistics.
Args:
train_batch: The SampleBatch (already) used for training.
Returns:
The stats dict.
"""
return {}
@OverrideToImplementCustomLogic_CallToSuperRecommended
def extra_grad_process(
self, optimizer: "torch.optim.Optimizer", loss: TensorType
) -> Dict[str, TensorType]:
"""Called after each optimizer.zero_grad() + loss.backward() call.
Called for each self._optimizers/loss-value pair.
Allows for gradient processing before optimizer.step() is called.
E.g. for gradient clipping.
Args:
optimizer: A torch optimizer object.
loss: The loss tensor associated with the optimizer.
Returns:
            A dict with information on the gradient processing step.
"""
return {}
@OverrideToImplementCustomLogic_CallToSuperRecommended
def extra_compute_grad_fetches(self) -> Dict[str, Any]:
"""Extra values to fetch and return from compute_gradients().
Returns:
Extra fetch dict to be added to the fetch dict of the
`compute_gradients` call.
"""
        return {LEARNER_STATS_KEY: {}}  # e.g., stats, td error, etc.
@OverrideToImplementCustomLogic_CallToSuperRecommended
def extra_action_out(
self,
input_dict: Dict[str, TensorType],
state_batches: List[TensorType],
model: TorchModelV2,
action_dist: TorchDistributionWrapper,
) -> Dict[str, TensorType]:
"""Returns dict of extra info to include in experience batch.
Args:
input_dict: Dict of model input tensors.
state_batches: List of state tensors.
model: Reference to the model object.
action_dist: Torch action dist object
to get log-probs (e.g. for already sampled actions).
Returns:
Extra outputs to return in a `compute_actions_from_input_dict()`
call (3rd return value).
"""
return {}
@override(Policy)
@OverrideToImplementCustomLogic_CallToSuperRecommended
def postprocess_trajectory(
self,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[Any, SampleBatch]] = None,
episode=None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
sample_batch: The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[Episode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
return sample_batch
@OverrideToImplementCustomLogic
def optimizer(
self,
) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]:
"""Custom the local PyTorch optimizer(s) to use.
Returns:
The local PyTorch optimizer(s) to use for this Policy.
"""
if hasattr(self, "config"):
optimizers = [
torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
]
else:
optimizers = [torch.optim.Adam(self.model.parameters())]
if self.exploration:
optimizers = self.exploration.get_exploration_optimizer(optimizers)
return optimizers
def _init_model_and_dist_class(self):
if is_overridden(self.make_model) and is_overridden(
self.make_model_and_action_dist
):
raise ValueError(
"Only one of make_model or make_model_and_action_dist "
"can be overridden."
)
if is_overridden(self.make_model):
model = self.make_model()
dist_class, _ = ModelCatalog.get_action_dist(
self.action_space, self.config["model"], framework=self.framework
)
elif is_overridden(self.make_model_and_action_dist):
model, dist_class = self.make_model_and_action_dist()
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
self.action_space, self.config["model"], framework=self.framework
)
model = ModelCatalog.get_model_v2(
obs_space=self.observation_space,
action_space=self.action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework=self.framework,
)
# Compile the model, if requested by the user.
if self.config.get("torch_compile_learner"):
if (
torch is not None
and version.parse(torch.__version__) < TORCH_COMPILE_REQUIRED_VERSION
):
raise ValueError("`torch.compile` is not supported for torch < 2.0.0!")
lw = "learner" if self.config.get("worker_index") else "worker"
model = torch.compile(
model,
backend=self.config.get(
f"torch_compile_{lw}_dynamo_backend", "inductor"
),
dynamic=False,
mode=self.config.get(f"torch_compile_{lw}_dynamo_mode"),
)
return model, dist_class
@override(Policy)
def compute_actions_from_input_dict(
self,
input_dict: Dict[str, TensorType],
explore: bool = None,
timestep: Optional[int] = None,
**kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
seq_lens = None
with torch.no_grad():
# Pass lazy (torch) tensor dict to Model as `input_dict`.
input_dict = self._lazy_tensor_dict(input_dict)
input_dict.set_training(True)
# Pack internal state inputs into (separate) list.
state_batches = [
input_dict[k] for k in input_dict.keys() if "state_in" in k[:8]
]
# Calculate RNN sequence lengths.
if state_batches:
seq_lens = torch.tensor(
[1] * len(state_batches[0]),
dtype=torch.long,
device=state_batches[0].device,
)
return self._compute_action_helper(
input_dict, state_batches, seq_lens, explore, timestep
)
@override(Policy)
def compute_actions(
self,
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes=None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs,
) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]]:
with torch.no_grad():
seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
input_dict = self._lazy_tensor_dict(
{
SampleBatch.CUR_OBS: obs_batch,
"is_training": False,
}
)
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = np.asarray(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = np.asarray(prev_reward_batch)
state_batches = [
convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
]
return self._compute_action_helper(
input_dict, state_batches, seq_lens, explore, timestep
)
@with_lock
@override(Policy)
def compute_log_likelihoods(
self,
actions: Union[List[TensorStructType], TensorStructType],
obs_batch: Union[List[TensorStructType], TensorStructType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Optional[
Union[List[TensorStructType], TensorStructType]
] = None,
prev_reward_batch: Optional[
Union[List[TensorStructType], TensorStructType]
] = None,
actions_normalized: bool = True,
in_training: bool = True,
) -> TensorType:
if is_overridden(self.action_sampler_fn) and not is_overridden(
self.action_distribution_fn
):
raise ValueError(
"Cannot compute log-prob/likelihood w/o an "
"`action_distribution_fn` and a provided "
"`action_sampler_fn`!"
)
with torch.no_grad():
input_dict = self._lazy_tensor_dict(
{SampleBatch.CUR_OBS: obs_batch, SampleBatch.ACTIONS: actions}
)
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
state_batches = [
convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
]
if self.exploration:
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(explore=False)
# Action dist class and inputs are generated via custom function.
if is_overridden(self.action_distribution_fn):
dist_inputs, dist_class, state_out = self.action_distribution_fn(
self.model,
obs_batch=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=False,
is_training=False,
)
action_dist = dist_class(dist_inputs, self.model)
# Default action-dist inputs calculation.
else:
dist_class = self.dist_class
dist_inputs, _ = self.model(input_dict, state_batches, seq_lens)
action_dist = dist_class(dist_inputs, self.model)
# Normalize actions if necessary.
actions = input_dict[SampleBatch.ACTIONS]
if not actions_normalized and self.config["normalize_actions"]:
actions = normalize_action(actions, self.action_space_struct)
log_likelihoods = action_dist.logp(actions)
return log_likelihoods
@with_lock
@override(Policy)
def learn_on_batch(self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
# Set Model to train mode.
if self.model:
self.model.train()
# Callback handling.
learn_stats = {}
self.callbacks.on_learn_on_batch(
policy=self, train_batch=postprocessed_batch, result=learn_stats
)
# Compute gradients (will calculate all losses and `backward()`
# them to get the grads).
grads, fetches = self.compute_gradients(postprocessed_batch)
# Step the optimizers.
self.apply_gradients(_directStepOptimizerSingleton)
self.num_grad_updates += 1
if self.model and hasattr(self.model, "metrics"):
fetches["model"] = self.model.metrics()
else:
fetches["model"] = {}
fetches.update(
{
"custom_metrics": learn_stats,
NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count,
NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
# -1, b/c we have to measure this diff before we do the update above.
DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
self.num_grad_updates
- 1
- (postprocessed_batch.num_grad_updates or 0)
),
}
)
return fetches
@override(Policy)
def load_batch_into_buffer(
self,
batch: SampleBatch,
buffer_index: int = 0,
) -> int:
# Set the is_training flag of the batch.
batch.set_training(True)
# Shortcut for 1 CPU only: Store batch in `self._loaded_batches`.
if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
pad_batch_to_sequences_of_same_size(
batch=batch,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
_enable_new_api_stack=False,
padding="zero",
)
self._lazy_tensor_dict(batch)
self._loaded_batches[0] = [batch]
return len(batch)
# Batch (len=28, seq-lens=[4, 7, 4, 10, 3]):
# 0123 0123456 0123 0123456789ABC
# 1) split into n per-GPU sub batches (n=2).
# [0123 0123456] [012] [3 0123456789 ABC]
# (len=14, 14 seq-lens=[4, 7, 3] [1, 10, 3])
slices = batch.timeslices(num_slices=len(self.devices))
# 2) zero-padding (max-seq-len=10).
# - [0123000000 0123456000 0120000000]
# - [3000000000 0123456789 ABC0000000]
for slice in slices:
pad_batch_to_sequences_of_same_size(
batch=slice,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
_enable_new_api_stack=False,
padding="zero",
)
# 3) Load splits into the given buffer (consisting of n GPUs).
slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)]
self._loaded_batches[buffer_index] = slices
# Return loaded samples per-device.
return len(slices[0])
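    # A minimal sketch (hypothetical helper, not part of the original API):
    # the arithmetic behind the worked example above. After zero-padding,
    # every sequence in a shard occupies exactly `max_seq_len` slots, so the
    # padded shard length is simply num_sequences * max_seq_len.
    @staticmethod
    def _example_padded_shard_len(seq_lens, max_seq_len):
        # e.g. seq_lens=[4, 7, 3], max_seq_len=10 -> 30 padded timesteps
        assert all(s <= max_seq_len for s in seq_lens)
        return len(seq_lens) * max_seq_len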
@override(Policy)
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
        if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
return sum(len(b) for b in self._loaded_batches[buffer_index])
@override(Policy)
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
if not self._loaded_batches[buffer_index]:
raise ValueError(
"Must call Policy.load_batch_into_buffer() before "
"Policy.learn_on_loaded_batch()!"
)
# Get the correct slice of the already loaded batch to use,
# based on offset and batch size.
device_batch_size = self.config.get("minibatch_size")
if device_batch_size is None:
device_batch_size = self.config.get(
"sgd_minibatch_size",
self.config["train_batch_size"],
)
device_batch_size //= len(self.devices)
# Set Model to train mode.
if self.model_gpu_towers:
for t in self.model_gpu_towers:
t.train()
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_batches`.
if len(self.devices) == 1 and self.devices[0].type == "cpu":
assert buffer_index == 0
if device_batch_size >= len(self._loaded_batches[0][0]):
batch = self._loaded_batches[0][0]
else:
batch = self._loaded_batches[0][0][offset : offset + device_batch_size]
return self.learn_on_batch(batch)
if len(self.devices) > 1:
# Copy weights of main model (tower-0) to all other towers.
state_dict = self.model.state_dict()
# Just making sure tower-0 is really the same as self.model.
assert self.model_gpu_towers[0] is self.model
for tower in self.model_gpu_towers[1:]:
tower.load_state_dict(state_dict)
if device_batch_size >= sum(len(s) for s in self._loaded_batches[buffer_index]):
device_batches = self._loaded_batches[buffer_index]
else:
device_batches = [
b[offset : offset + device_batch_size]
for b in self._loaded_batches[buffer_index]
]
# Callback handling.
batch_fetches = {}
for i, batch in enumerate(device_batches):
custom_metrics = {}
self.callbacks.on_learn_on_batch(
policy=self, train_batch=batch, result=custom_metrics
)
batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics}
# Do the (maybe parallelized) gradient calculation step.
tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches)
# Mean-reduce gradients over GPU-towers (do this on CPU: self.device).
all_grads = []
for i in range(len(tower_outputs[0][0])):
if tower_outputs[0][0][i] is not None:
all_grads.append(
torch.mean(
torch.stack([t[0][i].to(self.device) for t in tower_outputs]),
dim=0,
)
)
else:
all_grads.append(None)
# Set main model's grads to mean-reduced values.
for i, p in enumerate(self.model.parameters()):
p.grad = all_grads[i]
self.apply_gradients(_directStepOptimizerSingleton)
self.num_grad_updates += 1
for i, (model, batch) in enumerate(zip(self.model_gpu_towers, device_batches)):
batch_fetches[f"tower_{i}"].update(
{
LEARNER_STATS_KEY: self.stats_fn(batch),
"model": model.metrics(),
NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
# -1, b/c we have to measure this diff before we do the update
# above.
DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
self.num_grad_updates - 1 - (batch.num_grad_updates or 0)
),
}
)
batch_fetches.update(self.extra_compute_grad_fetches())
return batch_fetches
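    # A minimal sketch (hypothetical helper, not part of the original API)
    # showing the tower-gradient mean-reduction used above in isolation:
    # stack the i-th gradient from every tower on one device and average
    # over the tower dimension, passing `None` grads through untouched.
    # Assumes all towers share the same parameter layout.
    @staticmethod
    def _example_mean_reduce_tower_grads(per_tower_grads, device):
        reduced = []
        for grads_i in zip(*per_tower_grads):
            if grads_i[0] is None:
                reduced.append(None)
            else:
                reduced.append(
                    torch.mean(torch.stack([g.to(device) for g in grads_i]), dim=0)
                )
        return reduced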
@with_lock
@override(Policy)
def compute_gradients(self, postprocessed_batch: SampleBatch) -> ModelGradients:
assert len(self.devices) == 1
# If not done yet, see whether we have to zero-pad this batch.
if not postprocessed_batch.zero_padded:
pad_batch_to_sequences_of_same_size(
batch=postprocessed_batch,
max_seq_len=self.max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
_enable_new_api_stack=False,
padding="zero",
)
postprocessed_batch.set_training(True)
self._lazy_tensor_dict(postprocessed_batch, device=self.devices[0])
# Do the (maybe parallelized) gradient calculation step.
tower_outputs = self._multi_gpu_parallel_grad_calc([postprocessed_batch])
all_grads, grad_info = tower_outputs[0]
grad_info["allreduce_latency"] /= len(self._optimizers)
grad_info.update(self.stats_fn(postprocessed_batch))
fetches = self.extra_compute_grad_fetches()
return all_grads, dict(fetches, **{LEARNER_STATS_KEY: grad_info})
@override(Policy)
def apply_gradients(self, gradients: ModelGradients) -> None:
if gradients == _directStepOptimizerSingleton:
for i, opt in enumerate(self._optimizers):
opt.step()
else:
# TODO(sven): Not supported for multiple optimizers yet.
assert len(self._optimizers) == 1
for g, p in zip(gradients, self.model.parameters()):
if g is not None:
if torch.is_tensor(g):
p.grad = g.to(self.device)
else:
p.grad = torch.from_numpy(g).to(self.device)
self._optimizers[0].step()
def get_tower_stats(self, stats_name: str) -> List[TensorStructType]:
"""Returns list of per-tower stats, copied to this Policy's device.
Args:
stats_name: The name of the stats to average over (this str
must exist as a key inside each tower's `tower_stats` dict).
Returns:
The list of stats tensor (structs) of all towers, copied to this
Policy's device.
Raises:
AssertionError: If the `stats_name` cannot be found in any one
of the tower's `tower_stats` dicts.
"""
data = []
for model in self.model_gpu_towers:
if self.tower_stats:
tower_stats = self.tower_stats[model]
else:
tower_stats = model.tower_stats
if stats_name in tower_stats:
data.append(
tree.map_structure(
lambda s: s.to(self.device), tower_stats[stats_name]
)
)
assert len(data) > 0, (
f"Stats `{stats_name}` not found in any of the towers (you have "
f"{len(self.model_gpu_towers)} towers in total)! Make "
"sure you call the loss function on at least one of the towers."
)
return data
@override(Policy)
def get_weights(self) -> ModelWeights:
return {k: v.cpu().detach().numpy() for k, v in self.model.state_dict().items()}
@override(Policy)
def set_weights(self, weights: ModelWeights) -> None:
weights = convert_to_torch_tensor(weights, device=self.device)
self.model.load_state_dict(weights)
@override(Policy)
def is_recurrent(self) -> bool:
return self._is_recurrent
@override(Policy)
def num_state_tensors(self) -> int:
return len(self.model.get_initial_state())
@override(Policy)
def get_initial_state(self) -> List[TensorType]:
return [s.detach().cpu().numpy() for s in self.model.get_initial_state()]
@override(Policy)
@OverrideToImplementCustomLogic_CallToSuperRecommended
def get_state(self) -> PolicyState:
# Legacy Policy state (w/o torch.nn.Module and w/o PolicySpec).
state = super().get_state()
state["_optimizer_variables"] = []
for i, o in enumerate(self._optimizers):
optim_state_dict = convert_to_numpy(o.state_dict())
state["_optimizer_variables"].append(optim_state_dict)
# Add exploration state.
if self.exploration:
# This is not compatible with RLModules, which have a method
# `forward_exploration` to specify custom exploration behavior.
state["_exploration_state"] = self.exploration.get_state()
return state
@override(Policy)
@OverrideToImplementCustomLogic_CallToSuperRecommended
def set_state(self, state: PolicyState) -> None:
# Set optimizer vars first.
optimizer_vars = state.get("_optimizer_variables", None)
if optimizer_vars:
assert len(optimizer_vars) == len(self._optimizers)
for o, s in zip(self._optimizers, optimizer_vars):
# Torch optimizer param_groups include things like beta, etc. These
# parameters should be left as scalar and not converted to tensors.
# otherwise, torch.optim.step() will start to complain.
optim_state_dict = {"param_groups": s["param_groups"]}
optim_state_dict["state"] = convert_to_torch_tensor(
s["state"], device=self.device
)
o.load_state_dict(optim_state_dict)
# Set exploration's state.
if hasattr(self, "exploration") and "_exploration_state" in state:
self.exploration.set_state(state=state["_exploration_state"])
# Restore global timestep.
self.global_timestep = state["global_timestep"]
# Then the Policy's (NN) weights and connectors.
super().set_state(state)
@override(Policy)
def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
"""Exports the Policy's Model to local directory for serving.
Creates a TorchScript model and saves it.
Args:
export_dir: Local writable directory or filename.
            onnx: If given, will export model in ONNX format. The
                value of this parameter sets the ONNX OpSet version to use.
"""
os.makedirs(export_dir, exist_ok=True)
if onnx:
self._lazy_tensor_dict(self._dummy_batch)
# Provide dummy state inputs if not an RNN (torch cannot jit with
# returned empty internal states list).
if "state_in_0" not in self._dummy_batch:
self._dummy_batch["state_in_0"] = self._dummy_batch[
SampleBatch.SEQ_LENS
] = np.array([1.0])
seq_lens = self._dummy_batch[SampleBatch.SEQ_LENS]
state_ins = []
i = 0
while "state_in_{}".format(i) in self._dummy_batch:
state_ins.append(self._dummy_batch["state_in_{}".format(i)])
i += 1
dummy_inputs = {
k: self._dummy_batch[k]
for k in self._dummy_batch.keys()
if k != "is_training"
}
file_name = os.path.join(export_dir, "model.onnx")
torch.onnx.export(
self.model,
(dummy_inputs, state_ins, seq_lens),
file_name,
export_params=True,
opset_version=onnx,
do_constant_folding=True,
input_names=list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS],
output_names=["output", "state_outs"],
dynamic_axes={
k: {0: "batch_size"}
for k in list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS]
},
)
# Save the torch.Model (architecture and weights, so it can be retrieved
# w/o access to the original (custom) Model or Policy code).
else:
filename = os.path.join(export_dir, "model.pt")
try:
torch.save(self.model, f=filename)
except Exception:
if os.path.exists(filename):
os.remove(filename)
logger.warning(ERR_MSG_TORCH_POLICY_CANNOT_SAVE_MODEL)
@override(Policy)
def import_model_from_h5(self, import_file: str) -> None:
"""Imports weights into torch model."""
return self.model.import_from_h5(import_file)
@with_lock
def _compute_action_helper(
self, input_dict, state_batches, seq_lens, explore, timestep
):
"""Shared forward pass logic (w/ and w/o trajectory view API).
Returns:
A tuple consisting of a) actions, b) state_out, c) extra_fetches.
The input_dict is modified in-place to include a numpy copy of the computed
actions under `SampleBatch.ACTIONS`.
"""
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
# Switch to eval mode.
if self.model:
self.model.eval()
extra_fetches = dist_inputs = logp = None
if is_overridden(self.action_sampler_fn):
action_dist = None
actions, logp, dist_inputs, state_out = self.action_sampler_fn(
self.model,
obs_batch=input_dict,
state_batches=state_batches,
explore=explore,
timestep=timestep,
)
else:
# Call the exploration before_compute_actions hook.
self.exploration.before_compute_actions(explore=explore, timestep=timestep)
if is_overridden(self.action_distribution_fn):
dist_inputs, dist_class, state_out = self.action_distribution_fn(
self.model,
obs_batch=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=explore,
timestep=timestep,
is_training=False,
)
else:
dist_class = self.dist_class
dist_inputs, state_out = self.model(input_dict, state_batches, seq_lens)
if not (
isinstance(dist_class, functools.partial)
or issubclass(dist_class, TorchDistributionWrapper)
):
raise ValueError(
"`dist_class` ({}) not a TorchDistributionWrapper "
"subclass! Make sure your `action_distribution_fn` or "
"`make_model_and_action_dist` return a correct "
"distribution class.".format(dist_class.__name__)
)
action_dist = dist_class(dist_inputs, self.model)
# Get the exploration action from the forward results.
actions, logp = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
# Add default and custom fetches.
if extra_fetches is None:
extra_fetches = self.extra_action_out(
input_dict, state_batches, self.model, action_dist
)
# Action-dist inputs.
if dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
# Action-logp and action-prob.
if logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = torch.exp(logp.float())
extra_fetches[SampleBatch.ACTION_LOGP] = logp
# Update our global timestep by the batch size.
self.global_timestep += len(input_dict[SampleBatch.CUR_OBS])
return convert_to_numpy((actions, state_out, extra_fetches))
def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None):
if not isinstance(postprocessed_batch, SampleBatch):
postprocessed_batch = SampleBatch(postprocessed_batch)
postprocessed_batch.set_get_interceptor(
functools.partial(convert_to_torch_tensor, device=device or self.device)
)
return postprocessed_batch
def _multi_gpu_parallel_grad_calc(
self, sample_batches: List[SampleBatch]
) -> List[Tuple[List[TensorType], GradInfoDict]]:
"""Performs a parallelized loss and gradient calculation over the batch.
Splits up the given train batch into n shards (n=number of this
Policy's devices) and passes each data shard (in parallel) through
the loss function using the individual devices' models
(self.model_gpu_towers). Then returns each tower's outputs.
Args:
sample_batches: A list of SampleBatch shards to
calculate loss and gradients for.
Returns:
A list (one item per device) of 2-tuples, each with 1) gradient
list and 2) grad info dict.
"""
assert len(self.model_gpu_towers) == len(sample_batches)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(shard_idx, model, sample_batch, device):
torch.set_grad_enabled(grad_enabled)
try:
with NullContextManager() if device.type == "cpu" else torch.cuda.device( # noqa: E501
device
):
loss_out = force_list(
self.loss(model, self.dist_class, sample_batch)
)
# Call Model's custom-loss with Policy loss outputs and
# train_batch.
if hasattr(model, "custom_loss"):
loss_out = model.custom_loss(loss_out, sample_batch)
assert len(loss_out) == len(self._optimizers)
# Loop through all optimizers.
grad_info = {"allreduce_latency": 0.0}
parameters = list(model.parameters())
all_grads = [None for _ in range(len(parameters))]
for opt_idx, opt in enumerate(self._optimizers):
# Erase gradients in all vars of the tower that this
# optimizer would affect.
param_indices = self.multi_gpu_param_groups[opt_idx]
for param_idx, param in enumerate(parameters):
if param_idx in param_indices and param.grad is not None:
param.grad.data.zero_()
# Recompute gradients of loss over all variables.
loss_out[opt_idx].backward(retain_graph=True)
grad_info.update(
self.extra_grad_process(opt, loss_out[opt_idx])
)
grads = []
# Note that return values are just references;
# Calling zero_grad would modify the values.
for param_idx, param in enumerate(parameters):
if param_idx in param_indices:
if param.grad is not None:
grads.append(param.grad)
all_grads[param_idx] = param.grad
if self.distributed_world_size:
start = time.time()
if torch.cuda.is_available():
# Sadly, allreduce_coalesced does not work with
# CUDA yet.
for g in grads:
torch.distributed.all_reduce(
g, op=torch.distributed.ReduceOp.SUM
)
else:
torch.distributed.all_reduce_coalesced(
grads, op=torch.distributed.ReduceOp.SUM
)
for param_group in opt.param_groups:
for p in param_group["params"]:
if p.grad is not None:
p.grad /= self.distributed_world_size
grad_info["allreduce_latency"] += time.time() - start
with lock:
results[shard_idx] = (all_grads, grad_info)
except Exception as e:
import traceback
with lock:
results[shard_idx] = (
ValueError(
                            str(e)
+ "\n traceback"
+ traceback.format_exc()
+ "\n"
+ "In tower {} on device {}".format(shard_idx, device)
),
e,
)
# Single device (GPU) or fake-GPU case (serialize for better
# debugging).
if len(self.devices) == 1 or self.config["_fake_gpus"]:
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
):
_worker(shard_idx, model, sample_batch, device)
# Raise errors right away for better debugging.
last_result = results[len(results) - 1]
if isinstance(last_result[0], ValueError):
raise last_result[0] from last_result[1]
# Multi device (GPU) case: Parallelize via threads.
else:
threads = [
threading.Thread(
target=_worker, args=(shard_idx, model, sample_batch, device)
)
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Gather all threads' outputs and return.
outputs = []
for shard_idx in range(len(sample_batches)):
output = results[shard_idx]
if isinstance(output[0], Exception):
raise output[0] from output[1]
outputs.append(results[shard_idx])
return outputs
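    # A minimal sketch (hypothetical helper, not part of the original API)
    # of the all-reduce step above in miniature: SUM-reduce each gradient
    # across the process group, then divide by the world size to obtain the
    # mean. Assumes `torch.distributed` has already been initialized.
    @staticmethod
    def _example_allreduce_mean(grads, world_size):
        for g in grads:
            torch.distributed.all_reduce(g, op=torch.distributed.ReduceOp.SUM)
            g /= world_size
        return grads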
|
TorchPolicyV2
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_parsers.py
|
{
"start": 840,
"end": 3703
}
|
class ____(ParserTestCase, CompressorTestCase):
parser_cls = "compressor.parser.Html5LibParser"
# Special test variants required since xml.etree holds attributes
# as a plain dictionary, e.g. key order is unpredictable.
def test_css_split(self):
split = self.css_node.split_contents()
out0 = (
SOURCE_FILE,
os.path.join(settings.COMPRESS_ROOT, "css", "one.css"),
"css/one.css",
"{http://www.w3.org/1999/xhtml}link",
{"rel": "stylesheet", "href": "/static/css/one.css", "type": "text/css"},
)
self.assertEqual(out0, split[0][:3] + (split[0][3].tag, split[0][3].attrib))
out1 = (
SOURCE_HUNK,
"p { border:5px solid green;}",
None,
'<style type="text/css">p { border:5px solid green;}</style>',
)
self.assertEqual(
out1, split[1][:3] + (self.css_node.parser.elem_str(split[1][3]),)
)
out2 = (
SOURCE_FILE,
os.path.join(settings.COMPRESS_ROOT, "css", "two.css"),
"css/two.css",
"{http://www.w3.org/1999/xhtml}link",
{"rel": "stylesheet", "href": "/static/css/two.css", "type": "text/css"},
)
self.assertEqual(out2, split[2][:3] + (split[2][3].tag, split[2][3].attrib))
def test_js_split(self):
split = self.js_node.split_contents()
out0 = (
SOURCE_FILE,
os.path.join(settings.COMPRESS_ROOT, "js", "one.js"),
"js/one.js",
"{http://www.w3.org/1999/xhtml}script",
{"src": "/static/js/one.js", "type": "text/javascript"},
None,
)
self.assertEqual(
out0, split[0][:3] + (split[0][3].tag, split[0][3].attrib, split[0][3].text)
)
out1 = (
SOURCE_HUNK,
'obj.value = "value";',
None,
"{http://www.w3.org/1999/xhtml}script",
{"type": "text/javascript"},
'obj.value = "value";',
)
self.assertEqual(
out1, split[1][:3] + (split[1][3].tag, split[1][3].attrib, split[1][3].text)
)
@override_settings(COMPRESS_ENABLED=False)
def test_css_return_if_off(self):
        # Yes, they are semantically equal but attributes might be
        # scrambled in unpredictable order. A more elaborate check
        # would require parsing both arguments with a different parser
        # and then evaluating the result, which would no longer be
        # a meaningful unit test.
self.assertEqual(len(self.css), len(self.css_node.output()))
@override_settings(COMPRESS_PRECOMPILERS=(), COMPRESS_ENABLED=False)
def test_js_return_if_off(self):
# As above.
self.assertEqual(len(self.js), len(self.js_node.output()))
|
Html5LibParserTests
|
python
|
pexpect__pexpect
|
examples/chess2.py
|
{
"start": 1251,
"end": 5021
}
|
class ____:
def __init__(self, engine = "/usr/local/bin/gnuchess -a -h 1"):
self.child = pexpect.spawn (engine)
self.term = ANSI.ANSI ()
#self.child.expect ('Chess')
#if self.child.after != 'Chess':
# raise IOError, 'incompatible chess program'
#self.term.process_list (self.child.before)
#self.term.process_list (self.child.after)
self.last_computer_move = ''
def read_until_cursor (self, r,c, e=0):
'''Eventually something like this should move into the screen class or
a subclass. Maybe a combination of pexpect and screen...
'''
fout = open ('log','a')
while self.term.cur_r != r or self.term.cur_c != c:
try:
k = self.child.read(1, 10)
            except Exception:  # do not shadow the echo flag `e`
                print('EXCEPTION, (r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c))
                sys.stdout.flush()
                continue  # nothing was read this pass; retry the loop
self.term.process (k)
fout.write ('(r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c))
fout.flush()
if e:
sys.stdout.write (k)
sys.stdout.flush()
if self.term.cur_r == r and self.term.cur_c == c:
fout.close()
return 1
print('DIDNT EVEN HIT.')
fout.close()
return 1
def expect_region (self):
'''This is another method that would be moved into the
screen class.
'''
pass
def do_scan (self):
fout = open ('log','a')
while 1:
c = self.child.read(1,10)
self.term.process (c)
fout.write ('(r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c))
fout.flush()
sys.stdout.write (c)
sys.stdout.flush()
def do_move (self, move, e = 0):
time.sleep(1)
self.read_until_cursor (19,60, e)
self.child.sendline (move)
def wait (self, color):
while 1:
r = self.term.get_region (14,50,14,60)[0]
r = r.strip()
if r == color:
return
time.sleep (1)
def parse_computer_move (self, s):
i = s.find ('is: ')
cm = s[i+3:i+9]
return cm
def get_computer_move (self, e = 0):
time.sleep(1)
self.read_until_cursor (19,60, e)
time.sleep(1)
r = self.term.get_region (17,50,17,62)[0]
cm = self.parse_computer_move (r)
return cm
def switch (self):
print('switching')
self.child.sendline ('switch')
def set_depth (self, depth):
self.child.sendline ('depth')
self.child.expect ('depth=')
self.child.sendline ('%d' % depth)
def quit(self):
self.child.sendline ('quit')
def LOG (s):
print(s)
sys.stdout.flush ()
fout = open ('moves.log', 'a')
fout.write (s + '\n')
fout.close()
print('Starting...')
black = Chess()
white = Chess()
white.read_until_cursor (19,60,1)
white.switch()
done = 0
while not done:
white.wait ('Black')
move_white = white.get_computer_move(1)
LOG ( 'move white:'+ move_white )
black.do_move (move_white)
black.wait ('White')
move_black = black.get_computer_move()
LOG ( 'move black:'+ move_black )
white.do_move (move_black, 1)
white.quit()
black.quit()
|
Chess
|
python
|
facebookresearch__faiss
|
tests/test_build_blocks.py
|
{
"start": 12095,
"end": 13082
}
|
class ____(unittest.TestCase):
def test_keep_min(self):
self.run_test(False)
def test_keep_max(self):
self.run_test(True)
def run_test(self, keep_max):
nq = 100
nb = 1000
restab = faiss.rand((nq, nb), 123)
ids = faiss.randint((nq, nb), 1324, 10000)
all_rh = {}
for nstep in 1, 3:
rh = faiss.ResultHeap(nq, 10, keep_max=keep_max)
for i in range(nstep):
i0, i1 = i * nb // nstep, (i + 1) * nb // nstep
D = restab[:, i0:i1].copy()
I = ids[:, i0:i1].copy()
rh.add_result(D, I)
rh.finalize()
if keep_max:
assert np.all(rh.D[:, :-1] >= rh.D[:, 1:])
else:
assert np.all(rh.D[:, :-1] <= rh.D[:, 1:])
all_rh[nstep] = rh
np.testing.assert_equal(all_rh[1].D, all_rh[3].D)
np.testing.assert_equal(all_rh[1].I, all_rh[3].I)
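    # A minimal usage sketch (hypothetical method, not part of the test):
    # the basic ResultHeap flow for a single block of results. Add one
    # (distances, ids) chunk, finalize, then read the per-query top-10
    # from rh.D / rh.I.
    def example_single_block(self):
        D = faiss.rand((5, 100), 42)
        I = faiss.randint((5, 100), 43, 1000)
        rh = faiss.ResultHeap(5, 10)
        rh.add_result(D, I)
        rh.finalize()
        return rh.D, rh.I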
|
TestResultHeap
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/generative.py
|
{
"start": 890,
"end": 1057
}
|
class ____:
return_metadata: bool = False
images: Optional[Iterable[str]] = None
image_properties: Optional[List[str]] = None
|
_GenerativeConfigRuntimeOptions
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/test_hf_safetensor_e2e.py
|
{
"start": 9584,
"end": 11927
}
|
class ____(DTensorTestBase):
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_consolidate_to_one_file(self) -> None:
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
import safetensors
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
mesh_shape = (self.world_size,)
mesh_1d = init_device_mesh(self.device_type, mesh_shape)
# Create local tensor with row-wise sharding
rows_per_rank = global_tensor.shape[0] // self.world_size
start_row = self.rank * rows_per_rank
end_row = start_row + rows_per_rank
local_tensor = global_tensor[start_row:end_row].clone()
# Create DTensor with row-wise sharding
dtensor = DTensor.from_local(
local_tensor,
device_mesh=mesh_1d,
placements=[Shard(0)],
shape=global_tensor.shape,
stride=(4, 1),
)
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
checkpoint_dir = self.temp_dir
state_dict_to_save = {"dtensor": dtensor}
dist_cp.save(
state_dict=state_dict_to_save,
storage_writer=dist_cp.HuggingFaceStorageWriter(
path=checkpoint_dir,
save_distributed=True,
enable_consolidation=True,
),
)
dist.barrier()
if self.rank == 0:
file_path = os.path.join(checkpoint_dir, "model-00001-of-00001.safetensors")
loaded_dict = safetensors.torch.load_file(file_path)
self.assertEqual(loaded_dict.keys(), {"dtensor"})
self.assertTrue(torch.equal(loaded_dict["dtensor"], global_tensor))
dist.barrier()
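    # A minimal sketch (hypothetical helper, not part of the test) of the
    # row-wise sharding arithmetic used above: with `world_size` ranks,
    # rank `r` owns rows [r * rows_per_rank, (r + 1) * rows_per_rank).
    @staticmethod
    def _example_row_shard_bounds(num_rows, world_size, rank):
        rows_per_rank = num_rows // world_size
        start_row = rank * rows_per_rank
        return start_row, start_row + rows_per_rank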
ONE_D_PLACEMENTS = [
[Shard(0)],
[Replicate()],
]
ONE_D_TO_ONE_D_PLACEMENTS = [
([Replicate()], [Shard(0)]),
([Shard(0)], [Replicate()]),
]
TWO_D_PLACEMENTS = [
[Replicate(), Replicate()],
[Replicate(), Shard(0)],
[Shard(0), Replicate()],
[Shard(0), Shard(0)],
]
TWO_D_TO_TWO_D_PLACEMENTS = []
for p1 in TWO_D_PLACEMENTS:
for p2 in TWO_D_PLACEMENTS:
if p1 != p2:
TWO_D_TO_TWO_D_PLACEMENTS.append((p1, p2))
@instantiate_parametrized_tests
|
TestDistributedHFSafetensorsConsolidation
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatter/marker/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8549
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter.marker.colorbar"
_path_str = "scatter.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
        range [*min*, *max*], where "min" and "max" are dtick values
        describing some zoom level. It is possible to omit the "min" or
        "max" value by passing "null".
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
        string - dtickformat for the described zoom level; the same as
        "tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
            range [*min*, *max*], where "min" and "max" are dtick
            values describing some zoom level. It is possible to omit
            the "min" or "max" value by passing "null".
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
            string - dtickformat for the described zoom level; the
            same as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter.marker
.colorbar.Tickformatstop`
dtickrange
            range [*min*, *max*], where "min" and "max" are dtick
            values describing some zoom level. It is possible to omit
            the "min" or "max" value by passing "null".
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
            string - dtickformat for the described zoom level; the
            same as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
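# A minimal usage sketch (assumes the class is exported as Tickformatstop,
# per the constructor docstring above; the range and format are illustrative):
_example_stop = Tickformatstop(
    dtickrange=[60000, 3600000],  # dtick between one minute and one hour (ms)
    value="%H:%M",  # use hour:minute tick labels within that zoom range
)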
|
Tickformatstop
|
python
|
huggingface__transformers
|
src/transformers/models/video_llama_3/video_processing_video_llama_3.py
|
{
"start": 3105,
"end": 17379
}
|
class ____(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
size = {"shortest_edge": 128 * 28 * 28, "longest_edge": 28 * 28 * 768}
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
do_resize = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
min_pixels = 128 * 28 * 28
max_pixels = 28 * 28 * 768
patch_size = 14
temporal_patch_size = 1
merge_size = 2
min_frames = 4
max_frames = 180
do_sample_frames = False # Set to False for BC, recommended to set `True` in new models
valid_kwargs = VideoLlama3VideoProcessorInitKwargs
model_input_names = ["pixel_values_videos", "video_grid_thw", "video_merge_sizes", "video_compression_mask"]
use_token_compression = True
return_metadata = True
def __init__(self, **kwargs: Unpack[VideoLlama3VideoProcessorInitKwargs]):
size = kwargs.pop("size", None)
min_pixels = kwargs.pop("min_pixels", None)
max_pixels = kwargs.pop("max_pixels", None)
# backward compatibility: override size with min_pixels and max_pixels if they are provided
size = self.size if size is None else size
if min_pixels is not None:
size["shortest_edge"] = min_pixels
size.pop("min_pixels", None)
if max_pixels is not None:
size["longest_edge"] = max_pixels
size.pop("max_pixels", None)
if "shortest_edge" not in size or "longest_edge" not in size:
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
super().__init__(size=size, min_pixels=min_pixels, max_pixels=max_pixels, **kwargs)
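    # A minimal sketch of the backward-compat override above (hypothetical
    # helper, not part of the original API): explicit min_pixels/max_pixels
    # win over size["shortest_edge"] / size["longest_edge"].
    @staticmethod
    def _example_resolve_size(size, min_pixels=None, max_pixels=None):
        size = dict(size)
        if min_pixels is not None:
            size["shortest_edge"] = min_pixels
        if max_pixels is not None:
            size["longest_edge"] = max_pixels
        return size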
def sample_frames(
self,
metadata: VideoMetadata,
temporal_patch_size: Optional[int] = None,
min_frames: Optional[int] = None,
max_frames: Optional[int] = None,
num_frames: Optional[int] = None,
fps: Optional[Union[int, float]] = None,
**kwargs,
):
"""
Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
and `fps` are mutually exclusive.
Args:
metadata (`VideoMetadata`):
Metadata of the video containing information about total duration, fps and total number of frames.
temporal_patch_size (`int`, *optional*):
The temporal patch size of the vision encoder. Number of sampled frames will be rounded to be divisible by frame factor.
min_frames (`int`, *optional*):
The minimum number of frames that can be sampled.
max_frames (`int`, *optional*):
The maximum number of frames that can be sampled.
num_frames (`int`, *optional*):
Maximum number of frames to sample. Defaults to `self.num_frames`.
fps (`int` or `float`, *optional*):
Target frames to sample per second. Defaults to `self.fps`.
Returns:
np.ndarray:
Indices to sample video frames.
"""
if fps is not None and num_frames is not None:
raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
num_frames = num_frames if num_frames is not None else self.num_frames
fps = fps if fps is not None else self.fps
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
min_frames = min_frames if min_frames is not None else self.min_frames
max_frames = max_frames if max_frames is not None else self.max_frames
total_num_frames = metadata.total_num_frames
        # Round num_frames to the frame factor; if only fps is given, derive
        # num_frames from the video duration instead.
if num_frames is not None:
num_frames = round(num_frames / temporal_patch_size) * temporal_patch_size
elif fps is not None:
if metadata is None or metadata.fps is None:
raise ValueError(
"Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
"Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
)
max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
num_frames = total_num_frames / metadata.fps * fps
num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
if num_frames > total_num_frames:
raise ValueError(
f"Video can't be sampled. The inferred `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
"Decrease `num_frames` or `fps` for sampling."
)
if num_frames is not None:
indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
else:
indices = torch.arange(0, total_num_frames).int()
return indices
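    # A minimal sketch (hypothetical helper, not part of the original API)
    # of the clamp-and-round rule applied to `num_frames` above: clamp to
    # [min_frames, min(max_frames, total_num_frames)], then floor to a
    # multiple of temporal_patch_size.
    @staticmethod
    def _example_round_num_frames(
        num_frames, temporal_patch_size, min_frames, max_frames, total_num_frames
    ):
        num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
        return math.floor(num_frames / temporal_patch_size) * temporal_patch_size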
def _preprocess(
self,
videos: list["torch.Tensor"],
do_convert_rgb: bool,
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
min_pixels: Optional[int] = None,
max_pixels: Optional[int] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
use_token_compression: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
device: Optional["torch.Tensor"] = None,
**kwargs,
):
# Group videos by size for batched resizing
grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
resized_videos_grouped = {}
for shape, stacked_videos in grouped_videos.items():
height, width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
resized_height, resized_width = height, width
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=patch_size * merge_size,
min_pixels=min_pixels,
max_pixels=max_pixels // shape[0],
)
stacked_videos = self.resize(
image=stacked_videos,
size=SizeDict(height=resized_height, width=resized_width),
interpolation=interpolation,
)
resized_videos_grouped[shape] = stacked_videos
resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
# Group videos by size for further processing
# Needed in case do_resize is False, or resize returns videos with different sizes
grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
processed_videos_grouped = {}
processed_grids = {}
for shape, stacked_videos in grouped_videos.items():
resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
# Fused rescale and normalize
stacked_videos = self.rescale_and_normalize(
stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
patches = stacked_videos
# Check that videos have `num_frames` divisible by `temporal_patch_size`
if patches.shape[1] % temporal_patch_size != 0:
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
patches = torch.cat([patches, repeats], dim=1)
batch_size, grid_t, channel = patches.shape[:3]
grid_t = grid_t // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
patches = patches.view(
batch_size,
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
flatten_patches = patches.reshape(
batch_size,
grid_t * grid_h * grid_w,
channel * temporal_patch_size * patch_size * patch_size,
)
processed_videos_grouped[shape] = flatten_patches
processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
processed_grids = reorder_videos(processed_grids, grouped_videos_index)
pixel_values_videos = torch.cat(processed_videos, dim=0)
video_grid_thw = torch.tensor(processed_grids)
video_merge_sizes = torch.tensor([merge_size] * video_grid_thw.size(0)).to(video_grid_thw)
if use_token_compression:
video_compression_mask = self._get_compression_mask(
pixel_values_videos=pixel_values_videos,
video_grid_thw=video_grid_thw,
video_merge_sizes=video_merge_sizes,
)
else:
num_video_tokens = video_grid_thw.prod(-1).sum() // (merge_size**2)
video_compression_mask = torch.ones(
(num_video_tokens,), dtype=torch.bool, device=pixel_values_videos.device
)
return BatchFeature(
data={
"pixel_values_videos": pixel_values_videos,
"video_grid_thw": video_grid_thw,
"video_merge_sizes": video_merge_sizes,
"video_compression_mask": video_compression_mask,
},
tensor_type=return_tensors,
)
def get_num_of_video_patches(self, num_frames: int, height: int, width: int, videos_kwargs=None):
"""
        A utility that returns the number of video patches for a given video size.
Args:
num_frames (`int`):
Number of frames in the input video.
height (`int`):
Height of the input video.
width (`int`):
Width of the input video.
            videos_kwargs (`dict`, *optional*):
Any kwargs to override defaults of the video processor.
Returns:
            `int`: The number of video patches for the given video size.
"""
        videos_kwargs = videos_kwargs or {}
        min_pixels = videos_kwargs.get("min_pixels", None) or self.size["shortest_edge"]
max_pixels = videos_kwargs.get("max_pixels", None) or self.size["longest_edge"]
patch_size = videos_kwargs.get("patch_size", None) or self.patch_size
merge_size = videos_kwargs.get("merge_size", None) or self.merge_size
temporal_patch_size = videos_kwargs.get("temporal_patch_size", None) or self.temporal_patch_size
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(
height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
)
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
grid_t = num_frames // temporal_patch_size
return grid_t * grid_h * grid_w
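    # A worked example (hypothetical helper, not part of the original API):
    # with patch_size=14 and merge_size=2 the resize factor is 28, so a
    # 280x448 frame maps to a 20x32 patch grid; 8 frames at
    # temporal_patch_size=1 therefore yield 8 * 20 * 32 = 5120 patches.
    @staticmethod
    def _example_patch_count():
        patch_size, merge_size, temporal_patch_size = 14, 2, 1
        grid_h, grid_w = 280 // patch_size, 448 // patch_size  # 20, 32
        grid_t = 8 // temporal_patch_size
        return grid_t * grid_h * grid_w  # 5120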
def _get_compression_mask(
self,
pixel_values_videos: torch.FloatTensor,
video_grid_thw: torch.LongTensor,
video_merge_sizes: torch.LongTensor,
threshold: Optional[float] = 0.1,
min_tokens: Optional[int] = 1,
) -> torch.BoolTensor:
"""
Get the compression mask for video tokens based on pixel differences.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
video_merge_sizes (`torch.Tensor` of shape `(num_videos,)`):
The spatial downsampling ratio of each video feature.
threshold (`float`, *optional*, defaults to 0.1):
The threshold to determine whether a token should be kept based on pixel differences.
min_tokens (`int`, *optional*, defaults to 1):
The minimum number of tokens to keep for each frame.
"""
videos = pixel_values_videos.split(video_grid_thw.prod(dim=1).tolist(), dim=0)
compression_masks = []
for images, grid_size, merge_size in zip(videos, video_grid_thw, video_merge_sizes):
t, h, w = grid_size
if t == 1:
num_tokens = images.size(0) // (merge_size**2)
compression_masks.append(torch.ones((num_tokens,), dtype=torch.bool, device=images.device))
else:
# NOTE: video token compressor
images = images.view(t, (h // merge_size) * (w // merge_size), -1)
pixel_diff = images[1:] - images[:-1]
pixel_diff = torch.abs(pixel_diff).mean(dim=-1) * 255
pixel_diff = torch.cat([torch.full_like(pixel_diff[0:1], threshold + 1), pixel_diff], dim=0)
mask = pixel_diff > threshold
padding_ids = torch.nonzero(mask.sum(dim=1) < min_tokens)[:, 0]
mask[padding_ids, :min_tokens] = 1
compression_masks.append(mask.flatten())
return torch.cat(compression_masks)
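    # A minimal sketch (hypothetical helper, not part of the original API)
    # of the core keep/drop rule above on a toy (t, num_tokens, dim) tensor
    # with t >= 2: a token is kept when its mean absolute difference to the
    # previous frame, scaled by 255, exceeds `threshold`; the first frame is
    # always kept. Returns a (t, num_tokens) boolean keep-mask.
    @staticmethod
    def _example_pixel_diff_mask(images, threshold=0.1):
        pixel_diff = torch.abs(images[1:] - images[:-1]).mean(dim=-1) * 255
        pixel_diff = torch.cat(
            [torch.full_like(pixel_diff[0:1], threshold + 1), pixel_diff], dim=0
        )
        return pixel_diff > threshold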
__all__ = ["VideoLlama3VideoProcessor"]
|
VideoLlama3VideoProcessor
|
python
|
dask__distributed
|
distributed/spill.py
|
{
"start": 802,
"end": 1268
}
|
class ____(NamedTuple):
"""Size of a key/value pair when spilled to disk, in bytes"""
# output of sizeof()
memory: int
# pickled size
disk: int
def __add__(self, other: SpilledSize) -> SpilledSize: # type: ignore
return SpilledSize(self.memory + other.memory, self.disk + other.disk)
def __sub__(self, other: SpilledSize) -> SpilledSize:
return SpilledSize(self.memory - other.memory, self.disk - other.disk)
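# A minimal usage sketch (illustrative byte counts): the elementwise tuple
# arithmetic keeps the memory/disk totals in sync as keys are spilled and
# unspilled.
_example_total = SpilledSize(memory=0, disk=0)
_example_total += SpilledSize(memory=1024, disk=700)  # one key spilled to disk
_example_total -= SpilledSize(memory=1024, disk=700)  # the same key unspilled
assert _example_total == SpilledSize(0, 0)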
|
SpilledSize
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/vertex_ai/test_experiment_service.py
|
{
"start": 3726,
"end": 4839
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("ExperimentRunHook"))
def test_execute(self, mock_hook):
op = CreateExperimentRunOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
experiment_run_tensorboard=TEST_TENSORBOARD,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_experiment_run.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
experiment_name=TEST_EXPERIMENT_NAME,
experiment_run_name=TEST_EXPERIMENT_RUN_NAME,
experiment_run_tensorboard=TEST_TENSORBOARD,
run_after_creation=False,
)
|
TestVertexAICreateExperimentRunOperator
|
python
|
django__django
|
django/db/backends/postgresql/introspection.py
|
{
"start": 582,
"end": 12739
}
|
class ____(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: "BooleanField",
17: "BinaryField",
20: "BigIntegerField",
21: "SmallIntegerField",
23: "IntegerField",
25: "TextField",
700: "FloatField",
701: "FloatField",
869: "GenericIPAddressField",
1042: "CharField", # blank-padded
1043: "CharField",
1082: "DateField",
1083: "TimeField",
1114: "DateTimeField",
1184: "DateTimeField",
1186: "DurationField",
1266: "TimeField",
1700: "DecimalField",
2950: "UUIDField",
3802: "JSONField",
}
# A hook for subclasses.
index_default_access_method = "btree"
ignored_tables = []
on_delete_types = {
"a": DO_NOTHING,
"c": DB_CASCADE,
"d": DB_SET_DEFAULT,
"n": DB_SET_NULL,
# DB_RESTRICT - "r" is not supported.
}
def get_field_type(self, data_type, description):
field_type = super().get_field_type(data_type, description)
if description.is_autofield or (
# Required for pre-Django 4.1 serial columns.
description.default
and "nextval" in description.default
):
if field_type == "IntegerField":
return "AutoField"
elif field_type == "BigIntegerField":
return "BigAutoField"
elif field_type == "SmallIntegerField":
return "SmallAutoField"
return field_type
def get_table_list(self, cursor):
"""Return a list of table and view names in the current database."""
cursor.execute(
"""
SELECT
c.relname,
CASE
WHEN c.relispartition THEN 'p'
WHEN c.relkind IN ('m', 'v') THEN 'v'
ELSE 't'
END,
obj_description(c.oid, 'pg_class')
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
"""
)
return [
TableInfo(*row)
for row in cursor.fetchall()
if row[0] not in self.ignored_tables
]
def get_table_description(self, cursor, table_name):
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# Query the pg_catalog tables as cursor.description does not reliably
# return the nullable property and information_schema.columns does not
# contain details of materialized views.
cursor.execute(
"""
SELECT
a.attname AS column_name,
NOT (a.attnotnull OR (t.typtype = 'd' AND t.typnotnull)) AS is_nullable,
pg_get_expr(ad.adbin, ad.adrelid) AS column_default,
CASE WHEN collname = 'default' THEN NULL ELSE collname END AS collation,
a.attidentity != '' AS is_autofield,
col_description(a.attrelid, a.attnum) AS column_comment
FROM pg_attribute a
LEFT JOIN pg_attrdef ad ON a.attrelid = ad.adrelid AND a.attnum = ad.adnum
LEFT JOIN pg_collation co ON a.attcollation = co.oid
JOIN pg_type t ON a.atttypid = t.oid
JOIN pg_class c ON a.attrelid = c.oid
JOIN pg_namespace n ON c.relnamespace = n.oid
WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')
AND c.relname = %s
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
""",
[table_name],
)
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute(
"SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name)
)
# PostgreSQL OIDs may vary depending on the installation, especially
# for datatypes from extensions, e.g. "hstore". In such cases, the
# type_display attribute (psycopg 3.2+) should be used.
type_display_available = psycopg_version() >= (3, 2)
return [
FieldInfo(
line.name,
(
line.type_display
if type_display_available and line.type_display == "hstore"
else line.type_code
),
# display_size is always None on psycopg2.
line.internal_size if line.display_size is None else line.display_size,
line.internal_size,
# precision and scale are always 2^16 - 1 on psycopg2 for
# DecimalFields with no precision.
None if line.precision == 2**16 - 1 else line.precision,
None if line.scale == 2**16 - 1 else line.scale,
*field_map[line.name],
)
for line in cursor.description
]
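    # A minimal sketch (hypothetical helper, not part of the original API)
    # of the sentinel handling above in isolation: psycopg2 reports precision
    # and scale as 2**16 - 1 for DecimalFields declared without an explicit
    # precision, which is normalized to None.
    @staticmethod
    def _example_normalize_numeric(precision, scale):
        sentinel = 2**16 - 1
        return (
            None if precision == sentinel else precision,
            None if scale == sentinel else scale,
        )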
def get_sequences(self, cursor, table_name, table_fields=()):
cursor.execute(
"""
SELECT
s.relname AS sequence_name,
a.attname AS colname
FROM
pg_class s
JOIN pg_depend d ON d.objid = s.oid
AND d.classid = 'pg_class'::regclass
AND d.refclassid = 'pg_class'::regclass
JOIN pg_attribute a ON d.refobjid = a.attrelid
AND d.refobjsubid = a.attnum
JOIN pg_class tbl ON tbl.oid = d.refobjid
AND tbl.relname = %s
AND pg_catalog.pg_table_is_visible(tbl.oid)
WHERE
s.relkind = 'S';
""",
[table_name],
)
return [
{"name": row[0], "table": table_name, "column": row[1]}
for row in cursor.fetchall()
]
def get_relations(self, cursor, table_name):
"""
Return a dictionary of
{
field_name: (field_name_other_table, other_table, db_on_delete)
}
representing all foreign keys in the given table.
"""
cursor.execute(
"""
SELECT a1.attname, c2.relname, a2.attname, con.confdeltype
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN
pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN
pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE
c1.relname = %s AND
con.contype = 'f' AND
c1.relnamespace = c2.relnamespace AND
pg_catalog.pg_table_is_visible(c1.oid)
""",
[table_name],
)
return {
row[0]: (row[2], row[1], self.on_delete_types.get(row[3]))
for row in cursor.fetchall()
}
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns. Also retrieve the definition of expression-based
indexes.
"""
constraints = {}
# Loop over the key table, collecting things as constraints. The column
# array must return column names in the same order in which they were
# created.
cursor.execute(
"""
SELECT
c.conname,
array(
SELECT attname
FROM unnest(c.conkey) WITH ORDINALITY cols(colid, arridx)
JOIN pg_attribute AS ca ON cols.colid = ca.attnum
WHERE ca.attrelid = c.conrelid
ORDER BY cols.arridx
),
c.contype,
(SELECT fkc.relname || '.' || fka.attname
FROM pg_attribute AS fka
JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
cl.reloptions
FROM pg_constraint AS c
JOIN pg_class AS cl ON c.conrelid = cl.oid
WHERE cl.relname = %s
AND pg_catalog.pg_table_is_visible(cl.oid)
AND c.contype != 'n'
""",
[table_name],
)
for constraint, columns, kind, used_cols, options in cursor.fetchall():
constraints[constraint] = {
"columns": columns,
"primary_key": kind == "p",
"unique": kind in ["p", "u"],
"foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
"check": kind == "c",
"index": False,
"definition": None,
"options": options,
}
# Now get indexes
cursor.execute(
"""
SELECT
indexname,
array_agg(attname ORDER BY arridx),
indisunique,
indisprimary,
array_agg(ordering ORDER BY arridx),
amname,
exprdef,
s2.attoptions
FROM (
SELECT
c2.relname as indexname, idx.*, attr.attname, am.amname,
CASE
WHEN idx.indexprs IS NOT NULL THEN
pg_get_indexdef(idx.indexrelid)
END AS exprdef,
CASE am.amname
WHEN %s THEN
CASE (option & 1)
WHEN 1 THEN 'DESC' ELSE 'ASC'
END
END as ordering,
c2.reloptions as attoptions
FROM (
SELECT *
FROM
pg_index i,
unnest(i.indkey, i.indoption)
WITH ORDINALITY koi(key, option, arridx)
) idx
LEFT JOIN pg_class c ON idx.indrelid = c.oid
LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
LEFT JOIN pg_am am ON c2.relam = am.oid
LEFT JOIN
pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
WHERE c.relname = %s AND pg_catalog.pg_table_is_visible(c.oid)
) s2
GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
""",
[self.index_default_access_method, table_name],
)
for (
index,
columns,
unique,
primary,
orders,
type_,
definition,
options,
) in cursor.fetchall():
if index not in constraints:
basic_index = (
type_ == self.index_default_access_method
and
# '_btree' references
# django.contrib.postgres.indexes.BTreeIndex.suffix.
not index.endswith("_btree")
and options is None
)
constraints[index] = {
"columns": columns if columns != [None] else [],
"orders": orders if orders != [None] else [],
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
"type": Index.suffix if basic_index else type_,
"definition": definition,
"options": options,
}
return constraints
|
DatabaseIntrospection
|
python
|
huggingface__transformers
|
src/transformers/models/colpali/modeling_colpali.py
|
{
"start": 2249,
"end": 4474
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
The embeddings of the model.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder after projecting last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
embeddings: Optional[torch.Tensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The ColPali architecture leverages VLMs to construct efficient multi-vector embeddings directly
from document images (“screenshots”) for document retrieval. The model is trained to maximize the similarity
between these document embeddings and the corresponding query embeddings, using the late interaction method
introduced in ColBERT.
Using ColPali removes the need for potentially complex and brittle layout recognition and OCR pipelines with a
single model that can take into account both the textual and visual content (layout, charts, etc.) of a document.
ColPali is part of the ColVision model family, which was first introduced in the following paper:
[*ColPali: Efficient Document Retrieval with Vision Language Models*](https://huggingface.co/papers/2407.01449).
"""
)
|
ColPaliForRetrievalOutput
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/comment_webhooks.py
|
{
"start": 458,
"end": 630
}
|
class ____(CommentEvent):
pass
analytics.register(CommentCreatedEvent)
analytics.register(CommentUpdatedEvent)
analytics.register(CommentDeletedEvent)
|
CommentDeletedEvent
|
python
|
pytest-dev__pytest-asyncio
|
tests/async_fixtures/test_async_fixtures.py
|
{
"start": 531,
"end": 814
}
|
class ____:
is_same_instance = False
@pytest.fixture(autouse=True)
async def async_fixture_method(self):
self.is_same_instance = True
@pytest.mark.asyncio
async def test_async_fixture_method(self):
assert self.is_same_instance
|
TestAsyncFixtureMethod
|
python
|
pypa__warehouse
|
tests/unit/packaging/test_views.py
|
{
"start": 12256,
"end": 17114
}
|
class ____:
@pytest.fixture
def gitlab_attestation(self, gitlab_provenance):
return gitlab_provenance.attestation_bundles[0].attestations[0]
@pytest.fixture
def github_attestation(self, github_provenance):
return github_provenance.attestation_bundles[0].attestations[0]
def test_github_pep740(self, github_attestation):
github_publisher = pretend.stub(
kind="GitHub",
workflow=".github/workflows/release.yml",
)
viewer = views.PEP740AttestationViewer(
publisher=github_publisher,
attestation=github_attestation,
)
assert viewer.statement_type == "https://in-toto.io/Statement/v1"
assert viewer.predicate_type == "https://docs.pypi.org/attestations/publish/v1"
assert viewer.subject_name == "sampleproject-4.0.0.tar.gz"
assert (
viewer.subject_digest
== "0ace7980f82c5815ede4cd7bf9f6693684cec2ae47b9b7ade9add533b8627c6b"
)
assert viewer.transparency_entry["integratedTime"] == "1730932627"
assert viewer.repository_url == "https://github.com/pypa/sampleproject"
assert viewer.workflow_filename == ".github/workflows/release.yml"
assert viewer.workflow_url == (
"https://github.com/pypa/sampleproject/blob/"
"621e4974ca25ce531773def586ba3ed8e736b3fc/"
".github/workflows/release.yml"
)
assert viewer.build_digest == "621e4974ca25ce531773def586ba3ed8e736b3fc"
assert viewer.issuer == "https://token.actions.githubusercontent.com"
assert viewer.environment == "github-hosted"
assert viewer.source == "https://github.com/pypa/sampleproject"
assert viewer.source_digest == "621e4974ca25ce531773def586ba3ed8e736b3fc"
assert viewer.source_reference == "refs/heads/main"
assert viewer.owner == "https://github.com/pypa"
assert viewer.trigger == "push"
assert viewer.access == "public"
assert viewer.permalink_with_digest == (
"https://github.com/pypa/sampleproject/tree/"
"621e4974ca25ce531773def586ba3ed8e736b3fc"
)
assert (
viewer.permalink_with_reference
== "https://github.com/pypa/sampleproject/tree/refs/heads/main"
)
def test_gitlab_pep740(self, gitlab_attestation):
gitlab_publisher = pretend.stub(
kind="GitLab",
workflow_filepath=".gitlab-ci.yml",
)
viewer = views.PEP740AttestationViewer(
publisher=gitlab_publisher,
attestation=gitlab_attestation,
)
assert viewer.statement_type == "https://in-toto.io/Statement/v1"
assert viewer.predicate_type == "https://docs.pypi.org/attestations/publish/v1"
assert viewer.subject_name == "pep740_sampleproject-1.0.0.tar.gz"
assert (
viewer.subject_digest
== "6cdd4a1a0a49aeef47265e7bf8ec1667257b397d34d731dc7b7af349deca1cd8"
)
assert viewer.transparency_entry["integratedTime"] == "1732724143"
assert (
viewer.repository_url == "https://gitlab.com/pep740-example/sampleproject"
)
assert viewer.workflow_filename == ".gitlab-ci.yml"
assert viewer.workflow_url == (
"https://gitlab.com/pep740-example/sampleproject/blob/"
"0b706bbf1b50e7266b33762568566d6ec0f76d69//.gitlab-ci.yml"
)
assert viewer.build_digest == "0b706bbf1b50e7266b33762568566d6ec0f76d69"
assert viewer.issuer == "https://gitlab.com"
assert viewer.environment == "gitlab-hosted"
assert viewer.source == "https://gitlab.com/pep740-example/sampleproject"
assert viewer.source_digest == "0b706bbf1b50e7266b33762568566d6ec0f76d69"
assert viewer.source_reference == "refs/heads/main"
assert viewer.owner == "https://gitlab.com/pep740-example"
assert viewer.trigger == "push"
assert viewer.access == "private"
assert viewer.permalink_with_digest == (
"https://gitlab.com/pep740-example/sampleproject/-/tree/"
"0b706bbf1b50e7266b33762568566d6ec0f76d69"
)
assert (
viewer.permalink_with_reference
== "https://gitlab.com/pep740-example/sampleproject/-/tree/main"
)
def test_unknown_publisher(self, github_attestation):
viewer = views.PEP740AttestationViewer(
publisher=pretend.stub(
kind="Unknown",
),
attestation=pretend.stub(certificate_claims={}),
)
assert viewer.workflow_filename == ""
assert (
viewer._format_url("https://example.com", "refs/heads/main")
== "https://example.com/refs/heads/main"
)
|
TestPEP740AttestationViewer
|
python
|
pytorch__pytorch
|
torchgen/api/ufunc.py
|
{
"start": 3993,
"end": 6693
}
|
class ____:
ctor: list[Binding]
apply: list[Binding]
# ufunctors are a CUDA-only concept representing functors that take some of
# their arguments in a host-side constructor, and the rest in the device-side
# apply. E.g.,
#
# template <typename scalar_t>
# struct CUDAFunctorOnSelf_add {
# using opmath_t = at::opmath_type<scalar_t>;
# opmath_t other_;
# opmath_t alpha_;
# CUDAFunctorOnSelf_add(opmath_t other, opmath_t alpha) : other_(other), alpha_(alpha) {}
# __device__ scalar_t operator()(scalar_t self) {
# return ufunc::add(static_cast<opmath_t>(self), other_, alpha_);
# }
# };
#
# The ctor refers to the constructor CUDAFunctorOnSelf_add, while apply refers
# to the operator() definition
def ufunctor_arguments(
g: NativeFunctionsGroup, *, scalar_tensor_idx: int | None, scalar_t: BaseCppType
) -> UfunctorBindings:
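    # Editor's gloss of the logic below: scalar_tensor_idx identifies which
    # tensor argument (by position among the tensor arguments) is the
    # CPU-scalar tensor hoisted into the host-side ctor; every other tensor
    # goes to the device-side apply, and non-tensor arguments always live in
    # the ctor.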
ctor = []
apply = []
for a in g.functional.func.arguments.flat_non_out:
if a.type.is_tensor_like():
if scalar_tensor_idx == 0:
# put it in the ctor anyway
ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
scalar_tensor_idx = None
else:
if scalar_tensor_idx is not None:
scalar_tensor_idx -= 1
apply.append(ufunctor_apply_argument(a, scalar_t=scalar_t))
else:
ctor.append(ufunctor_ctor_argument(a, scalar_t=scalar_t))
assert scalar_tensor_idx is None
return UfunctorBindings(ctor=ctor, apply=apply)
# ufuncs are the inner loop template functions that you wrote in ufunc/add.h
# which do the actual computation in question. E.g.,
#
# template <typename T>
# C10_HOST_DEVICE T add(T self, T other, T alpha) __ubsan_ignore_undefined__ {
# return self + alpha * other;
# }
#
# In this file, we refer to T as compute_t which is bound by caller
def ufunc_arguments(g: NativeFunctionsGroup, *, compute_t: CType) -> list[Binding]:
return [
ufunc_argument(a, compute_t=compute_t)
for a in g.functional.func.arguments.flat_non_out
]
# Stubs are the DispatchStub trampolines that CPU kernels use to get to their
# vectorized versions. E.g.,
#
# using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
# DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
def stub_arguments(g: NativeFunctionsGroup) -> list[Binding]:
    # stubs drop all tensor arguments (they are implicit in the TensorIterator
    # argument) and keep everything else
return [
r
for a in g.out.func.arguments.flat_non_out
if not a.type.is_tensor_like()
for r in structured.argument(a)
]
|
UfunctorBindings
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/instagram/tests.py
|
{
"start": 246,
"end": 967
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = InstagramProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"username": "georgewhewell",
"bio": "",
"website": "",
"profile_picture":
"http://images.ak.instagram.com/profiles/profile_11428116_75sq_1339547159.jpg",
"full_name": "georgewhewell",
"counts": {
"media": 74,
"followed_by": 91,
"follows": 104
},
"id": "11428116"
}""",
) # noqa
def get_expected_to_str(self):
return "georgewhewell"
|
InstagramTests
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/links/glue.py
|
{
"start": 914,
"end": 1229
}
|
class ____(BaseAwsLink):
"""Helper class for constructing AWS Glue Job Run Details Link."""
name = "AWS Glue Job Run Details"
key = "glue_job_run_details"
format_str = (
BASE_AWS_CONSOLE_LINK + "/gluestudio/home?region={region_name}#/job/{job_name}/run/{job_run_id}"
)
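    # Illustrative rendering (editor's sketch with made-up values
    # region_name="us-east-1", job_name="my_job", job_run_id="jr_0123"):
    #   .../gluestudio/home?region=us-east-1#/job/my_job/run/jr_0123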
|
GlueJobRunDetailsLink
|
python
|
python-attrs__attrs
|
typing-examples/mypy.py
|
{
"start": 8131,
"end": 8241
}
|
class ____:
pass
def test(cls: type) -> None:
if attr.has(cls):
attr.resolve_types(cls)
|
Hashable
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/spark_s3_datasource.py
|
{
"start": 1119,
"end": 5845
}
|
class ____(_SparkFilePathDatasource):
"""
SparkS3Datasource is a subclass of SparkDatasource which connects to
Amazon S3.
"""
# class attributes
data_connector_type: ClassVar[Type[S3DataConnector]] = S3DataConnector
# these fields should not be passed to the execution engine
_EXTRA_EXCLUDED_EXEC_ENG_ARGS: ClassVar[set] = {
"bucket",
"boto3_options",
}
# instance attributes
type: Literal["spark_s3"] = "spark_s3"
# S3 specific attributes
bucket: str
boto3_options: Dict[str, Union[ConfigStr, Any]] = {}
_s3_client: Union[BaseClient, None] = pydantic.PrivateAttr(default=None)
def _get_s3_client(self) -> BaseClient:
s3_client: Union[BaseClient, None] = self._s3_client
if not s3_client:
# Validate that "boto3" libarary was successfully imported and attempt to create "s3_client" handle. # noqa: E501 # FIXME CoP
if aws.boto3:
_check_config_substitutions_needed(
self, self.boto3_options, raise_warning_if_provider_not_present=True
)
# pull in needed config substitutions using the `_config_provider`
# The `FluentBaseModel.dict()` call will do the config substitution on the serialized dict if a `config_provider` is passed. # noqa: E501 # FIXME CoP
boto3_options: dict = self.dict(config_provider=self._config_provider).get(
"boto3_options", {}
)
try:
s3_client = aws.boto3.client("s3", **boto3_options)
except Exception as e:
# Failure to create "s3_client" is most likely due invalid "boto3_options" dictionary. # noqa: E501 # FIXME CoP
raise SparkS3DatasourceError( # noqa: TRY003 # FIXME CoP
f'Due to exception: "{e!s}", "s3_client" could not be created.'
) from e
else:
raise SparkS3DatasourceError( # noqa: TRY003 # FIXME CoP
'Unable to create "SparkS3Datasource" due to missing boto3 dependency.'
)
self._s3_client = s3_client
return s3_client
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the SparkS3Datasource.
Args:
test_assets: If assets have been passed to the SparkS3Datasource, whether to test them as well.
Raises:
TestConnectionError: If the connection test fails.
""" # noqa: E501 # FIXME CoP
try:
# tests S3 connection
_ = self._get_s3_client()
except Exception as e:
raise TestConnectionError( # noqa: TRY003 # FIXME CoP
f"Attempt to connect to datasource failed with the following error message: {e!s}"
) from e
# tests Spark connection, raising TestConnectionError
super().test_connection()
if self.assets and test_assets:
for asset in self.assets:
asset.test_connection()
@override
def _build_data_connector(
self,
data_asset: SPARK_PATH_ASSET_UNION,
s3_prefix: str = "",
s3_delimiter: str = "/",
s3_max_keys: int = 1000,
s3_recursive_file_discovery: bool = False,
**kwargs,
) -> None:
"""Builds and attaches the `S3DataConnector` to the asset."""
if kwargs:
raise TypeError( # noqa: TRY003 # FIXME CoP
f"_build_data_connector() got unexpected keyword arguments {list(kwargs.keys())}"
)
data_asset._data_connector = self.data_connector_type.build_data_connector(
datasource_name=self.name,
data_asset_name=data_asset.name,
s3_client=self._get_s3_client(),
bucket=self.bucket,
prefix=s3_prefix,
delimiter=s3_delimiter,
max_keys=s3_max_keys,
recursive_file_discovery=s3_recursive_file_discovery,
file_path_template_map_fn=S3Url.OBJECT_URL_TEMPLATE.format,
whole_directory_path_override=data_asset.get_whole_directory_path_override(),
)
# build a more specific `_test_connection_error_message`
data_asset._test_connection_error_message = (
self.data_connector_type.build_test_connection_error_message(
data_asset_name=data_asset.name,
bucket=self.bucket,
prefix=s3_prefix,
delimiter=s3_delimiter,
recursive_file_discovery=s3_recursive_file_discovery,
)
)
|
SparkS3Datasource
|
python
|
numpy__numpy
|
numpy/_core/tests/test_array_coercion.py
|
{
"start": 18600,
"end": 22852
}
|
class ____:
def test_nested_simple(self):
initial = [1.2]
nested = initial
for i in range(ncu.MAXDIMS - 1):
nested = [nested]
arr = np.array(nested, dtype="float64")
assert arr.shape == (1,) * ncu.MAXDIMS
with pytest.raises(ValueError):
np.array([nested], dtype="float64")
with pytest.raises(ValueError, match=".*would exceed the maximum"):
np.array([nested]) # user must ask for `object` explicitly
arr = np.array([nested], dtype=object)
assert arr.dtype == np.dtype("O")
assert arr.shape == (1,) * ncu.MAXDIMS
assert arr.item() is initial
def test_pathological_self_containing(self):
# Test that this also works for two nested sequences
l = []
l.append(l)
arr = np.array([l, l, l], dtype=object)
assert arr.shape == (3,) + (1,) * (ncu.MAXDIMS - 1)
# Also check a ragged case:
arr = np.array([l, [None], l], dtype=object)
assert arr.shape == (3, 1)
@pytest.mark.parametrize("arraylike", arraylikes())
def test_nested_arraylikes(self, arraylike):
        # We try storing an array-like into an array, but the array-like
# will have too many dimensions. This means the shape discovery
# decides that the array-like must be treated as an object (a special
# case of ragged discovery). The result will be an array with one
# dimension less than the maximum dimensions, and the array being
# assigned to it (which does work for object or if `float(arraylike)`
# works).
initial = arraylike(np.ones((1, 1)))
nested = initial
for i in range(ncu.MAXDIMS - 1):
nested = [nested]
with pytest.raises(ValueError, match=".*would exceed the maximum"):
# It will refuse to assign the array into
np.array(nested, dtype="float64")
# If this is object, we end up assigning a (1, 1) array into (1,)
# (due to running out of dimensions), this is currently supported but
# a special case which is not ideal.
arr = np.array(nested, dtype=object)
assert arr.shape == (1,) * ncu.MAXDIMS
assert arr.item() == np.array(initial).item()
@pytest.mark.parametrize("arraylike", arraylikes())
def test_uneven_depth_ragged(self, arraylike):
arr = np.arange(4).reshape((2, 2))
arr = arraylike(arr)
# Array is ragged in the second dimension already:
out = np.array([arr, [arr]], dtype=object)
assert out.shape == (2,)
assert out[0] is arr
assert type(out[1]) is list
# Array is ragged in the third dimension:
with pytest.raises(ValueError):
# This is a broadcast error during assignment, because
# the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
np.array([arr, [arr, arr]], dtype=object)
def test_empty_sequence(self):
arr = np.array([[], [1], [[1]]], dtype=object)
assert arr.shape == (3,)
# The empty sequence stops further dimension discovery, so the
# result shape will be (0,) which leads to an error during:
with pytest.raises(ValueError):
np.array([[], np.empty((0, 1))], dtype=object)
def test_array_of_different_depths(self):
# When multiple arrays (or array-likes) are included in a
        # sequence and have different depths, we currently discover
# as many dimensions as they share. (see also gh-17224)
arr = np.zeros((3, 2))
mismatch_first_dim = np.zeros((1, 2))
mismatch_second_dim = np.zeros((3, 3))
dtype, shape = ncu._discover_array_parameters(
[arr, mismatch_second_dim], dtype=np.dtype("O"))
assert shape == (2, 3)
dtype, shape = ncu._discover_array_parameters(
[arr, mismatch_first_dim], dtype=np.dtype("O"))
assert shape == (2,)
# The second case is currently supported because the arrays
# can be stored as objects:
res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
assert res[0] is arr
assert res[1] is mismatch_first_dim
|
TestNested
|
python
|
pytorch__pytorch
|
test/inductor/test_padding.py
|
{
"start": 8581,
"end": 13226
}
|
class ____(TestCaseBase):
@maybe_cprofile
def run_acc_and_perf_test(self, model, inputs, perf_inputs=None, tol=1e-3):
"""
Run accuracy test.
Also compare the perf with and without the comprehensive padding if
DO_PERF_TEST is true.
"""
if perf_inputs is None:
perf_inputs = inputs
def _process_inputs(x):
"""
return args and kwargs
"""
if isinstance(x, dict):
return [], x
            if not isinstance(x, (tuple, list)):
x = [x]
return x, {}
args, kwargs = _process_inputs(inputs)
perf_args, perf_kwargs = _process_inputs(perf_inputs)
if DO_ACC_TEST:
model.eval()
self.common_numeric_check(model, *args, **kwargs, tol=tol)
else:
print("Accuracy test skipped")
model.train()
if DO_PERF_TEST:
print("Do performance test")
def get_f(m, optim):
def f(*args, **kwargs):
optim.zero_grad(True)
with torch.autocast(GPU_TYPE):
pred = m(*args, **kwargs)
loss = reduce_to_scalar_loss(pred)
loss.backward()
optim.step()
return f
latency_with_padding = None
print("Benchmark with padding")
with config.patch(comprehensive_padding=True):
m_copy_with_padding = copy.deepcopy(model)
optim_with_padding = get_optim(m_copy_with_padding)
opt_f_with_padding = torch.compile(
get_f(m_copy_with_padding, optim_with_padding)
)
latency_with_padding = benchmarker.benchmark_gpu(
lambda: opt_f_with_padding(*perf_args, **perf_kwargs)
)
latency_without_padding = None
print("bencmark without padding")
with config.patch(comprehensive_padding=False):
m_copy_without_padding = copy.deepcopy(model)
optim_without_padding = get_optim(m_copy_without_padding)
opt_f_without_padding = torch.compile(
get_f(m_copy_without_padding, optim_without_padding)
)
latency_without_padding = benchmarker.benchmark_gpu(
lambda: opt_f_without_padding(*perf_args, **perf_kwargs)
)
print(
f"Latency with and without padding: {latency_with_padding:.3f} v.s. {latency_without_padding:.3f}"
)
# profiling
self.do_profiling(
opt_f_with_padding,
opt_f_without_padding,
args=perf_args,
kwargs=perf_kwargs,
)
def test_nvidia_deeprecommender(self):
"""
        Compare the perf with and without comprehensive padding.
"""
layer_sizes = [197951, 512, 512, 1024, 512, 512, 197951]
x = torch.randn(4, layer_sizes[0])
class Model(nn.Module):
def __init__(self) -> None:
super().__init__()
mod_list = []
for i in range(len(layer_sizes) - 1):
mod_list.append(nn.Linear(layer_sizes[i], layer_sizes[i + 1]))
mod_list.append(nn.SELU())
if i == 2:
mod_list.append(nn.Dropout(0.8))
self.seq = nn.Sequential(*mod_list)
def forward(self, x):
return self.seq(x)
m = Model()
perf_inputs = torch.randn(256, layer_sizes[0])
self.run_acc_and_perf_test(m, x, perf_inputs)
@unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
def test_longformer(self, bs=4):
from transformers import AutoConfig, AutoModelForMaskedLM
config = AutoConfig.from_pretrained("allenai/longformer-base-4096")
model = AutoModelForMaskedLM.from_config(config)
vocab_size = model.config.vocab_size
seq_length = 1024
input_dict = gen_transformer_inputs(vocab_size, bs, seq_length)
self.run_acc_and_perf_test(model, input_dict)
@unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
def test_longformer_small_bs(self):
"""
The model exists in both HF and TB. In TB it uses a smaller batch size.
"""
self.test_longformer(bs=2)
@instantiate_parametrized_tests
|
PerfTestWithAndWithoutPadding
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-substring-with-at-most-two-distinct-characters.py
|
{
"start": 724,
"end": 1264
}
|
class ____(object):
def lengthOfLongestSubstringTwoDistinct(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter()
left, max_length = 0, 0
for right, char in enumerate(s):
counter[char] += 1
while len(counter) > 2:
counter[s[left]] -= 1
if counter[s[left]] == 0:
del counter[s[left]]
left += 1
max_length = max(max_length, right-left+1)
return max_length
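# Quick sanity check (editor's sketch; this record's target names the masked
# class Solution2):
#   Solution2().lengthOfLongestSubstringTwoDistinct("eceba") == 3    # "ece"
#   Solution2().lengthOfLongestSubstringTwoDistinct("ccaabbb") == 5  # "aabbb"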
|
Solution2
|
python
|
MongoEngine__mongoengine
|
tests/fields/test_string_field.py
|
{
"start": 99,
"end": 1382
}
|
class ____(MongoDBTestCase):
def test_storage(self):
class Person(Document):
name = StringField()
Person.drop_collection()
person = Person(name="test123")
person.save()
assert get_as_pymongo(person) == {"_id": person.id, "name": "test123"}
def test_validation(self):
class Person(Document):
name = StringField(max_length=20, min_length=2)
userid = StringField(r"[0-9a-z_]+$")
with pytest.raises(ValidationError, match="only accepts string values"):
Person(name=34).validate()
with pytest.raises(ValidationError, match="value is too short"):
Person(name="s").validate()
# Test regex validation on userid
person = Person(userid="test.User")
with pytest.raises(ValidationError):
person.validate()
person.userid = "test_user"
assert person.userid == "test_user"
person.validate()
# Test max length validation on name
person = Person(name="Name that is more than twenty characters")
with pytest.raises(ValidationError):
person.validate()
person = Person(name="a friendl name", userid="7a757668sqjdkqlsdkq")
person.validate()
|
TestStringField
|
python
|
pydantic__pydantic
|
pydantic/v1/types.py
|
{
"start": 19299,
"end": 24335
}
|
class ____(Decimal, metaclass=ConstrainedNumberMeta):
gt: OptionalIntFloatDecimal = None
ge: OptionalIntFloatDecimal = None
lt: OptionalIntFloatDecimal = None
le: OptionalIntFloatDecimal = None
max_digits: OptionalInt = None
decimal_places: OptionalInt = None
multiple_of: OptionalIntFloatDecimal = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
exclusiveMinimum=cls.gt,
exclusiveMaximum=cls.lt,
minimum=cls.ge,
maximum=cls.le,
multipleOf=cls.multiple_of,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield decimal_validator
yield number_size_validator
yield number_multiple_validator
yield cls.validate
@classmethod
def validate(cls, value: Decimal) -> Decimal:
try:
normalized_value = value.normalize()
except InvalidOperation:
normalized_value = value
digit_tuple, exponent = normalized_value.as_tuple()[1:]
if exponent in {'F', 'n', 'N'}:
raise errors.DecimalIsNotFiniteError()
if exponent >= 0:
# A positive exponent adds that many trailing zeros.
digits = len(digit_tuple) + exponent
decimals = 0
else:
# If the absolute value of the negative exponent is larger than the
# number of digits, then it's the same as the number of digits,
# because it'll consume all of the digits in digit_tuple and then
# add abs(exponent) - len(digit_tuple) leading zeros after the
# decimal point.
if abs(exponent) > len(digit_tuple):
digits = decimals = abs(exponent)
else:
digits = len(digit_tuple)
decimals = abs(exponent)
whole_digits = digits - decimals
if cls.max_digits is not None and digits > cls.max_digits:
raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits)
if cls.decimal_places is not None and decimals > cls.decimal_places:
raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places)
if cls.max_digits is not None and cls.decimal_places is not None:
expected = cls.max_digits - cls.decimal_places
if whole_digits > expected:
raise errors.DecimalWholeDigitsError(whole_digits=expected)
return value
def condecimal(
*,
gt: Decimal = None,
ge: Decimal = None,
lt: Decimal = None,
le: Decimal = None,
max_digits: Optional[int] = None,
decimal_places: Optional[int] = None,
multiple_of: Decimal = None,
) -> Type[Decimal]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of
)
return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace)
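# Illustrative usage (editor's sketch):
#   Price = condecimal(gt=Decimal(0), max_digits=8, decimal_places=2)
# Price then accepts Decimal("123456.78") (8 digits, 2 decimal places) but
# rejects Decimal("1234567.89") (9 digits) and non-positive values.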
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ UUID TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
UUID1 = UUID
UUID3 = UUID
UUID4 = UUID
UUID5 = UUID
else:
class UUID1(UUID):
_required_version = 1
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format=f'uuid{cls._required_version}')
class UUID3(UUID1):
_required_version = 3
class UUID4(UUID1):
_required_version = 4
class UUID5(UUID1):
_required_version = 5
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATH TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
FilePath = Path
DirectoryPath = Path
else:
class FilePath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='file-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_file():
raise errors.PathNotAFileError(path=value)
return value
class DirectoryPath(Path):
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(format='directory-path')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield path_validator
yield path_exists_validator
yield cls.validate
@classmethod
def validate(cls, value: Path) -> Path:
if not value.is_dir():
raise errors.PathNotADirectoryError(path=value)
return value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
ConstrainedDecimal
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_sagemaker.py
|
{
"start": 8110,
"end": 37656
}
|
class ____:
@mock.patch.object(AwsLogsHook, "get_log_events")
def test_multi_stream_iter(self, mock_log_stream):
event = {"timestamp": 1}
mock_log_stream.side_effect = [iter([event]), iter([]), None]
hook = SageMakerHook()
event_iter = hook.multi_stream_iter("log", [None, None, None])
assert next(event_iter) == (0, event)
@mock.patch.object(S3Hook, "create_bucket")
@mock.patch.object(S3Hook, "load_file")
def test_configure_s3_resources(self, mock_load_file, mock_create_bucket):
hook = SageMakerHook()
evaluation_result = {"Image": image, "Role": role}
hook.configure_s3_resources(test_evaluation_config)
assert test_evaluation_config == evaluation_result
mock_create_bucket.assert_called_once_with(bucket_name=bucket)
mock_load_file.assert_called_once_with(path, key, bucket)
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(S3Hook, "check_for_key")
@mock.patch.object(S3Hook, "check_for_bucket")
@mock.patch.object(S3Hook, "check_for_prefix")
def test_check_s3_url(self, mock_check_prefix, mock_check_bucket, mock_check_key, mock_client):
mock_client.return_value = None
hook = SageMakerHook()
mock_check_bucket.side_effect = [False, True, True, True]
mock_check_key.side_effect = [False, True, False]
mock_check_prefix.side_effect = [False, True, True]
with pytest.raises(AirflowException):
hook.check_s3_url(data_url)
with pytest.raises(AirflowException):
hook.check_s3_url(data_url)
assert hook.check_s3_url(data_url) is True
assert hook.check_s3_url(data_url) is True
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(SageMakerHook, "check_s3_url")
def test_check_valid_training(self, mock_check_url, mock_client):
mock_client.return_value = None
hook = SageMakerHook()
hook.check_training_config(create_training_params)
mock_check_url.assert_called_once_with(data_url)
        # InputDataConfig is optional; verify that the check succeeds without it
create_training_params_no_inputdataconfig = create_training_params.copy()
create_training_params_no_inputdataconfig.pop("InputDataConfig")
hook.check_training_config(create_training_params_no_inputdataconfig)
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(SageMakerHook, "check_s3_url")
def test_check_valid_tuning(self, mock_check_url, mock_client):
mock_client.return_value = None
hook = SageMakerHook()
hook.check_tuning_config(create_tuning_params)
mock_check_url.assert_called_once_with(data_url)
def test_conn(self):
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
assert hook.aws_conn_id == "sagemaker_test_conn_id"
@mock.patch.object(SageMakerHook, "check_training_config")
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_training_job(self, mock_client, mock_check_training):
mock_check_training.return_value = True
mock_session = mock.Mock()
attrs = {"create_training_job.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_training_job(
create_training_params, wait_for_completion=False, print_log=False
)
mock_session.create_training_job.assert_called_once_with(**create_training_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "check_training_config")
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch("time.sleep", return_value=None)
def test_training_ends_with_wait(self, _, mock_client, mock_check_training):
mock_check_training.return_value = True
mock_session = mock.Mock()
attrs = {
"create_training_job.return_value": test_arn_return,
"describe_training_job.side_effect": [
DESCRIBE_TRAINING_INPROGRESS_RETURN,
DESCRIBE_TRAINING_STOPPING_RETURN,
DESCRIBE_TRAINING_COMPLETED_RETURN,
],
}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id_1")
hook.create_training_job(
create_training_params, wait_for_completion=True, print_log=False, check_interval=0
)
assert mock_session.describe_training_job.call_count == 3
@mock.patch.object(SageMakerHook, "check_training_config")
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch("time.sleep", return_value=None)
def test_training_throws_error_when_failed_with_wait(self, _, mock_client, mock_check_training):
mock_check_training.return_value = True
mock_session = mock.Mock()
attrs = {
"create_training_job.return_value": test_arn_return,
"describe_training_job.side_effect": [
DESCRIBE_TRAINING_INPROGRESS_RETURN,
DESCRIBE_TRAINING_STOPPING_RETURN,
DESCRIBE_TRAINING_FAILED_RETURN,
DESCRIBE_TRAINING_COMPLETED_RETURN,
],
}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id_1")
with pytest.raises(AirflowException):
hook.create_training_job(
create_training_params,
wait_for_completion=True,
print_log=False,
check_interval=0,
)
assert mock_session.describe_training_job.call_count == 3
@mock.patch.object(SageMakerHook, "check_tuning_config")
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_tuning_job(self, mock_client, mock_check_tuning_config):
mock_session = mock.Mock()
attrs = {"create_hyper_parameter_tuning_job.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_tuning_job(create_tuning_params, wait_for_completion=False)
mock_session.create_hyper_parameter_tuning_job.assert_called_once_with(**create_tuning_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "check_s3_url")
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_transform_job(self, mock_client, mock_check_url):
mock_check_url.return_value = True
mock_session = mock.Mock()
attrs = {"create_transform_job.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_transform_job(create_transform_params, wait_for_completion=False)
mock_session.create_transform_job.assert_called_once_with(**create_transform_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_transform_job_fs(self, mock_client):
mock_session = mock.Mock()
attrs = {"create_transform_job.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_transform_job(create_transform_params_fs, wait_for_completion=False)
mock_session.create_transform_job.assert_called_once_with(**create_transform_params_fs)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_model(self, mock_client):
mock_session = mock.Mock()
attrs = {"create_model.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_model(create_model_params)
mock_session.create_model.assert_called_once_with(**create_model_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_endpoint_config(self, mock_client):
mock_session = mock.Mock()
attrs = {"create_endpoint_config.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_endpoint_config(create_endpoint_config_params)
mock_session.create_endpoint_config.assert_called_once_with(**create_endpoint_config_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_create_endpoint(self, mock_client):
mock_session = mock.Mock()
attrs = {"create_endpoint.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.create_endpoint(create_endpoint_params, wait_for_completion=False)
mock_session.create_endpoint.assert_called_once_with(**create_endpoint_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_update_endpoint(self, mock_client):
mock_session = mock.Mock()
attrs = {"update_endpoint.return_value": test_arn_return}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.update_endpoint(update_endpoint_params, wait_for_completion=False)
mock_session.update_endpoint.assert_called_once_with(**update_endpoint_params)
assert response == test_arn_return
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_training_job(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_training_job.return_value": "InProgress"}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_training_job(job_name)
mock_session.describe_training_job.assert_called_once_with(TrainingJobName=job_name)
assert response == "InProgress"
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_tuning_job(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_hyper_parameter_tuning_job.return_value": "InProgress"}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_tuning_job(job_name)
mock_session.describe_hyper_parameter_tuning_job.assert_called_once_with(
HyperParameterTuningJobName=job_name
)
assert response == "InProgress"
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_transform_job(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_transform_job.return_value": "InProgress"}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_transform_job(job_name)
mock_session.describe_transform_job.assert_called_once_with(TransformJobName=job_name)
assert response == "InProgress"
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_model(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_model.return_value": model_name}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_model(model_name)
mock_session.describe_model.assert_called_once_with(ModelName=model_name)
assert response == model_name
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_endpoint_config(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_endpoint_config.return_value": config_name}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_endpoint_config(config_name)
mock_session.describe_endpoint_config.assert_called_once_with(EndpointConfigName=config_name)
assert response == config_name
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_endpoint(self, mock_client):
mock_session = mock.Mock()
attrs = {"describe_endpoint.return_value": "InProgress"}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_endpoint(endpoint_name)
mock_session.describe_endpoint.assert_called_once_with(EndpointName=endpoint_name)
assert response == "InProgress"
def test_secondary_training_status_changed_true(self):
changed = secondary_training_status_changed(
SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_2
)
assert changed
def test_secondary_training_status_changed_false(self):
changed = secondary_training_status_changed(
SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_1
)
assert not changed
def test_secondary_training_status_message_status_changed(self):
now = datetime.now(tzlocal())
SECONDARY_STATUS_DESCRIPTION_1["LastModifiedTime"] = now
expected_time = now.astimezone(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
expected = f"{expected_time} {status} - {message}"
assert (
secondary_training_status_message(SECONDARY_STATUS_DESCRIPTION_1, SECONDARY_STATUS_DESCRIPTION_2)
== expected
)
@mock.patch.object(AwsLogsHook, "conn")
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(time, "monotonic")
def test_describe_training_job_with_logs_in_progress(self, mock_time, mock_client, mock_log_client):
mock_session = mock.Mock()
mock_log_session = mock.Mock()
attrs = {"describe_training_job.return_value": DESCRIBE_TRAINING_COMPLETED_RETURN}
log_attrs = {
"describe_log_streams.side_effect": LIFECYCLE_LOG_STREAMS,
"get_log_events.side_effect": STREAM_LOG_EVENTS,
}
mock_time.return_value = 50
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
mock_log_session.configure_mock(**log_attrs)
mock_log_client.return_value = mock_log_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_training_job_with_log(
job_name=job_name,
positions={},
stream_names=[],
instance_count=1,
state=LogState.WAIT_IN_PROGRESS,
last_description={},
last_describe_job_call=0,
)
assert response == (LogState.JOB_COMPLETE, {}, 50)
@pytest.mark.parametrize("log_state", [LogState.JOB_COMPLETE, LogState.COMPLETE])
@mock.patch.object(AwsLogsHook, "conn")
@mock.patch.object(SageMakerHook, "get_conn")
def test_describe_training_job_with_complete_states(self, mock_client, mock_log_client, log_state):
mock_session = mock.Mock()
mock_log_session = mock.Mock()
attrs = {"describe_training_job.return_value": DESCRIBE_TRAINING_COMPLETED_RETURN}
log_attrs = {
"describe_log_streams.side_effect": LIFECYCLE_LOG_STREAMS,
"get_log_events.side_effect": STREAM_LOG_EVENTS,
}
mock_session.configure_mock(**attrs)
mock_client.return_value = mock_session
mock_log_session.configure_mock(**log_attrs)
mock_log_client.return_value = mock_log_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
response = hook.describe_training_job_with_log(
job_name=job_name,
positions={},
stream_names=[],
instance_count=1,
state=log_state,
last_description={},
last_describe_job_call=0,
)
assert response == (LogState.COMPLETE, {}, 0)
@mock.patch.object(SageMakerHook, "check_training_config")
@mock.patch.object(AwsLogsHook, "conn")
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch.object(SageMakerHook, "describe_training_job_with_log")
@mock.patch("time.sleep", return_value=None)
def test_training_with_logs(self, _, mock_describe, mock_client, mock_log_client, mock_check_training):
mock_check_training.return_value = True
mock_describe.side_effect = [
(LogState.WAIT_IN_PROGRESS, DESCRIBE_TRAINING_INPROGRESS_RETURN, 0),
(LogState.JOB_COMPLETE, DESCRIBE_TRAINING_STOPPING_RETURN, 0),
(LogState.COMPLETE, DESCRIBE_TRAINING_COMPLETED_RETURN, 0),
]
mock_session = mock.Mock()
mock_log_session = mock.Mock()
attrs = {
"create_training_job.return_value": test_arn_return,
"describe_training_job.return_value": DESCRIBE_TRAINING_COMPLETED_RETURN,
}
log_attrs = {
"describe_log_streams.side_effect": LIFECYCLE_LOG_STREAMS,
"get_log_events.side_effect": STREAM_LOG_EVENTS,
}
mock_session.configure_mock(**attrs)
mock_log_session.configure_mock(**log_attrs)
mock_client.return_value = mock_session
mock_log_client.return_value = mock_log_session
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id_1")
hook.create_training_job(
create_training_params, wait_for_completion=True, print_log=True, check_interval=0
)
assert mock_describe.call_count == 3
assert mock_session.describe_training_job.call_count == 1
@mock.patch.object(SageMakerHook, "get_conn")
def test_count_processing_jobs_by_name(self, mock_conn):
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
existing_job_name = "existing_job"
mock_conn().list_processing_jobs.return_value = {
"ProcessingJobSummaries": [{"ProcessingJobName": existing_job_name}]
}
ret = hook.count_processing_jobs_by_name(existing_job_name)
assert ret == 1
@mock.patch.object(SageMakerHook, "get_conn")
def test_count_processing_jobs_by_name_only_counts_actual_hits(self, mock_conn):
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
existing_job_name = "existing_job"
mock_conn().list_processing_jobs.return_value = {
"ProcessingJobSummaries": [
{"ProcessingJobName": existing_job_name},
{"ProcessingJobName": f"contains_but_does_not_start_with_{existing_job_name}"},
{"ProcessingJobName": f"{existing_job_name}_with_different_suffix-123"},
]
}
ret = hook.count_processing_jobs_by_name(existing_job_name)
assert ret == 1
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch("time.sleep", return_value=None)
def test_count_processing_jobs_by_name_retries_on_throttle_exception(self, _, mock_conn):
throttle_exception = ClientError(
error_response={"Error": {"Code": "ThrottlingException"}}, operation_name="empty"
)
successful_result = {"ProcessingJobSummaries": [{"ProcessingJobName": "existing_job"}]}
        # Return a ThrottlingException on the first call, then a mocked successful value on the second.
mock_conn().list_processing_jobs.side_effect = [throttle_exception, successful_result]
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
ret = hook.count_processing_jobs_by_name("existing_job")
assert mock_conn().list_processing_jobs.call_count == 2
assert ret == 1
@mock.patch.object(SageMakerHook, "get_conn")
@mock.patch("time.sleep", return_value=None)
def test_count_processing_jobs_by_name_fails_after_max_retries(self, _, mock_conn):
mock_conn().list_processing_jobs.side_effect = ClientError(
error_response={"Error": {"Code": "ThrottlingException"}}, operation_name="empty"
)
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
retries = 3
with pytest.raises(ClientError) as raised_exception:
hook.count_processing_jobs_by_name("existing_job", retries=retries)
assert mock_conn().list_processing_jobs.call_count == retries + 1
assert raised_exception.value.response["Error"]["Code"] == "ThrottlingException"
@mock.patch.object(SageMakerHook, "get_conn")
def test_count_processing_jobs_by_name_job_not_exists_should_return_falsy(self, mock_conn):
error_resp = {"Error": {"Code": "ResourceNotFound"}}
mock_conn().list_processing_jobs.side_effect = ClientError(
error_response=error_resp, operation_name="empty"
)
hook = SageMakerHook(aws_conn_id="sagemaker_test_conn_id")
ret = hook.count_processing_jobs_by_name("existing_job")
assert ret == 0
@mock_aws
def test_delete_model(self):
hook = SageMakerHook()
with patch.object(hook.conn, "delete_model") as mock_delete:
hook.delete_model(model_name="test")
mock_delete.assert_called_once_with(ModelName="test")
@mock_aws
def test_delete_model_when_not_exist(self):
hook = SageMakerHook()
with pytest.raises(ClientError) as raised_exception:
hook.delete_model(model_name="test")
ex = raised_exception.value
assert ex.operation_name == "DeleteModel"
assert ex.response["ResponseMetadata"]["HTTPStatusCode"] == 404
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_start_pipeline_returns_arn(self, mock_conn):
mock_conn().start_pipeline_execution.return_value = {"PipelineExecutionArn": "hellotest"}
hook = SageMakerHook(aws_conn_id="aws_default")
params_dict = {"one": "1", "two": "2"}
arn = hook.start_pipeline(pipeline_name="test_name", pipeline_params=params_dict)
assert arn == "hellotest"
args_passed = mock_conn().start_pipeline_execution.call_args.kwargs
assert args_passed["PipelineName"] == "test_name"
        # check conversion to the weird format for passing parameters (a list of {"Name": ..., "Value": ...} dicts)
assert len(args_passed["PipelineParameters"]) == 2
for transformed_param in args_passed["PipelineParameters"]:
assert "Name" in transformed_param.keys()
assert "Value" in transformed_param.keys()
# Name contains the key
assert transformed_param["Name"] in params_dict.keys()
# Value contains the value associated with the key in Name
assert transformed_param["Value"] == params_dict[transformed_param["Name"]]
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_stop_pipeline_returns_status(self, mock_conn):
mock_conn().describe_pipeline_execution.return_value = {"PipelineExecutionStatus": "Stopping"}
hook = SageMakerHook(aws_conn_id="aws_default")
pipeline_status = hook.stop_pipeline(pipeline_exec_arn="test")
assert pipeline_status == "Stopping"
mock_conn().stop_pipeline_execution.assert_called_once_with(PipelineExecutionArn="test")
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_stop_pipeline_raises_when_already_stopped_if_specified(self, mock_conn):
error = ClientError(
error_response={
"Error": {"Message": "Only pipelines with 'Executing' status can be stopped", "Code": "0"}
},
operation_name="empty",
)
mock_conn().stop_pipeline_execution.side_effect = error
mock_conn().describe_pipeline_execution.return_value = {"PipelineExecutionStatus": "Stopping"}
hook = SageMakerHook(aws_conn_id="aws_default")
with pytest.raises(ClientError) as raised_exception:
hook.stop_pipeline(pipeline_exec_arn="test", fail_if_not_running=True)
assert raised_exception.value == error
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_stop_pipeline_retries_on_conflict(self, mock_conn):
conflict_error = ClientError(
error_response={"Error": {"Code": "ConflictException"}},
operation_name="empty",
)
mock_conn().stop_pipeline_execution.side_effect = [
conflict_error,
conflict_error,
conflict_error,
conflict_error,
None,
]
hook = SageMakerHook(aws_conn_id="aws_default")
hook.stop_pipeline(pipeline_exec_arn="test")
assert mock_conn().stop_pipeline_execution.call_count == 5
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_stop_pipeline_fails_if_all_retries_error(self, mock_conn):
conflict_error = ClientError(
error_response={"Error": {"Message": "blah", "Code": "ConflictException"}},
operation_name="empty",
)
mock_conn().stop_pipeline_execution.side_effect = conflict_error
hook = SageMakerHook(aws_conn_id="aws_default")
with pytest.raises(ClientError) as raised_exception:
hook.stop_pipeline(pipeline_exec_arn="test")
assert mock_conn().stop_pipeline_execution.call_count == 5
assert raised_exception.value == conflict_error
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_create_model_package_group(self, mock_conn):
created = SageMakerHook().create_model_package_group("group-name")
mock_conn().create_model_package_group.assert_called_once_with(
ModelPackageGroupName="group-name",
ModelPackageGroupDescription="",
)
assert created
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_create_model_package_group_returns_false_if_exists(self, mock_conn):
mock_conn().create_model_package_group.side_effect = ClientError(
error_response={
"Error": {
"Code": "ValidationException",
"Message": "Model Package Group already exists: arn:aws:sagemaker:foo:bar",
}
},
operation_name="empty",
)
hook = SageMakerHook()
created = hook.create_model_package_group("group-name")
assert created is False
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_create_auto_ml_parameter_structure(self, conn_mock):
hook = SageMakerHook()
hook.create_auto_ml_job(
job_name="a",
s3_input="b",
target_attribute="c",
s3_output="d",
role_arn="e",
compressed_input=True,
time_limit=30,
wait_for_completion=False,
)
assert conn_mock().create_auto_ml_job.call_args.kwargs == {
"AutoMLJobConfig": {"CompletionCriteria": {"MaxAutoMLJobRuntimeInSeconds": 30}},
"AutoMLJobName": "a",
"InputDataConfig": [
{
"CompressionType": "Gzip",
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": "b"}},
"TargetAttributeName": "c",
}
],
"OutputDataConfig": {"S3OutputPath": "d"},
"RoleArn": "e",
}
@patch("airflow.providers.amazon.aws.hooks.sagemaker.SageMakerHook.conn", new_callable=mock.PropertyMock)
def test_create_auto_ml_waits_for_completion(self, conn_mock):
hook = SageMakerHook()
conn_mock().describe_auto_ml_job.side_effect = [
{"AutoMLJobStatus": "InProgress", "AutoMLJobSecondaryStatus": "a"},
{"AutoMLJobStatus": "InProgress", "AutoMLJobSecondaryStatus": "b"},
{
"AutoMLJobStatus": "Completed",
"AutoMLJobSecondaryStatus": "c",
"BestCandidate": {"name": "me"},
},
]
ret = hook.create_auto_ml_job("a", "b", "c", "d", "e", check_interval=0)
assert conn_mock().describe_auto_ml_job.call_count == 3
assert ret == {"name": "me"}
|
TestSageMakerHook
|
python
|
pallets__jinja
|
src/jinja2/nodes.py
|
{
"start": 13722,
"end": 13861
}
|
class ____(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ("node",)
node: Node
|
ExprStmt
|
python
|
doocs__leetcode
|
solution/2800-2899/2860.Happy Students/Solution.py
|
{
"start": 0,
"end": 322
}
|
class ____:
def countWays(self, nums: List[int]) -> int:
nums.sort()
n = len(nums)
ans = 0
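        # i = size of the selected group. With nums sorted, picking the i
        # smallest values is optimal: the split is valid iff the largest
        # selected value is < i and the smallest unselected value is > i.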
for i in range(n + 1):
if i and nums[i - 1] >= i:
continue
if i < n and nums[i] <= i:
continue
ans += 1
return ans
|
Solution
|
python
|
dateutil__dateutil
|
tests/test_tz.py
|
{
"start": 22890,
"end": 24553
}
|
class ____(unittest.TestCase):
def testSingleton(self):
UTC_0 = tz.tzutc()
UTC_1 = tz.tzutc()
self.assertIs(UTC_0, UTC_1)
def testOffset(self):
ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc())
self.assertEqual(ct.utcoffset(), timedelta(seconds=0))
def testDst(self):
ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc())
self.assertEqual(ct.dst(), timedelta(seconds=0))
def testTzName(self):
ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc())
self.assertEqual(ct.tzname(), 'UTC')
def testEquality(self):
UTC0 = tz.tzutc()
UTC1 = tz.tzutc()
self.assertEqual(UTC0, UTC1)
def testInequality(self):
UTC = tz.tzutc()
UTCp4 = tz.tzoffset('UTC+4', 14400)
self.assertNotEqual(UTC, UTCp4)
def testInequalityInteger(self):
self.assertFalse(tz.tzutc() == 7)
self.assertNotEqual(tz.tzutc(), 7)
def testInequalityUnsupported(self):
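        # ComparesEqual is presumably a helper whose __eq__ always returns
        # True; tzutc() should return NotImplemented for unsupported types,
        # deferring the comparison to the other operand.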
self.assertEqual(tz.tzutc(), ComparesEqual)
def testRepr(self):
UTC = tz.tzutc()
self.assertEqual(repr(UTC), 'tzutc()')
def testTimeOnlyUTC(self):
# https://github.com/dateutil/dateutil/issues/132
# tzutc doesn't care
tz_utc = tz.tzutc()
self.assertEqual(dt_time(13, 20, tzinfo=tz_utc).utcoffset(),
timedelta(0))
def testAmbiguity(self):
# Pick an arbitrary datetime, this should always return False.
dt = datetime(2011, 9, 1, 2, 30, tzinfo=tz.tzutc())
self.assertFalse(tz.datetime_ambiguous(dt))
@pytest.mark.tzoffset
|
TzUTCTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 969130,
"end": 969608
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of SetUserInteractionLimit"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "user")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
user = sgqlc.types.Field("User", graphql_name="user")
"""The user that the interaction limit was set for."""
|
SetUserInteractionLimitPayload
|
python
|
run-llama__llama_index
|
llama-index-core/tests/evaluation/test_base.py
|
{
"start": 339,
"end": 1965
}
|
class ____(BaseEvaluator):
def __init__(
self,
mock_score: float = 1.0,
mock_passing: bool = True,
mock_feedback: str = "test feedback",
) -> None:
self._mock_score = mock_score
self._mock_passing = mock_passing
self._mock_feedback = mock_feedback
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
return EvaluationResult(
query=query,
contexts=contexts,
response=response,
passing=self._mock_passing,
score=self._mock_score,
feedback=self._mock_feedback,
)
def test_evaluator_basic() -> None:
test_evaluator = MockEvaluator()
eval_result_0 = test_evaluator.evaluate(
query="test query",
response="test response",
contexts=["test context 1", "test context 2"],
)
eval_result_1 = test_evaluator.evaluate_response(
query="test query",
response=Response(
response="test response",
source_nodes=[
NodeWithScore(node=TextNode(text="test context 1"), score=1.0),
NodeWithScore(node=TextNode(text="test context 2"), score=1.0),
],
),
)
assert eval_result_0 == eval_result_1
|
MockEvaluator
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
|
{
"start": 41509,
"end": 46306
}
|
class ____(Glm4vMoePreTrainedModel):
config: Glm4vMoeVisionConfig
input_modalities = ("image", "video")
_no_split_modules = ["Glm4vMoeVisionBlock"]
def __init__(self, config) -> None:
super().__init__(config)
self.spatial_merge_size = config.spatial_merge_size
self.patch_size = config.patch_size
self.embeddings = Glm4vMoeVisionEmbeddings(config)
self.patch_embed = Glm4vMoeVisionPatchEmbed(config)
head_dim = config.hidden_size // config.num_heads
self.rotary_pos_emb = Glm4vMoeVisionRotaryEmbedding(head_dim // 2)
self.blocks = nn.ModuleList([Glm4vMoeVisionBlock(config) for _ in range(config.depth)])
self.merger = Glm4vMoeVisionPatchMerger(
dim=config.out_hidden_size, context_dim=config.intermediate_size, hidden_act=config.hidden_act
)
self.post_conv_layernorm = Glm4vMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.downsample = nn.Conv2d(
in_channels=config.hidden_size,
out_channels=config.out_hidden_size,
kernel_size=config.spatial_merge_size,
stride=config.spatial_merge_size,
)
self.post_layernorm = Glm4vMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.post_init()
def rot_pos_emb(self, grid_thw):
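        # Editor's gloss: build per-patch (h, w) position ids, permuted so
        # that each spatial_merge_size x spatial_merge_size merge window is
        # contiguous, then gather the matching rotary embeddings.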
pos_ids = []
for t, h, w in grid_thw:
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0)
max_grid_size = grid_thw[:, 1:].max()
rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb, pos_ids
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor:
"""
Args:
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
The final hidden states of the model.
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
The temporal, height and width of feature shape of each image in LLM.
Returns:
`torch.Tensor`: hidden_states.
"""
hidden_states = self.patch_embed(hidden_states)
hidden_states = self.post_conv_layernorm(hidden_states)
rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
dim=0,
# Select dtype based on the following factors:
# - FA2 requires that cu_seqlens_q must have dtype int32
# - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
# See https://github.com/huggingface/transformers/pull/34852 for more information
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
hidden_states = self.embeddings(hidden_states, seqlens, grid_thw, image_type_ids[:, 0], image_type_ids[:, 1])
for blk in self.blocks:
hidden_states = blk(
hidden_states,
cu_seqlens=cu_seqlens,
position_embeddings=position_embeddings,
)
hidden_states = self.post_layernorm(hidden_states)
hidden_states = hidden_states.view(
-1, self.spatial_merge_size, self.spatial_merge_size, hidden_states.shape[-1]
)
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.downsample(hidden_states).view(-1, self.config.out_hidden_size)
hidden_states = self.merger(hidden_states)
return hidden_states
@auto_docstring
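# Hedged sketch of the cu_seqlens computation in forward() on a toy grid_thw.
# Two single-frame images of 4x6 and 2x2 patches give sequence lengths 24 and 4.
import torch
import torch.nn.functional as F
grid_thw = torch.tensor([[1, 4, 6], [1, 2, 2]])
seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0])
cu_seqlens = F.pad(seqlens.cumsum(dim=0, dtype=torch.int32), (1, 0), value=0)
# cu_seqlens == tensor([0, 24, 28], dtype=torch.int32): one boundary per image.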
|
Glm4vMoeVisionModel
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_baked.py
|
{
"start": 3225,
"end": 9868
}
|
class ____(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
cls.mapper_registry.map_imperatively(User, cls.tables.users)
def test_first_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
eq_(bq(fixture_session()).first(), None)
def test_first_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
bq += lambda q: q.filter(User.name.like("%ed%")).order_by(User.id)
eq_(bq(fixture_session()).first(), (8,))
def test_one_or_none_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
eq_(bq(fixture_session()).one_or_none(), None)
def test_one_or_none_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "ed")
u1 = bq(fixture_session()).one_or_none()
eq_(u1.name, "ed")
def test_one_or_none_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like("%ed%"))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found when one or none was required",
bq(fixture_session()).one_or_none,
)
def test_one_no_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "asdf")
assert_raises_message(
orm_exc.NoResultFound,
"No row was found when one was required",
bq(fixture_session()).one,
)
def test_one_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name == "ed")
u1 = bq(fixture_session()).one()
eq_(u1.name, "ed")
def test_one_multiple_result(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
bq += lambda q: q.filter(User.name.like("%ed%"))
assert_raises_message(
orm_exc.MultipleResultsFound,
"Multiple rows were found when exactly one was required",
bq(fixture_session()).one,
)
def test_get(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = fixture_session()
def go():
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
self.assert_sql_count(testing.db, go, 1)
u1 = sess.get(User, 7) # noqa
def go():
u2 = bq(sess).get(7)
eq_(u2.name, "jack")
self.assert_sql_count(testing.db, go, 0)
def go():
u2 = bq(sess).get(8)
eq_(u2.name, "ed")
self.assert_sql_count(testing.db, go, 1)
def test_scalar(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User.id))
sess = fixture_session()
bq += lambda q: q.filter(User.id == 7)
eq_(bq(sess).scalar(), 7)
def test_count(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = fixture_session()
eq_(bq(sess).count(), 4)
bq += lambda q: q.filter(User.id.in_([8, 9]))
eq_(bq(sess).count(), 2)
# original query still works
eq_(
{(u.id, u.name) for u in bq(sess).all()},
{(8, "ed"), (9, "fred")},
)
def test_count_with_bindparams(self):
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
sess = fixture_session()
eq_(bq(sess).count(), 4)
bq += lambda q: q.filter(User.name == bindparam("uname"))
# a single bind param, passed by keyword
eq_(bq(sess).params(uname="fred").count(), 1)
# with multiple params, the **kwargs will be used
bq += lambda q: q.filter(User.id == bindparam("an_id"))
eq_(bq(sess).params(uname="fred", an_id=9).count(), 1)
eq_(
# wrong id, so 0 results:
bq(sess).params(uname="fred", an_id=8).count(),
0,
)
def test_get_pk_w_null(self):
"""test the re-implementation of logic to do get with IS NULL."""
class AddressUser:
pass
self.mapper_registry.map_imperatively(
AddressUser,
self.tables.users.outerjoin(self.tables.addresses),
properties={
"id": self.tables.users.c.id,
"address_id": self.tables.addresses.c.id,
},
)
bq = self.bakery(lambda s: s.query(AddressUser))
sess = fixture_session()
def go():
u1 = bq(sess).get((10, None))
eq_(u1.name, "chuck")
self.assert_sql_count(testing.db, go, 1)
u1 = sess.get(AddressUser, (10, None)) # noqa
def go():
u2 = bq(sess).get((10, None))
eq_(u2.name, "chuck")
self.assert_sql_count(testing.db, go, 0)
def test_get_includes_getclause(self):
# test issue #3597
User = self.classes.User
bq = self.bakery(lambda s: s.query(User))
for i in range(5):
sess = fixture_session()
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
sess.close()
eq_(len(bq._bakery), 2)
# simulate race where mapper._get_clause
# may be generated more than once
from sqlalchemy import inspect
del inspect(User).__dict__["_get_clause"]
for i in range(5):
sess = fixture_session()
u1 = bq(sess).get(7)
eq_(u1.name, "jack")
sess.close()
# this went from 4 to 3 as a result of #6055. by giving a name
# to the bind param in mapper._get_clause, while the baked cache
# here grows by one element, the SQL compiled_cache no longer
# changes because the keys of the bindparam() objects are passed
# explicitly as params to the execute() call as a result of
# _load_on_pk_identity() (either the one in baked or the one in
# loading.py), which then puts them
# in column_keys which makes them part of the cache key. These
# were previously anon names, now they are explicit so they
# stay across resets
eq_(len(bq._bakery), 3)
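# Hedged sketch of the baked-query pattern exercised above: the bakery caches
# the compiled query, += adds build steps to the cache key, and bindparam()
# keeps the key stable across runtime values. `User` is assumed to be a mapped
# class as in the fixtures.
from sqlalchemy import bindparam
from sqlalchemy.ext import baked
bakery = baked.bakery()
def lookup_by_name(session, name):
    bq = bakery(lambda s: s.query(User))
    bq += lambda q: q.filter(User.name == bindparam("uname"))
    return bq(session).params(uname=name).one_or_none()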
|
LikeQueryTest
|
python
|
pypa__warehouse
|
tests/unit/email/test_init.py
|
{
"start": 187645,
"end": 198119
}
|
class ____:
def test_send_removed_project_release_file_email_to_owner(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="",
)
result = email.send_removed_project_release_file_email(
pyramid_request,
[stub_user, stub_submitter_user],
file="test-file-0.0.0.tar.gz",
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Owner",
)
assert result == {
"file": "test-file-0.0.0.tar.gz",
"project_name": release.project.name,
"release_version": release.version,
"submitter_name": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "an owner",
}
subject_renderer.assert_(project_name="test_project")
subject_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(file="test-file-0.0.0.tar.gz")
body_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(project_name="test_project")
body_renderer.assert_(submitter_name=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="an owner")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
def test_send_removed_project_release_file_email_to_maintainer(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release-file/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="",
)
result = email.send_removed_project_release_file_email(
pyramid_request,
[stub_user, stub_submitter_user],
file="test-file-0.0.0.tar.gz",
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Maintainer",
)
assert result == {
"file": "test-file-0.0.0.tar.gz",
"project_name": release.project.name,
"release_version": release.version,
"submitter_name": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "a maintainer",
}
subject_renderer.assert_(project_name="test_project")
subject_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(file="test-file-0.0.0.tar.gz")
body_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(project_name="test_project")
body_renderer.assert_(submitter_name=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="a maintainer")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
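# Hedged illustration of the pretend-stub pattern used throughout these tests:
# call_recorder wraps a callable and records each invocation for assertions.
import pretend
delay = pretend.call_recorder(lambda *args, **kwargs: None)
delay("recipient", subject="Email Subject")
assert delay.calls == [pretend.call("recipient", subject="Email Subject")]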
|
TestRemovedReleaseFileEmail
|
python
|
fastai__fastai
|
fastai/layers.py
|
{
"start": 22171,
"end": 24086
}
|
class ____(Module):
"Applies `module` over `tdim` identically for each step, use `low_mem` to compute one at a time."
def __init__(self, module, low_mem=False, tdim=1):
store_attr()
def forward(self, *tensors, **kwargs):
"input x with shape:(bs,seq_len,channels,width,height)"
if self.low_mem or self.tdim!=1:
return self.low_mem_forward(*tensors, **kwargs)
else:
# the fast path only supports tdim=1
inp_shape = tensors[0].shape
bs, seq_len = inp_shape[0], inp_shape[1]
out = self.module(*[x.view(bs*seq_len, *x.shape[2:]) for x in tensors], **kwargs)
return self.format_output(out, bs, seq_len)
def low_mem_forward(self, *tensors, **kwargs):
"input x with shape:(bs,seq_len,channels,width,height)"
seq_len = tensors[0].shape[self.tdim]
args_split = [torch.unbind(x, dim=self.tdim) for x in tensors]
out = []
for i in range(seq_len):
out.append(self.module(*[args[i] for args in args_split], **kwargs))  # kwargs go to the module, not to list.append
if isinstance(out[0], tuple):
return _stack_tups(out, stack_dim=self.tdim)
return torch.stack(out, dim=self.tdim)
def format_output(self, out, bs, seq_len):
"unstack from batchsize outputs"
if isinstance(out, tuple):
return tuple(out_i.view(bs, seq_len, *out_i.shape[1:]) for out_i in out)
return out.view(bs, seq_len,*out.shape[1:])
def __repr__(self):
return f'TimeDistributed({self.module})'
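# Hedged usage sketch: TimeDistributed folds time into the batch dimension so
# the wrapped module runs once over all steps (fast path, tdim=1).
import torch
from torch import nn
tdist = TimeDistributed(nn.Conv2d(3, 8, kernel_size=3, padding=1))
x = torch.randn(2, 5, 3, 16, 16)  # (bs=2, seq_len=5, channels=3, 16x16)
y = tdist(x)  # shape (2, 5, 8, 16, 16)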
# %% ../nbs/01_layers.ipynb 158
from torch.jit import script
# %% ../nbs/01_layers.ipynb 159
@script
def _swish_jit_fwd(x): return x.mul(torch.sigmoid(x))
@script
def _swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
|
TimeDistributed
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/triggers/test_batch.py
|
{
"start": 896,
"end": 1571
}
|
class ____:
def test_serialization(self):
job_id = "test_job_id"
aws_conn_id = "aws_default"
region_name = "us-west-2"
trigger = BatchJobTrigger(
job_id=job_id,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.batch.BatchJobTrigger"
assert kwargs == {
"job_id": "test_job_id",
"waiter_delay": 5,
"waiter_max_attempts": 720,
"aws_conn_id": "aws_default",
"region_name": "us-west-2",
}
|
TestBatchJobTrigger
|
python
|
doocs__leetcode
|
solution/2800-2899/2846.Minimum Edge Weight Equilibrium Queries in a Tree/Solution.py
|
{
"start": 0,
"end": 1445
}
|
class ____:
def minOperationsQueries(
self, n: int, edges: List[List[int]], queries: List[List[int]]
) -> List[int]:
m = n.bit_length()
g = [[] for _ in range(n)]
f = [[0] * m for _ in range(n)]
p = [0] * n
cnt = [None] * n
depth = [0] * n
for u, v, w in edges:
g[u].append((v, w - 1))
g[v].append((u, w - 1))
cnt[0] = [0] * 26
q = deque([0])
while q:
i = q.popleft()
f[i][0] = p[i]
for j in range(1, m):
f[i][j] = f[f[i][j - 1]][j - 1]
for j, w in g[i]:
if j != p[i]:
p[j] = i
cnt[j] = cnt[i][:]
cnt[j][w] += 1
depth[j] = depth[i] + 1
q.append(j)
ans = []
for u, v in queries:
x, y = u, v
if depth[x] < depth[y]:
x, y = y, x
for j in reversed(range(m)):
if depth[x] - depth[y] >= (1 << j):
x = f[x][j]
for j in reversed(range(m)):
if f[x][j] != f[y][j]:
x, y = f[x][j], f[y][j]
if x != y:
x = p[x]
mx = max(cnt[u][j] + cnt[v][j] - 2 * cnt[x][j] for j in range(26))
ans.append(depth[u] + depth[v] - 2 * depth[x] - mx)
return ans
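# Hedged worked example (assumes the file's List/deque imports): path 0-1-2
# with weights 1 and 2; equalizing the two distinct weights on the path needs
# exactly one edge rewrite.
assert Solution().minOperationsQueries(3, [[0, 1, 1], [1, 2, 2]], [[0, 2]]) == [1]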
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/jamba/modular_jamba.py
|
{
"start": 27906,
"end": 29098
}
|
class ____(PreTrainedModel):
config: JambaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_is_stateful = True
_can_record_outputs = {
"hidden_states": [JambaAttentionDecoderLayer, JambaMambaDecoderLayer],
"attentions": JambaAttention,
"router_logits": OutputRecorder(nn.Linear, layer_name="router"),
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, JambaMambaMixer):
A = torch.arange(1, module.ssm_state_size + 1)[None, :]
A = A.expand(module.intermediate_size, -1).contiguous()
init.copy_(module.A_log, torch.log(A))
init.ones_(module.D)
elif isinstance(module, JambaExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@auto_docstring
|
JambaPreTrainedModel
|
python
|
psf__requests
|
src/requests/sessions.py
|
{
"start": 13254,
"end": 30503
}
|
class ____(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('https://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
... s.get('https://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
"headers",
"cookies",
"auth",
"proxies",
"hooks",
"params",
"verify",
"cert",
"adapters",
"stream",
"trust_env",
"max_redirects",
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
#: Defaults to `True`, requiring requests to verify the TLS certificate at the
#: remote end.
#: If verify is set to `False`, requests will accept any TLS certificate
#: presented by the server, and will ignore hostname mismatches and/or
#: expired certificates, which will make your application vulnerable to
#: man-in-the-middle (MitM) attacks.
#: Only set this to `False` for testing.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount("https://", HTTPAdapter())
self.mount("http://", HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies
)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(
request.headers, self.headers, dict_class=CaseInsensitiveDict
),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(
self,
method,
url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None,
json=None,
):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param hooks: (optional) Dictionary mapping hook name to one event or
list of events, event must be callable.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``. When set to
``False``, requests will accept any TLS certificate presented by
the server, and will ignore hostname mismatches and/or expired
certificates, which will make your application vulnerable to
man-in-the-middle (MitM) attacks. Setting verify to ``False``
may be useful during local development or testing.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
"timeout": timeout,
"allow_redirects": allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", True)
return self.request("GET", url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", True)
return self.request("OPTIONS", url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault("allow_redirects", False)
return self.request("HEAD", url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("POST", url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("PUT", url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("PATCH", url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request("DELETE", url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault("stream", self.stream)
kwargs.setdefault("verify", self.verify)
kwargs.setdefault("cert", self.cert)
if "proxies" not in kwargs:
kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError("You can only send PreparedRequests.")
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop("allow_redirects", True)
stream = kwargs.get("stream")
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook("response", hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Resolve redirects if allowed.
if allow_redirects:
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
history = [resp for resp in gen]
else:
history = []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(
self.resolve_redirects(r, request, yield_requests=True, **kwargs)
)
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get("no_proxy") if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for k, v in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration
# and be compatible with cURL.
if verify is True or verify is None:
verify = (
os.environ.get("REQUESTS_CA_BUNDLE")
or os.environ.get("CURL_CA_BUNDLE")
or verify
)
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for prefix, adapter in self.adapters.items():
if url.lower().startswith(prefix.lower()):
return adapter
# Nothing matches :-/
raise InvalidSchema(f"No connection adapters were found for {url!r}")
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by prefix length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
def __getstate__(self):
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
.. deprecated:: 1.0.0
This method has been deprecated since version 1.0.0 and is only kept for
backwards compatibility. New code should use :class:`~requests.sessions.Session`
to create a session. This may be removed at a future date.
:rtype: Session
"""
return Session()
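# Hedged sketch of adapter resolution: mount() keeps self.adapters ordered by
# descending prefix length, so get_adapter() returns the most specific match.
s = Session()
s.mount("https://api.example.com/", HTTPAdapter())  # hypothetical host prefix
adapter = s.get_adapter("https://api.example.com/v1/users")  # the new adapter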
|
Session
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/nn_test.py
|
{
"start": 63070,
"end": 66828
}
|
class ____(test_lib.TestCase):
def test1DTensor(self):
x = array_ops.ones([3, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test1DNumpy(self):
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
# np.ones defaults to using float64 when dtype is not explicitly specified
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.ones([3, 6, 5], dtype=dtype)
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool1d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test1DNumpyWithGolden(self):
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.array([[[3], [6], [5]], [[1], [0], [1]]], dtype=dtype)
ksize = 2
strides = 1
y = nn_ops.max_pool1d(x, ksize, strides, "SAME")
expected_y = np.array([[[6], [6], [5]], [[1], [1], [1]]], dtype=dtype)
self.assertAllEqual(self.evaluate(y), expected_y)
def test2DTensor(self):
x = array_ops.ones([3, 6, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test2DNumpy(self):
# explicitly use float32 for ROCm, as MIOpen does not yet support float64
# np.ones defaults to using float64 when dtype is not explicitly specified
dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
x = np.ones([3, 6, 6, 5], dtype=dtype)
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test3DTensor(self):
x = array_ops.ones([3, 7, 6, 6, 5])
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def test3DNumpy(self):
x = np.ones([3, 7, 6, 6, 5], dtype=np.float32)
ksize = 2
strides = 2
y1 = nn_ops.max_pool_v2(x, ksize, strides, "SAME")
y2 = nn_ops.max_pool3d(x, ksize, strides, "SAME")
self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
def testIncorrectSizeInputSmall(self):
x = array_ops.ones([3, 4])
with self.assertRaisesRegex(
ValueError,
"`input.shape.rank` must be 3, 4 or 5.*of rank 2."):
nn_ops.max_pool_v2(x, 2, 2, "SAME")
def testIncorrectSizeInput(self):
x = array_ops.ones([3, 4, 1, 2, 1, 2])
with self.assertRaisesRegex(
ValueError,
"`input.shape.rank` must be 3, 4 or 5.*of rank 6."):
nn_ops.max_pool_v2(x, 2, 2, "SAME")
@test_util.disable_xla("XLA catches the error and rethrows as different one")
def testIncorrectKSize(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError, "Sliding window ksize must be positive."
):
op = nn_ops.max_pool_v2(
array_ops.ones([3, 4, 4, 5]), [1, -1, -1, 1], 2, "SAME"
)
with test_util.use_gpu():
self.evaluate(op)
ksize = sys.maxsize + 100 # Set to a value larger than sys.maxsize
with self.assertRaises(
OverflowError if context.executing_eagerly() else ValueError
):
op = nn_ops.max_pool_v2(
array_ops.ones([3, 4, 4, 5]), ksize=ksize, strides=2, padding="SAME"
)
with test_util.use_gpu():
self.evaluate(op)
@test_util.run_all_in_graph_and_eager_modes
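# Hedged illustration of the rank dispatch exercised above: max_pool_v2 routes
# rank-3 inputs to 1D pooling, rank-4 to 2D and rank-5 to 3D.
import numpy as np
x = np.ones([3, 6, 5], dtype=np.float32)  # rank 3 -> the max_pool1d path
y = nn_ops.max_pool_v2(x, ksize=2, strides=2, padding="SAME")  # shape (3, 3, 5)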
|
MaxPoolTest
|
python
|
ApeWorX__ape
|
src/ape/plugins/compiler.py
|
{
"start": 147,
"end": 966
}
|
class ____(PluginType):
"""
A plugin that implements the :class:`ape.api.CompilerAPI`, such
as the `ape-solidity plugin <https://github.com/ApeWorX/ape-solidity>`__
or the `ape-vyper plugin <https://github.com/ApeWorX/ape-vyper>`__.
"""
@hookspec
def register_compiler( # type: ignore[empty-body]
self,
) -> tuple[tuple[str], type["CompilerAPI"]]:
"""
A hook for returning the set of file extensions the plugin handles
and the compiler class that can be used to compile them.
Usage example::
@plugins.register(plugins.CompilerPlugin)
def register_compiler():
return (".json",), InterfaceCompiler
Returns:
tuple[tuple[str], type[:class:`~ape.api.CompilerAPI`]]
"""
|
CompilerPlugin
|
python
|
getsentry__sentry
|
fixtures/page_objects/base.py
|
{
"start": 409,
"end": 494
}
|
class ____:
def __init__(self, element):
self.element = element
|
BaseElement
|
python
|
chroma-core__chroma
|
chromadb/db/impl/sqlite.py
|
{
"start": 1885,
"end": 9426
}
|
class ____(MigratableDB, SqlEmbeddingsQueue, SqlSysDB):
_conn_pool: Pool
_settings: Settings
_migration_imports: Sequence[Traversable]
_db_file: str
_tx_stack: local
_is_persistent: bool
def __init__(self, system: System):
self._settings = system.settings
self._migration_imports = [
files("chromadb.migrations.embeddings_queue"),
files("chromadb.migrations.sysdb"),
files("chromadb.migrations.metadb"),
]
self._is_persistent = self._settings.require("is_persistent")
self._opentelemetry_client = system.require(OpenTelemetryClient)
if not self._is_persistent:
# In order to allow sqlite to be shared between multiple threads, we need to use a
# URI connection string with shared cache.
# See https://www.sqlite.org/sharedcache.html
# https://stackoverflow.com/questions/3315046/sharing-a-memory-database-between-different-threads-in-python-using-sqlite3-pa
self._db_file = "file::memory:?cache=shared"
self._conn_pool = LockPool(self._db_file, is_uri=True)
else:
self._db_file = (
self._settings.require("persist_directory") + "/chroma.sqlite3"
)
if not os.path.exists(self._db_file):
os.makedirs(os.path.dirname(self._db_file), exist_ok=True)
self._conn_pool = PerThreadPool(self._db_file)
self._tx_stack = local()
super().__init__(system)
@trace_method("SqliteDB.start", OpenTelemetryGranularity.ALL)
@override
def start(self) -> None:
super().start()
with self.tx() as cur:
cur.execute("PRAGMA foreign_keys = ON")
cur.execute("PRAGMA case_sensitive_like = ON")
self.initialize_migrations()
if (
# (don't attempt to access .config if migrations haven't been run)
self._settings.require("migrations") == "apply"
and self.config.get_parameter("automatically_purge").value is False
):
logger.warning(
"⚠️ It looks like you upgraded from a version below 0.5.6 and could benefit from vacuuming your database. Run chromadb utils vacuum --help for more information."
)
@trace_method("SqliteDB.stop", OpenTelemetryGranularity.ALL)
@override
def stop(self) -> None:
super().stop()
self._conn_pool.close()
@staticmethod
@override
def querybuilder() -> Type[pypika.Query]:
return pypika.Query # type: ignore
@staticmethod
@override
def parameter_format() -> str:
return "?"
@staticmethod
@override
def migration_scope() -> str:
return "sqlite"
@override
def migration_dirs(self) -> Sequence[Traversable]:
return self._migration_imports
@override
def tx(self) -> TxWrapper:
if not hasattr(self._tx_stack, "stack"):
self._tx_stack.stack = []
return TxWrapper(self._conn_pool, stack=self._tx_stack)
@trace_method("SqliteDB.reset_state", OpenTelemetryGranularity.ALL)
@override
def reset_state(self) -> None:
if not self._settings.require("allow_reset"):
raise ValueError(
"Resetting the database is not allowed. Set `allow_reset` to true in the config in tests or other non-production environments where reset should be permitted."
)
with self.tx() as cur:
# Drop all tables
cur.execute(
"""
SELECT name FROM sqlite_master
WHERE type='table'
"""
)
for row in cur.fetchall():
cur.execute(f"DROP TABLE IF EXISTS {row[0]}")
self._conn_pool.close()
self.start()
super().reset_state()
@trace_method("SqliteDB.setup_migrations", OpenTelemetryGranularity.ALL)
@override
def setup_migrations(self) -> None:
with self.tx() as cur:
cur.execute(
"""
CREATE TABLE IF NOT EXISTS migrations (
dir TEXT NOT NULL,
version INTEGER NOT NULL,
filename TEXT NOT NULL,
sql TEXT NOT NULL,
hash TEXT NOT NULL,
PRIMARY KEY (dir, version)
)
"""
)
@trace_method("SqliteDB.migrations_initialized", OpenTelemetryGranularity.ALL)
@override
def migrations_initialized(self) -> bool:
with self.tx() as cur:
cur.execute(
"""SELECT count(*) FROM sqlite_master
WHERE type='table' AND name='migrations'"""
)
return cur.fetchone()[0] != 0
@trace_method("SqliteDB.db_migrations", OpenTelemetryGranularity.ALL)
@override
def db_migrations(self, dir: Traversable) -> Sequence[Migration]:
with self.tx() as cur:
cur.execute(
"""
SELECT dir, version, filename, sql, hash
FROM migrations
WHERE dir = ?
ORDER BY version ASC
""",
(dir.name,),
)
migrations = []
for row in cur.fetchall():
found_dir = cast(str, row[0])
found_version = cast(int, row[1])
found_filename = cast(str, row[2])
found_sql = cast(str, row[3])
found_hash = cast(str, row[4])
migrations.append(
Migration(
dir=found_dir,
version=found_version,
filename=found_filename,
sql=found_sql,
hash=found_hash,
scope=self.migration_scope(),
)
)
return migrations
@override
def apply_migration(self, cur: base.Cursor, migration: Migration) -> None:
cur.executescript(migration["sql"])
cur.execute(
"""
INSERT INTO migrations (dir, version, filename, sql, hash)
VALUES (?, ?, ?, ?, ?)
""",
(
migration["dir"],
migration["version"],
migration["filename"],
migration["sql"],
migration["hash"],
),
)
@staticmethod
@override
def uuid_from_db(value: Optional[Any]) -> Optional[UUID]:
return UUID(value) if value is not None else None
@staticmethod
@override
def uuid_to_db(uuid: Optional[UUID]) -> Optional[Any]:
return str(uuid) if uuid is not None else None
@staticmethod
@override
def unique_constraint_error() -> Type[BaseException]:
return sqlite3.IntegrityError
def vacuum(self, timeout: int = 5) -> None:
"""Runs VACUUM on the database. `timeout` is the maximum time to wait for an exclusive lock in seconds."""
conn = self._conn_pool.connect()
conn.execute(f"PRAGMA busy_timeout = {int(timeout) * 1000}")
conn.execute("VACUUM")
conn.execute(
"""
INSERT INTO maintenance_log (operation, timestamp)
VALUES ('vacuum', CURRENT_TIMESTAMP)
"""
)
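# Hedged usage sketch of the tx() helper above; `system` stands in for a
# configured chromadb System instance (assumption, not shown in this span).
db = SqliteDB(system)
db.start()
with db.tx() as cur:
    cur.execute("SELECT count(*) FROM migrations")
    n_migrations = cur.fetchone()[0]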
|
SqliteDB
|
python
|
huggingface__transformers
|
tests/models/idefics/test_image_processing_idefics.py
|
{
"start": 4405,
"end": 7734
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = IdeficsImageProcessor if is_vision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = IdeficsImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "image_size"))
def test_image_processor_from_dict_with_kwargs(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertNotEqual(image_processor.image_size, 30)
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, image_size=42)
self.assertEqual(image_processor.image_size, 42)
@require_torchvision
def test_torchvision_numpy_transforms_equivalency(self):
# as we had to reimplement the torchvision transforms using transformers utils we must check
# they both do the same
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
image_processor = self.image_processing_class(**self.image_processor_dict, return_tensors="pt")
def convert_to_rgb(image):
# a plain `image.convert("RGB")` only suffices for images without an alpha
# channel (e.g. .jpg); for transparent images it would produce a wrong
# background. The call to `alpha_composite` below handles that case
if image.mode == "RGB":
return image
image_rgba = image.convert("RGBA")
background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
alpha_composite = Image.alpha_composite(background, image_rgba)
alpha_composite = alpha_composite.convert("RGB")
return alpha_composite
image_size = image_processor.image_size
image_mean = image_processor.image_mean
image_std = image_processor.image_std
transform = transforms.Compose(
[
convert_to_rgb,
transforms.Resize((image_size, image_size), interpolation=transforms.InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize(mean=image_mean, std=image_std),
]
)
pixel_values_transform_implied = image_processor(image_inputs, transform=None, return_tensors="pt")
pixel_values_transform_supplied = image_processor(image_inputs, transform=transform, return_tensors="pt")
torch.testing.assert_close(pixel_values_transform_implied, pixel_values_transform_supplied, rtol=0.0, atol=0.0)
@unittest.skip(reason="not supported")
def test_call_numpy(self):
pass
@unittest.skip(reason="not supported")
def test_call_numpy_4_channels(self):
pass
@unittest.skip(reason="not supported")
def test_call_pil(self):
pass
@unittest.skip(reason="not supported")
def test_call_pytorch(self):
pass
|
IdeficsImageProcessingTest
|
python
|
vyperlang__vyper
|
vyper/venom/analysis/mem_ssa.py
|
{
"start": 2590,
"end": 2997
}
|
class ____(MemoryAccess):
"""Represents a phi node for memory states"""
def __init__(self, id: int, block: IRBasicBlock):
super().__init__(id)
self.block = block
self.operands: list[tuple[MemoryPhiOperand, IRBasicBlock]] = []
# Type aliases for signatures in this module
MemoryDefOrUse = MemoryDef | MemoryUse
MemoryPhiOperand = MemoryDef | MemoryPhi | LiveOnEntry
|
MemoryPhi
|
python
|
doocs__leetcode
|
solution/0700-0799/0764.Largest Plus Sign/Solution.py
|
{
"start": 0,
"end": 741
}
|
class ____:
def orderOfLargestPlusSign(self, n: int, mines: List[List[int]]) -> int:
dp = [[n] * n for _ in range(n)]
for x, y in mines:
dp[x][y] = 0
for i in range(n):
left = right = up = down = 0
for j, k in zip(range(n), reversed(range(n))):
left = left + 1 if dp[i][j] else 0
right = right + 1 if dp[i][k] else 0
up = up + 1 if dp[j][i] else 0
down = down + 1 if dp[k][i] else 0
dp[i][j] = min(dp[i][j], left)
dp[i][k] = min(dp[i][k], right)
dp[j][i] = min(dp[j][i], up)
dp[k][i] = min(dp[k][i], down)
return max(max(v) for v in dp)
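# Hedged worked example: n = 3 with a mine at the centre leaves only
# order-1 plus signs centred on the remaining cells.
assert Solution().orderOfLargestPlusSign(3, [[1, 1]]) == 1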
|
Solution
|
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/experimental/benchmarks/pdsds.py
|
{
"start": 1716,
"end": 1847
}
|
class ____(metaclass=PDSDSQueriesMeta):
"""Base class for query loading."""
q_impl: str
name: str = "pdsds"
|
PDSDSQueries
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/halide.py
|
{
"start": 61617,
"end": 63426
}
|
class ____(SIMDScheduling):
kernel_type = HalideKernel # type: ignore[arg-type,assignment]
@classmethod
def get_backend_features(cls, device: torch.device) -> OrderedSet[BackendFeature]:
result = OrderedSet(
[
BackendFeature.TUPLE_REDUCTION,
BackendFeature.PREFER_STORE_LOOP_ORDER,
BackendFeature.REDUCE_TO_SINGLE_ELEMENT,
]
)
if config.halide.scan_kernels:
result.add(BackendFeature.SCAN)
return result
def define_kernel(self, src_code, node_schedule, kernel):
"""Codegen kernel definition to go in output wrapper code"""
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
kernel_name = f"halide_kernel_{wrapper.next_kernel_suffix()}"
wrapper.src_to_kernel[src_code] = kernel_name
wrapper.add_import_once(
"from torch._inductor.runtime.hints import HalideMeta, HalideInputSpec"
)
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline(
f"async_compile.halide({kernel.halide_kernel_meta()!r}, '''"
)
compile_wrapper.splice(src_code, strip=True)
compile_wrapper.writeline("''')")
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment = f"{origins}\n{detailed_origins}"
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
if is_metric_table_enabled("kernel_metadata"):
log_kernel_metadata(kernel_name, "", src_code)
return kernel_name
|
HalideScheduling
|
python
|
pydata__xarray
|
xarray/core/indexes.py
|
{
"start": 996,
"end": 22711
}
|
class ____:
"""
Base class inherited by all xarray-compatible indexes.
Do not use this class directly for creating index objects. Xarray indexes
are created exclusively from subclasses of ``Index``, mostly via Xarray's
public API like ``Dataset.set_xindex``.
Every subclass must at least implement :py:meth:`Index.from_variables`. The
(re)implementation of the other methods of this base class is optional but
mostly required in order to support operations relying on indexes such as
label-based selection or alignment.
The ``Index`` API closely follows the :py:meth:`Dataset` and
:py:meth:`DataArray` API, e.g., for an index to support ``.sel()`` it needs
to implement :py:meth:`Index.sel`, to support ``.stack()`` and
``.unstack()`` it needs to implement :py:meth:`Index.stack` and
:py:meth:`Index.unstack`, etc.
When a method is not (re)implemented, depending on the case the
corresponding operation on a :py:meth:`Dataset` or :py:meth:`DataArray`
either will raise a ``NotImplementedError`` or will simply drop/pass/copy
the index from/to the result.
"""
@classmethod
def from_variables(
cls,
variables: Mapping[Any, Variable],
*,
options: Mapping[str, Any],
) -> Self:
"""Create a new index object from one or more coordinate variables.
This factory method must be implemented in all subclasses of Index.
The coordinate variables may be passed here in an arbitrary number and
order and each with arbitrary dimensions. It is the responsibility of
the index to check the consistency and validity of these coordinates.
Parameters
----------
variables : dict-like
Mapping of :py:class:`Variable` objects holding the coordinate labels
to index.
Returns
-------
index : Index
A new Index object.
"""
raise NotImplementedError()
@classmethod
def concat(
cls,
indexes: Sequence[Self],
dim: Hashable,
positions: Iterable[Iterable[int]] | None = None,
) -> Self:
"""Create a new index by concatenating one or more indexes of the same
type.
Implementation is optional but required in order to support
``concat``. Otherwise it will raise an error if the index needs to be
updated during the operation.
Parameters
----------
indexes : sequence of Index objects
Indexes objects to concatenate together. All objects must be of the
same type.
dim : Hashable
Name of the dimension to concatenate along.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
Returns
-------
index : Index
A new Index object.
"""
raise NotImplementedError()
@classmethod
def stack(cls, variables: Mapping[Any, Variable], dim: Hashable) -> Self:
"""Create a new index by stacking coordinate variables into a single new
dimension.
Implementation is optional but required in order to support ``stack``.
Otherwise it will raise an error when trying to pass the Index subclass
as argument to :py:meth:`Dataset.stack`.
Parameters
----------
variables : dict-like
Mapping of :py:class:`Variable` objects to stack together.
dim : Hashable
Name of the new, stacked dimension.
Returns
-------
index
A new Index object.
"""
raise NotImplementedError(
f"{cls!r} cannot be used for creating an index of stacked coordinates"
)
def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]:
"""Unstack a (multi-)index into multiple (single) indexes.
Implementation is optional but required in order to support unstacking
the coordinates from which this index has been built.
Returns
-------
indexes : tuple
A 2-length tuple where the 1st item is a dictionary of unstacked
Index objects and the 2nd item is a :py:class:`pandas.MultiIndex`
object used to unstack unindexed coordinate variables or data
variables.
"""
raise NotImplementedError()
def create_variables(
self, variables: Mapping[Any, Variable] | None = None
) -> IndexVars:
"""Maybe create new coordinate variables from this index.
This method is useful if the index data can be reused as coordinate
variable data. It is often the case when the underlying index structure
has an array-like interface, like :py:class:`pandas.Index` objects.
The variables given as argument (if any) are either returned as-is
(default behavior) or can be used to copy their metadata (attributes and
encoding) into the new returned coordinate variables.
Note: the input variables may or may not have been filtered for this
index.
Parameters
----------
variables : dict-like, optional
Mapping of :py:class:`Variable` objects.
Returns
-------
index_variables : dict-like
Dictionary of :py:class:`Variable` or :py:class:`IndexVariable`
objects.
"""
if variables is not None:
# pass through
return dict(**variables)
else:
return {}
def should_add_coord_to_array(
self,
name: Hashable,
var: Variable,
dims: set[Hashable],
) -> bool:
"""Define whether or not an index coordinate variable should be added to
a new DataArray.
This method is called repeatedly for each Variable associated with this
index when creating a new DataArray (via its constructor or from a
Dataset) or updating an existing one. The variables associated with this
index are the ones passed to :py:meth:`Index.from_variables` and/or
returned by :py:meth:`Index.create_variables`.
By default returns ``True`` if the dimensions of the coordinate variable
are a subset of the array dimensions and ``False`` otherwise (DataArray
model). This default behavior may be overridden in Index subclasses to
bypass strict conformance with the DataArray model. This is useful for
example to include the (n+1)-dimensional cell boundary coordinate
associated with an interval index.
Returning ``False`` will either:
- raise a :py:class:`CoordinateValidationError` when passing the
coordinate directly to a new or an existing DataArray, e.g., via
``DataArray.__init__()`` or ``DataArray.assign_coords()``
- drop the coordinate (and therefore drop the index) when a new
DataArray is constructed by indexing a Dataset
Parameters
----------
name : Hashable
Name of a coordinate variable associated to this index.
var : Variable
Coordinate variable object.
dims : set of Hashable
Dimensions of the new DataArray object being created.
"""
return all(d in dims for d in var.dims)
def to_pandas_index(self) -> pd.Index:
"""Cast this xarray index to a pandas.Index object or raise a
``TypeError`` if this is not supported.
This method is used by all xarray operations that still rely on
pandas.Index objects.
By default it raises a ``TypeError``, unless it is re-implemented in
subclasses of Index.
"""
raise TypeError(f"{self!r} cannot be cast to a pandas.Index object")
def isel(
self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]
) -> Index | None:
"""Maybe returns a new index from the current index itself indexed by
positional indexers.
This method should be re-implemented in subclasses of Index if the
wrapped index structure supports indexing operations. For example,
indexing a ``pandas.Index`` is pretty straightforward as it behaves very
much like an array. By contrast, it may be harder doing so for a
structure like a kd-tree that differs much from a simple array.
If not re-implemented in subclasses of Index, this method returns
``None``, i.e., calling :py:meth:`Dataset.isel` will either drop the
index in the resulting dataset or pass it unchanged if its corresponding
coordinate(s) are not indexed.
Parameters
----------
indexers : dict
A dictionary of positional indexers as passed from
:py:meth:`Dataset.isel` and where the entries have been filtered
for the current index.
Returns
-------
maybe_index : Index
A new Index object or ``None``.
"""
return None
def sel(self, labels: dict[Any, Any]) -> IndexSelResult:
"""Query the index with arbitrary coordinate label indexers.
Implementation is optional but required in order to support label-based
selection. Otherwise it will raise an error when trying to call
:py:meth:`Dataset.sel` with labels for this index coordinates.
Coordinate label indexers can be of many kinds, e.g., scalar, list,
tuple, array-like, slice, :py:class:`Variable`, :py:class:`DataArray`, etc.
It is the responsibility of the index to handle those indexers properly.
Parameters
----------
labels : dict
A dictionary of coordinate label indexers passed from
:py:meth:`Dataset.sel` and where the entries have been filtered
for the current index.
Returns
-------
sel_results : :py:class:`IndexSelResult`
An index query result object that contains dimension positional indexers.
It may also contain new indexes, coordinate variables, etc.
"""
raise NotImplementedError(f"{self!r} doesn't support label-based selection")
def join(self, other: Self, how: JoinOptions = "inner") -> Self:
"""Return a new index from the combination of this index with another
index of the same type.
Implementation is optional but required in order to support alignment.
Parameters
----------
other : Index
The other Index object to combine with this index.
how : str, optional
Method for joining the two indexes (see :py:func:`~xarray.align`).
Returns
-------
joined : Index
A new Index object.
"""
raise NotImplementedError(
f"{self!r} doesn't support alignment with inner/outer join method"
)
def reindex_like(self, other: Self) -> dict[Hashable, Any]:
"""Query the index with another index of the same type.
Implementation is optional but required in order to support alignment.
Parameters
----------
other : Index
The other Index object used to query this index.
Returns
-------
dim_positional_indexers : dict
A dictionary where keys are dimension names and values are positional
indexers.
"""
raise NotImplementedError(f"{self!r} doesn't support re-indexing labels")
@overload
def equals(self, other: Index) -> bool: ...
@overload
def equals(
self, other: Index, *, exclude: frozenset[Hashable] | None = None
) -> bool: ...
def equals(self, other: Index, **kwargs) -> bool:
"""Compare this index with another index of the same type.
Implementation is optional but required in order to support alignment.
Parameters
----------
other : Index
The other Index object to compare with this object.
exclude : frozenset of hashable, optional
Dimensions excluded from checking. It is None by default (i.e.,
when this method is not called in the context of alignment). For an
n-dimensional index this option allows an Index to optionally ignore
any dimension in ``exclude`` when comparing ``self`` with ``other``.
For a 1-dimensional index this kwarg can be safely ignored, as this
method is not called when all of the index's dimensions are also
excluded from alignment (note: the index's dimensions correspond to
the union of the dimensions of all coordinate variables associated
with this index).
Returns
-------
is_equal : bool
``True`` if the indexes are equal, ``False`` otherwise.
"""
raise NotImplementedError()
def roll(self, shifts: Mapping[Any, int]) -> Self | None:
"""Roll this index by an offset along one or more dimensions.
This method can be re-implemented in subclasses of Index, e.g., when the
index can be itself indexed.
If not re-implemented, this method returns ``None``, i.e., calling
:py:meth:`Dataset.roll` will either drop the index in the resulting
dataset or pass it unchanged if its corresponding coordinate(s) are not
rolled.
Parameters
----------
shifts : mapping of hashable to int, optional
A dict with keys matching dimensions and values given
by integers to rotate each of the given dimensions, as passed from
:py:meth:`Dataset.roll`.
Returns
-------
rolled : Index
A new index with rolled data.
"""
return None
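# Illustrative sketch (hypothetical ``index``/``dim`` attributes, pandas-backed):
#
#     def roll(self, shifts):
#         shift = shifts[self.dim] % self.index.size
#         if shift == 0:
#             return self
#         rolled = self.index[-shift:].append(self.index[:-shift])
#         return type(self)(rolled, self.dim)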
def rename(
self,
name_dict: Mapping[Any, Hashable],
dims_dict: Mapping[Any, Hashable],
) -> Self:
"""Maybe update the index with new coordinate and dimension names.
This method should be re-implemented in subclasses of Index if it has
attributes that depend on coordinate or dimension names.
By default (if not re-implemented), it returns the index itself.
Warning: the input names are not filtered for this method; they may
correspond to any variable or dimension of a Dataset or a DataArray.
Parameters
----------
name_dict : dict-like
Mapping of current variable or coordinate names to the desired names,
as passed from :py:meth:`Dataset.rename_vars`.
dims_dict : dict-like
Mapping of current dimension names to the desired names, as passed
from :py:meth:`Dataset.rename_dims`.
Returns
-------
renamed : Index
Index with renamed attributes.
"""
return self
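# Illustrative sketch: an index that stores its dimension name would remap it
# here (hypothetical ``dim`` attribute and ``_replace`` helper):
#
#     def rename(self, name_dict, dims_dict):
#         if self.dim not in dims_dict:
#             return self
#         return self._replace(dim=dims_dict[self.dim])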
def copy(self, deep: bool = True) -> Self:
"""Return a (deep) copy of this index.
Implementation in subclasses of Index is optional. The base class
implements the default (deep) copy semantics.
Parameters
----------
deep : bool, optional
If true (default), a copy of the internal structures
(e.g., wrapped index) is returned with the new object.
Returns
-------
index : Index
A new Index object.
"""
return self._copy(deep=deep)
def __copy__(self) -> Self:
return self.copy(deep=False)
def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Index:
return self._copy(deep=True, memo=memo)
def _copy(self, deep: bool = True, memo: dict[int, Any] | None = None) -> Self:
cls = self.__class__
copied = cls.__new__(cls)
if deep:
for k, v in self.__dict__.items():
setattr(copied, k, copy.deepcopy(v, memo))
else:
copied.__dict__.update(self.__dict__)
return copied
def __getitem__(self, indexer: Any) -> Self:
raise NotImplementedError()
def _repr_inline_(self, max_width: int) -> str:
return self.__class__.__name__
def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:
from xarray.coding.cftimeindex import CFTimeIndex
if len(index) > 0 and index.dtype == "O" and not isinstance(index, CFTimeIndex):
try:
return CFTimeIndex(index)
except (ImportError, TypeError):
return index
else:
return index
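# For example, an object-dtype index of plain strings hits the TypeError branch
# inside CFTimeIndex and is returned unchanged, while an object-dtype index of
# cftime datetimes is wrapped (sketch of the expected behavior):
#
#     _maybe_cast_to_cftimeindex(pd.Index(["a", "b"]))  # -> the same pd.Index back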
def safe_cast_to_index(array: Any) -> pd.Index:
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
from xarray.namedarray.pycompat import to_numpy
if isinstance(array, PandasExtensionArray):
array = pd.Index(array.array)
if isinstance(array, pd.Index):
index = array
elif isinstance(array, DataArray | Variable):
# returns the original multi-index for pandas.MultiIndex level coordinates
index = array._to_index()
elif isinstance(array, Index):
index = array.to_pandas_index()
elif isinstance(array, PandasIndexingAdapter):
index = array.array
else:
kwargs: dict[str, Any] = {}
if hasattr(array, "dtype"):
if array.dtype.kind == "O":
kwargs["dtype"] = "object"
elif array.dtype == "float16":
emit_user_level_warning(
(
"`pandas.Index` does not support the `float16` dtype."
" Casting to `float64` for you, but in the future please"
" manually cast to either `float32` and `float64`."
),
category=DeprecationWarning,
)
kwargs["dtype"] = "float64"
index = pd.Index(to_numpy(array), **kwargs)
return _maybe_cast_to_cftimeindex(index)
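# Minimal usage sketch (behavior follows from the branches above):
#
#     safe_cast_to_index(np.array([1, 2, 3]))                     # plain pd.Index
#     safe_cast_to_index(np.array(["a", 1], dtype=object)).dtype  # stays object
#     safe_cast_to_index(np.array([1.0], dtype="float16"))        # float64 + DeprecationWarning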
def _sanitize_slice_element(x):
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
if not isinstance(x, tuple) and len(np.shape(x)) != 0:
raise ValueError(
f"cannot use non-scalar arrays in a slice for xarray indexing: {x}"
)
if isinstance(x, Variable | DataArray):
x = x.values
if isinstance(x, np.ndarray):
x = x[()]
return x
def _query_slice(index, label, coord_name="", method=None, tolerance=None):
if method is not None or tolerance is not None:
raise NotImplementedError(
"cannot use ``method`` argument if any indexers are slice objects"
)
indexer = index.slice_indexer(
_sanitize_slice_element(label.start),
_sanitize_slice_element(label.stop),
_sanitize_slice_element(label.step),
)
if not isinstance(indexer, slice):
# unlike pandas, in xarray we never want to silently convert a
# slice indexer into an array indexer
raise KeyError(
"cannot represent labeled-based slice indexer for coordinate "
f"{coord_name!r} with a slice over integer positions; the index is "
"unsorted or non-unique"
)
return indexer
def _asarray_tuplesafe(values):
"""
Convert values into a numpy array of at most 1-dimension, while preserving
tuples.
Adapted from pandas.core.common._asarray_tuplesafe
"""
if isinstance(values, tuple):
result = utils.to_0d_object_array(values)
else:
result = np.asarray(values)
if result.ndim == 2:
result = np.empty(len(values), dtype=object)
result[:] = values
return result
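# Sketch of the tuple-preserving behavior:
#
#     _asarray_tuplesafe((1, 2)).shape      # () -- a 0-d object array holding one tuple
#     _asarray_tuplesafe([(1, 2), (3, 4)])  # 1-d object array of two tuples,
#                                           # not a (2, 2) integer array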
def _is_nested_tuple(possible_tuple):
return isinstance(possible_tuple, tuple) and any(
isinstance(value, tuple | list | slice) for value in possible_tuple
)
def normalize_label(value, dtype=None) -> np.ndarray:
if getattr(value, "ndim", 1) <= 1:
value = _asarray_tuplesafe(value)
if dtype is not None and dtype.kind == "f" and value.dtype.kind != "b":
# pd.Index built from coordinate with float precision != 64
# see https://github.com/pydata/xarray/pull/3153 for details
# bypass coercing dtype for boolean indexers (ignore index)
# see https://github.com/pydata/xarray/issues/5727
value = np.asarray(value, dtype=dtype)
return value
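# E.g. for a float32 coordinate, a Python float label is cast down so that
# comparisons against the index values use matching precision:
#
#     normalize_label(0.1, dtype=np.dtype("float32"))  # -> array(0.1, dtype=float32)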
def as_scalar(value: np.ndarray):
# see https://github.com/pydata/xarray/pull/4292 for details
return value[()] if value.dtype.kind in "mM" else value.item()
def get_indexer_nd(index: pd.Index, labels, method=None, tolerance=None) -> np.ndarray:
"""Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional
labels
"""
flat_labels = np.ravel(labels)
if flat_labels.dtype == "float16":
flat_labels = flat_labels.astype("float64")
flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)
indexer = flat_indexer.reshape(labels.shape)
return indexer
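# Sketch: labels may be n-dimensional; positions are computed on the raveled
# labels and reshaped back, e.g.
#
#     idx = pd.Index(["a", "b", "c"])
#     get_indexer_nd(idx, np.array([["a", "c"], ["b", "b"]]))  # -> [[0, 2], [1, 1]]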
T_PandasIndex = TypeVar("T_PandasIndex", bound="PandasIndex")
|
Index
|
python
|
getsentry__sentry
|
tests/sentry/features/test_manager.py
|
{
"start": 2031,
"end": 18305
}
|
class ____(TestCase):
def test_feature_registry(self) -> None:
manager = features.FeatureManager()
assert manager.all() == {}
manager.add("organizations:feature1", OrganizationFeature)
manager.add("projects:feature2", ProjectFeature)
manager.add("projects:feature3", ProjectFeature)
assert set(manager.all(OrganizationFeature).keys()) == {"organizations:feature1"}
assert set(manager.all(ProjectFeature).keys()) == {
"projects:feature2",
"projects:feature3",
}
def test_feature_registry_api_expose(self) -> None:
manager = features.FeatureManager()
assert manager.all() == {}
manager.add("organizations:feature1", OrganizationFeature)
manager.add("organizations:feature2", OrganizationFeature, api_expose=True)
manager.add("organizations:feature3", OrganizationFeature, api_expose=False)
exposed = {"organizations:feature2"}
hidden = {"organizations:feature1", "organizations:feature3"}
assert set(manager.all(OrganizationFeature).keys()) == exposed | hidden
assert (
set(manager.all(feature_type=OrganizationFeature, api_expose_only=True).keys())
== exposed
)
assert (
set(manager.all(feature_type=OrganizationFeature, api_expose_only=False).keys())
== exposed | hidden
)
def test_feature_register_default(self) -> None:
manager = features.FeatureManager()
manager.add("organizations:red-paint", OrganizationFeature, default=False)
assert set(manager.all(OrganizationFeature)) == {"organizations:red-paint"}
assert settings.SENTRY_FEATURES["organizations:red-paint"] is False
# Defaults should not override config data.
feature_config = {
"organizations:red-paint": True,
}
with override_settings(SENTRY_FEATURES=feature_config):
manager = features.FeatureManager()
manager.add("organizations:red-paint", OrganizationFeature, default=False)
assert settings.SENTRY_FEATURES["organizations:red-paint"] is True
def test_handlers(self) -> None:
project_flag = "projects:test_handlers"
test_user = self.create_user()
class TestProjectHandler(features.FeatureHandler):
features = {project_flag}
def __init__(self, true_set, false_set):
self.true_set = frozenset(true_set)
self.false_set = frozenset(false_set)
def has(self, feature, actor, skip_entity: bool | None = False):
assert actor == test_user
if feature.project in self.true_set:
return True
if feature.project in self.false_set:
return False
return None
def batch_has(self, *a, **k):
raise NotImplementedError("unreachable")
p1 = self.create_project()
p2 = self.create_project()
p3 = self.create_project()
p4 = self.create_project()
handlers = [
TestProjectHandler([], []),
TestProjectHandler([p1, p3], []),
TestProjectHandler([], [p2, p3]),
]
manager = features.FeatureManager()
manager.add(project_flag, ProjectFeature)
for handler in handlers:
manager.add_handler(handler)
assert manager.has(project_flag, p1, actor=test_user) is True
assert manager.has(project_flag, p2, actor=test_user) is False
assert manager.has(project_flag, p3, actor=test_user) is True
assert manager.has(project_flag, p4, actor=test_user) is False
assert manager.has_for_batch(
project_flag, mock.sentinel.organization, [p1, p2, p3, p4], actor=test_user
) == {p1: True, p2: False, p3: True, p4: False}
def test_entity_handler(self) -> None:
test_org = self.create_organization()
# Add a registered handler
registered_handler = mock.Mock()
registered_handler.features = ["organizations:feature1"]
manager = features.FeatureManager()
manager.add("organizations:feature1", OrganizationFeature)
# Add the entity handler
entity_handler = mock.Mock()
manager.add("organizations:unregistered-feature", OrganizationFeature)
# Non entity feature
manager.add("organizations:settings-feature", OrganizationFeature)
manager.add_handler(registered_handler)
manager.add_entity_handler(entity_handler)
# A feature with a registered handler shouldn't use the entity handler
assert manager.has("organizations:feature1", test_org)
assert len(entity_handler.has.mock_calls) == 0
assert len(registered_handler.mock_calls) == 1
# The feature isn't registered, so it should try checking the entity_handler
assert manager.has("organizations:unregistered-feature", test_org)
assert len(entity_handler.has.mock_calls) == 1
assert len(registered_handler.mock_calls) == 1
# The feature isn't registered, but let's skip the entity_handler
manager.has("organizations:unregistered-feature", test_org, skip_entity=True)
assert len(entity_handler.has.mock_calls) == 1
assert len(registered_handler.mock_calls) == 1
# The entity_handler doesn't have a response for this feature either, so settings should be checked instead
entity_handler.has.return_value = None
with mock.patch.dict(settings.SENTRY_FEATURES, {"organizations:settings-feature": "test"}):
assert manager.has("organizations:settings-feature", test_org) == "test"
assert len(entity_handler.mock_calls) == 2
def test_entity_handler_has_capture_error(self) -> None:
test_org = self.create_organization()
handler = mock.Mock(spec=features.FeatureHandler)
handler.has.side_effect = Exception("something bad")
handler.features = {"organizations:faulty"}
manager = features.FeatureManager()
manager.add("organizations:faulty", OrganizationFeature)
manager.add_entity_handler(handler)
with (
mock.patch("sentry.features.manager.sentry_sdk.capture_exception") as mock_capture,
override_options({"features.error.capture_rate": 1.0}),
):
res = manager.has("organizations:faulty", test_org)
assert res is False
assert mock_capture.call_count == 1
def test_has_for_batch(self) -> None:
test_user = self.create_user()
test_org = self.create_organization()
projects = [self.create_project(organization=test_org) for i in range(5)]
def create_handler(flags, result):
class OrganizationTestHandler(features.BatchFeatureHandler):
features = set(flags)
def __init__(self):
self.hit_counter = 0
def _check_for_batch(self, feature_name, organization, actor):
assert feature_name in self.features
assert organization == test_org
assert actor == test_user
self.hit_counter += 1
return result
def batch_has(self, *a, **k):
raise NotImplementedError("unreachable")
return OrganizationTestHandler()
yes_flag = "organizations:yes"
no_flag = "organizations:no"
null_handler = create_handler([yes_flag, no_flag], None)
yes_handler = create_handler([yes_flag], True)
after_yes_handler = create_handler([yes_flag], False)
no_handler = create_handler([no_flag], False)
after_no_handler = create_handler([no_flag], True)
manager = features.FeatureManager()
for flag in (yes_flag, no_flag):
manager.add(flag, OrganizationFeature)
for handler in (null_handler, yes_handler, after_yes_handler, no_handler, after_no_handler):
manager.add_handler(handler)
assert manager.has_for_batch(yes_flag, test_org, projects, actor=test_user) == {
p: True for p in projects
}
assert yes_handler.hit_counter == 1 # as opposed to len(projects)
assert after_yes_handler.hit_counter == 0
assert manager.has_for_batch(no_flag, test_org, projects, actor=test_user) == {
p: False for p in projects
}
assert no_handler.hit_counter == 1
assert after_no_handler.hit_counter == 0
assert null_handler.hit_counter == 2
def test_has_for_batch_capture_error(self) -> None:
org = self.create_organization()
project = self.create_project(organization=org)
handler = mock.Mock(spec=features.BatchFeatureHandler)
handler.features = {"organizations:faulty"}
handler.has_for_batch.side_effect = ValueError("invalid thing")
manager = features.FeatureManager()
manager.add("oragnizations:faulty", OrganizationFeature)
manager.add_handler(handler)
with (
mock.patch("sentry.features.manager.sentry_sdk.capture_exception") as mock_capture,
override_options({"features.error.capture_rate": 1.0}),
):
res = manager.has_for_batch("organizations:faulty", org, [project])
assert res == {}
assert mock_capture.call_count == 1
def test_batch_has(self) -> None:
manager = features.FeatureManager()
manager.add("auth:register")
manager.add("organizations:feature", OrganizationFeature)
manager.add("projects:feature", ProjectFeature)
manager.add_entity_handler(MockBatchHandler())
ret = manager.batch_has(["auth:register"], actor=self.user)
assert ret is not None
assert ret["unscoped"]["auth:register"]
ret = manager.batch_has(
["organizations:feature"], actor=self.user, organization=self.organization
)
assert ret is not None
assert ret[f"organization:{self.organization.id}"]["organizations:feature"]
ret = manager.batch_has(["projects:feature"], actor=self.user, projects=[self.project])
assert ret is not None
assert ret[f"project:{self.project.id}"]["projects:feature"]
def test_batch_has_error(self) -> None:
manager = features.FeatureManager()
manager.add("organizations:feature", OrganizationFeature)
manager.add("projects:feature", ProjectFeature)
handler = mock.Mock(spec=features.FeatureHandler)
handler.batch_has.side_effect = Exception("something bad")
manager.add_entity_handler(handler)
with (
mock.patch("sentry.features.manager.sentry_sdk.capture_exception") as mock_capture,
override_options({"features.error.capture_rate": 1.0}),
):
ret = manager.batch_has(["auth:register"], actor=self.user)
assert ret is None
assert mock_capture.call_count == 1
def test_batch_has_no_entity(self) -> None:
manager = features.FeatureManager()
manager.add("auth:register")
manager.add("organizations:feature", OrganizationFeature)
manager.add("projects:feature", ProjectFeature)
manager.add_handler(MockBatchHandler())
ret = manager.batch_has(["auth:register"], actor=self.user)
assert ret is not None
assert ret["unscoped"]["auth:register"]
ret = manager.batch_has(
["organizations:feature"], actor=self.user, organization=self.organization
)
assert ret is not None
assert ret[f"organization:{self.organization.id}"]["organizations:feature"]
ret = manager.batch_has(["projects:feature"], actor=self.user, projects=[self.project])
assert ret is not None
assert ret[f"project:{self.project.id}"]["projects:feature"]
def test_batch_has_no_entity_multiple_projects(self) -> None:
manager = features.FeatureManager()
manager.add("projects:feature", ProjectFeature)
manager.add_handler(MockBatchHandler())
projects = [self.project, self.create_project()]
result = manager.batch_has(["projects:feature"], actor=self.user, projects=projects)
assert result is not None
for project in projects:
assert result[f"project:{project.id}"]["projects:feature"]
def test_batch_has_for_organizations(self) -> None:
manager = features.FeatureManager()
manager.add("organizations:feature", OrganizationFeature)
manager.add_entity_handler(MockBatchHandler())
organizations = [self.organization, self.create_organization()]
result = manager.batch_has_for_organizations("organizations:feature", organizations)
assert result is not None
for org in organizations:
assert result[f"organization:{org.id}"]
def test_batch_has_for_organizations_no_entity_handler(self) -> None:
# Deliberately do NOT define batch_has_for_organizations
class NoBatchOrgHandler(features.BatchFeatureHandler):
features = {"organizations:feature"}
def has(self, feature, actor, skip_entity: bool | None = False):
return feature.name in self.features
def batch_has(
self, feature_names, *args: Any, projects=None, organization=None, **kwargs: Any
):
feature_results = {
feature_name: True
for feature_name in feature_names
if feature_name in self.features
}
if projects:
return {f"project:{project.id}": feature_results for project in projects}
if organization:
return {f"organization:{organization.id}": feature_results}
return {"unscoped": feature_results}
def _check_for_batch(self, feature_name, organization, actor):
raise NotImplementedError
manager = features.FeatureManager()
manager.add("organizations:feature", OrganizationFeature)
manager.add_handler(NoBatchOrgHandler())
organizations = [self.organization, self.create_organization()]
result = manager.batch_has_for_organizations("organizations:feature", organizations)
assert result is not None
for org in organizations:
assert result[f"organization:{org.id}"] is True
def test_has(self) -> None:
manager = features.FeatureManager()
manager.add("auth:register")
manager.add("organizations:feature", OrganizationFeature)
manager.add("projects:feature", ProjectFeature)
manager.add_handler(MockBatchHandler())
assert manager.has("organizations:feature", actor=self.user, organization=self.organization)
assert manager.has("projects:feature", actor=self.user, project=self.project)
assert manager.has("auth:register", actor=self.user)
def test_entity_feature_shim(self) -> None:
manager = features.FeatureManager()
manager.add("feat:1", OrganizationFeature)
manager.add("feat:2", OrganizationFeature, False)
manager.add("feat:3", OrganizationFeature, FeatureHandlerStrategy.INTERNAL)
manager.add("feat:4", OrganizationFeature, True)
manager.add("feat:5", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE)
assert "feat:1" not in manager.entity_features
assert "feat:2" not in manager.entity_features
assert "feat:3" not in manager.entity_features
assert "feat:4" in manager.entity_features
assert "feat:5" in manager.entity_features
def test_all(self) -> None:
manager = features.FeatureManager()
manager.add("feat:org", OrganizationFeature)
manager.add("feat:project", ProjectFeature, False)
manager.add("feat:system", SystemFeature, False)
assert list(manager.all().keys()) == ["feat:org", "feat:project", "feat:system"]
assert list(manager.all(OrganizationFeature).keys()) == ["feat:org"]
|
FeatureManagerTest
|
python
|
dagster-io__dagster
|
python_modules/dagster-test/dagster_test/toys/longitudinal.py
|
{
"start": 1106,
"end": 3959
}
|
class ____(Exception):
"""To distinguish from other errors."""
def make_op(
name,
asset_key=None,
error_rate=None,
data_size_fn=None,
sleep_factor=None,
has_input=False,
):
@op(
name=name,
config_schema={"partition": str},
ins={"the_input": In(dagster_type=Nothing)} if has_input else {},
out=Out(dagster_type=Nothing),
)
def made_op(context):
partition_date = datetime.strptime(context.op_config["partition"], DEFAULT_DATE_FORMAT)
if data_size_fn:
data_size = data_size_fn(partition_date)
sleep_time = sleep_factor * data_size
time.sleep(sleep_time)
rand = random()
if error_rate and rand < error_rate:
raise IntentionalRandomFailure(f"random {rand} < error rate {error_rate}")
if asset_key:
metadata = {"Data size (bytes)": data_size} if data_size_fn else None # pyright: ignore[reportPossiblyUnboundVariable]
yield AssetMaterialization(
asset_key=asset_key,
metadata=metadata,
partition=context.op_config.get("partition"),
)
return made_op
@graph
def longitudinal():
ingest_raw_video_views = make_op(
"ingest_raw_video_views",
asset_key="raw_video_views",
error_rate=0.15,
sleep_factor=SLEEP_INGEST,
data_size_fn=video_views_data_size,
)
update_video_views_table = make_op(
"update_video_views_table",
asset_key="video_views",
has_input=True,
error_rate=0.01,
sleep_factor=SLEEP_PERSIST,
data_size_fn=video_views_data_size,
)
ingest_raw_users = make_op(
"ingest_raw_users",
"raw_users",
error_rate=0.1,
sleep_factor=SLEEP_INGEST,
data_size_fn=users_data_size,
)
update_users_table = make_op(
"update_users_table",
asset_key="users",
has_input=True,
sleep_factor=SLEEP_PERSIST,
data_size_fn=users_data_size,
error_rate=0.01,
)
train_video_recommender_model = make_op(
"train_video_recommender_model",
has_input=True,
sleep_factor=SLEEP_TRAIN,
data_size_fn=combined_data_size,
)
video_views = update_video_views_table(ingest_raw_video_views())
users = update_users_table(ingest_raw_users())
train_video_recommender_model([video_views, users])
longitudinal_job = longitudinal.to_job(
name="longitudinal_no_schedule",
description=(
"Demo job that simulates updating tables of users and video views and training a "
"video recommendation model. The growth of execution-time and data-throughput follows"
"a sigmoidal curve."
),
resource_defs={"io_manager": fs_io_manager},
)
|
IntentionalRandomFailure
|
python
|
huggingface__transformers
|
src/transformers/models/perceiver/modeling_perceiver.py
|
{
"start": 91107,
"end": 98417
}
|
class ____(PerceiverAbstractDecoder):
"""
Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary
mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that
modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are
concatenated along the time dimension.
Next, there is a shared cross attention operation across all modalities.
Args:
config ([*PerceiverConfig*]):
Model configuration.
modalities (`dict[str, PerceiverAbstractDecoder]`):
Dictionary mapping modality name to the decoder of that modality.
num_outputs (`int`):
The number of outputs of the decoder.
output_num_channels (`int`):
The number of channels in the output.
min_padding_size (`int`, *optional*, defaults to 2):
The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
channels across all modalities plus min_padding_size.
subsampled_index_dims (`dict[str, PerceiverAbstractDecoder]`, *optional*):
Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that
modality.
"""
def __init__(
self,
config: PerceiverConfig,
modalities: dict[str, PerceiverAbstractDecoder],
num_outputs: int,
output_num_channels: int,
min_padding_size: Optional[int] = 2,
subsampled_index_dims: Optional[dict[str, PerceiverAbstractDecoder]] = None,
**decoder_kwargs,
) -> None:
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.subsampled_index_dims = subsampled_index_dims
self.min_padding_size = min_padding_size
self.output_num_channels = output_num_channels
self.num_outputs = num_outputs
self.decoder = PerceiverBasicDecoder(
config,
output_index_dims=(num_outputs,),
output_num_channels=output_num_channels,
position_encoding_type="none",
num_channels=self.num_query_channels,
**decoder_kwargs,
)
self.padding = nn.ParameterDict(
{
modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels))
for modality, decoder in modalities.items()
}
)
@property
def num_query_channels(self) -> int:
max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items())
common_channel_size = max_channel_size + self.min_padding_size
return common_channel_size
def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None):
# Partition the flat inputs among the different modalities
inputs = restructure(modality_sizes, inputs)
# Obtain modality-specific decoders' queries
subsampled_points = subsampled_points or {}
decoder_queries = {}
for modality, decoder in self.modalities.items():
# Get input_without_pos for this modality if it exists.
input_without_pos = None
if inputs_without_pos is not None:
input_without_pos = inputs_without_pos.get(modality, None)
query = decoder.decoder_query(
inputs=inputs[modality],
modality_sizes=None,
inputs_without_pos=input_without_pos,
subsampled_points=subsampled_points.get(modality, None),
)
decoder_queries[modality] = query
# Pad all queries with trainable position encodings to make them have the same channels
def embed(modality, x):
x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
pos = self.padding[modality]
pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]])
return torch.cat([x, pos], dim=2)
# Apply a predictable ordering to the modalities
return torch.cat(
[embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1
)
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> torch.Tensor:
# B x 1 x num_classes -> B x num_classes
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
return decoder_outputs
# Below: IO pre- and post-processor classes for Perceiver.
def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor:
"""
Space to depth transform. Rearranges blocks of spatial data, into depth.
This function assumes the channels to be first, but will place the channels last after transformation.
"""
if len(frames.shape) == 4:
batch_size, num_channels, height, width = frames.shape
# split up dimensions (height by spatial_block_size, width by spatial_block_size)
frames = frames.view(
batch_size,
num_channels,
height // spatial_block_size,
spatial_block_size,
width // spatial_block_size,
spatial_block_size,
)
# move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C)
frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous()
# concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C)
frames = frames.view(
batch_size,
height // spatial_block_size,
width // spatial_block_size,
(spatial_block_size**2) * num_channels,
)
return frames
elif len(frames.shape) == 5:
batch_size, time, num_channels, height, width = frames.shape
# split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size)
frames = frames.view(
batch_size,
time // temporal_block_size,
temporal_block_size,
num_channels,
height // spatial_block_size,
spatial_block_size,
width // spatial_block_size,
spatial_block_size,
)
# move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C)
frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
# concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C)
frames = frames.view(
batch_size,
time // temporal_block_size,
height // spatial_block_size,
width // spatial_block_size,
temporal_block_size * (spatial_block_size**2) * num_channels,
)
return frames
else:
raise ValueError(
"Frames should be of rank 4 (batch, channels, height, width)"
" or rank 5 (batch, time, channels, height, width)"
)
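# Shape sketch: a channels-first input of shape (1, 3, 4, 4) with
# spatial_block_size=2 becomes channels-last with shape (1, 2, 2, 12),
# i.e. (batch, H // 2, W // 2, 2 * 2 * C).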
|
PerceiverMultimodalDecoder
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-xor-product.py
|
{
"start": 38,
"end": 404
}
|
class ____(object):
def maximumXorProduct(self, a, b, n):
"""
:type a: int
:type b: int
:type n: int
:rtype: int
"""
MOD = 10**9+7
for i in reversed(xrange(n)):
base = 1<<i
if min(a, b)&base == 0:
a, b = a^base, b^base
return (a%MOD)*(b%MOD)%MOD
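# Worked example: a=12 (0b1100), b=5 (0b0101), n=4. XOR-ing both numbers with
# the same bit leaves a^b unchanged; flipping whenever the smaller operand has
# a 0 there yields a=7, b=14, so the answer is 7*14 % MOD = 98 (LeetCode 2939 sample).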
|
Solution
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 31240,
"end": 54526
}
|
class ____(torch._dynamo.test_case.TestCase):
test_seq = make_test(Seq())
test_basicmodule1 = make_test(BasicModule())
test_basicmodule2 = make_test(BasicModule())
test_submodules1 = make_test(SubmoduleExample())
test_submodules2 = make_test(SubmoduleExample())
test_modulemethod1 = make_test(ModuleMethodCall())
test_modulemethod2 = make_test(ModuleMethodCall())
test_module_call_module_with_static_forward = make_test(
ModuleCallModuleWithStaticForward()
)
test_module_static_method = make_test(ModuleStaticMethodCall())
test_fnmember = make_test(FnMember())
test_fnmembercmp1 = make_test(FnMemberCmp(F.relu))
test_fnmembercmp2 = make_test(FnMemberCmp(None))
test_constloop = make_test(ConstLoop())
test_istraining1 = make_test(IsTrainingCheck())
test_istraining2 = make_test(IsTrainingCheck())
test_iseval1 = make_test(IsEvalCheck())
test_iseval2 = make_test(IsEvalCheck())
test_viamodulecall = make_test(ViaModuleCall())
test_isnonelayer = make_test(IsNoneLayer())
test_layerlist = make_test(LayerList())
test_tensorlist = make_test(TensorList())
test_intarg = make_test(IntArg())
test_cfgmod = make_test(CfgModule())
test_stringmember = make_test(StringMember())
test_modulelist = make_test(ModuleList())
test_modulelist_nested = make_test(NestedModuleList())
test_modulelist_custom = make_test(CustomGetItemModuleList())
test_moduledict = make_test(ModuleDict())
test_moduledict_custom = make_test(CustomGetItemModuleDict())
test_parameterdict = make_test(ParameterDict())
test_parameterdict_custom = make_test(CustomGetItemParameterDict())
test_super1 = make_test(SuperModule())
test_super2 = make_test(SuperModule2())
test_super_class_method = make_test(SuperChildCallsClassMethod())
test_children = make_test(Children())
test_named_children = make_test(NamedChildren())
test_densenet = make_test(DenseNetBlocks())
test_parameters1 = make_test(ParametersModule1())
test_parameters2 = make_test(ParametersModule2())
test_parameters3 = make_test(ParametersModule3(), expected_ops=5)
test_parameters4 = make_test(ParametersModule4())
test_parameters5 = make_test(ParametersModule5())
test_hasattr = make_test(HasAttrModule())
test_enumvalues = make_test(EnumValues())
test_access_by_keys = make_test(AccessByKeys())
test_module_class_method = make_test(ModuleClassMethodCall())
test_module_property = make_test(ModuleProperty())
test_forward_directly = make_test(CallForwardDirectly())
test_module_name_string = make_test(ModuleNameString())
test_module_attribute_precedence = make_test(ModuleAttributePrecedence())
test_module_guard_name_is_valid = make_test(ModuleGuardNameIsValid())
test_sequential_with_duplicated_module = make_test(SequentialWithDuplicatedModule())
test_sequential_with_duplicated_module2 = make_test(
SequentialWithDuplicatedModule2()
)
test_module_comparison = make_test(ModuleComparison())
def test_inject_module_parameters(self):
from collections import OrderedDict
class ZeROOrderedDict(OrderedDict):
def __init__(self, parent_module=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._parent_module = parent_module
def __getitem__(self, key):
param = super().__getitem__(key)
return param
def inject_parameters(module, cls):
for m in module.modules():
if cls == ZeROOrderedDict:
new_param = cls(parent_module=m)
else:
new_param = cls()
for key, param in m._parameters.items():
new_param[key] = param
m._parameters = new_param
model = ParametersModule5()
inject_parameters(model, ZeROOrderedDict)
model = torch.compile(model, backend="inductor")
x = torch.ones(10)
# model can be compiled without error
y = model(x)
def test_module_forward_has_graph_break(self):
m = ModuleForwardHasGraphBreak()
x = torch.rand([10, 10])
ref = m(x)
opt_m = torch.compile(m, backend="eager")
res = opt_m(x)
self.assertTrue(torch.allclose(ref, res))
def test_unsupportedmethod(self):
m = UnsupportedMethodCall()
i = torch.randn(10)
cnt = torch._dynamo.testing.CompileCounter()
opt_m = torch.compile(m, backend=cnt)
r = opt_m(i)
self.assertTrue(torch._dynamo.testing.same(r, m(i)))
self.assertEqual(cnt.op_count, 5)
def test_unsupportedmodule(self):
m = UnsupportedModuleCall()
i = torch.randn(10)
cnt = torch._dynamo.testing.CompileCounter()
opt_m = torch.compile(m, backend=cnt)
r = opt_m(i)
self.assertTrue(torch._dynamo.testing.same(r, m(i)))
self.assertEqual(cnt.op_count, 6)
@patch.object(torch._dynamo.config, "allow_unspec_int_on_nn_module", True)
def test_self_mutating1(self):
m1 = torch.nn.Linear(10, 10)
m2 = SelfMutatingModule(m1)
m3 = SelfMutatingModule(m1)
m4 = SelfMutatingModule(m1)
i = torch.randn(10)
out2 = [m2(i), m2(i), m2(i)]
cnt = torch._dynamo.testing.CompileCounter()
opt_m3 = torch._dynamo.optimize_assert(cnt)(m3)
opt_m4 = torch._dynamo.optimize_assert(cnt)(m4)
out3 = [opt_m3(i), opt_m3(i), opt_m3(i)]
out4 = [opt_m4(i), opt_m4(i), opt_m4(i)]
self.assertTrue(torch._dynamo.testing.same(out2, out3))
self.assertTrue(torch._dynamo.testing.same(out2, out4))
if torch._dynamo.config.assume_static_by_default:
self.assertExpectedInline(cnt.frame_count, """2""")
else:
self.assertExpectedInline(cnt.frame_count, """1""")
def test_nn_module_setattr(self):
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.var = 0
@torch.compile(backend="eager", dynamic=False)
def f(x, m):
return x + m.var
inp = torch.ones(3)
m = Mod()
self.assertEqual(f(inp, m), inp)
# In 3.13.0, setattr does not fire the module __dict__'s watchers,
# so guards may not be invalidated.
m.var = 1
# should trigger a recompile
self.assertEqual(f(inp, m), inp + 1)
@patch.object(torch._dynamo.config, "raise_on_ctx_manager_usage", False)
def test_generation_tag(self):
cnt = torch._dynamo.testing.CompileCounter()
# guarantee that we have installed
# the generation tagging function
with torch._dynamo.optimize_assert(cnt):
pass
m1 = torch.nn.Linear(10, 10)
prev_generation = GenerationTracker.get_generation_value(m1)
cur_generation = prev_generation + 1
with torch._dynamo.optimize_assert(cnt):
m2 = torch.nn.Linear(10, 10)
self.assertEqual(GenerationTracker.get_generation_value(m1), prev_generation)
self.assertEqual(GenerationTracker.get_generation_value(m2), cur_generation)
# check that newly constructed instances
# also have the same generation (even if copied from an old instance)
m3 = deepcopy(m1)
self.assertEqual(GenerationTracker.get_generation_value(m3), cur_generation)
def test_simple_torch_function(self):
def foo(x):
# function call, twice to test wrapping
x = F.sigmoid(x)
x = F.sigmoid(x)
# method call, twice to test wrapping
x = x.sigmoid()
x = x.sigmoid()
return x
TensorProxy = temporary_tensor_subclass()
x = torch.randn(1).as_subclass(TensorProxy)
cnt = torch._dynamo.testing.CompileCounter()
out1 = foo(x)
opt_foo = torch.compile(foo, backend=cnt, fullgraph=True)
out2 = opt_foo(x)
self.assertEqual(cnt.op_count, 4)
self.assertTrue(torch._dynamo.testing.same(out1, out2))
def test_torch_function_with_closure(self):
def run():
def foo(x):
# function call, twice to test wrapping
x = F.sigmoid(x)
x = F.sigmoid(x)
# method call, twice to test wrapping
x = x.sigmoid()
x = x.sigmoid()
return x
counter = 0
def function():
nonlocal counter
# for now, only support reads from closure cells
# TODO(future PR): support writes as well
counter + 1
TensorProxy = temporary_tensor_subclass(function)
x = torch.randn(1).as_subclass(TensorProxy)
x = torch.randn(1)
cnt = torch._dynamo.testing.CompileCounter()
out1 = foo(x)
opt_foo = torch.compile(foo, backend=cnt, fullgraph=True)
out2 = opt_foo(x)
self.assertEqual(cnt.op_count, 4)
self.assertTrue(torch._dynamo.testing.same(out1, out2))
run()
def test_torch_mangled_class_name(self):
original = TensorWithTFOverrideVariable.global_mangled_class_name
results = []
def instrumented(self, tx):
result = original(self, tx)
results.append(result)
return result
TensorWithTFOverrideVariable.global_mangled_class_name = instrumented
def one_break(x):
x = F.sigmoid(x)
print() # force break
x = x.sigmoid()
return x
try:
TensorProxy = temporary_tensor_subclass()
x = torch.randn(1).as_subclass(TensorProxy)
x1 = one_break(x)
cnt = torch._dynamo.testing.CompileCounter()
opt_one_break = torch.compile(one_break, backend=cnt)
x2 = opt_one_break(x)
self.assertTrue(torch._dynamo.testing.same(x1, x2))
self.assertEqual(cnt.frame_count, 2)
self.assertEqual(cnt.op_count, 2)
compile_ids = set()
for r in results:
# A mangled classname looks like __subclass_TensorProxy_94524181138240_c0
# where the last segment contains the compile_id.
prefix = "__subclass_TensorProxy_"
before, sep, after = r.partition(prefix)
self.assertEqual(before, "")
self.assertEqual(sep, prefix)
class_type_id, compile_id = after.split("_")
self.assertTrue(class_type_id.isnumeric())
self.assertTrue(compile_id.startswith("c"))
cid = compile_id[1:]
self.assertTrue(cid.isnumeric())
compile_ids.add(cid)
self.assertEqual(len(compile_ids), 3)
finally:
TensorWithTFOverrideVariable.global_mangled_class_name = original
def test_nn_moduledict_contains(self):
class M(torch.nn.Module):
def __init__(self, module_dict):
super().__init__()
self.module_dict = module_dict
def forward(self, x):
if "foo" in self.module_dict:
x = torch.mul(x, 1.0)
x = torch.add(x, 1.0)
return x
module_dict = torch.nn.ModuleDict({"foo": torch.nn.Conv2d(1, 1, 1)})
m = M(module_dict)
data = torch.randn(1)
out1 = m(data)
cnt = torch._dynamo.testing.CompileCounter()
opt_m = torch._dynamo.optimize(cnt, nopython=True)(m)
out2 = opt_m(data)
self.assertEqual(cnt.op_count, 2)
self.assertTrue(torch._dynamo.testing.same(out1, out2))
module_dict = torch.nn.ModuleDict({"bar": torch.nn.Conv2d(1, 1, 1)})
m = M(module_dict)
data = torch.randn(1)
out1 = m(data)
cnt = torch._dynamo.testing.CompileCounter()
torch._dynamo.reset()
opt_m = torch._dynamo.optimize(cnt, nopython=True)(m)
out2 = opt_m(data)
self.assertEqual(cnt.op_count, 1)
self.assertTrue(torch._dynamo.testing.same(out1, out2))
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module1(self):
input_shape = (16, 3, 6, 7, 8)
cnt = torch._dynamo.testing.CompileCounter()
module = LazyModule()
def test_static_module():
input = torch.ones(*input_shape)
module(input)
# test no graph break
opt_test_static_module = torch.compile(
test_static_module, backend=cnt, fullgraph=True
)
opt_test_static_module()
self.assertTrue(
isinstance(module, MaterializedModule),
"Module should be transformed to an instance of MaterializedModule.",
)
self.assertEqual(module.param.shape, input_shape)
# test when mapped to UnspecializedNNModule
module = LazyModule()
def test_unspecialized():
nonlocal module
module = LazyModule()
input = torch.ones(*input_shape)
module(input)
opt_test_unspecialized = torch.compile(test_unspecialized, backend=cnt)
opt_test_unspecialized()
self.assertTrue(
isinstance(module, MaterializedModule),
"Module should be transformed to an instance of MaterializedModule.",
)
self.assertEqual(module.param.shape, input_shape)
# test with a static module in torch.*
module = torch.nn.modules.LazyBatchNorm3d(
affine=False, track_running_stats=False
)
cnt = torch._dynamo.testing.CompileCounter()
torch._dynamo.reset()
def test_torch_static():
input = torch.ones(*input_shape)
return module(input) # fully materialized
# test no graph break
opt_test_torch_static = torch.compile(
test_torch_static, backend=cnt, fullgraph=True
)
opt_test_torch_static()
out = opt_test_torch_static()
self.assertTrue(same(out, module(torch.ones(*input_shape))))
self.assertTrue(
isinstance(module, torch.nn.modules.batchnorm.BatchNorm3d),
"Module should be transformed to an instance of BatchNorm3d.",
)
self.assertEqual(cnt.frame_count, 1, "No guards should have triggered.")
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module2(self):
# Test FX graph 'call_module' works well if argument is lazy module
m = LazyMLP()
x = torch.rand([10, 10])
opt_m = torch.compile(m, backend="eager", fullgraph=True)
# We should run in compile mode first; otherwise the module
# would already be initialized by the eager-mode run.
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module4(self):
m = LazyMLP()
x = torch.rand([10, 10])
cnt = torch._dynamo.testing.CompileCounter()
opt_m = torch.compile(m, backend=cnt, fullgraph=True)
# first iteration
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
# input shape changed and second iteration
x = torch.rand([20, 20])
try:
opt_m(x)
except RuntimeError:
self.assertIn("must have same reduction dim", traceback.format_exc())
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module5(self):
# Test lazy module works well with list/tuple input
m = LazyModuleWithListInput()
x = [torch.rand([5, 5])] * 3 + [None]
opt_m = torch.compile(m, backend="eager", fullgraph=True)
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module6(self):
# Test new lazy submodule in lazy module's initialize_parameters
m = LazyModuleWithLazySubmodule()
x = [torch.rand([5, 5])] * 3
opt_m = torch.compile(m, backend="eager", fullgraph=True)
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module7(self):
# Test lazy module works well with namedtuple/dict input
m = LazyModuleWithNamedTupleInput()
x = MyInput(
x={"a": [torch.rand([5, 5])] * 3, "b": torch.rand([5, 5])},
y=torch.rand([5, 5]),
)
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
def test_lazy_module_no_cls_to_become(self):
# make sure super() works in the case where cls_to_become is None
m = LazyChildModuleNoClsToBecome()
x = torch.rand(2, 2)
opt_m = torch.compile(m, backend="eager", fullgraph=True)
res = opt_m(x)
ref = m(x)
self.assertTrue(torch.allclose(ref, res))
def test_lazy_module_kwargs(self):
m = LazyModuleKwArgs()
x = [torch.rand([5, 5])] * 3
y = [torch.rand([5, 5])] * 2
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
exp_res = m(x, y)
self.assertTrue(torch.allclose(exp_res, opt_m(x, y)))
def test_lazy_module_bad_params(self):
m = LazyModuleBadInferParams()
x = [torch.rand([5, 5])] * 3
y = [torch.rand([5, 5])] * 2
# Note that this raises from within dynamo code, with no exception handling.
with self.assertRaises(AttributeError) as cm:
opt_m = torch.compile(backend="eager")(m)
exp_res = opt_m(x, y)
def test_lazy_module_bad_params_call_function(self):
class holder:
x = LazyModuleBadInferParams()
def apply(self, x, y):
self.x(x, y)
def m(x, y):
h = holder()
return h.apply(x, y)
x = [torch.rand([5, 5])] * 3
y = [torch.rand([5, 5])] * 2
opt_m = torch.compile(backend="eager")(m)
with self.assertRaises(AttributeError):
exp_res = opt_m(x, y)
# RuntimeError: SymIntArrayRef expected to contain only concrete integers
@expectedFailureDynamic
def test_lazy_module_speculation_log_divergence(self):
class ModWithOneLazyLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer = torch.nn.LazyLinear(8)
def forward(self, x):
return self.layer(x)
# This allows us to restart tracing without clearing speculation log
def id_and_fail_inlining(x):
torch._dynamo.graph_break()
return x
cnt = torch._dynamo.testing.CompileCounter()
@torch.compile(backend=cnt)
def test(mod, x):
res = mod(x)
# Speculation log must not diverge in the 2nd round of tracing,
# after we've initialized the `LazyLinear` into a `Linear` in the
# 1st round.
res2 = id_and_fail_inlining(res)
return res
mod = ModWithOneLazyLinear()
x = torch.ones(10, 3)
# Make sure we don't get recompilation across multiple runs
actual_res = test(mod, x)
expect_res = mod(x)
self.assertTrue(torch.allclose(expect_res, actual_res))
actual_res = test(mod, x)
expect_res = mod(x)
self.assertTrue(torch.allclose(expect_res, actual_res))
self.assertEqual(cnt.frame_count, 1)
def test_call_fn_with_non_const_inputs_safe(self):
class ModuleSpecialFwd(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(
in_channels=3, out_channels=20, kernel_size=(5, 5)
)
def _conv_forward(self, x):
return self.conv._conv_forward(x, self.conv.weight, self.conv.bias)
def forward(self, x):
return self._conv_forward(x)
mod = ModuleSpecialFwd()
rx = torch.randn([3, 10, 10])
real = mod(rx)
graph, _ = torch._dynamo.export(mod)(rx)
self.assertTrue(torch._dynamo.testing.same(real, graph(rx)))
def test_conv_call_forward_directly(self):
m = ConvCallForwardDirectly()
x = torch.rand([4, 3, 9, 9])
ref = m(x)
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
res = opt_m(x)
self.assertTrue(torch.allclose(ref, res))
def test_conv_transpose_call_forward_directly(self):
m = ConvTransposeCallForwardDirectly()
x = torch.rand([4, 4, 4, 4])
ref = m(x)
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
res = opt_m(x)
self.assertTrue(torch.allclose(ref, res))
def test_conv_call_super_forward_directly(self):
x = torch.randn(4, 4)
m = ConvCallSuperForwardDirectly(4, 4, 4)
ref = m(x)
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
res = opt_m(x)
self.assertTrue(torch.allclose(ref, res))
def test_conv_transpose_call_super_forward_directly(self):
x = torch.randn(4, 4, 4)
m = ConvTransposeCallSuperForwardDirectly(4, 4, 4)
ref = m(x)
opt_m = torch.compile(backend="eager", fullgraph=True)(m)
res = opt_m(x)
self.assertTrue(torch.allclose(ref, res))
@torch._dynamo.config.patch("allow_unspec_int_on_nn_module", True)
def test_nn_module_unspec_int_attr(self):
for module_class in [ModuleWithIntAttr, UnspecModuleWithIntAttr]:
mod = module_class()
cnt = torch._dynamo.testing.CompileCounter()
opt_mod = torch.compile(backend=cnt)(copy.deepcopy(mod))
x = torch.rand(3, 4)
# Compiling `self.step` as static
ref1 = mod(x)
res1 = opt_mod(x)
self.assertTrue(torch.allclose(ref1, res1))
self.assertEqual(cnt.frame_count, 1)
mod.step += 1
opt_mod.step += 1
# Second time: compiling `self.step` as dynamic
ref2 = mod(x)
res2 = opt_mod(x)
self.assertTrue(torch.allclose(ref2, res2))
self.assertEqual(cnt.frame_count, ifdynstaticdefault(2, 1))
mod.step += 1
opt_mod.step += 1
# Third time: no re-compilation!
ref3 = mod(x)
res3 = opt_mod(x)
self.assertTrue(torch.allclose(ref3, res3))
self.assertEqual(cnt.frame_count, ifdynstaticdefault(2, 1))
|
NNModuleTests
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/coordinator/fault_tolerance_test.py
|
{
"start": 1748,
"end": 2379
}
|
class ____(
fault_tolerance_test_base.BaseFaultToleranceTest, test.TestCase):
"""Single worker fault tolerance tests.
This covers the cases that ensure training can continue in a single-worker
cluster, even if the only worker becomes unavailable at some point and is
later recovered (if there are multiple workers, it is possible that the
training succeeds with the workers that did not fail). Realistically, a
single-worker setup is very rarely used, but the tests are important to
ensure the correct
behaviors.
"""
def setUp(self):
super(SingleWorkerFaultToleranceTest, self).setUp(1, 1)
|
SingleWorkerFaultToleranceTest
|
python
|
huggingface__transformers
|
src/transformers/models/voxtral/modular_voxtral.py
|
{
"start": 1341,
"end": 1397
}
|
class ____(Qwen2AudioAttention):
pass
|
VoxtralAttention
|
python
|
doocs__leetcode
|
solution/1500-1599/1522.Diameter of N-Ary Tree/Solution.py
|
{
"start": 184,
"end": 748
}
|
class ____:
def diameter(self, root: 'Node') -> int:
"""
:type root: 'Node'
:rtype: int
"""
def dfs(root):
if root is None:
return 0
nonlocal ans
m1 = m2 = 0
for child in root.children:
t = dfs(child)
if t > m1:
m2, m1 = m1, t
elif t > m2:
m2 = t
ans = max(ans, m1 + m2)
return 1 + m1
ans = 0
dfs(root)
return ans
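# Worked example (LeetCode 1522 sample): tree [1,null,3,2,4,null,5,6].
# At node 3: m1 = m2 = 1, so ans = 2; at the root: m1 = 2 (via 3) and
# m2 = 1 (via 2 or 4), so the diameter is m1 + m2 = 3 edges.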
|
Solution
|
python
|
ray-project__ray
|
rllib/core/models/torch/utils.py
|
{
"start": 89,
"end": 2945
}
|
class ____(nn.Module):
"""A striding layer for doing torch Conv2DTranspose operations.
Using this layer before the 0-padding (on a 3D input "image") and before
the actual ConvTranspose2d allows for a padding="same" behavior that exactly
matches that of a `tf.keras.layers.Conv2DTranspose` layer.
Examples:
Input image (4x4):
A B C D
E F G H
I J K L
M N O P
Stride with stride=2 -> output image=(7x7)
A 0 B 0 C 0 D
0 0 0 0 0 0 0
E 0 F 0 G 0 H
0 0 0 0 0 0 0
I 0 J 0 K 0 L
0 0 0 0 0 0 0
M 0 N 0 O 0 P
"""
def __init__(self, width, height, stride_w, stride_h):
"""Initializes a Stride2D instance.
Args:
width: The width of the 3D input "image".
height: The height of the 3D input "image".
stride_w: The stride in width direction, with which to stride the incoming
image.
stride_h: The stride in height direction, with which to stride the incoming
image.
"""
super().__init__()
self.width = width
self.height = height
self.stride_w = stride_w
self.stride_h = stride_h
self.register_buffer(
"zeros",
torch.zeros(
size=(
self.width * self.stride_w - (self.stride_w - 1),
self.height * self.stride_h - (self.stride_h - 1),
),
dtype=torch.float32,
),
)
self.out_width, self.out_height = self.zeros.shape[0], self.zeros.shape[1]
# Squeeze in batch and channel dims.
self.zeros = self.zeros.unsqueeze(0).unsqueeze(0)
where_template = torch.zeros(
(self.stride_w, self.stride_h), dtype=torch.float32
)
# Set upper/left corner to 1.0.
where_template[0][0] = 1.0
# then tile across the entire (strided) image size.
where_template = where_template.repeat((self.height, self.width))[
: -(self.stride_w - 1), : -(self.stride_h - 1)
]
# Squeeze in batch and channel dims and convert to bool.
where_template = where_template.unsqueeze(0).unsqueeze(0).bool()
self.register_buffer("where_template", where_template)
def forward(self, x):
# Repeat incoming image stride(w/h) times to match the strided output template.
repeated_x = (
x.repeat_interleave(self.stride_w, dim=-2).repeat_interleave(
self.stride_h, dim=-1
)
)[:, :, : -(self.stride_w - 1), : -(self.stride_h - 1)]
# Where `self.where_template` == 1.0 -> Use image pixel, otherwise use
# zero filler value.
return torch.where(self.where_template, repeated_x, self.zeros)
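# Usage sketch: with width=height=4 and stride_w=stride_h=2, a (B, C, 4, 4)
# input becomes a (B, C, 7, 7) tensor carrying the original pixels on every
# other row/column and zeros elsewhere, ready for the subsequent ConvTranspose2d.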
|
Stride2D
|
python
|
spyder-ide__spyder
|
spyder/plugins/console/widgets/internalshell.py
|
{
"start": 3470,
"end": 15996
}
|
class ____(PythonShellWidget):
"""Shell base widget: link between PythonShellWidget and Interpreter"""
# --- Signals
# This signal is emitted when the buffer is flushed
sig_refreshed = Signal()
# Request to show a status message on the main window
sig_show_status_requested = Signal(str)
# This signal emits a parsed error traceback text so we can then
# request opening the file that traceback comes from in the Editor.
sig_go_to_error_requested = Signal(str)
# TODO: I think this is not being used now?
sig_focus_changed = Signal()
def __init__(self, parent=None, commands=None, message=None,
max_line_count=300, exitfunc=None, profile=False,
multithreaded=True):
super().__init__(parent, get_conf_path('history_internal.py'),
profile=profile)
self.multithreaded = multithreaded
self.setMaximumBlockCount(max_line_count)
# Allow raw_input support:
self.input_loop = None
self.input_mode = False
# KeyboardInterrupt support
self.interrupted = False # used only for not-multithreaded mode
self.sig_keyboard_interrupt.connect(self.keyboard_interrupt)
# Code completion / calltips
# keyboard events management
self.eventqueue = []
# Init interpreter
self.exitfunc = exitfunc
commands = [] if commands is None else commands
self.commands = commands
self.message = message
self.interpreter = None
# Clear status bar
self.sig_show_status_requested.emit('')
# Embedded shell -- requires the monitor (which installs the
# 'open_in_spyder' function in builtins)
if hasattr(builtins, 'open_in_spyder'):
self.sig_go_to_error_requested.connect(
self.open_with_external_spyder)
#------ Interpreter
def start_interpreter(self, namespace):
"""Start Python interpreter."""
self.clear()
if self.interpreter is not None:
self.interpreter.closing()
self.interpreter = Interpreter(namespace, self.exitfunc,
SysOutput, WidgetProxy,
get_debug_level())
self.interpreter.stdout_write.data_avail.connect(self.stdout_avail)
self.interpreter.stderr_write.data_avail.connect(self.stderr_avail)
self.interpreter.widget_proxy.sig_set_readonly.connect(self.setReadOnly)
self.interpreter.widget_proxy.sig_new_prompt.connect(self.new_prompt)
self.interpreter.widget_proxy.sig_edit.connect(self.edit_script)
self.interpreter.widget_proxy.sig_wait_input.connect(self.wait_input)
if self.multithreaded:
self.interpreter.start()
# Interpreter banner
banner = create_banner(self.message)
self.write(banner, prompt=True)
# Initial commands
for cmd in self.commands:
self.run_command(cmd, history=False, new_prompt=False)
# First prompt
self.new_prompt(self.interpreter.p1)
self.sig_refreshed.emit()
return self.interpreter
def exit_interpreter(self):
"""Exit interpreter"""
self.interpreter.exit_flag = True
if self.multithreaded:
self.interpreter.stdin_write.write(b'\n')
self.interpreter.restore_stds()
def edit_script(self, filename, external_editor):
filename = str(filename)
if external_editor:
self.external_editor(filename)
else:
self.parent().edit_script(filename)
def stdout_avail(self):
"""Data is available in stdout, let's empty the queue and write it!"""
data = self.interpreter.stdout_write.empty_queue()
if data:
self.write(data)
def stderr_avail(self):
"""Data is available in stderr, let's empty the queue and write it!"""
data = self.interpreter.stderr_write.empty_queue()
if data:
self.write(data, error=True)
self.flush(error=True)
#------Raw input support
def wait_input(self, prompt=''):
"""Wait for input (raw_input support)"""
self.new_prompt(prompt)
self.setFocus()
self.input_mode = True
self.input_loop = QEventLoop(None)
self.input_loop.exec_()
self.input_loop = None
def end_input(self, cmd):
"""End of wait_input mode"""
self.input_mode = False
self.input_loop.exit()
self.interpreter.widget_proxy.end_input(cmd)
#----- Menus, actions, ...
def setup_context_menu(self):
"""Reimplement PythonShellWidget method"""
PythonShellWidget.setup_context_menu(self)
self.help_action = create_action(self, _("Help..."),
icon=ima.icon('DialogHelpButton'),
triggered=self.help)
self.menu.addAction(self.help_action)
@Slot()
def help(self):
"""Help on Spyder console"""
QMessageBox.about(self, _("Help"),
"""<b>%s</b>
<p><i>%s</i><br> edit foobar.py
<p><i>%s</i><br> xedit foobar.py
<p><i>%s</i><br> run foobar.py
<p><i>%s</i><br> clear x, y
<p><i>%s</i><br> !ls
<p><i>%s</i><br> object?
<p><i>%s</i><br> result = oedit(object)
""" % (_('Shell special commands:'),
_('Internal editor:'),
_('External editor:'),
_('Run script:'),
_('Remove references:'),
_('System commands:'),
_('Python help:'),
_('GUI-based editor:')))
#------ External editing
def open_with_external_spyder(self, text):
"""Load file in external Spyder's editor, if available
This method is used only for embedded consoles
(could also be useful if we ever implement the magic %edit command)"""
match = get_error_match(str(text))
if match:
fname, lnb = match.groups()
builtins.open_in_spyder(fname, int(lnb))
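    # Hedged illustration (editorial note, not part of the original widget):
    # assuming get_error_match() recognizes standard Python traceback lines,
    # a line such as
    #     File "script.py", line 12, in <module>
    # would yield match.groups() == ('script.py', '12'), which is then passed
    # to the 'open_in_spyder' builtin installed by the monitor.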
def set_external_editor(self, path, gotoline):
"""Set external editor path and gotoline option."""
self._path = path
self._gotoline = gotoline
def external_editor(self, filename, goto=-1):
"""
Edit in an external editor.
Recommended: SciTE (e.g. to go to line where an error did occur).
"""
editor_path = self._path
goto_option = self._gotoline
if os.path.isfile(editor_path):
try:
args = [filename]
if goto > 0 and goto_option:
                    args.append('%s%d' % (goto_option, goto))
programs.run_program(editor_path, args)
except OSError:
self.write_error("External editor was not found:"
" %s\n" % editor_path)
#------ I/O
def flush(self, error=False, prompt=False):
"""Reimplement ShellBaseWidget method"""
PythonShellWidget.flush(self, error=error, prompt=prompt)
if self.interrupted:
self.interrupted = False
raise KeyboardInterrupt
#------ Clear terminal
def clear_terminal(self):
"""Reimplement ShellBaseWidget method"""
self.clear()
        prompt = self.interpreter.p2 if self.interpreter.more else self.interpreter.p1
        self.new_prompt(prompt)
#------ Keyboard events
def on_enter(self, command):
"""on_enter"""
if self.profile:
# Simple profiling test
t0 = time()
for _ in range(10):
self.execute_command(command)
self.insert_text(u"\n<Δt>=%dms\n" % (1e2*(time()-t0)))
self.new_prompt(self.interpreter.p1)
else:
self.execute_command(command)
self.__flush_eventqueue()
def keyPressEvent(self, event):
"""
Reimplement Qt Method
Enhanced keypress event handler
"""
if self.preprocess_keyevent(event):
# Event was accepted in self.preprocess_keyevent
return
self.postprocess_keyevent(event)
def __flush_eventqueue(self):
"""Flush keyboard event queue"""
while self.eventqueue:
past_event = self.eventqueue.pop(0)
self.postprocess_keyevent(past_event)
#------ Command execution
def keyboard_interrupt(self):
"""Simulate keyboard interrupt"""
if self.multithreaded:
self.interpreter.raise_keyboard_interrupt()
else:
if self.interpreter.more:
self.write_error("\nKeyboardInterrupt\n")
self.interpreter.more = False
self.new_prompt(self.interpreter.p1)
self.interpreter.resetbuffer()
else:
self.interrupted = True
def execute_lines(self, lines):
"""
        Execute a set of lines as multiple commands.
        lines: multiple lines of text, each line to be executed as a single command
"""
for line in lines.splitlines():
stripped_line = line.strip()
if stripped_line.startswith('#'):
continue
self.write(line+os.linesep, flush=True)
self.execute_command(line+"\n")
self.flush()
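    # Hedged example (editorial note): given lines = "a = 1\n# note\nprint(a)",
    # the loop above echoes and executes "a = 1" and "print(a)" while the
    # comment-only middle line is skipped entirely.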
def execute_command(self, cmd):
"""
Execute a command
cmd: one-line command only, with '\n' at the end
"""
if self.input_mode:
self.end_input(cmd)
return
if cmd.endswith('\n'):
cmd = cmd[:-1]
# cls command
if cmd == 'cls':
self.clear_terminal()
return
self.run_command(cmd)
def run_command(self, cmd, history=True, new_prompt=True):
"""Run command in interpreter"""
if not cmd:
cmd = ''
else:
if history:
self.add_to_history(cmd)
if not self.multithreaded:
if 'input' not in cmd:
self.interpreter.stdin_write.write(bytes(cmd + '\n', "utf-8"))
self.interpreter.run_line()
self.sig_refreshed.emit()
else:
self.write(_('In order to use commands like "raw_input" '
'or "input" run Spyder with the multithread '
'option (--multithread) from a system terminal'),
error=True)
else:
self.interpreter.stdin_write.write(bytes(cmd + '\n', "utf-8"))
#------ Code completion / Calltips
def _eval(self, text):
"""Is text a valid object?"""
return self.interpreter.eval(text)
def get_dir(self, objtxt):
"""Return dir(object)"""
obj, valid = self._eval(objtxt)
if valid:
return getobjdir(obj)
def get_globals_keys(self):
"""Return shell globals() keys"""
return list(self.interpreter.namespace.keys())
def get_cdlistdir(self):
"""Return shell current directory list dir"""
return os.listdir(getcwd_or_home())
def iscallable(self, objtxt):
"""Is object callable?"""
obj, valid = self._eval(objtxt)
if valid:
return callable(obj)
def get_arglist(self, objtxt):
"""Get func/method argument list"""
obj, valid = self._eval(objtxt)
if valid:
return getargtxt(obj)
def get__doc__(self, objtxt):
"""Get object __doc__"""
obj, valid = self._eval(objtxt)
if valid:
return obj.__doc__
def get_doc(self, objtxt):
"""Get object documentation dictionary"""
obj, valid = self._eval(objtxt)
if valid:
return getdoc(obj)
def get_source(self, objtxt):
"""Get object source"""
obj, valid = self._eval(objtxt)
if valid:
return getsource(obj)
def is_defined(self, objtxt, force_import=False):
"""Return True if object is defined"""
return self.interpreter.is_defined(objtxt, force_import)
|
InternalShell
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/channels.py
|
{
"start": 415431,
"end": 420788
}
|
class ____(
ValueChannelMixin, core.ValueDefWithConditionMarkPropFieldOrDatumDefnumber
):
"""
OpacityValue schema wrapper.
Parameters
----------
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalMarkPropFieldOrDatumDef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, :class:`ConditionalParameterMarkPropFieldOrDatumDef`, :class:`ConditionalPredicateMarkPropFieldOrDatumDef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
A field definition or one or more value definition(s) with a parameter predicate.
value : dict, float, :class:`ExprRef`
A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` / `gradient
definition <https://vega.github.io/vega-lite/docs/types.html#gradient>`__ for color,
values between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "opacity"
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
test: Optional[str | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self,
*,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
empty: Optional[bool] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self,
*,
bandPosition: Optional[float] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
empty: Optional[bool] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> OpacityValue: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> OpacityValue: ...
def __init__(
self,
value,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
**kwds,
):
super().__init__(value=value, condition=condition, **kwds)
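# Hedged usage sketch (illustration only, not from the generated module): in
# user code this channel is usually reached via `alt.OpacityValue`, e.g.
#
#   import altair as alt
#   opacity = alt.OpacityValue(0.3).condition(param="hover", empty=False, value=1.0)
#
# which corresponds to the `param`/`empty`/`value` overload of `condition` above.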
@with_property_setters
|
OpacityValue
|
python
|
kamyu104__LeetCode-Solutions
|
Python/largest-palindromic-number.py
|
{
"start": 71,
"end": 669
}
|
class ____(object):
def largestPalindromic(self, num):
"""
:type num: str
:rtype: str
"""
cnt = collections.Counter(num)
result = []
for i in reversed(xrange(10)):
if not cnt[str(i)]//2 or (i == 0 and not result):
continue
for _ in xrange(cnt[str(i)]//2):
result.append(str(i))
result.append(max([k for k, v in cnt.iteritems() if v%2] or [""]))
for i in reversed(xrange(len(result)-1)):
result.append(result[i])
return "".join(result) or "0"
|
Solution
|
python
|
cython__cython
|
Cython/Compiler/FlowControl.py
|
{
"start": 22569,
"end": 23006
}
|
class ____(TreeVisitor):
def __init__(self):
super().__init__()
self.assignments = []
    def visit_Node(self, node):
        self._visitchildren(node, None, None)
def visit_SingleAssignmentNode(self, node):
self.assignments.append((node.lhs, node.rhs))
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.assignments.append((lhs, node.rhs))
|
AssignmentCollector
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/numpy_ops/np_array_ops_test.py
|
{
"start": 19785,
"end": 41797
}
|
class ____(test.TestCase):
def setUp(self):
super(ArrayMethodsTest, self).setUp()
set_up_virtual_devices()
self.array_transforms = [
lambda x: x,
ops.convert_to_tensor,
np.array,
np_array_ops.array,
]
def testAllAny(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arr = fn(arr)
self.match(
np_array_ops.all(arr, *args, **kwargs),
np.all(arr, *args, **kwargs))
self.match(
np_array_ops.any(arr, *args, **kwargs),
np.any(arr, *args, **kwargs))
run_test(0)
run_test(1)
run_test([])
run_test([[True, False], [True, True]])
run_test([[True, False], [True, True]], axis=0)
run_test([[True, False], [True, True]], axis=0, keepdims=True)
run_test([[True, False], [True, True]], axis=1)
run_test([[True, False], [True, True]], axis=1, keepdims=True)
run_test([[True, False], [True, True]], axis=(0, 1))
run_test([[True, False], [True, True]], axis=(0, 1), keepdims=True)
run_test([5.2, 3.5], axis=0)
run_test([1, 0], axis=0)
def testCompress(self):
def run_test(condition, arr, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arg1 = fn1(condition)
arg2 = fn2(arr)
self.match(
np_array_ops.compress(arg1, arg2, *args, **kwargs),
np.compress(
np.asarray(arg1).astype(np.bool_), arg2, *args, **kwargs))
run_test([True], 5)
run_test([False], 5)
run_test([], 5)
run_test([True, False, True], [1, 2, 3])
run_test([True, False], [1, 2, 3])
run_test([False, True], [[1, 2], [3, 4]])
run_test([1, 0, 1], [1, 2, 3])
run_test([1, 0], [1, 2, 3])
run_test([0, 1], [[1, 2], [3, 4]])
run_test([True], [[1, 2], [3, 4]])
run_test([False, True], [[1, 2], [3, 4]], axis=1)
run_test([False, True], [[1, 2], [3, 4]], axis=0)
run_test([False, True], [[1, 2], [3, 4]], axis=-1)
run_test([False, True], [[1, 2], [3, 4]], axis=-2)
def testCopy(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.copy(arg, *args, **kwargs),
np.copy(arg, *args, **kwargs))
run_test([])
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([True])
run_test(np.arange(9).reshape((3, 3)).tolist())
a = np_array_ops.asarray(0)
self.assertNotIn('CPU:1', a.backing_device)
with ops.device('CPU:1'):
self.assertIn('CPU:1', np_array_ops.array(a, copy=True)
.backing_device)
self.assertIn('CPU:1', np_array_ops.array(np.array(0), copy=True)
.backing_device)
def testCumProdAndSum(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.cumprod(arg, *args, **kwargs),
np.cumprod(arg, *args, **kwargs))
self.match(
np_array_ops.cumsum(arg, *args, **kwargs),
np.cumsum(arg, *args, **kwargs))
run_test([])
run_test([1, 2, 3])
run_test([1, 2, 3], dtype=float)
run_test([1, 2, 3], dtype=np.float32)
run_test([1, 2, 3], dtype=np.float64)
run_test([1., 2., 3.])
run_test([1., 2., 3.], dtype=int)
run_test([1., 2., 3.], dtype=np.int32)
run_test([1., 2., 3.], dtype=np.int64)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
def testImag(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.imag(arg, *args, **kwargs),
# np.imag may return a scalar so we convert to a np.ndarray.
np.array(np.imag(arg, *args, **kwargs)))
run_test(1)
run_test(5.5)
run_test(5 + 3j)
run_test(3j)
run_test([])
run_test([1, 2, 3])
run_test([1 + 5j, 2 + 3j])
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]])
def testAMaxAMin(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
self.match(
np_array_ops.amax(arr_arg, axis=axis_arg, *args, **kwargs),
np.amax(arr_arg, axis=axis, *args, **kwargs))
self.match(
np_array_ops.amin(arr_arg, axis=axis_arg, *args, **kwargs),
np.amin(arr_arg, axis=axis, *args, **kwargs))
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
self.assertRaises(ValueError, np_array_ops.amax, np.ones([2, 2]), out=[])
self.assertRaises(ValueError, np_array_ops.amin, np.ones([2, 2]), out=[])
def testMean(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
self.match(
np_array_ops.mean(arr_arg, axis=axis_arg, *args, **kwargs),
np.mean(arr_arg, axis=axis, *args, **kwargs))
run_test([1, 2, 1])
run_test([1., 2., 1.])
run_test([1., 2., 1.], dtype=int)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
self.assertRaises(ValueError, np_array_ops.mean, np.ones([2, 2]), out=[])
def testStd(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
self.match(
np_array_ops.std(arr_arg, axis=axis_arg, *args, **kwargs),
np.std(arr_arg, axis=axis, *args, **kwargs))
run_test([1, 2, 1])
run_test([1., 2., 1.])
run_test([1.j, 2., 1.j])
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
def testVar(self):
def run_test(arr, *args, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axis_arg = fn2(axis) if axis is not None else None
self.match(
np_array_ops.var(arr_arg, axis=axis_arg, *args, **kwargs),
np.var(arr_arg, axis=axis, *args, **kwargs))
run_test([1, 2, 1])
run_test([1., 2., 1.])
run_test([1.j, 2., 1.j])
run_test([1., 2., 1.], dtype=np.int64)
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
self.assertRaises(ValueError, np_array_ops.var, np.ones([2, 2]), out=[])
def testProd(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.prod(arg, *args, **kwargs),
np.prod(arg, *args, **kwargs))
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test(np.array([1, 2, 3], dtype=np.int16))
run_test([[1, 2], [3, 4]], axis=1)
run_test([[1, 2], [3, 4]], axis=0)
run_test([[1, 2], [3, 4]], axis=-1)
run_test([[1, 2], [3, 4]], axis=-2)
run_test([[1, 2], [3, 4]], axis=(0, 1))
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(0, 2), keepdims=True)
run_test(np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0))
run_test(
np.arange(8).reshape((2, 2, 2)).tolist(), axis=(2, 0), keepdims=True)
def _testReduce(self, math_fun, np_fun, name):
axis_transforms = [
lambda x: x, # Identity,
ops.convert_to_tensor,
np.array,
np_array_ops.array,
lambda x: np_array_ops.array(x, dtype=np.float32),
lambda x: np_array_ops.array(x, dtype=np.float64),
]
def run_test(a, **kwargs):
axis = kwargs.pop('axis', None)
for fn1 in self.array_transforms:
for fn2 in axis_transforms:
arg1 = fn1(a)
axis_arg = fn2(axis) if axis is not None else None
self.match(
math_fun(arg1, axis=axis_arg, **kwargs),
np_fun(arg1, axis=axis, **kwargs),
msg='{}({}, axis={}, keepdims={})'.format(name, arg1, axis,
kwargs.get('keepdims')))
run_test(5)
run_test([2, 3])
run_test([[2, -3], [-6, 7]])
run_test([[2, -3], [-6, 7]], axis=0)
run_test([[2, -3], [-6, 7]], axis=0, keepdims=True)
run_test([[2, -3], [-6, 7]], axis=1)
run_test([[2, -3], [-6, 7]], axis=1, keepdims=True)
run_test([[2, -3], [-6, 7]], axis=(0, 1))
run_test([[2, -3], [-6, 7]], axis=(1, 0))
def testSum(self):
self._testReduce(np_array_ops.sum, np.sum, 'sum')
def testAmax(self):
self._testReduce(np_array_ops.amax, np.amax, 'amax')
def testSize(self):
def run_test(arr, axis=None):
onp_arr = np.array(arr)
self.assertEqual(np_array_ops.size(arr, axis), np.size(onp_arr, axis))
run_test(np_array_ops.array([1]))
run_test(np_array_ops.array([1, 2, 3, 4, 5]))
run_test(np_array_ops.ones((2, 3, 2)))
run_test(np_array_ops.ones((3, 2)))
run_test(np_array_ops.zeros((5, 6, 7)))
run_test(1)
run_test(np_array_ops.ones((3, 2, 1)))
run_test(constant_op.constant(5))
run_test(constant_op.constant([1, 1, 1]))
self.assertRaises(NotImplementedError, np_array_ops.size, np.ones((2, 2)),
1)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=None)])
def f(arr):
arr = np_array_ops.asarray(arr)
return np_array_ops.size(arr)
self.assertEqual(f(np_array_ops.ones((3, 2))).numpy(), 6)
def testRavel(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.ravel(arg, *args, **kwargs),
np.ravel(arg, *args, **kwargs))
run_test(5)
run_test(5.)
run_test([])
run_test([[]])
run_test([[], []])
run_test([1, 2, 3])
run_test([1., 2., 3.])
run_test([[1, 2], [3, 4]])
run_test(np.arange(8).reshape((2, 2, 2)).tolist())
def testReal(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.real(arg, *args, **kwargs),
np.array(np.real(arg, *args, **kwargs)))
run_test(1)
run_test(5.5)
run_test(5 + 3j)
run_test(3j)
run_test([])
run_test([1, 2, 3])
run_test([1 + 5j, 2 + 3j])
run_test([[1 + 5j, 2 + 3j], [1 + 7j, 2 + 8j]])
def testRepeat(self):
def run_test(arr, repeats, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
repeats_arg = fn2(repeats)
self.match(
np_array_ops.repeat(arr_arg, repeats_arg, *args, **kwargs),
np.repeat(arr_arg, repeats_arg, *args, **kwargs))
run_test(1, 2)
run_test([1, 2], 2)
run_test([1, 2], [2])
run_test([1, 2], [1, 2])
run_test([[1, 2], [3, 4]], 3, axis=0)
run_test([[1, 2], [3, 4]], 3, axis=1)
run_test([[1, 2], [3, 4]], [3], axis=0)
run_test([[1, 2], [3, 4]], [3], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=0)
run_test([[1, 2], [3, 4]], [3, 2], axis=1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-1)
run_test([[1, 2], [3, 4]], [3, 2], axis=-2)
def testAround(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
self.match(
np_array_ops.around(arg, *args, **kwargs),
np.around(arg, *args, **kwargs))
run_test(5.5)
run_test(5.567, decimals=2)
run_test([])
run_test([1.27, 2.49, 2.75], decimals=1)
run_test([23.6, 45.1], decimals=-1)
def testReshape(self):
def run_test(arr, newshape, *args, **kwargs):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
newshape_arg = fn2(newshape)
self.match(
np_array_ops.reshape(arr_arg, newshape_arg, *args, **kwargs),
np.reshape(arr_arg, newshape, *args, **kwargs))
run_test(5, [-1])
run_test([], [-1])
run_test([1, 2, 3], [1, 3])
run_test([1, 2, 3], [3, 1])
run_test([1, 2, 3, 4], [2, 2])
run_test([1, 2, 3, 4], [2, 1, 2])
def testExpandDims(self):
def run_test(arr, axis):
self.match(np_array_ops.expand_dims(arr, axis), np.expand_dims(arr, axis))
run_test([1, 2, 3], 0)
run_test([1, 2, 3], 1)
def testSqueeze(self):
def run_test(arr, *args, **kwargs):
for fn in self.array_transforms:
arg = fn(arr)
# Note: np.squeeze ignores the axis arg for non-ndarray objects.
# This looks like a bug: https://github.com/numpy/numpy/issues/8201
# So we convert the arg to np.ndarray before passing to np.squeeze.
self.match(
np_array_ops.squeeze(arg, *args, **kwargs),
np.squeeze(np.array(arg), *args, **kwargs))
run_test(5)
run_test([])
run_test([5])
run_test([[1, 2, 3]])
run_test([[[1], [2], [3]]])
run_test([[[1], [2], [3]]], axis=0)
run_test([[[1], [2], [3]]], axis=2)
run_test([[[1], [2], [3]]], axis=(0, 2))
run_test([[[1], [2], [3]]], axis=-1)
run_test([[[1], [2], [3]]], axis=-3)
def testTranspose(self):
def run_test(arr, axes=None):
for fn1 in self.array_transforms:
for fn2 in self.array_transforms:
arr_arg = fn1(arr)
axes_arg = fn2(axes) if axes is not None else None
self.match(
np_array_ops.transpose(arr_arg, axes_arg),
np.transpose(arr_arg, axes))
run_test(5)
run_test([])
run_test([5])
run_test([5, 6, 7])
run_test(np.arange(30).reshape(2, 3, 5).tolist())
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 1, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [0, 2, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 0, 2])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [1, 2, 0])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 0, 1])
run_test(np.arange(30).reshape(2, 3, 5).tolist(), [2, 1, 0])
def match_shape(self, actual, expected, msg=None):
if msg:
msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format(
msg, expected.shape, actual.shape)
self.assertEqual(actual.shape, expected.shape, msg=msg)
def match_dtype(self, actual, expected, msg=None):
if msg:
msg = 'Dtype match failed for: {}. Expected: {} Actual: {}.'.format(
msg, expected.dtype, actual.dtype)
self.assertEqual(actual.dtype, expected.dtype, msg=msg)
def match(self, actual, expected, msg=None, check_dtype=True):
msg_ = 'Expected: {} Actual: {}'.format(expected, actual)
if msg:
msg = '{} {}'.format(msg_, msg)
else:
msg = msg_
self.assertIsInstance(actual, np_arrays.ndarray)
if check_dtype:
self.match_dtype(actual, expected, msg)
self.match_shape(actual, expected, msg)
    self.assertAllClose(actual.tolist(), expected.tolist())
def testPad(self):
t = [[1, 2, 3], [4, 5, 6]]
paddings = [[
1,
1,
], [2, 2]]
self.assertAllEqual(
np_array_ops.pad(t, paddings, 'constant'),
[[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 2, 3, 0, 0], [0, 0, 4, 5, 6, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
self.assertAllEqual(
np_array_ops.pad(t, paddings, 'reflect'),
[[6, 5, 4, 5, 6, 5, 4], [3, 2, 1, 2, 3, 2, 1], [6, 5, 4, 5, 6, 5, 4],
[3, 2, 1, 2, 3, 2, 1]])
self.assertAllEqual(
np_array_ops.pad(t, paddings, 'symmetric'),
[[2, 1, 1, 2, 3, 3, 2], [2, 1, 1, 2, 3, 3, 2], [5, 4, 4, 5, 6, 6, 5],
[5, 4, 4, 5, 6, 6, 5]])
def testTake(self):
a = [4, 3, 5, 7, 6, 8]
indices = [0, 1, 4]
self.assertAllEqual([4, 3, 6], np_array_ops.take(a, indices))
indices = [[0, 1], [2, 3]]
self.assertAllEqual([[4, 3], [5, 7]], np_array_ops.take(a, indices))
a = [[4, 3, 5], [7, 6, 8]]
self.assertAllEqual([[4, 3], [5, 7]], np_array_ops.take(a, indices))
a = np.random.rand(2, 16, 3)
axis = 1
self.assertAllEqual(
np.take(a, indices, axis=axis),
np_array_ops.take(a, indices, axis=axis))
def testTakeAlongAxis(self):
rng = np.random.default_rng()
x = rng.standard_normal((2, 3)).astype(np.float32)
ind = rng.integers(0, 3, (2, 5)).astype(np.int64)
out_expected = np.take_along_axis(x, ind, axis=1)
out = np_array_ops.take_along_axis(x, ind, axis=1)
self.assertAllEqual(out, out_expected)
def testWhere(self):
self.assertAllEqual([[1.0, 1.0], [1.0, 1.0]],
np_array_ops.where([True], [1.0, 1.0],
[[0, 0], [0, 0]]))
def testShape(self):
self.assertAllEqual((1, 2), np_array_ops.shape([[0, 0]]))
def testSwapaxes(self):
x = [[1, 2, 3]]
self.assertAllEqual([[1], [2], [3]], np_array_ops.swapaxes(x, 0, 1))
self.assertAllEqual([[1], [2], [3]], np_array_ops.swapaxes(x, -2, -1))
x = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
self.assertAllEqual([[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
np_array_ops.swapaxes(x, 0, 2))
self.assertAllEqual([[[0, 4], [2, 6]], [[1, 5], [3, 7]]],
np_array_ops.swapaxes(x, -3, -1))
def testMoveaxis(self):
def _test(*args):
# pylint: disable=no-value-for-parameter
expected = np.moveaxis(*args)
raw_ans = np_array_ops.moveaxis(*args)
self.assertAllEqual(expected, raw_ans)
a = np.random.rand(1, 2, 3, 4, 5, 6)
# Basic
_test(a, (0, 2), (3, 5))
_test(a, (0, 2), (-1, -3))
_test(a, (-6, -4), (3, 5))
_test(a, (-6, -4), (-1, -3))
_test(a, 0, 4)
_test(a, -6, -2)
_test(a, tuple(range(6)), tuple(range(6)))
_test(a, tuple(range(6)), tuple(reversed(range(6))))
_test(a, (), ())
def testNdim(self):
self.assertAllEqual(0, np_array_ops.ndim(0.5))
self.assertAllEqual(1, np_array_ops.ndim([1, 2]))
def testIsscalar(self):
self.assertTrue(np_array_ops.isscalar(0.5))
self.assertTrue(np_array_ops.isscalar(5))
self.assertTrue(np_array_ops.isscalar(False))
self.assertFalse(np_array_ops.isscalar([1, 2]))
def assertListEqual(self, a, b):
self.assertAllEqual(len(a), len(b))
for x, y in zip(a, b):
self.assertAllEqual(x, y)
def testSplit(self):
x = np_array_ops.arange(9)
y = np_array_ops.split(x, 3)
self.assertListEqual([([0, 1, 2]), ([3, 4, 5]), ([6, 7, 8])], y)
x = np_array_ops.arange(8)
y = np_array_ops.split(x, [3, 5, 6, 10])
self.assertListEqual([([0, 1, 2]), ([3, 4]), ([5]), ([6, 7]), ([])], y)
def testHSplitBecomesVsplitFor1DInput(self):
@def_function.function
def f(arr):
return np_array_ops.hsplit(arr, 2)
x = np_array_ops.arange(4)
self.assertListEqual([[0, 1], [2, 3]], f(x))
def testSign(self):
state = np.random.RandomState(0)
test_types = [np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128]
test_shapes = [(), (1,), (2, 3, 4), (2, 3, 0, 4)]
for dtype in test_types:
for shape in test_shapes:
if np.issubdtype(dtype, np.complexfloating):
arr = (np.asarray(state.randn(*shape) * 100, dtype=dtype) +
1j * np.asarray(state.randn(*shape) * 100, dtype=dtype))
else:
arr = np.asarray(state.randn(*shape) * 100, dtype=dtype)
self.match(np_array_ops.sign(arr), np.sign(arr))
|
ArrayMethodsTest
|
python
|
scikit-learn__scikit-learn
|
sklearn/cluster/_birch.py
|
{
"start": 3685,
"end": 9566
}
|
class ____:
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : list
List of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        Next leaf node. Useful only if is_leaf is True, to retrieve
        the final subclusters.
init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than ``centroids_``,
        since the centroids are just a view of ``init_centroids_``.
    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        Manipulated throughout, similar to ``init_centroids_``.
centroids_ : ndarray of shape (branching_factor + 1, n_features)
View of ``init_centroids_``.
squared_norm_ : ndarray of shape (branching_factor + 1,)
View of ``init_sq_norm_``.
"""
def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype)
self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype)
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids_ and squared_norm_ as views: this way, updating
        # init_centroids_ and init_sq_norm_ is sufficient to keep them in sync.
self.centroids_ = self.init_centroids_[: n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[: n_samples + 1]
def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.0
dist_matrix += self.squared_norm_
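        # Editorial note: this evaluates ||c_i||^2 - 2 * c_i . x for every
        # centroid c_i, i.e. the squared Euclidean distance to the new
        # centroid x up to the constant ||x||^2, which does not change the
        # argmin below.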
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[
closest_index
].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[
closest_index
].sq_norm_
return False
            # Things are not too good: we need to redistribute the
            # subclusters in our child node, and add a new subcluster
            # in the parent node to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_,
threshold,
branching_factor,
)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2
)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
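# Hedged editorial note: insert_cf_subcluster returns True exactly when this
# node ends up holding more than branching_factor subclusters, signalling the
# caller (the parent node, or the tree itself at the root) that this node must
# now be split.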
|
_CFNode
|
python
|
pyca__cryptography
|
src/cryptography/x509/name.py
|
{
"start": 490,
"end": 3536
}
|
class ____(utils.Enum):
BitString = 3
OctetString = 4
UTF8String = 12
NumericString = 18
PrintableString = 19
T61String = 20
IA5String = 22
UTCTime = 23
GeneralizedTime = 24
VisibleString = 26
UniversalString = 28
BMPString = 30
_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}
_NAMEOID_DEFAULT_TYPE: dict[ObjectIdentifier, _ASN1Type] = {
NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,
NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,
NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,
NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,
NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,
NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,
}
# Type alias
_OidNameMap = typing.Mapping[ObjectIdentifier, str]
_NameOidMap = typing.Mapping[str, ObjectIdentifier]
#: Short attribute names from RFC 4514:
#: https://tools.ietf.org/html/rfc4514#page-7
_NAMEOID_TO_NAME: _OidNameMap = {
NameOID.COMMON_NAME: "CN",
NameOID.LOCALITY_NAME: "L",
NameOID.STATE_OR_PROVINCE_NAME: "ST",
NameOID.ORGANIZATION_NAME: "O",
NameOID.ORGANIZATIONAL_UNIT_NAME: "OU",
NameOID.COUNTRY_NAME: "C",
NameOID.STREET_ADDRESS: "STREET",
NameOID.DOMAIN_COMPONENT: "DC",
NameOID.USER_ID: "UID",
}
_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}
_NAMEOID_LENGTH_LIMIT = {
NameOID.COUNTRY_NAME: (2, 2),
NameOID.JURISDICTION_COUNTRY_NAME: (2, 2),
NameOID.COMMON_NAME: (1, 64),
}
def _escape_dn_value(val: str | bytes) -> str:
"""Escape special characters in RFC4514 Distinguished Name value."""
if not val:
return ""
# RFC 4514 Section 2.4 defines the value as being the # (U+0023) character
# followed by the hexadecimal encoding of the octets.
if isinstance(val, bytes):
return "#" + binascii.hexlify(val).decode("utf8")
# See https://tools.ietf.org/html/rfc4514#section-2.4
val = val.replace("\\", "\\\\")
val = val.replace('"', '\\"')
val = val.replace("+", "\\+")
val = val.replace(",", "\\,")
val = val.replace(";", "\\;")
val = val.replace("<", "\\<")
val = val.replace(">", "\\>")
val = val.replace("\0", "\\00")
if val[0] == "#" or (val[0] == " " and len(val) > 1):
val = "\\" + val
if val[-1] == " ":
val = val[:-1] + "\\ "
return val
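# Hedged usage sketch (illustration only, not part of the library): the
# expected values below follow directly from the replace() chain and the
# leading/trailing-space rules in _escape_dn_value.
if __name__ == "__main__":
    assert _escape_dn_value("Sales, West") == "Sales\\, West"   # comma escaped
    assert _escape_dn_value(" leading") == "\\ leading"         # leading space
    assert _escape_dn_value("trailing ") == "trailing\\ "       # trailing space
    assert _escape_dn_value(b"\x00\x01") == "#0001"             # bytes -> hexstring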
def _unescape_dn_value(val: str) -> str:
if not val:
return ""
# See https://tools.ietf.org/html/rfc4514#section-3
# special = escaped / SPACE / SHARP / EQUALS
# escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
def sub(m):
val = m.group(0)
# Special character escape
if len(val) == 2:
return val[1:]
        # Run of hex escapes (\XX pairs): decode the hex-encoded bytes
return binascii.unhexlify(val.replace("\\", "")).decode()
return _RFC4514NameParser._PAIR_MULTI_RE.sub(sub, val)
NameAttributeValueType = typing.TypeVar(
"NameAttributeValueType",
typing.Union[str, bytes],
str,
bytes,
covariant=True,
)
|
_ASN1Type
|
python
|
cython__cython
|
tests/run/pep526_variable_annotations.py
|
{
"start": 2802,
"end": 7762
}
|
class ____(object):
pass
c = Cls()
c.x: int = 0 # Annotates c.x with int.
c.y: int # Annotates c.y with int.
d = {}
d['a']: int = 0 # Annotates d['a'] with int.
d['b']: int # Annotates d['b'] with int.
(x): int # Annotates x with int, (x) treated as expression by compiler.
(y): int = 0 # Same situation here.
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode//DictIterationNextNode",
)
def iter_declared_dict(d):
"""
>>> d = {1.1: 2.5, 3.3: 4.5}
>>> iter_declared_dict(d)
7.0
# specialized "compiled" test in module-level __doc__
"""
typed_dict : Dict[cython.float, cython.float] = d
s = 0.0
for key in typed_dict:
s += d[key]
return s
@cython.test_assert_path_exists(
"//WhileStatNode",
"//WhileStatNode//DictIterationNextNode",
)
def iter_declared_dict_arg(d : Dict[cython.float, cython.float]):
"""
>>> d = {1.1: 2.5, 3.3: 4.5}
>>> iter_declared_dict_arg(d)
7.0
# module level "compiled" test in __doc__ below
"""
s = 0.0
for key in d:
s += d[key]
return s
def literal_list_ptr():
"""
>>> literal_list_ptr()
4
"""
a : cython.p_int = [1, 2, 3, 4, 5]
return a[3]
def test_subscripted_types():
"""
>>> test_subscripted_types()
dict object
dict object
list object
list object
list object
set object
"""
a1: typing.Dict[cython.int, cython.float] = {}
a2: dict[cython.int, cython.float] = {}
b1: List[cython.int] = []
b2: list[cython.int] = []
b3: List = [] # doesn't need to be subscripted
c: _SET_[object] = set()
print(cython.typeof(a1) + (" object" if not cython.compiled else ""))
print(cython.typeof(a2) + (" object" if not cython.compiled else ""))
print(cython.typeof(b1) + (" object" if not cython.compiled else ""))
print(cython.typeof(b2) + (" object" if not cython.compiled else ""))
print(cython.typeof(b3) + (" object" if not cython.compiled else ""))
print(cython.typeof(c) + (" object" if not cython.compiled else ""))
def test_use_typing_attributes_as_non_annotations():
"""
>>> test_use_typing_attributes_as_non_annotations()
typing.Tuple typing.Tuple[int]
Optional True
Optional True
Optional True
Union typing.FrozenSet
Union typing.Dict
"""
x1 = typing.Tuple
x2 = typing.Tuple[int]
y1 = typing.Optional
# It's important for the test that FrozenSet isn't available in the module namespace,
# since one bug would have looked it up there rather than as an attribute of typing
y2 = typing.Optional[typing.FrozenSet]
z1 = Optional
z2 = Optional[Dict]
q1 = typing.Union
q2 = typing.Union[typing.FrozenSet]
w1 = Union
w2 = Union[Dict]
def name_of(special_decl):
try:
return special_decl.__name__
except AttributeError:
return str(special_decl).partition('.')[-1]
# The result of printing "Optional[type]" is slightly version-dependent
# so accept different forms.
allowed_optional_frozenset_strings = [
"typing.Union[typing.FrozenSet, NoneType]",
"typing.Optional[typing.FrozenSet]",
"typing.FrozenSet | None",
]
allowed_optional_dict_strings = [
"typing.Union[typing.Dict, NoneType]",
"typing.Optional[typing.Dict]",
"typing.Dict | None",
]
print(x1, x2)
print(name_of(y1), y1 is z1 or (y1, z1))
print(name_of(y1), str(y2) in allowed_optional_frozenset_strings or str(y2))
print(name_of(z1), str(z2) in allowed_optional_dict_strings or str(z2))
print(name_of(q1), str(q2) in ["typing.Union[typing.FrozenSet, NoneType]", "typing.FrozenSet | None"] or str(q2))
print(name_of(w1), str(w2) in ["typing.Union[typing.Dict, NoneType]", "typing.Dict | None"] or str(w2))
try:
import numpy.typing as npt
import numpy as np
except ImportError:
# we can't actually use numpy typing right now, it was just part
# of a reproducer that caused a compiler crash. We don't need it
# available to use it in annotations, so don't fail if it's not there
pass
def list_float_to_numpy(z: List[float]) -> List[npt.NDArray[np.float64]]:
# since we're not actually requiring numpy, don't make the return type match
assert cython.typeof(z) == 'list'
return [z[0]]
if cython.compiled:
__doc__ = """
# passing non-dicts to variables declared as dict now fails
>>> class D(object):
... def __getitem__(self, x): return 2
... def __iter__(self): return iter([1, 2, 3])
>>> iter_declared_dict(D()) # doctest:+IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: Expected dict, got D
>>> iter_declared_dict_arg(D()) # doctest:+IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: Expected dict, got D
"""
_WARNINGS = """
"""
|
Cls
|
python
|
ray-project__ray
|
python/ray/data/_internal/datasource/parquet_datasource.py
|
{
"start": 38211,
"end": 46993
}
|
class ____:
# Estimated avg byte size of a row (in-memory)
avg_row_in_mem_bytes: Optional[int]
# Corresponding file metadata
metadata: "pyarrow._parquet.FileMetaData"
def estimate_in_memory_bytes(self) -> Optional[int]:
if self.avg_row_in_mem_bytes is None:
return None
return self.avg_row_in_mem_bytes * self.metadata.num_rows
def _estimate_files_encoding_ratio(
fragments: List[_ParquetFragment],
file_infos: List[_ParquetFileInfo],
) -> float:
"""Return an estimate of the Parquet files encoding ratio.
To avoid OOMs, it is safer to return an over-estimate than an underestimate.
"""
if not DataContext.get_current().decoding_size_estimation:
return PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT
assert len(file_infos) == len(fragments)
# Estimate size of the rows in a file in memory
estimated_in_mem_size_arr = [
fi.estimate_in_memory_bytes() if fi is not None else None for fi in file_infos
]
file_size_arr = [f.file_size for f in fragments]
estimated_encoding_ratios = [
float(in_mem_size) / file_size
for in_mem_size, file_size in zip(estimated_in_mem_size_arr, file_size_arr)
if file_size > 0 and in_mem_size is not None
]
# Return default estimate of 5 if all sampled files turned out to be empty
if not estimated_encoding_ratios:
return PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT
estimated_ratio = np.mean(estimated_encoding_ratios)
logger.info(f"Estimated parquet encoding ratio is {estimated_ratio:.3f}.")
return max(estimated_ratio, PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND)
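# Hedged worked example (editorial note): a sampled file of 10 MiB on disk
# whose rows are estimated at 50 MiB in memory contributes an encoding ratio
# of 5.0; the mean over all sampled files is then clamped from below by
# PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND before being returned.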
def _fetch_file_infos(
sampled_fragments: List[_ParquetFragment],
*,
columns: Optional[List[str]],
schema: Optional["pyarrow.Schema"],
local_scheduling: Optional[bool],
) -> List[Optional[_ParquetFileInfo]]:
fetch_file_info = cached_remote_fn(_fetch_parquet_file_info)
futures = []
for fragment in sampled_fragments:
# Sample the first rows batch in i-th file.
# Use SPREAD scheduling strategy to avoid packing many sampling tasks on
# same machine to cause OOM issue, as sampling can be memory-intensive.
futures.append(
fetch_file_info.options(
scheduling_strategy=local_scheduling
or DataContext.get_current().scheduling_strategy,
# Retry in case of transient errors during sampling.
retry_exceptions=[OSError],
).remote(
fragment,
columns=columns,
schema=schema,
)
)
sample_bar = ProgressBar("Parquet dataset sampling", len(futures), unit="file")
file_infos = sample_bar.fetch_until_complete(futures)
sample_bar.close()
return file_infos
def _estimate_reader_batch_size(
file_infos: List[Optional[_ParquetFileInfo]], target_block_size: Optional[int]
) -> Optional[int]:
if target_block_size is None:
return None
avg_num_rows_per_block = [
target_block_size / fi.avg_row_in_mem_bytes
for fi in file_infos
if (
fi is not None
and fi.avg_row_in_mem_bytes is not None
and fi.avg_row_in_mem_bytes > 0
)
]
if not avg_num_rows_per_block:
return DEFAULT_PARQUET_READER_ROW_BATCH_SIZE
estimated_batch_size: int = max(math.ceil(np.mean(avg_num_rows_per_block)), 1)
logger.info(f"Estimated parquet reader batch size at {estimated_batch_size} rows")
return estimated_batch_size
def get_parquet_dataset(paths, filesystem, dataset_kwargs):
import pyarrow.parquet as pq
# If you pass a list containing a single directory path to `ParquetDataset`, PyArrow
# errors with 'IsADirectoryError: Path ... points to a directory, but only file
# paths are supported'. To avoid this, we pass the directory path directly.
if len(paths) == 1:
paths = paths[0]
try:
dataset = pq.ParquetDataset(
paths,
**dataset_kwargs,
filesystem=filesystem,
)
except TypeError:
# Fallback: resolve filesystem locally in the worker
try:
resolved_paths, resolved_filesystem = _resolve_paths_and_filesystem(
paths, filesystem=None
)
resolved_filesystem = RetryingPyFileSystem.wrap(
resolved_filesystem,
retryable_errors=DataContext.get_current().retried_io_errors,
)
dataset = pq.ParquetDataset(
resolved_paths,
**dataset_kwargs,
filesystem=resolved_filesystem,
)
except OSError as os_e:
_handle_read_os_error(os_e, paths)
except OSError as e:
_handle_read_os_error(e, paths)
return dataset
def _sample_fragments(
fragments: List[_ParquetFragment],
) -> List[_ParquetFragment]:
if not fragments:
return []
target_num_samples = math.ceil(
len(fragments) * PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO
)
target_num_samples = max(
min(target_num_samples, PARQUET_ENCODING_RATIO_ESTIMATE_MAX_NUM_SAMPLES),
PARQUET_ENCODING_RATIO_ESTIMATE_MIN_NUM_SAMPLES,
)
# Make sure number of samples doesn't exceed total # of files
target_num_samples = min(target_num_samples, len(fragments))
# Evenly distributed to choose which file to sample, to avoid biased prediction
# if data is skewed.
pivots = np.linspace(0, len(fragments) - 1, target_num_samples).astype(int)
return [fragments[idx] for idx in pivots.tolist()]
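def _demo_sample_pivots():
    # Hedged illustration (hypothetical helper, not part of the datasource):
    # the linspace-based selection above picks evenly spaced file indices, so
    # sampling 3 of 10 fragments inspects files 0, 4 and 9 rather than the
    # first three, avoiding bias when files are ordered by size or date.
    pivots = np.linspace(0, 10 - 1, 3).astype(int)
    assert pivots.tolist() == [0, 4, 9]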
def _add_partitions_to_table(
partition_col_values: Dict[str, PartitionDataType], table: "pyarrow.Table"
) -> "pyarrow.Table":
for partition_col, value in partition_col_values.items():
field_index = table.schema.get_field_index(partition_col)
if field_index == -1:
table = BlockAccessor.for_block(table).fill_column(partition_col, value)
elif log_once(f"duplicate_partition_field_{partition_col}"):
logger.warning(
f"The partition field '{partition_col}' also exists in the Parquet "
f"file. Ray Data will default to using the value in the Parquet file."
)
return table
def _get_partition_columns_schema(
partitioning: Partitioning,
file_paths: List[str],
) -> "pyarrow.Schema":
"""Return a new schema with partition fields added.
This function infers the partition fields from the first file path in the dataset.
"""
import pyarrow as pa
# If the dataset is empty, we can't infer the partitioning
if len(file_paths) == 0:
return pa.schema([])
# If the dataset isn't partitioned, there's no partition schema
elif partitioning is None:
return pa.schema([])
first_path = file_paths[0]
fields = []
parser = PathPartitionParser(partitioning)
partitions = parser(first_path)
for field_name in partitions:
if field_name in partitioning.field_types:
field_type = pa.from_numpy_dtype(partitioning.field_types[field_name])
else:
field_type = pa.string()
# Without this check, we would add the same partition field multiple times,
# which silently fails when asking for `pa.field()`.
fields.append(pa.field(field_name, field_type))
return pa.schema(fields)
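# Hedged illustration (editorial note): with Hive-style partitioning, a first
# file path like ".../year=2024/country=US/part-0.parquet" produces a schema
# with fields "year" and "country", typed pa.string() unless a dtype was
# declared in partitioning.field_types.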
def _infer_data_and_partition_columns(
user_specified_columns: List[str],
fragment: "ParquetFileFragment",
partitioning: Optional[Partitioning],
) -> Tuple[List[str], List[str]]:
"""Infer which columns are in the files and which columns are partition columns.
This function uses the schema and path of the first file to infer what columns
represent.
Args:
user_specified_columns: A list of column names that the user specified.
fragment: The first fragment in the dataset.
partitioning: The partitioning scheme used to partition the data.
Returns:
A tuple of lists of column names. The first list contains the columns that are
in the file, and the second list contains the columns that are partition
columns.
"""
data_columns = [
column
for column in user_specified_columns
if column in fragment.physical_schema.names
]
if partitioning is not None:
parse = PathPartitionParser(partitioning)
partitions = parse(fragment.path)
partition_columns = [
column for column in user_specified_columns if column in partitions
]
else:
partition_columns = []
return data_columns, partition_columns
|
_ParquetFileInfo
|
python
|
nryoung__algorithms
|
tests/test_data_structures.py
|
{
"start": 20416,
"end": 20875
}
|
class ____(unittest.TestCase):
"""
Test Union Find Implementation
"""
def test_union_find(self):
self.uf = union_find.UnionFind(4)
self.uf.make_set(4)
self.uf.union(1, 0)
self.uf.union(3, 4)
self.assertEqual(self.uf.find(1), 0)
self.assertEqual(self.uf.find(3), 4)
self.assertEqual(self.uf.is_connected(0, 1), True)
self.assertEqual(self.uf.is_connected(3, 4), True)
|
TestUnionFind
|
python
|
redis__redis-py
|
tests/test_scenario/fault_injector_client.py
|
{
"start": 393,
"end": 704
}
|
class ____(str, Enum):
DMC_RESTART = "dmc_restart"
FAILOVER = "failover"
RESHARD = "reshard"
SEQUENCE_OF_ACTIONS = "sequence_of_actions"
NETWORK_FAILURE = "network_failure"
EXECUTE_RLUTIL_COMMAND = "execute_rlutil_command"
EXECUTE_RLADMIN_COMMAND = "execute_rladmin_command"
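# Hedged usage note (editorial): because the enum also subclasses str, members
# compare equal to their values -- e.g. ActionType.FAILOVER == "failover" --
# so they can be embedded directly in JSON payloads for the fault injector.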
|
ActionType
|
python
|
django__django
|
tests/queries/models.py
|
{
"start": 17129,
"end": 17391
}
|
class ____(models.Model):
modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE)
modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE)
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
|
Ticket23605B
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/check_ops_test.py
|
{
"start": 49422,
"end": 54908
}
|
class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_raises_if_rank_mismatch_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
with self.assertRaisesRegex(ValueError, "fail.*must have rank.*in.*1.*2"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
self.evaluate(array_ops.identity(tensor_rank0))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_raises_if_rank_mismatch_dynamic_rank(self):
with self.cached_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, (1, 2), message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank0 = constant_op.constant(42, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank0))
@test_util.run_deprecated_v1
def test_rank_zero_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.cached_session():
tensor_rank0 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank0, desired_ranks)]):
array_ops.identity(tensor_rank0).eval(feed_dict={tensor_rank0: 42.0})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_doesnt_raise_if_rank_matches_static_rank(self):
tensor_rank1 = constant_op.constant([42, 43], name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
self.evaluate(array_ops.identity(tensor_rank1))
@test_util.run_deprecated_v1
def test_rank_one_tensor_doesnt_raise_if_rank_matches_dynamic_rank(self):
with self.cached_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
for desired_ranks in ((0, 1, 2), (1, 0, 2), (1, 2, 0)):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, desired_ranks)]):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes
def test_rank_one_tensor_raises_if_rank_mismatches_static_rank(self):
tensor_rank1 = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegex(ValueError, "rank"):
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
self.evaluate(array_ops.identity(tensor_rank1))
@test_util.run_deprecated_v1
def test_rank_one_tensor_raises_if_rank_mismatches_dynamic_rank(self):
with self.cached_session():
tensor_rank1 = array_ops.placeholder(dtypes.float32, name="my_tensor")
with ops.control_dependencies([
check_ops.assert_rank_in(tensor_rank1, (0, 2))]):
with self.assertRaisesOpError("my_tensor.*rank"):
array_ops.identity(tensor_rank1).eval(feed_dict={
tensor_rank1: (42.0, 43.0)
})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_scalar_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
desired_ranks = (
np.array(1, dtype=np.int32),
np.array((2, 1), dtype=np.int32))
with self.assertRaisesRegex(ValueError, "Rank must be a scalar"):
check_ops.assert_rank_in(tensor, desired_ranks)
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
desired_ranks = (
array_ops.placeholder(dtypes.int32, name="rank0_tensor"),
array_ops.placeholder(dtypes.int32, name="rank1_tensor"))
with self.assertRaisesOpError("Rank must be a scalar"):
with ops.control_dependencies(
(check_ops.assert_rank_in(tensor, desired_ranks),)):
array_ops.identity(tensor).eval(feed_dict={
desired_ranks[0]: 1,
desired_ranks[1]: [2, 1],
})
@test_util.run_in_graph_and_eager_modes
def test_raises_if_rank_is_not_integer_static(self):
tensor = constant_op.constant((42, 43), name="my_tensor")
with self.assertRaisesRegex(TypeError, "must be of type tf.int32"):
check_ops.assert_rank_in(tensor, (1, .5,))
@test_util.run_deprecated_v1
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.cached_session():
tensor = constant_op.constant(
(42, 43), dtype=dtypes.float32, name="my_tensor")
rank_tensor = array_ops.placeholder(dtypes.float32, name="rank_tensor")
with self.assertRaisesRegex(TypeError, "must be of type tf.int32"):
with ops.control_dependencies(
[check_ops.assert_rank_in(tensor, (1, rank_tensor))]):
array_ops.identity(tensor).eval(feed_dict={rank_tensor: .5})
|
AssertRankInTest
|
python
|
tiangolo__fastapi
|
docs_src/security/tutorial004.py
|
{
"start": 1121,
"end": 4172
}
|
class ____(User):
hashed_password: str
password_hash = PasswordHash.recommended()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return password_hash.verify(plain_password, hashed_password)
def get_password_hash(password):
return password_hash.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: Union[timedelta, None] = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
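def _demo_token_roundtrip():
    # Hedged usage sketch (hypothetical helper, not part of the tutorial):
    # encode a token for "johndoe" with a 30-minute expiry, then decode it
    # with the same SECRET_KEY/ALGORITHM defined earlier in this module.
    token = create_access_token({"sub": "johndoe"}, expires_delta=timedelta(minutes=30))
    payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    assert payload["sub"] == "johndoe"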
async def get_current_user(token: str = Depends(oauth2_scheme)):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(current_user: User = Depends(get_current_user)):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
@app.get("/users/me/items/")
async def read_own_items(current_user: User = Depends(get_current_active_user)):
return [{"item_id": "Foo", "owner": current_user.username}]
|
UserInDB
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 20960,
"end": 21015
}
|
class ____(TypeDefinition):
pass
|
TypeSystemDefinition
|
python
|
python-markdown__markdown
|
markdown/inlinepatterns.py
|
{
"start": 26829,
"end": 33159
}
|
class ____(InlineProcessor):
""" Return a link element from the given match. """
RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE)
RE_TITLE_CLEAN = re.compile(r'\s')
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
""" Return an `a` [`Element`][xml.etree.ElementTree.Element] or `(None, None, None)`. """
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
href, title, index, handled = self.getLink(data, index)
if not handled:
return None, None, None
el = etree.Element("a")
el.text = text
el.set("href", href)
if title is not None:
el.set("title", title)
return el, m.start(0), index
def getLink(self, data: str, index: int) -> tuple[str, str | None, int, bool]:
"""Parse data between `()` of `[Text]()` allowing recursive `()`. """
href = ''
title: str | None = None
handled = False
m = self.RE_LINK.match(data, pos=index)
if m and m.group(1):
# Matches [Text](<link> "title")
href = m.group(1)[1:-1].strip()
if m.group(2):
title = m.group(2)[1:-1]
index = m.end(0)
handled = True
elif m:
# Track bracket nesting and index in string
bracket_count = 1
backtrack_count = 1
start_index = m.end()
index = start_index
last_bracket = -1
# Primary (first found) quote tracking.
quote: str | None = None
start_quote = -1
exit_quote = -1
ignore_matches = False
# Secondary (second found) quote tracking.
alt_quote = None
start_alt_quote = -1
exit_alt_quote = -1
# Track last character
last = ''
for pos in range(index, len(data)):
c = data[pos]
if c == '(':
# Count nested (
# Don't increment the bracket count if we are sure we're in a title.
if not ignore_matches:
bracket_count += 1
elif backtrack_count > 0:
backtrack_count -= 1
elif c == ')':
# Match nested ) to (
# Don't decrement if we are sure we are in a title that is unclosed.
if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)):
bracket_count = 0
elif not ignore_matches:
bracket_count -= 1
elif backtrack_count > 0:
backtrack_count -= 1
# We've found our backup end location if the title doesn't resolve.
if backtrack_count == 0:
last_bracket = index + 1
elif c in ("'", '"'):
# Quote has started
if not quote:
# We'll assume we are now in a title.
# Brackets are quoted, so no need to match them (except for the final one).
ignore_matches = True
backtrack_count = bracket_count
bracket_count = 1
start_quote = index + 1
quote = c
# Secondary quote (in case the first doesn't resolve): [text](link'"title")
elif c != quote and not alt_quote:
start_alt_quote = index + 1
alt_quote = c
# Update primary quote match
elif c == quote:
exit_quote = index + 1
# Update secondary quote match
elif alt_quote and c == alt_quote:
exit_alt_quote = index + 1
index += 1
# Link is closed, so let's break out of the loop
if bracket_count == 0:
# Get the title if we closed a title string right before link closed
if exit_quote >= 0 and quote == last:
href = data[start_index:start_quote - 1]
title = ''.join(data[start_quote:exit_quote - 1])
elif exit_alt_quote >= 0 and alt_quote == last:
href = data[start_index:start_alt_quote - 1]
title = ''.join(data[start_alt_quote:exit_alt_quote - 1])
else:
href = data[start_index:index - 1]
break
if c != ' ':
last = c
# We have a scenario: `[test](link"notitle)`
# When we enter a string, we stop tracking bracket resolution in the main counter,
# but we do keep a backup counter up until we discover where we might resolve all brackets
# if the title string fails to resolve.
if bracket_count != 0 and backtrack_count == 0:
href = data[start_index:last_bracket - 1]
index = last_bracket
bracket_count = 0
handled = bracket_count == 0
if title is not None:
title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip())))
href = self.unescape(href).strip()
return href, title, index, handled
def getText(self, data: str, index: int) -> tuple[str, int, bool]:
"""Parse the content between `[]` of the start of an image or link
resolving nested square brackets.
"""
bracket_count = 1
text = []
for pos in range(index, len(data)):
c = data[pos]
if c == ']':
bracket_count -= 1
elif c == '[':
bracket_count += 1
index += 1
if bracket_count == 0:
break
text.append(c)
return ''.join(text), index, bracket_count == 0
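The quote and bracket bookkeeping above is easiest to verify end to end through the public API. A minimal sketch, assuming the markdown package is installed (output shown is approximate):
import markdown

# Plain link with a quoted title -- exercises the quote-tracking branch.
print(markdown.markdown('[text](http://example.com "a title")'))
# <p><a href="http://example.com" title="a title">text</a></p>

# Angle-bracket form -- matches the RE_LINK fast path, parentheses and all.
print(markdown.markdown('[text](<http://example.com/a(b)>)'))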
|
LinkInlineProcessor
|
python
|
pandas-dev__pandas
|
pandas/tests/arrays/categorical/test_repr.py
|
{
"start": 204,
"end": 681
}
|
class ____:
def test_print(self, using_infer_string):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
dtype = "str" if using_infer_string else "object"
expected = [
"['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']",
f"Categories (3, {dtype}): ['a' < 'b' < 'c']",
]
expected = "\n".join(expected)
actual = repr(factor)
assert actual == expected
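For reference, the assertion reduces to an ordinary repr check. A standalone sketch, assuming only that pandas is installed; the reported dtype is "object" or "str" depending on the infer-string option:
import pandas as pd

factor = pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
print(repr(factor))
# ['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']
# Categories (3, object): ['a' < 'b' < 'c']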
|
TestCategoricalReprWithFactor
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sum-of-nodes-with-even-valued-grandparent.py
|
{
"start": 191,
"end": 669
}
|
class ____(object):
def sumEvenGrandparent(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def sumEvenGrandparentHelper(root, p, gp):
return sumEvenGrandparentHelper(root.left, root.val, p) + \
sumEvenGrandparentHelper(root.right, root.val, p) + \
(root.val if gp is not None and gp % 2 == 0 else 0) if root else 0
return sumEvenGrandparentHelper(root, None, None)
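A worked example, assuming the usual LeetCode TreeNode definition (not part of the snippet) and taking the masked class to be Solution, per the label below:
class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

# 6 is an even-valued grandparent of leaves 2 and 4, so the answer is 2 + 4 = 6.
#        6
#       / \
#      7   8
#     /     \
#    2       4
root = TreeNode(6, TreeNode(7, TreeNode(2)), TreeNode(8, None, TreeNode(4)))
print(Solution().sumEvenGrandparent(root))  # 6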
|
Solution
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/credentials.py
|
{
"start": 1114,
"end": 3305
}
|
class ____(NamedTuple):
email: str | None # the user's email.
is_valid: bool # whether the email is valid.
def email_prompt() -> str:
# Emoji can cause encoding errors on non-UTF-8 terminals
# (See https://github.com/streamlit/streamlit/issues/2284.)
# WT_SESSION is a Windows Terminal specific environment variable. If it exists,
# we are on the latest Windows Terminal that supports emojis
show_emoji = sys.stdout.encoding == "utf-8" and (
not env_util.IS_WINDOWS or os.environ.get("WT_SESSION")
)
# IMPORTANT: Break the text below at 80 chars.
return f"""
{"👋 " if show_emoji else ""}{cli_util.style_for_cli("Welcome to Streamlit!", bold=True)}
If you'd like to receive helpful onboarding emails, news, offers, promotions,
and the occasional swag, please enter your email address below. Otherwise,
leave this field blank.
{cli_util.style_for_cli("Email: ", fg="blue")}"""
_TELEMETRY_HEADLESS_TEXT = """
Collecting usage statistics. To deactivate, set browser.gatherUsageStats to false.
"""
def _send_email(email: str | None) -> None:
"""Send the user's email for metrics, if submitted."""
import requests
if email is None or "@" not in email:
return
metrics_url = ""
try:
response_json = requests.get(
"https://data.streamlit.io/metrics.json", timeout=2
).json()
metrics_url = response_json.get("url", "")
except Exception:
_LOGGER.exception("Failed to fetch metrics URL")
return
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "localhost:8501",
"referer": "localhost:8501/",
}
data = {
"anonymous_id": None,
"messageId": str(uuid4()),
"event": "submittedEmail",
"author_email": email,
"source": "provided_email",
"type": "track",
"userId": email,
}
response = requests.post(
metrics_url,
headers=headers,
data=json.dumps(data).encode(),
timeout=10,
)
response.raise_for_status()
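Taken together, the pieces above compose into prompt, send, record. A loose sketch, under the assumption that validity here is just an "@" check:
email = input(email_prompt()).strip()
_send_email(email or None)  # silently no-ops unless the string contains "@"
activation = _Activation(email=email or None, is_valid="@" in email)  # assumed validity rule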
|
_Activation
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/callbackProtocol7.py
|
{
"start": 500,
"end": 664
}
|
class ____(Protocol):
def __call__(self, x: int, y: int = 2, /): ...
def test2(p1: P1, p2: P2, p3: P3, p4: P4):
x1: P1 = p2
x2: P1 = p3
x3: P1 = p4
|
P4
|
python
|
zarr-developers__zarr-python
|
src/zarr/storage/_fsspec.py
|
{
"start": 2101,
"end": 14699
}
|
class ____(Store):
"""
Store for remote data based on FSSpec.
Parameters
----------
fs : AsyncFileSystem
The Async FSSpec filesystem to use with this store.
read_only : bool
Whether the store is read-only
path : str
The root path of the store. This should be a relative path and must not include the
filesystem scheme.
allowed_exceptions : tuple[type[Exception], ...]
When fetching data, these cases will be deemed to correspond to missing keys.
Attributes
----------
fs
allowed_exceptions
supports_writes
supports_deletes
supports_listing
Raises
------
TypeError
If the Filesystem does not support async operations.
ValueError
If the path argument includes a scheme.
Warns
-----
ZarrUserWarning
If the file system (fs) was not created with `asynchronous=True`.
See Also
--------
FsspecStore.from_upath
FsspecStore.from_url
"""
# based on FSSpec
supports_writes: bool = True
supports_deletes: bool = True
supports_listing: bool = True
fs: AsyncFileSystem
allowed_exceptions: tuple[type[Exception], ...]
path: str
def __init__(
self,
fs: AsyncFileSystem,
read_only: bool = False,
path: str = "/",
allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS,
) -> None:
super().__init__(read_only=read_only)
self.fs = fs
self.path = path
self.allowed_exceptions = allowed_exceptions
if not self.fs.async_impl:
raise TypeError("Filesystem needs to support async operations.")
if not self.fs.asynchronous:
warnings.warn(
f"fs ({fs}) was not created with `asynchronous=True`, this may lead to surprising behavior",
category=ZarrUserWarning,
stacklevel=2,
)
@classmethod
def from_upath(
cls,
upath: Any,
read_only: bool = False,
allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS,
) -> FsspecStore:
"""
Create a FsspecStore from an upath object.
Parameters
----------
upath : UPath
The upath to the root of the store.
read_only : bool
Whether the store is read-only, defaults to False.
allowed_exceptions : tuple, optional
The exceptions that are allowed to be raised when accessing the
store. Defaults to ALLOWED_EXCEPTIONS.
Returns
-------
FsspecStore
"""
return cls(
fs=upath.fs,
path=upath.path.rstrip("/"),
read_only=read_only,
allowed_exceptions=allowed_exceptions,
)
@classmethod
def from_mapper(
cls,
fs_map: FSMap,
read_only: bool = False,
allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS,
) -> FsspecStore:
"""
Create a FsspecStore from a FSMap object.
Parameters
----------
fs_map : FSMap
Fsspec mutable mapping object.
read_only : bool
Whether the store is read-only, defaults to False.
allowed_exceptions : tuple, optional
The exceptions that are allowed to be raised when accessing the
store. Defaults to ALLOWED_EXCEPTIONS.
Returns
-------
FsspecStore
"""
fs = _make_async(fs_map.fs)
return cls(
fs=fs,
path=fs_map.root,
read_only=read_only,
allowed_exceptions=allowed_exceptions,
)
@classmethod
def from_url(
cls,
url: str,
storage_options: dict[str, Any] | None = None,
read_only: bool = False,
allowed_exceptions: tuple[type[Exception], ...] = ALLOWED_EXCEPTIONS,
) -> FsspecStore:
"""
Create a FsspecStore from a URL. The type of store is determined from the URL scheme.
Parameters
----------
url : str
The URL to the root of the store.
storage_options : dict, optional
The options to pass to fsspec when creating the filesystem.
read_only : bool
Whether the store is read-only, defaults to False.
allowed_exceptions : tuple, optional
The exceptions that are allowed to be raised when accessing the
store. Defaults to ALLOWED_EXCEPTIONS.
Returns
-------
FsspecStore
"""
try:
from fsspec import url_to_fs
except ImportError:
# before fsspec==2024.3.1
from fsspec.core import url_to_fs
opts = storage_options or {}
opts = {"asynchronous": True, **opts}
fs, path = url_to_fs(url, **opts)
if not fs.async_impl:
fs = _make_async(fs)
return cls(fs=fs, path=path, read_only=read_only, allowed_exceptions=allowed_exceptions)
def with_read_only(self, read_only: bool = False) -> FsspecStore:
# docstring inherited
return type(self)(
fs=self.fs,
path=self.path,
allowed_exceptions=self.allowed_exceptions,
read_only=read_only,
)
async def clear(self) -> None:
# docstring inherited
try:
for subpath in await self.fs._find(self.path, withdirs=True):
if subpath != self.path:
await self.fs._rm(subpath, recursive=True)
except FileNotFoundError:
pass
def __repr__(self) -> str:
return f"<FsspecStore({type(self.fs).__name__}, {self.path})>"
def __eq__(self, other: object) -> bool:
return (
isinstance(other, type(self))
and self.path == other.path
and self.read_only == other.read_only
and self.fs == other.fs
)
async def get(
self,
key: str,
prototype: BufferPrototype,
byte_range: ByteRequest | None = None,
) -> Buffer | None:
# docstring inherited
if not self._is_open:
await self._open()
path = _dereference_path(self.path, key)
try:
if byte_range is None:
value = prototype.buffer.from_bytes(await self.fs._cat_file(path))
elif isinstance(byte_range, RangeByteRequest):
value = prototype.buffer.from_bytes(
await self.fs._cat_file(
path,
start=byte_range.start,
end=byte_range.end,
)
)
elif isinstance(byte_range, OffsetByteRequest):
value = prototype.buffer.from_bytes(
await self.fs._cat_file(path, start=byte_range.offset, end=None)
)
elif isinstance(byte_range, SuffixByteRequest):
value = prototype.buffer.from_bytes(
await self.fs._cat_file(path, start=-byte_range.suffix, end=None)
)
else:
raise ValueError(f"Unexpected byte_range, got {byte_range}.")
except self.allowed_exceptions:
return None
except OSError as e:
if "not satisfiable" in str(e):
# this is an s3-specific condition we probably don't want to leak
return prototype.buffer.from_bytes(b"")
raise
else:
return value
async def set(
self,
key: str,
value: Buffer,
byte_range: tuple[int, int] | None = None,
) -> None:
# docstring inherited
if not self._is_open:
await self._open()
self._check_writable()
if not isinstance(value, Buffer):
raise TypeError(
f"FsspecStore.set(): `value` must be a Buffer instance. Got an instance of {type(value)} instead."
)
path = _dereference_path(self.path, key)
# write data
if byte_range:
raise NotImplementedError
await self.fs._pipe_file(path, value.to_bytes())
async def delete(self, key: str) -> None:
# docstring inherited
self._check_writable()
path = _dereference_path(self.path, key)
try:
await self.fs._rm(path)
except FileNotFoundError:
pass
except self.allowed_exceptions:
pass
async def delete_dir(self, prefix: str) -> None:
# docstring inherited
if not self.supports_deletes:
raise NotImplementedError(
"This method is only available for stores that support deletes."
)
self._check_writable()
path_to_delete = _dereference_path(self.path, prefix)
with suppress(*self.allowed_exceptions):
await self.fs._rm(path_to_delete, recursive=True)
async def exists(self, key: str) -> bool:
# docstring inherited
path = _dereference_path(self.path, key)
exists: bool = await self.fs._exists(path)
return exists
async def get_partial_values(
self,
prototype: BufferPrototype,
key_ranges: Iterable[tuple[str, ByteRequest | None]],
) -> list[Buffer | None]:
# docstring inherited
if key_ranges:
# _cat_ranges expects a list of paths, start, and end ranges, so we need to reformat each ByteRequest.
key_ranges = list(key_ranges)
paths: list[str] = []
starts: list[int | None] = []
stops: list[int | None] = []
for key, byte_range in key_ranges:
paths.append(_dereference_path(self.path, key))
if byte_range is None:
starts.append(None)
stops.append(None)
elif isinstance(byte_range, RangeByteRequest):
starts.append(byte_range.start)
stops.append(byte_range.end)
elif isinstance(byte_range, OffsetByteRequest):
starts.append(byte_range.offset)
stops.append(None)
elif isinstance(byte_range, SuffixByteRequest):
starts.append(-byte_range.suffix)
stops.append(None)
else:
raise ValueError(f"Unexpected byte_range, got {byte_range}.")
else:
return []
# TODO: expectations for exceptions or missing keys?
res = await self.fs._cat_ranges(paths, starts, stops, on_error="return")
# the following is an s3-specific condition we probably don't want to leak
res = [b"" if (isinstance(r, OSError) and "not satisfiable" in str(r)) else r for r in res]
for r in res:
if isinstance(r, Exception) and not isinstance(r, self.allowed_exceptions):
raise r
return [None if isinstance(r, Exception) else prototype.buffer.from_bytes(r) for r in res]
async def list(self) -> AsyncIterator[str]:
# docstring inherited
allfiles = await self.fs._find(self.path, detail=False, withdirs=False)
for onefile in (a.removeprefix(self.path + "/") for a in allfiles):
yield onefile
async def list_dir(self, prefix: str) -> AsyncIterator[str]:
# docstring inherited
prefix = f"{self.path}/{prefix.rstrip('/')}"
try:
allfiles = await self.fs._ls(prefix, detail=False)
except FileNotFoundError:
return
for onefile in (a.replace(prefix + "/", "") for a in allfiles):
yield onefile.removeprefix(self.path).removeprefix("/")
async def list_prefix(self, prefix: str) -> AsyncIterator[str]:
# docstring inherited
for onefile in await self.fs._find(
f"{self.path}/{prefix}", detail=False, maxdepth=None, withdirs=False
):
yield onefile.removeprefix(f"{self.path}/")
async def getsize(self, key: str) -> int:
path = _dereference_path(self.path, key)
info = await self.fs._info(path)
size = info.get("size")
if size is None:
# Not all filesystems support size. Fall back to reading the entire object
return await super().getsize(key)
else:
# fsspec doesn't have typing. We'll need to assume or verify this is true
return int(size)
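A minimal usage sketch, assuming s3fs is installed and the bucket and path exist (both hypothetical); from_url infers the filesystem from the URL scheme and requests asynchronous mode, as shown above:
import zarr
from zarr.storage import FsspecStore

store = FsspecStore.from_url(
    "s3://my-bucket/my-dataset.zarr",  # hypothetical location
    storage_options={"anon": True},
    read_only=True,
)
root = zarr.open_group(store, mode="r")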
|
FsspecStore
|
python
|
jina-ai__jina
|
jina/clients/base/stream_rpc.py
|
{
"start": 85,
"end": 2110
}
|
class ____:

"""Class that encapsulated the methods required to run a stream rpc call from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
**kwargs
):
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def stream_rpc_with_retry(self):
"""Wraps the stream rpc logic with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaRPCStub(self.channel)
async for resp in stub.Call(
self.req_iter,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
if self.show_progress:
self.p_bar.update()
yield resp
|
StreamRpc
|
python
|
pydantic__pydantic
|
pydantic/plugin/__init__.py
|
{
"start": 4969,
"end": 6291
}
|
class ____(BaseValidateHandlerProtocol, Protocol):
"""Event handler for `SchemaValidator.validate_json`."""
def on_enter(
self,
input: str | bytes | bytearray,
*,
strict: bool | None = None,
extra: ExtraValues | None = None,
context: Any | None = None,
self_instance: Any | None = None,
by_alias: bool | None = None,
by_name: bool | None = None,
) -> None:
"""Callback to be notified of validation start, and create an instance of the event handler.
Args:
input: The JSON data to be validated.
strict: Whether to validate the object in strict mode.
extra: Whether to ignore, allow, or forbid extra data during model validation.
context: The context to use for validation, this is passed to functional validators.
self_instance: An instance of a model to set attributes on from validation, this is used when running
validation from the `__init__` method of a model.
by_alias: Whether to use the field's alias to match the input data to an attribute.
by_name: Whether to use the field's name to match the input data to an attribute.
"""
StringInput: TypeAlias = 'dict[str, StringInput]'
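A sketch of a concrete handler for this protocol; on_success and on_error are assumed to come from BaseValidateHandlerProtocol, which is not shown in this span:
class LoggingValidateJsonHandler:
    """Prints instead of instrumenting; shape-compatible with the protocol above."""

    def on_enter(self, input, *, strict=None, extra=None, context=None,
                 self_instance=None, by_alias=None, by_name=None) -> None:
        print(f"validate_json called on {len(input)} chars/bytes of input")

    def on_success(self, result) -> None:  # assumed base-protocol hook
        print("validation succeeded")

    def on_error(self, error) -> None:  # assumed base-protocol hook
        print(f"validation failed: {error}")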
|
ValidateJsonHandlerProtocol
|
python
|
huggingface__transformers
|
src/transformers/models/gemma2/modular_gemma2.py
|
{
"start": 18745,
"end": 18807
}
|
class ____(GemmaPreTrainedModel):
pass
|
Gemma2PreTrainedModel
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/skill_list_params.py
|
{
"start": 331,
"end": 1099
}
|
class ____(TypedDict, total=False):
limit: int
"""Number of results to return per page.
Maximum value is 100. Defaults to 20.
"""
page: Optional[str]
"""Pagination token for fetching a specific page of results.
Pass the value from a previous response's `next_page` field to get the next page
of results.
"""
source: Optional[str]
"""Filter skills by source.
If provided, only skills from the specified source will be returned:
- `"custom"`: only return user-created skills
- `"anthropic"`: only return Anthropic-created skills
"""
betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")]
"""Optional header to specify the beta version(s) you want to use."""
|
SkillListParams
|
python
|
PrefectHQ__prefect
|
src/prefect/cli/_prompts.py
|
{
"start": 10528,
"end": 10920
}
|
class ____(PromptBase[str]):
response_type: type[str] = str
validate_error_message = "[prompt.invalid]Please enter a valid RRule string"
def process_response(self, value: str) -> str:
try:
RRuleSchedule.validate_rrule_str(value)
return value
except ValueError:
raise InvalidResponse(self.validate_error_message)
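A usage sketch, assuming PromptBase comes from rich, whose ask() re-prompts until process_response stops raising InvalidResponse:
rrule_str = RRuleStringPrompt.ask("Enter an RRule string")
print(f"Validated RRule: {rrule_str}")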
|
RRuleStringPrompt
|