| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, 6-201 chars) | class_span (dict) | source (string, 21-2.38M chars) | target (string, 1-96 chars) |
|---|---|---|---|---|---|
| python | getsentry__sentry | src/sentry/consumers/__init__.py | {"start": 26317, "end": 26744} |
class ____(ProcessingStrategyFactory):
def __init__(self, healthcheck_file_path: str, inner: ProcessingStrategyFactory):
self.healthcheck_file_path = healthcheck_file_path
self.inner = inner
def create_with_partitions(self, commit, partitions):
rv = self.inner.create_with_partitions(commit, partitions)
return Healthcheck(self.healthcheck_file_path, rv)
| HealthcheckStrategyFactoryWrapper |
| python | getsentry__sentry | src/sentry/testutils/cases.py | {"start": 27626, "end": 27825} |
class ____(BaseTestCase, BaseAPITransactionTestCase, APITestCaseMixin):
# We need Django to flush all databases.
databases: set[str] | str = "__all__"
method = "get"
| APITransactionTestCase |
| python | getsentry__sentry | src/sentry/conduit/endpoints/organization_conduit_demo.py | {"start": 780, "end": 895} |
class ____(serializers.Serializer):
conduit = ConduitCredentialsSerializer()
| ConduitCredentialsResponseSerializer |
| python | catalyst-team__catalyst | examples/recsys/multivae.py | {"start": 1159, "end": 3418} |
class ____(nn.Module):
def __init__(self, p_dims, q_dims=None, dropout=0.5):
super().__init__()
self.p_dims = p_dims
if q_dims:
assert (
q_dims[0] == p_dims[-1]
), "In and Out dimensions must equal to each other"
assert (
q_dims[-1] == p_dims[0]
), "Latent dimension for p- and q- network mismatches."
self.q_dims = q_dims
else:
self.q_dims = p_dims[::-1]
# Last dimension of q- network is for mean and variance
self.encoder = nn.Sequential()
self.encoder.add_module("normalize", Normalize())
self.encoder.add_module("dropout", nn.Dropout(dropout))
for i, (d_in, d_out) in enumerate(zip(self.q_dims[:-2], self.q_dims[1:-1])):
self.encoder.add_module(f"encoder_fc_{i + 1}", nn.Linear(d_in, d_out))
self.encoder.add_module(f"encoder_tanh_{i + 1}", nn.Tanh())
self.encoder.add_module(
f"encoder_fc_{len(self.q_dims) - 1}",
nn.Linear(self.q_dims[-2], self.q_dims[-1] * 2),
)
self.decoder = nn.Sequential()
for i, (d_in, d_out) in enumerate(zip(self.p_dims[:-2], self.p_dims[1:-1])):
self.decoder.add_module(f"decoder_fc_{i + 1}", nn.Linear(d_in, d_out))
self.decoder.add_module(f"decoder_tanh_{i + 1}", nn.Tanh())
self.decoder.add_module(
f"decoder_fc_{len(self.p_dims) - 1}",
nn.Linear(self.p_dims[-2], self.p_dims[-1]),
)
self.encoder.apply(self.init_weights)
self.decoder.apply(self.init_weights)
def forward(self, x):
z = self.encoder(x)
mu, logvar = z[:, : self.q_dims[-1]], z[:, self.q_dims[-1] :]
z = self.reparameterize(mu, logvar)
z = self.decoder(z)
return z, mu, logvar
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
else:
return mu
def init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias.data, 0)
| MultiVAE |
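For context on the `MultiVAE` source above, here is a minimal shape-check sketch. It assumes the class (and the `Normalize` helper defined elsewhere in the same example file) is importable; the layer sizes and batch size are illustrative only.

```python
import torch

# Illustrative sizes: 1000 items, 600-d hidden layer, 200-d latent code.
n_items = 1000
model = MultiVAE(p_dims=[200, 600, n_items])  # q_dims defaults to reversed p_dims

x = torch.rand(32, n_items)      # a batch of user-item interaction vectors
recon, mu, logvar = model(x)     # decoder logits plus the variational parameters
print(recon.shape, mu.shape, logvar.shape)
# torch.Size([32, 1000]) torch.Size([32, 200]) torch.Size([32, 200])
```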
| python | cython__cython | Cython/Compiler/Nodes.py | {"start": 393817, "end": 395741} |
class ____(StatNode):
# cimport statement
#
# module_name string Qualified name of module being imported
# as_name string or None Name specified in "as" clause, if any
# is_absolute bool True for absolute imports, False otherwise
child_attrs = []
is_absolute = False
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(
self.module_name, self.pos, relative_level=0 if self.is_absolute else -1)
if "." in self.module_name:
names = [EncodedString(name) for name in self.module_name.split(".")]
top_name = names[0]
top_module_scope = env.context.find_submodule(top_name)
module_scope = top_module_scope
for name in names[1:]:
submodule_scope = module_scope.find_submodule(name)
module_scope.declare_module(name, submodule_scope, self.pos)
module_scope = submodule_scope
if self.as_name:
env.declare_module(self.as_name, module_scope, self.pos)
else:
env.add_imported_module(module_scope)
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
entry = env.declare_module(name, module_scope, self.pos)
entry.known_standard_library_import = self.module_name
if self.module_name in utility_code_for_cimports:
env.use_utility_code(utility_code_for_cimports[self.module_name](env.context.options))
def analyse_expressions(self, env):
return self
def generate_execution_code(self, code):
if self.module_name == "numpy":
cimport_numpy_check(self, code)
| CImportStatNode |
| python | spyder-ide__spyder | spyder/plugins/completion/api.py | {"start": 17401, "end": 17579} |
class ____:
"""Language Server Protocol event types."""
DOCUMENT = 'textDocument'
WORKSPACE = 'workspace'
WINDOW = 'window'
CODE_LENS = 'codeLens'
| LSPEventTypes |
| python | celery__celery | t/unit/app/test_beat.py | {"start": 657, "end": 892} |
class ____:
started = False
stopped = False
def __init__(self, *args, **kwargs):
pass
def start(self, **kwargs):
self.started = True
def stop(self, **kwargs):
self.stopped = True
| MockService |
| python | getlogbook__logbook | src/logbook/handlers.py | {"start": 53285, "end": 60145} |
class ____(Handler, StringFormatterHandlerMixin):
"""A handler class which sends formatted logging records to a
syslog server. By default it will send to it via unix socket.
"""
default_format_string = SYSLOG_FORMAT_STRING
# priorities
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
level_priority_map = {
DEBUG: LOG_DEBUG,
INFO: LOG_INFO,
NOTICE: LOG_NOTICE,
WARNING: LOG_WARNING,
ERROR: LOG_ERR,
CRITICAL: LOG_CRIT,
}
def __init__(
self,
application_name=None,
address=None,
facility="user",
socktype=socket.SOCK_DGRAM,
level=NOTSET,
format_string=None,
filter=None,
bubble=False,
record_delimiter=None,
):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.application_name = application_name
if address is None:
if sys.platform == "darwin":
address = "/var/run/syslog"
else:
address = "/dev/log"
self.remote_address = self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self._connect_unixsocket()
self.enveloper = self.unix_envelope
default_delimiter = "\x00"
else:
self._connect_netsocket()
self.enveloper = self.net_envelope
default_delimiter = "\n"
self.record_delimiter = (
default_delimiter if record_delimiter is None else record_delimiter
)
self.connection_exception = getattr(
__builtins__, "BrokenPipeError", socket.error
)
def _connect_unixsocket(self):
self.unixsocket = True
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
self.socket.connect(self.address)
except OSError:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(self.address)
def _connect_netsocket(self):
self.unixsocket = False
self.socket = socket.socket(socket.AF_INET, self.socktype)
if self.socktype == socket.SOCK_STREAM:
self.socket.connect(self.remote_address)
self.address = self.socket.getsockname()
def encode_priority(self, record):
facility = self.facility_names[self.facility]
priority = self.level_priority_map.get(record.level, self.LOG_WARNING)
return (facility << 3) | priority
def wrap_segments(self, record, before):
msg = self.format(record)
segments = [segment for segment in msg.split(self.record_delimiter)]
return (before + segment + self.record_delimiter for segment in segments)
def unix_envelope(self, record):
before = "<{}>{}".format(
self.encode_priority(record),
self.application_name + ":" if self.application_name else "",
)
return self.wrap_segments(record, before)
def net_envelope(self, record):
# Gross but effective
try:
format_string = self.format_string
application_name = self.application_name
if (
not application_name
and record.channel
and "{record.channel}: " in format_string
):
self.format_string = format_string.replace("{record.channel}: ", "")
self.application_name = record.channel
# RFC 5424: <PRIVAL>version timestamp hostname app-name procid
# msgid structured-data message
before = "<{}>1 {}Z {} {} {} - - ".format(
self.encode_priority(record),
record.time.isoformat(),
socket.gethostname(),
self.application_name if self.application_name else "-",
record.process,
)
return self.wrap_segments(record, before)
finally:
self.format_string = format_string
self.application_name = application_name
def emit(self, record):
for segment in self.enveloper(record):
self.send_to_socket(segment.encode("utf-8"))
def send_to_socket(self, data):
if self.unixsocket:
try:
self.socket.send(data)
except OSError:
self._connect_unixsocket()
self.socket.send(data)
elif self.socktype == socket.SOCK_DGRAM:
# the flags are no longer optional on Python 3
self.socket.sendto(data, 0, self.address)
else:
try:
self.socket.sendall(data)
except self.connection_exception:
self._connect_netsocket()
self.socket.send(data)
def close(self):
self.socket.close()
| SyslogHandler |
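The docstring above only says that records are sent to a syslog server; a hedged usage sketch follows (Logbook's usual bound-handler idiom; the application name and UDP address are made up).

```python
import logbook

# Hypothetical setup: send WARNING-and-above records for "myapp" to a remote
# syslog daemon over UDP; a 2-tuple address selects the network-socket path.
handler = logbook.SyslogHandler(
    application_name="myapp",
    address=("syslog.example.com", 514),
    facility="local0",
    level=logbook.WARNING,
)
log = logbook.Logger("myapp")
with handler.applicationbound():  # route records through this handler
    log.warn("disk space low")
```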
| python | microsoft__pyright | packages/pyright-internal/src/tests/samples/inconsistentConstructor1.py | {"start": 104, "end": 165} |
class ____:
def __init__(self, a: int) -> None: ...
| Parent1 |
| python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {"start": 63838, "end": 64723} |
class ____(MMTemplateConfigMixin):
"""
Small mixin to ensure that the workspace arg is correct for TMA
and TMA specific filtering can happen.
"""
def get_extra_kwargs(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> dict[str, Any]:
kwargs = super().get_extra_kwargs(kernel_inputs, op_name)
kwargs["workspace_arg"] = get_tma_workspace_arg(
num_tma_descriptors=2,
device=kernel_inputs.device(),
)
return kwargs
# pyrefly: ignore [bad-override]
def _filter_configs(self, configs: list[BaseConfig]) -> list[BaseConfig]:
"""
TMA specific filtering, as num_warps=2 not safe for TMA
"""
configs = [c for c in configs if c.num_warps != 2]
return super()._filter_configs(configs)
# TMA-specific mixin for TMA templates
| TMAWorkspaceMixin |
| python | celery__celery | t/smoke/workers/latest.py | {"start": 198, "end": 1761} |
class ____(CeleryWorkerContainer):
"""Defines the configurations for a Celery worker container.
This worker will install the latest version of Celery from PyPI.
"""
@property
def client(self) -> Any:
return self
@classmethod
def log_level(cls) -> str:
return "INFO"
@classmethod
def worker_name(cls) -> str:
return "celery_latest_tests_worker"
@classmethod
def worker_queue(cls) -> str:
return "celery_latest_tests_queue"
# Build the image from the PyPI Dockerfile
celery_latest_worker_image = build(
path=".",
dockerfile="t/smoke/workers/docker/pypi",
tag="t/smoke/worker:latest",
buildargs=CeleryLatestWorkerContainer.buildargs(),
)
# Define container settings
celery_latest_worker_container = container(
image="{celery_latest_worker_image.id}",
environment=fxtr("default_worker_env"),
network="{default_pytest_celery_network.name}",
volumes={"{default_worker_volume.name}": defaults.DEFAULT_WORKER_VOLUME},
wrapper_class=CeleryLatestWorkerContainer,
timeout=defaults.DEFAULT_WORKER_CONTAINER_TIMEOUT,
command=CeleryLatestWorkerContainer.command(),
)
@pytest.fixture
def celery_latest_worker(
celery_latest_worker_container: CeleryLatestWorkerContainer,
celery_setup_app: Celery,
) -> CeleryTestWorker:
"""Creates a pytest-celery worker node from the worker container."""
worker = CeleryTestWorker(celery_latest_worker_container, app=celery_setup_app)
yield worker
worker.teardown()
| CeleryLatestWorkerContainer |
| python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table34.py | {"start": 315, "end": 1528} |
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table34.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"num_format": "0.0000"})
data = [
["Foo", 1234, 0, 4321],
["Bar", 1256, 0, 4320],
["Baz", 2234, 0, 4332],
["Bop", 1324, 0, 4333],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table(
"C2:F6",
{
"data": data,
"columns": [
{},
{},
{},
{"formula": "Table1[[#This Row],[Column3]]", "format": format1},
],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
| python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {"start": 28589, "end": 32313} |
class ____(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testHessian1D(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = x^T A x is H = A + A^T.
m = 4
rng = np.random.RandomState([1, 2, 3])
mat_value = rng.randn(m, m).astype("float32")
x_value = rng.randn(m).astype("float32")
hess_value = mat_value + mat_value.T
with self.session():
mat = constant_op.constant(mat_value)
x = constant_op.constant(x_value)
x_mat_x = math_ops.reduce_sum(x[:, None] * mat * x[None, :])
hess = gradients.hessians(x_mat_x, x)[0]
hess_actual = self.evaluate(hess)
self.assertAllClose(hess_value, hess_actual)
@test_util.run_v1_only("b/120545219")
def testHessian1D_multi(self):
# Test the computation of the hessian with respect to multiple tensors
m = 4
n = 3
rng = np.random.RandomState([1, 2, 3])
mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
x_values = [rng.randn(m).astype("float32") for _ in range(n)]
hess_values = [mat_value + mat_value.T for mat_value in mat_values]
with self.session():
mats = [constant_op.constant(mat_value) for mat_value in mat_values]
xs = [constant_op.constant(x_value) for x_value in x_values]
xs_mats_xs = [
math_ops.reduce_sum(x[:, None] * mat * x[None, :])
for x, mat in zip(xs, mats)
]
hessians = gradients.hessians(xs_mats_xs, xs)
hessians_actual = [hess.eval() for hess in hessians]
for hess_value, hess_actual in zip(hess_values, hessians_actual):
self.assertAllClose(hess_value, hess_actual)
@test_util.run_v1_only("b/120545219")
def testHessianInvalidDimension(self):
for shape in [(10, 10), None]:
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape)
# Expect a ValueError because the dimensions are wrong
with self.assertRaises(ValueError):
gradients.hessians(x, x)
@test_util.run_v1_only("b/120545219")
def testHessian2D_square_matrix(self):
# Manually compute the Hessian explicitly for a low-dimensional problem
# and check that `hessian` matches. Specifically, the Hessian of
# f(x) = 1/2 * x^T * x is H = constant (block identity matrix)
m = 3
rng = np.random.RandomState([1, 2, 3])
x_value = rng.randn(m, m).astype("float32")
with self.session():
x = constant_op.constant(x_value)
x_square = math_ops.reduce_sum(
math_ops.matmul(array_ops.transpose(x), x) * 0.5
)
hess = gradients.hessians(x_square, x)[0]
hess_actual = self.evaluate(hess)
hess_value = np.bmat([
[elem*np.ones((m, m)) for elem in vec]
for vec in np.eye(m)
]).astype("float32")
self.assertAllEqual((m, m, m, m), hess_actual.shape)
self.assertAllClose(hess_value, hess_actual.reshape((m * m, m * m)))
@test_util.run_v1_only("b/120545219")
def testHessian2D_non_square_matrix(self):
m = 3
n = 4
rng = np.random.RandomState([1, 2, 3])
x_value = rng.randn(m, n).astype("float32")
with self.session():
x = constant_op.constant(x_value)
x_square = math_ops.reduce_sum(
math_ops.matmul(array_ops.transpose(x), x) * 0.5
)
hess = gradients.hessians(x_square, x)[0]
hess_actual = self.evaluate(hess)
hess_value = np.bmat([
[elem*np.ones((n, n)) for elem in vec]
for vec in np.eye(m)
]).astype("float32")
self.assertAllEqual((m, n, m, n), hess_actual.shape)
self.assertAllClose(hess_value, hess_actual.reshape((m * n, m * n)))
| HessianTest |
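The comments in `testHessian1D` above rely on the identity that the Hessian of f(x) = x^T A x is A + A^T; the NumPy-only sketch below checks that identity with central finite differences, independent of TensorFlow.

```python
import numpy as np

# Numeric check: for f(x) = x^T A x, the Hessian is A + A^T (independent of x).
rng = np.random.RandomState(0)
m = 4
A = rng.randn(m, m)
x0 = rng.randn(m)
f = lambda x: x @ A @ x

eps = 1e-4
I = np.eye(m)
H = np.zeros((m, m))
for i in range(m):
    for j in range(m):
        # central-difference approximation of d^2 f / (dx_i dx_j)
        H[i, j] = (
            f(x0 + eps * I[i] + eps * I[j])
            - f(x0 + eps * I[i] - eps * I[j])
            - f(x0 - eps * I[i] + eps * I[j])
            + f(x0 - eps * I[i] - eps * I[j])
        ) / (4 * eps**2)

print(np.allclose(H, A + A.T, atol=1e-5))  # True
```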
| python | TheAlgorithms__Python | data_structures/linked_list/middle_element_of_linked_list.py | {"start": 144, "end": 1543} |
class ____:
def __init__(self):
self.head = None
def push(self, new_data: int) -> int:
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
return self.head.data
def middle_element(self) -> int | None:
"""
>>> link = LinkedList()
>>> link.middle_element()
No element found.
>>> link.push(5)
5
>>> link.push(6)
6
>>> link.push(8)
8
>>> link.push(8)
8
>>> link.push(10)
10
>>> link.push(12)
12
>>> link.push(17)
17
>>> link.push(7)
7
>>> link.push(3)
3
>>> link.push(20)
20
>>> link.push(-20)
-20
>>> link.middle_element()
12
>>>
"""
slow_pointer = self.head
fast_pointer = self.head
if self.head:
while fast_pointer and fast_pointer.next:
fast_pointer = fast_pointer.next.next
slow_pointer = slow_pointer.next
return slow_pointer.data
else:
print("No element found.")
return None
if __name__ == "__main__":
link = LinkedList()
for _ in range(int(input().strip())):
data = int(input().strip())
link.push(data)
print(link.middle_element())
| LinkedList |
| python | openai__openai-python | src/openai/types/vector_stores/file_batch_list_files_params.py | {"start": 224, "end": 1451} |
class ____(TypedDict, total=False):
vector_store_id: Required[str]
after: str
"""A cursor for use in pagination.
`after` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, ending with obj_foo, your
subsequent call can include after=obj_foo in order to fetch the next page of the
list.
"""
before: str
"""A cursor for use in pagination.
`before` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, starting with obj_foo, your
subsequent call can include before=obj_foo in order to fetch the previous page
of the list.
"""
filter: Literal["in_progress", "completed", "failed", "cancelled"]
"""Filter by file status.
One of `in_progress`, `completed`, `failed`, `cancelled`.
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""Sort order by the `created_at` timestamp of the objects.
`asc` for ascending order and `desc` for descending order.
"""
| FileBatchListFilesParams |
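Because this is a `total=False` TypedDict, only `vector_store_id` is required; a small sketch of building the params dict (assuming the class above is imported; the IDs are placeholders):

```python
# Placeholder IDs; every key except vector_store_id is optional.
params: FileBatchListFilesParams = {
    "vector_store_id": "vs_abc123",
    "after": "file-xyz",     # cursor returned by a previous page
    "filter": "completed",   # only list files that finished processing
    "limit": 50,             # 1-100, default 20
    "order": "desc",         # newest first by created_at
}
```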
| python | fluentpython__example-code-2e | 15-more-types/cafeteria/contravariant.py | {"start": 102, "end": 164} |
class ____(Refuse):
"""Biodegradable refuse."""
| Biodegradable |
| python | pypa__setuptools | pkg_resources/tests/test_resources.py | {"start": 554, "end": 1069} |
class ____(pkg_resources.EmptyProvider):
"""Mock object to return metadata as if from an on-disk distribution"""
def __init__(self, *pairs) -> None:
self.metadata = dict(pairs)
def has_metadata(self, name) -> bool:
return name in self.metadata
def get_metadata(self, name):
return self.metadata[name]
def get_metadata_lines(self, name):
return pkg_resources.yield_lines(self.get_metadata(name))
dist_from_fn = pkg_resources.Distribution.from_filename
| Metadata |
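A short sketch of how such a provider is typically used in these tests: the metadata is handed to a `pkg_resources.Distribution` instead of being read from disk. The file name and requirement lines below are made up.

```python
import pkg_resources

# Fake "requires.txt" content; nothing touches the filesystem.
md = Metadata(("requires.txt", "pytest\nwheel\n"))
print(md.has_metadata("requires.txt"))              # True
print(list(md.get_metadata_lines("requires.txt")))  # ['pytest', 'wheel']

# The surrounding tests pass such providers as a Distribution's metadata:
dist = pkg_resources.Distribution(project_name="FooPkg", version="1.0", metadata=md)
```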
| python | astropy__astropy | astropy/modeling/projections.py | {"start": 18609, "end": 19766} |
class ____(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
        \theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = _ParameterDS(default=1.0)
lam = _ParameterDS(default=1.0)
def _mu_validator(self, value):
if np.any(value == -self.lam):
raise InputParameterError("CYP projection is not defined for mu = -lambda")
mu._validator = _mu_validator
def _lam_validator(self, value):
if np.any(value == -self.mu):
raise InputParameterError("CYP projection is not defined for lambda = -mu")
lam._validator = _lam_validator
| Pix2Sky_CylindricalPerspective |
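A hedged evaluation sketch: astropy projection models are callable, mapping projection-plane coordinates (x, y) to native spherical coordinates (phi, theta) in degrees. The input values here are arbitrary.

```python
from astropy.modeling.projections import Pix2Sky_CylindricalPerspective

proj = Pix2Sky_CylindricalPerspective(mu=1.0, lam=1.0)
phi, theta = proj(10.0, 20.0)  # arbitrary (x, y) in deg -> (phi, theta) in deg
print(phi, theta)

# The validators shown above reject mu == -lam with InputParameterError.
```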
| python | anthropics__anthropic-sdk-python | src/anthropic/types/citations_web_search_result_location.py | {"start": 236, "end": 429} |
class ____(BaseModel):
cited_text: str
encrypted_index: str
title: Optional[str] = None
type: Literal["web_search_result_location"]
url: str
| CitationsWebSearchResultLocation |
| python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-postgres/llama_index/vector_stores/postgres/base.py | {"start": 6171, "end": 50044} |
class ____(BasePydanticVectorStore):
"""
Postgres Vector Store.
Examples:
`pip install llama-index-vector-stores-postgres`
```python
from llama_index.vector_stores.postgres import PGVectorStore
# Create PGVectorStore instance
vector_store = PGVectorStore.from_params(
database="vector_db",
host="localhost",
password="password",
port=5432,
user="postgres",
table_name="paul_graham_essay",
            embed_dim=1536,  # openai embedding dimension
            use_halfvec=True,  # Enable half precision
)
```
"""
stores_text: bool = True
flat_metadata: bool = False
connection_string: str
async_connection_string: str
table_name: str
schema_name: str
embed_dim: int
hybrid_search: bool
text_search_config: str
cache_ok: bool
perform_setup: bool
debug: bool
use_jsonb: bool
create_engine_kwargs: Dict
initialization_fail_on_error: bool = False
indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None
hnsw_kwargs: Optional[Dict[str, Any]]
use_halfvec: bool = False
_base: Any = PrivateAttr()
_table_class: Any = PrivateAttr()
_engine: Optional[sqlalchemy.engine.Engine] = PrivateAttr(default=None)
_session: sqlalchemy.orm.Session = PrivateAttr()
_async_engine: Optional[sqlalchemy.ext.asyncio.AsyncEngine] = PrivateAttr(
default=None
)
_async_session: sqlalchemy.ext.asyncio.AsyncSession = PrivateAttr()
_is_initialized: bool = PrivateAttr(default=False)
_customize_query_fn: Optional[Callable[[Select, Any, Any], Select]] = PrivateAttr(
default=None
)
def __init__(
self,
connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
async_connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
table_name: Optional[str] = None,
schema_name: Optional[str] = None,
hybrid_search: bool = False,
text_search_config: str = "english",
embed_dim: int = 1536,
cache_ok: bool = False,
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
hnsw_kwargs: Optional[Dict[str, Any]] = None,
create_engine_kwargs: Optional[Dict[str, Any]] = None,
initialization_fail_on_error: bool = False,
use_halfvec: bool = False,
engine: Optional[sqlalchemy.engine.Engine] = None,
async_engine: Optional[sqlalchemy.ext.asyncio.AsyncEngine] = None,
indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
customize_query_fn: Optional[Callable[[Select, Any, Any], Select]] = None,
) -> None:
"""
Constructor.
Args:
connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to postgres db.
async_connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to async pg db.
table_name (str): Table name.
schema_name (str): Schema name.
hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
text_search_config (str, optional): Text search configuration. Defaults to "english".
embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
cache_ok (bool, optional): Enable cache. Defaults to False.
perform_setup (bool, optional): If db should be set up. Defaults to True.
debug (bool, optional): Debug mode. Defaults to False.
use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally "hnsw_dist_method". Defaults to None,
which turns off HNSW search.
create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
use_halfvec (bool, optional): If `True`, use half-precision vectors. Defaults to False.
engine (Optional[sqlalchemy.engine.Engine], optional): SQLAlchemy engine instance to use. Defaults to None.
async_engine (Optional[sqlalchemy.ext.asyncio.AsyncEngine], optional): SQLAlchemy async engine instance to use. Defaults to None.
indexed_metadata_keys (Optional[List[Tuple[str, str]]], optional): Set of metadata keys with their type to index. Defaults to None.
customize_query_fn (Optional[Callable[[Select, Any, Any], Select]], optional): Function used to customize PostgreSQL queries. Defaults to None.
"""
table_name = table_name.lower() if table_name else "llamaindex"
schema_name = schema_name.lower() if schema_name else "public"
if hybrid_search and text_search_config is None:
raise ValueError(
"Sparse vector index creation requires "
"a text search configuration specification."
)
from sqlalchemy.orm import declarative_base
super().__init__(
connection_string=str(connection_string),
async_connection_string=str(async_connection_string),
table_name=table_name,
schema_name=schema_name,
hybrid_search=hybrid_search,
text_search_config=text_search_config,
embed_dim=embed_dim,
cache_ok=cache_ok,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
hnsw_kwargs=hnsw_kwargs,
create_engine_kwargs=create_engine_kwargs or {},
initialization_fail_on_error=initialization_fail_on_error,
use_halfvec=use_halfvec,
indexed_metadata_keys=indexed_metadata_keys,
)
# sqlalchemy model
self._base = declarative_base()
self._table_class = get_data_model(
self._base,
table_name,
schema_name,
hybrid_search,
text_search_config,
cache_ok,
embed_dim=embed_dim,
use_jsonb=use_jsonb,
use_halfvec=use_halfvec,
indexed_metadata_keys=indexed_metadata_keys,
)
# both engine and async_engine must be provided, or both must be None
if engine is not None and async_engine is not None:
self._engine = engine
self._async_engine = async_engine
elif engine is None and async_engine is None:
pass
else:
raise ValueError(
"Both engine and async_engine must be provided, or both must be None"
)
self._customize_query_fn = customize_query_fn
async def close(self) -> None:
if not self._is_initialized:
return
if self._engine:
self._engine.dispose()
if self._async_engine:
await self._async_engine.dispose()
@classmethod
def class_name(cls) -> str:
return "PGVectorStore"
@classmethod
def from_params(
cls,
host: Optional[str] = None,
port: Optional[str] = None,
database: Optional[str] = None,
user: Optional[str] = None,
password: Optional[str] = None,
table_name: str = "llamaindex",
schema_name: str = "public",
connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
async_connection_string: Optional[Union[str, sqlalchemy.engine.URL]] = None,
hybrid_search: bool = False,
text_search_config: str = "english",
embed_dim: int = 1536,
cache_ok: bool = False,
perform_setup: bool = True,
debug: bool = False,
use_jsonb: bool = False,
hnsw_kwargs: Optional[Dict[str, Any]] = None,
create_engine_kwargs: Optional[Dict[str, Any]] = None,
use_halfvec: bool = False,
indexed_metadata_keys: Optional[Set[Tuple[str, PGType]]] = None,
customize_query_fn: Optional[Callable[[Select, Any, Any], Select]] = None,
) -> "PGVectorStore":
"""
Construct from params.
Args:
host (Optional[str], optional): Host of postgres connection. Defaults to None.
port (Optional[str], optional): Port of postgres connection. Defaults to None.
database (Optional[str], optional): Postgres DB name. Defaults to None.
user (Optional[str], optional): Postgres username. Defaults to None.
password (Optional[str], optional): Postgres password. Defaults to None.
table_name (str): Table name. Defaults to "llamaindex".
schema_name (str): Schema name. Defaults to "public".
connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to postgres db
async_connection_string (Union[str, sqlalchemy.engine.URL]): Connection string to async pg db
hybrid_search (bool, optional): Enable hybrid search. Defaults to False.
text_search_config (str, optional): Text search configuration. Defaults to "english".
embed_dim (int, optional): Embedding dimensions. Defaults to 1536.
cache_ok (bool, optional): Enable cache. Defaults to False.
perform_setup (bool, optional): If db should be set up. Defaults to True.
debug (bool, optional): Debug mode. Defaults to False.
use_jsonb (bool, optional): Use JSONB instead of JSON. Defaults to False.
hnsw_kwargs (Optional[Dict[str, Any]], optional): HNSW kwargs, a dict that
contains "hnsw_ef_construction", "hnsw_ef_search", "hnsw_m", and optionally "hnsw_dist_method". Defaults to None,
which turns off HNSW search.
create_engine_kwargs (Optional[Dict[str, Any]], optional): Engine parameters to pass to create_engine. Defaults to None.
use_halfvec (bool, optional): If `True`, use half-precision vectors. Defaults to False.
indexed_metadata_keys (Optional[Set[Tuple[str, str]]], optional): Set of metadata keys to index. Defaults to None.
customize_query_fn (Optional[Callable[[Select, Any, Any], Select]], optional): Function used to customize PostgreSQL queries. Defaults to None.
Returns:
PGVectorStore: Instance of PGVectorStore constructed from params.
"""
conn_str = (
connection_string
or f"postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}"
)
async_conn_str = async_connection_string or (
f"postgresql+asyncpg://{user}:{password}@{host}:{port}/{database}"
)
return cls(
connection_string=conn_str,
async_connection_string=async_conn_str,
table_name=table_name,
schema_name=schema_name,
hybrid_search=hybrid_search,
text_search_config=text_search_config,
embed_dim=embed_dim,
cache_ok=cache_ok,
perform_setup=perform_setup,
debug=debug,
use_jsonb=use_jsonb,
hnsw_kwargs=hnsw_kwargs,
create_engine_kwargs=create_engine_kwargs,
use_halfvec=use_halfvec,
indexed_metadata_keys=indexed_metadata_keys,
customize_query_fn=customize_query_fn,
)
@property
def client(self) -> Any:
if not self._is_initialized:
return None
return self._engine
def _connect(self) -> Any:
from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
self._engine = self._engine or create_engine(
self.connection_string, echo=self.debug, **self.create_engine_kwargs
)
self._session = sessionmaker(self._engine)
self._async_engine = self._async_engine or create_async_engine(
self.async_connection_string, **self.create_engine_kwargs
)
self._async_session = sessionmaker(self._async_engine, class_=AsyncSession) # type: ignore
def _create_schema_if_not_exists(self) -> bool:
"""
Create the schema if it does not exist.
Returns True if the schema was created, False if it already existed.
"""
if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", self.schema_name):
raise ValueError(f"Invalid schema_name: {self.schema_name}")
with self._session() as session, session.begin():
# Check if the specified schema exists with "CREATE" statement
check_schema_statement = sqlalchemy.text(
f"SELECT schema_name FROM information_schema.schemata WHERE schema_name = :schema_name"
).bindparams(schema_name=self.schema_name)
result = session.execute(check_schema_statement).fetchone()
# If the schema does not exist, then create it
schema_doesnt_exist = result is None
if schema_doesnt_exist:
create_schema_statement = sqlalchemy.text(
# DDL won't tolerate quoted string literal here for schema_name,
# so use a format string to embed the schema_name directly, instead of a param.
f"CREATE SCHEMA IF NOT EXISTS {self.schema_name}"
)
session.execute(create_schema_statement)
session.commit()
return schema_doesnt_exist
def _create_tables_if_not_exists(self) -> None:
with self._session() as session, session.begin():
self._table_class.__table__.create(session.connection(), checkfirst=True)
def _create_extension(self) -> None:
import sqlalchemy
with self._session() as session, session.begin():
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector")
session.execute(statement)
session.commit()
def _create_hnsw_index(self) -> None:
import sqlalchemy
if (
"hnsw_ef_construction" not in self.hnsw_kwargs
or "hnsw_m" not in self.hnsw_kwargs
):
raise ValueError(
"Make sure hnsw_ef_search, hnsw_ef_construction, and hnsw_m are in hnsw_kwargs."
)
hnsw_ef_construction = self.hnsw_kwargs.pop("hnsw_ef_construction")
hnsw_m = self.hnsw_kwargs.pop("hnsw_m")
# If user didn’t specify an operator, pick a default based on whether halfvec is used
if "hnsw_dist_method" in self.hnsw_kwargs:
hnsw_dist_method = self.hnsw_kwargs.pop("hnsw_dist_method")
else:
if self.use_halfvec:
hnsw_dist_method = "halfvec_l2_ops"
else:
# Default to vector_cosine_ops
hnsw_dist_method = "vector_cosine_ops"
index_name = f"{self._table_class.__tablename__}_embedding_idx"
with self._session() as session, session.begin():
statement = sqlalchemy.text(
f"CREATE INDEX IF NOT EXISTS {index_name} "
f"ON {self.schema_name}.{self._table_class.__tablename__} "
f"USING hnsw (embedding {hnsw_dist_method}) "
f"WITH (m = {hnsw_m}, ef_construction = {hnsw_ef_construction})"
)
session.execute(statement)
session.commit()
def _initialize(self) -> None:
fail_on_error = self.initialization_fail_on_error
if not self._is_initialized:
self._connect()
if self.perform_setup:
try:
self._create_schema_if_not_exists()
except Exception as e:
_logger.warning(f"PG Setup: Error creating schema: {e}")
if fail_on_error:
raise
try:
self._create_extension()
except Exception as e:
_logger.warning(f"PG Setup: Error creating extension: {e}")
if fail_on_error:
raise
try:
self._create_tables_if_not_exists()
except Exception as e:
_logger.warning(f"PG Setup: Error creating tables: {e}")
if fail_on_error:
raise
if self.hnsw_kwargs is not None:
try:
self._create_hnsw_index()
except Exception as e:
_logger.warning(f"PG Setup: Error creating HNSW index: {e}")
if fail_on_error:
raise
self._is_initialized = True
def _node_to_table_row(self, node: BaseNode) -> Any:
return self._table_class(
node_id=node.node_id,
embedding=node.get_embedding(),
text=node.get_content(metadata_mode=MetadataMode.NONE),
metadata_=node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
),
)
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
self._initialize()
ids = []
with self._session() as session, session.begin():
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
session.add(item)
session.commit()
return ids
async def async_add(self, nodes: List[BaseNode], **kwargs: Any) -> List[str]:
self._initialize()
ids = []
async with self._async_session() as session, session.begin():
for node in nodes:
ids.append(node.node_id)
item = self._node_to_table_row(node)
session.add(item)
await session.commit()
return ids
def _to_postgres_operator(self, operator: FilterOperator) -> str:
if operator == FilterOperator.EQ:
return "="
elif operator == FilterOperator.GT:
return ">"
elif operator == FilterOperator.LT:
return "<"
elif operator == FilterOperator.NE:
return "!="
elif operator == FilterOperator.GTE:
return ">="
elif operator == FilterOperator.LTE:
return "<="
elif operator == FilterOperator.IN:
return "IN"
elif operator == FilterOperator.NIN:
return "NOT IN"
elif operator == FilterOperator.CONTAINS:
return "@>"
elif operator == FilterOperator.TEXT_MATCH:
return "LIKE"
elif operator == FilterOperator.TEXT_MATCH_INSENSITIVE:
return "ILIKE"
elif operator == FilterOperator.IS_EMPTY:
return "IS NULL"
elif operator == FilterOperator.ANY:
return "?|"
elif operator == FilterOperator.ALL:
return "?&"
else:
_logger.warning(f"Unknown operator: {operator}, fallback to '='")
return "="
def _build_filter_clause(self, filter_: MetadataFilter) -> Any:
from sqlalchemy import text
if filter_.operator in [FilterOperator.IN, FilterOperator.NIN]:
# Expects a single value in the metadata, and a list to compare
# In Python, to create a tuple with a single element, you need to include a comma after the element
# This code will correctly format the IN clause whether there is one element or multiple elements in the list:
filter_value = ", ".join(f"'{e}'" for e in filter_.value)
return text(
f"metadata_->>'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)} "
f"({filter_value})"
)
elif filter_.operator in [FilterOperator.ANY, FilterOperator.ALL]:
# Expects a text array stored in the metadata, and a list of values to compare
# Works with text[] arrays using PostgreSQL ?| (ANY) and ?& (ALL) operators
# Example: metadata_::jsonb->'tags' ?| array['AI', 'ML']
filter_value = ", ".join(f"'{e}'" for e in filter_.value)
return text(
f"metadata_::jsonb->'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)} "
f"array[{filter_value}]"
)
elif filter_.operator == FilterOperator.CONTAINS:
# Expects a list stored in the metadata, and a single value to compare
return text(
f"metadata_::jsonb->'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)} "
f"'[\"{filter_.value}\"]'"
)
elif (
filter_.operator == FilterOperator.TEXT_MATCH
or filter_.operator == FilterOperator.TEXT_MATCH_INSENSITIVE
):
# Where the operator is text_match or ilike, we need to wrap the filter in '%' characters
return text(
f"metadata_->>'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)} "
f"'%{filter_.value}%'"
)
elif filter_.operator == FilterOperator.IS_EMPTY:
# Where the operator is is_empty, we need to check if the metadata is null
return text(
f"metadata_->>'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)}"
)
else:
# Check if value is a number. If so, cast the metadata value to a float
# This is necessary because the metadata is stored as a string
try:
return text(
f"(metadata_->>'{filter_.key}')::float "
f"{self._to_postgres_operator(filter_.operator)} "
f"{float(filter_.value)}"
)
except ValueError:
# If not a number, then treat it as a string
return text(
f"metadata_->>'{filter_.key}' "
f"{self._to_postgres_operator(filter_.operator)} "
f"'{filter_.value}'"
)
def _recursively_apply_filters(self, filters: List[MetadataFilters]) -> Any:
"""
Returns a sqlalchemy where clause.
"""
import sqlalchemy
sqlalchemy_conditions = {
"or": sqlalchemy.sql.or_,
"and": sqlalchemy.sql.and_,
}
if filters.condition not in sqlalchemy_conditions:
raise ValueError(
f"Invalid condition: {filters.condition}. "
f"Must be one of {list(sqlalchemy_conditions.keys())}"
)
return sqlalchemy_conditions[filters.condition](
*(
(
self._build_filter_clause(filter_)
if not isinstance(filter_, MetadataFilters)
else self._recursively_apply_filters(filter_)
)
for filter_ in filters.filters
)
)
def _apply_filters_and_limit(
self,
stmt: "Select",
limit: int,
metadata_filters: Optional[MetadataFilters] = None,
) -> Any:
if metadata_filters:
stmt = stmt.where( # type: ignore
self._recursively_apply_filters(metadata_filters)
)
return stmt.limit(limit) # type: ignore
def _build_query(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
**kwargs: Any,
) -> Any:
from sqlalchemy import text, select
stmt = select( # type: ignore
self._table_class.id,
self._table_class.node_id,
self._table_class.text,
self._table_class.metadata_,
self._table_class.embedding.cosine_distance(embedding).label("distance"),
).order_by(text("distance asc"))
if self._customize_query_fn is not None:
stmt = self._customize_query_fn(stmt, self._table_class, **kwargs)
return self._apply_filters_and_limit(stmt, limit, metadata_filters)
def _query_with_score(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
**kwargs: Any,
) -> List[DBEmbeddingRow]:
stmt = self._build_query(embedding, limit, metadata_filters, **kwargs)
with self._session() as session, session.begin():
from sqlalchemy import text
if kwargs.get("ivfflat_probes"):
ivfflat_probes = kwargs.get("ivfflat_probes")
session.execute(
text(f"SET ivfflat.probes = :ivfflat_probes"),
{"ivfflat_probes": ivfflat_probes},
)
if self.hnsw_kwargs:
hnsw_ef_search = (
kwargs.get("hnsw_ef_search") or self.hnsw_kwargs["hnsw_ef_search"]
)
session.execute(
text(f"SET hnsw.ef_search = :hnsw_ef_search"),
{"hnsw_ef_search": hnsw_ef_search},
)
res = session.execute(
stmt,
)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
custom_fields={
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_", "distance"]
},
similarity=(1 - item.distance) if item.distance is not None else 0,
)
for item in res.all()
]
async def _aquery_with_score(
self,
embedding: Optional[List[float]],
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
**kwargs: Any,
) -> List[DBEmbeddingRow]:
stmt = self._build_query(embedding, limit, metadata_filters, **kwargs)
async with self._async_session() as async_session, async_session.begin():
from sqlalchemy import text
if self.hnsw_kwargs:
hnsw_ef_search = (
kwargs.get("hnsw_ef_search") or self.hnsw_kwargs["hnsw_ef_search"]
)
await async_session.execute(
text(f"SET hnsw.ef_search = {hnsw_ef_search}"),
)
if kwargs.get("ivfflat_probes"):
ivfflat_probes = kwargs.get("ivfflat_probes")
await async_session.execute(
text(f"SET ivfflat.probes = :ivfflat_probes"),
{"ivfflat_probes": ivfflat_probes},
)
res = await async_session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
custom_fields={
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_", "distance"]
},
similarity=(1 - item.distance) if item.distance is not None else 0,
)
for item in res.all()
]
def _build_sparse_query(
self,
query_str: Optional[str],
limit: int,
metadata_filters: Optional[MetadataFilters] = None,
**kwargs: Any,
) -> Any:
from sqlalchemy import type_coerce
from sqlalchemy.sql import func, text, select
from sqlalchemy.types import UserDefinedType
class REGCONFIG(UserDefinedType):
# The TypeDecorator.cache_ok class-level flag indicates if this custom TypeDecorator is safe to be used as part of a cache key.
# If the TypeDecorator is not guaranteed to produce the same bind/result behavior and SQL generation every time,
# this flag should be set to False; otherwise if the class produces the same behavior each time, it may be set to True.
cache_ok = True
def get_col_spec(self, **kw: Any) -> str:
return "regconfig"
if query_str is None:
raise ValueError("query_str must be specified for a sparse vector query.")
# Remove special characters used by ts_query (essentially, all punctuation except single periods within words)
# and collapse multiple spaces
query_str = re.sub(r"(?!\b\.\b)\W+", " ", query_str).strip()
# Replace space with "|" to perform an OR search for higher recall
query_str = query_str.replace(" ", "|")
ts_query = func.to_tsquery(
type_coerce(self.text_search_config, REGCONFIG),
query_str,
)
stmt = (
select( # type: ignore
self._table_class.id,
self._table_class.node_id,
self._table_class.text,
self._table_class.metadata_,
func.ts_rank(self._table_class.text_search_tsv, ts_query).label("rank"),
)
.where(self._table_class.text_search_tsv.op("@@")(ts_query))
.order_by(text("rank desc"))
)
if self._customize_query_fn is not None:
stmt = self._customize_query_fn(stmt, self._table_class, **kwargs)
# type: ignore
return self._apply_filters_and_limit(stmt, limit, metadata_filters)
async def _async_sparse_query_with_rank(
self,
query_str: Optional[str] = None,
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_sparse_query(query_str, limit, metadata_filters)
async with self._async_session() as async_session, async_session.begin():
res = await async_session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
custom_fields={
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_", "rank"]
},
similarity=item.rank,
)
for item in res.all()
]
def _sparse_query_with_rank(
self,
query_str: Optional[str] = None,
limit: int = 10,
metadata_filters: Optional[MetadataFilters] = None,
) -> List[DBEmbeddingRow]:
stmt = self._build_sparse_query(query_str, limit, metadata_filters)
with self._session() as session, session.begin():
res = session.execute(stmt)
return [
DBEmbeddingRow(
node_id=item.node_id,
text=item.text,
metadata=item.metadata_,
custom_fields={
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_", "rank"]
},
similarity=item.rank,
)
for item in res.all()
]
async def _async_hybrid_query(
self, query: VectorStoreQuery, **kwargs: Any
) -> List[DBEmbeddingRow]:
import asyncio
if query.alpha is not None:
_logger.warning("postgres hybrid search does not support alpha parameter.")
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = await asyncio.gather(
self._aquery_with_score(
query.query_embedding,
query.similarity_top_k,
query.filters,
**kwargs,
),
self._async_sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
),
)
dense_results, sparse_results = results
all_results = dense_results + sparse_results
return _dedup_results(all_results)
def _hybrid_query(
self, query: VectorStoreQuery, **kwargs: Any
) -> List[DBEmbeddingRow]:
if query.alpha is not None:
_logger.warning("postgres hybrid search does not support alpha parameter.")
sparse_top_k = query.sparse_top_k or query.similarity_top_k
dense_results = self._query_with_score(
query.query_embedding,
query.similarity_top_k,
query.filters,
**kwargs,
)
sparse_results = self._sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
all_results = dense_results + sparse_results
return _dedup_results(all_results)
def _db_rows_to_query_result(
self, rows: List[DBEmbeddingRow]
) -> VectorStoreQueryResult:
nodes = []
similarities = []
ids = []
for db_embedding_row in rows:
try:
node = metadata_dict_to_node(db_embedding_row.metadata)
node.set_content(str(db_embedding_row.text))
except Exception:
# NOTE: deprecated legacy logic for backward compatibility
node = TextNode(
id_=db_embedding_row.node_id,
text=db_embedding_row.text,
metadata=db_embedding_row.metadata,
)
if db_embedding_row.custom_fields:
node.metadata["custom_fields"] = db_embedding_row.custom_fields
similarities.append(db_embedding_row.similarity)
ids.append(db_embedding_row.node_id)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=similarities,
ids=ids,
)
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
self._initialize()
if query.mode == VectorStoreQueryMode.HYBRID:
results = await self._async_hybrid_query(query, **kwargs)
elif query.mode in [
VectorStoreQueryMode.SPARSE,
VectorStoreQueryMode.TEXT_SEARCH,
]:
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = await self._async_sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
elif query.mode == VectorStoreQueryMode.DEFAULT:
results = await self._aquery_with_score(
query.query_embedding,
query.similarity_top_k,
query.filters,
**kwargs,
)
else:
raise ValueError(f"Invalid query mode: {query.mode}")
return self._db_rows_to_query_result(results)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
self._initialize()
if query.mode == VectorStoreQueryMode.HYBRID:
results = self._hybrid_query(query, **kwargs)
elif query.mode in [
VectorStoreQueryMode.SPARSE,
VectorStoreQueryMode.TEXT_SEARCH,
]:
sparse_top_k = query.sparse_top_k or query.similarity_top_k
results = self._sparse_query_with_rank(
query.query_str, sparse_top_k, query.filters
)
elif query.mode == VectorStoreQueryMode.DEFAULT:
results = self._query_with_score(
query.query_embedding,
query.similarity_top_k,
query.filters,
**kwargs,
)
else:
raise ValueError(f"Invalid query mode: {query.mode}")
return self._db_rows_to_query_result(results)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
from sqlalchemy import delete
self._initialize()
with self._session() as session, session.begin():
stmt = delete(self._table_class).where(
self._table_class.metadata_["ref_doc_id"].astext == ref_doc_id
)
session.execute(stmt)
session.commit()
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
from sqlalchemy import delete
self._initialize()
async with self._async_session() as session, session.begin():
stmt = delete(self._table_class).where(
self._table_class.metadata_["ref_doc_id"].astext == ref_doc_id
)
await session.execute(stmt)
await session.commit()
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Deletes nodes.
Args:
node_ids (Optional[List[str]], optional): IDs of nodes to delete. Defaults to None.
filters (Optional[MetadataFilters], optional): Metadata filters. Defaults to None.
"""
if not node_ids and not filters:
return
from sqlalchemy import delete
self._initialize()
with self._session() as session, session.begin():
stmt = delete(self._table_class)
if node_ids:
stmt = stmt.where(self._table_class.node_id.in_(node_ids))
if filters:
stmt = stmt.where(self._recursively_apply_filters(filters))
session.execute(stmt)
session.commit()
async def adelete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""
Deletes nodes asynchronously.
Args:
node_ids (Optional[List[str]], optional): IDs of nodes to delete. Defaults to None.
filters (Optional[MetadataFilters], optional): Metadata filters. Defaults to None.
"""
if not node_ids and not filters:
return
from sqlalchemy import delete
self._initialize()
async with self._async_session() as async_session, async_session.begin():
stmt = delete(self._table_class)
if node_ids:
stmt = stmt.where(self._table_class.node_id.in_(node_ids))
if filters:
stmt = stmt.where(self._recursively_apply_filters(filters))
await async_session.execute(stmt)
await async_session.commit()
def clear(self) -> None:
"""Clears table."""
from sqlalchemy import delete
self._initialize()
with self._session() as session, session.begin():
stmt = delete(self._table_class)
session.execute(stmt)
session.commit()
async def aclear(self) -> None:
"""Asynchronously clears table."""
from sqlalchemy import delete
self._initialize()
async with self._async_session() as async_session, async_session.begin():
stmt = delete(self._table_class)
await async_session.execute(stmt)
await async_session.commit()
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
assert node_ids is not None or filters is not None, (
"Either node_ids or filters must be provided"
)
self._initialize()
from sqlalchemy import select
stmt = select(
self._table_class.node_id,
self._table_class.text,
self._table_class.metadata_,
self._table_class.embedding,
)
if node_ids:
stmt = stmt.where(self._table_class.node_id.in_(node_ids))
if filters:
filter_clause = self._recursively_apply_filters(filters)
stmt = stmt.where(filter_clause)
nodes: List[BaseNode] = []
with self._session() as session, session.begin():
res = session.execute(stmt).fetchall()
for item in res:
node_id = item.node_id
text = item.text
metadata = item.metadata_
embedding = item.embedding
custom_fields = {
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_"]
}
try:
node = metadata_dict_to_node(metadata)
node.set_content(str(text))
node.embedding = embedding
except Exception:
node = TextNode(
id_=node_id,
text=text,
metadata=metadata,
embedding=embedding,
)
nodes.append(node)
return nodes
async def aget_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes asynchronously from vector store."""
assert node_ids is not None or filters is not None, (
"Either node_ids or filters must be provided"
)
self._initialize()
from sqlalchemy import select
stmt = select(
self._table_class.node_id,
self._table_class.text,
self._table_class.metadata_,
self._table_class.embedding,
)
if node_ids:
stmt = stmt.where(self._table_class.node_id.in_(node_ids))
if filters:
filter_clause = self._recursively_apply_filters(filters)
stmt = stmt.where(filter_clause)
nodes: List[BaseNode] = []
async with self._async_session() as session, session.begin():
res = (await session.execute(stmt)).fetchall()
for item in res:
node_id = item.node_id
text = item.text
metadata = item.metadata_
embedding = item.embedding
custom_fields = {
key: val
for key, val in item._asdict().items()
if key not in ["id", "node_id", "text", "metadata_"]
}
try:
node = metadata_dict_to_node(metadata)
node.set_content(str(text))
node.embedding = embedding
except Exception:
node = TextNode(
id_=node_id,
text=text,
metadata=metadata,
embedding=embedding,
)
nodes.append(node)
return nodes
def _dedup_results(results: List[DBEmbeddingRow]) -> List[DBEmbeddingRow]:
seen_ids = set()
deduped_results = []
for result in results:
if result.node_id not in seen_ids:
deduped_results.append(result)
seen_ids.add(result.node_id)
return deduped_results
| PGVectorStore |
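The constructor docstring above describes `hnsw_kwargs` only in prose; below is a hedged sketch of enabling the HNSW index via `from_params`. Connection details are copied from the class docstring; the hnsw_* numbers are illustrative.

```python
from llama_index.vector_stores.postgres import PGVectorStore

vector_store = PGVectorStore.from_params(
    database="vector_db",
    host="localhost",
    password="password",
    port=5432,
    user="postgres",
    table_name="paul_graham_essay",
    embed_dim=1536,
    hnsw_kwargs={
        "hnsw_m": 16,                # required by _create_hnsw_index
        "hnsw_ef_construction": 64,  # required by _create_hnsw_index
        "hnsw_ef_search": 40,        # read at query time
        # "hnsw_dist_method": "vector_cosine_ops",  # optional; cosine is the default
    },
)
```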
| python | pytorch__pytorch | tools/linter/adapters/_linter/messages.py | {"start": 85, "end": 224} |
class ____(str, Enum):
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
@dc.dataclass
| LintSeverity |
| python | redis__redis-py | tests/test_asyncio/test_credentials.py | {"start": 4571, "end": 9308} |
class ____:
@skip_if_redis_enterprise()
async def test_only_pass_without_creds_provider(
self, r_required_pass_teardown, create_redis
):
# test for default user (`username` is supposed to be optional)
password = "password"
r = r_required_pass_teardown(password)
await init_required_pass(r, password)
assert await r.auth(password) is True
r2 = await create_redis(flushdb=False, password=password)
assert await r2.ping() is True
@skip_if_redis_enterprise()
async def test_user_and_pass_without_creds_provider(
self, r_acl_teardown, create_redis
):
"""
Test backward compatibility with username and password
"""
# test for other users
username = "username"
password = "password"
r = r_acl_teardown(username)
await init_acl_user(r, username, password)
r2 = await create_redis(flushdb=False, username=username, password=password)
assert await r2.ping() is True
@pytest.mark.parametrize("username", ["username", None])
@skip_if_redis_enterprise()
@pytest.mark.onlynoncluster
async def test_credential_provider_with_supplier(
self, r_acl_teardown, r_required_pass_teardown, create_redis, username
):
creds_provider = AsyncRandomAuthCredProvider(
user=username,
endpoint="localhost",
)
auth_args = creds_provider.get_credentials()
password = auth_args[-1]
if username:
r = r_acl_teardown(username)
await init_acl_user(r, username, password)
else:
r = r_required_pass_teardown(password)
await init_required_pass(r, password)
r2 = await create_redis(flushdb=False, credential_provider=creds_provider)
assert await r2.ping() is True
async def test_async_credential_provider_no_password_success(
self, r_acl_teardown, create_redis
):
username = "username"
r = r_acl_teardown(username)
await init_acl_user(r, username, "")
r2 = await create_redis(
flushdb=False,
credential_provider=NoPassCredProvider(),
)
assert await r2.ping() is True
@pytest.mark.onlynoncluster
async def test_credential_provider_no_password_error(
self, r_acl_teardown, create_redis
):
username = "username"
r = r_acl_teardown(username)
await init_acl_user(r, username, "password")
with pytest.raises(AuthenticationError) as e:
await create_redis(
flushdb=False,
credential_provider=NoPassCredProvider(),
single_connection_client=True,
)
assert e.match("invalid username-password")
assert await r.acl_deluser(username)
@pytest.mark.onlynoncluster
async def test_password_and_username_together_with_cred_provider_raise_error(
self, r_acl_teardown, create_redis
):
username = "username"
r = r_acl_teardown(username)
await init_acl_user(r, username, "password")
cred_provider = UsernamePasswordCredentialProvider(
username="username", password="password"
)
with pytest.raises(DataError) as e:
await create_redis(
flushdb=False,
username="username",
password="password",
credential_provider=cred_provider,
single_connection_client=True,
)
assert e.match(
"'username' and 'password' cannot be passed along with "
"'credential_provider'."
)
@pytest.mark.onlynoncluster
async def test_change_username_password_on_existing_connection(
self, r_acl_teardown, create_redis
):
username = "origin_username"
password = "origin_password"
new_username = "new_username"
new_password = "new_password"
r = r_acl_teardown(username)
await init_acl_user(r, username, password)
r2 = await create_redis(flushdb=False, username=username, password=password)
assert await r2.ping() is True
conn = await r2.connection_pool.get_connection()
await conn.send_command("PING")
assert str_if_bytes(await conn.read_response()) == "PONG"
assert conn.username == username
assert conn.password == password
await init_acl_user(r, new_username, new_password)
conn.password = new_password
conn.username = new_username
await conn.send_command("PING")
assert str_if_bytes(await conn.read_response()) == "PONG"
@pytest.mark.asyncio
|
TestCredentialsProvider
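For context, the credential-provider pattern these tests exercise boils down to subclassing redis-py's CredentialProvider. The sketch below is illustrative only and assumes a recent redis-py; the class and variable names are not taken from the test module above.

# Minimal static credential provider; names and values are assumptions.
import redis.asyncio as redis
from redis.credentials import CredentialProvider


class StaticCredProvider(CredentialProvider):
    def __init__(self, username: str, password: str):
        self.username = username
        self.password = password

    def get_credentials(self):
        # redis-py expects a (password,) or (username, password) tuple here.
        if self.username:
            return self.username, self.password
        return (self.password,)


client = redis.Redis(credential_provider=StaticCredProvider("username", "password"))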
|
python
|
django__django
|
tests/inspectdb/models.py
|
{
"start": 1864,
"end": 3053
}
|
class ____(models.Model):
id = models.AutoField(primary_key=True)
big_int_field = models.BigIntegerField()
bool_field = models.BooleanField(default=False)
null_bool_field = models.BooleanField(null=True)
char_field = models.CharField(max_length=10)
null_char_field = models.CharField(max_length=10, blank=True, null=True)
date_field = models.DateField()
date_time_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
email_field = models.EmailField()
file_field = models.FileField(upload_to="unused")
file_path_field = models.FilePathField()
float_field = models.FloatField()
int_field = models.IntegerField()
gen_ip_address_field = models.GenericIPAddressField(protocol="ipv4")
pos_big_int_field = models.PositiveBigIntegerField()
pos_int_field = models.PositiveIntegerField()
pos_small_int_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField()
small_int_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField()
uuid_field = models.UUIDField()
|
ColumnTypes
|
python
|
fsspec__filesystem_spec
|
fsspec/implementations/http_sync.py
|
{
"start": 818,
"end": 871
}
|
class ____(urllib.error.HTTPError): ...
|
JsHttpException
|
python
|
huggingface__transformers
|
src/transformers/models/sam2_video/modeling_sam2_video.py
|
{
"start": 28554,
"end": 29659
}
|
class ____(PreTrainedModel):
config_class = Sam2VideoConfig
base_model_prefix = "sam2_video"
main_input_name = "pixel_values"
input_modalities = "video"
_supports_sdpa = True
_supports_flash_attn_2 = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Sam2VideoModel):
if module.no_memory_positional_encoding is not None:
init.zeros_(module.no_memory_positional_encoding)
if module.memory_temporal_positional_encoding is not None:
init.zeros_(module.memory_temporal_positional_encoding)
if module.no_object_pointer is not None:
init.zeros_(module.no_object_pointer)
if module.occlusion_spatial_embedding_parameter is not None:
init.zeros_(module.occlusion_spatial_embedding_parameter)
if isinstance(module, Sam2VideoMemoryFuserCXBlock):
if module.scale is not None:
init.zeros_(module.scale)
|
Sam2VideoPreTrainedModel
|
python
|
huggingface__transformers
|
src/transformers/models/unispeech/modeling_unispeech.py
|
{
"start": 4129,
"end": 5927
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name="weight", dim=2)
if hasattr(self.conv, "parametrizations"):
weight_g = self.conv.parametrizations.weight.original0
weight_v = self.conv.parametrizations.weight.original1
else:
weight_g = self.conv.weight_g
weight_v = self.conv.weight_v
deepspeed.zero.register_external_parameter(self, weight_v)
deepspeed.zero.register_external_parameter(self, weight_g)
else:
self.conv = weight_norm(self.conv, name="weight", dim=2)
self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
|
UniSpeechPositionalConvEmbedding
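A shape-level sketch of how the positional convolutional embedding above is used. The config values and tensor sizes below are made up for illustration; the import path mirrors the file path of this record.

# Sketch only: hidden_size and kernel settings are illustrative, not the model defaults.
import torch
from transformers import UniSpeechConfig
from transformers.models.unispeech.modeling_unispeech import UniSpeechPositionalConvEmbedding

config = UniSpeechConfig(
    hidden_size=32,
    num_conv_pos_embeddings=16,
    num_conv_pos_embedding_groups=4,
)
pos_conv = UniSpeechPositionalConvEmbedding(config)
hidden_states = torch.randn(2, 50, 32)  # (batch, time, hidden_size)
out = pos_conv(hidden_states)
print(out.shape)  # expected: torch.Size([2, 50, 32]) -- same shape in, same shape out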
|
python
|
apache__airflow
|
providers/standard/src/airflow/providers/standard/operators/python.py
|
{
"start": 41707,
"end": 51481
}
|
class ____(_BasePythonVirtualenvOperator):
"""
Run a function in a virtualenv that is not re-created.
It is reused as is, without the overhead of creating the virtual environment (with certain caveats).
The function must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
string_args). In addition, one can pass stuff through op_args and op_kwargs, and one
can use a return value.
Note that if your virtual environment runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
If Airflow is installed in the external environment in a different version than the version
used by the operator, the operator will fail.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ExternalPythonOperator`
:param python: Full path string (file-system specific) that points to a Python binary inside
a virtual environment that should be used (in ``VENV/bin`` folder). Should be absolute path
(so usually start with "/" or "X:/" depending on the filesystem/os used).
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtual environment.
:param serializer: Which serializer to use to serialize the args and result. It can be one of the following:
- ``"pickle"``: (default) Use pickle for serialization. Included in the Python Standard Library.
- ``"cloudpickle"``: Use cloudpickle to serialize more complex types,
this requires including cloudpickle in your requirements.
- ``"dill"``: Use dill to serialize more complex types,
this requires including dill in your requirements.
:param op_args: A list of positional arguments to pass to python_callable.
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
``__init__`` and ``execute`` takes place and are made available
in your callable's context after the template has been applied
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for example ``['.sql', '.hql']``
:param expect_airflow: expect Airflow to be installed in the target environment. If true, the operator
will raise a warning if Airflow is not installed, and it will attempt to load Airflow
macros when starting.
:param skip_on_exit_code: If python_callable exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param env_vars: A dictionary containing additional environment variables to set for the virtual
environment when it is executed.
:param inherit_env: Whether to inherit the current environment variables when executing the virtual
environment. If set to ``True``, the virtual environment will inherit the environment variables
of the parent process (``os.environ``). If set to ``False``, the virtual environment will be
executed with a clean environment.
"""
template_fields: Sequence[str] = tuple({"python"}.union(PythonOperator.template_fields))
def __init__(
self,
*,
python: str,
python_callable: Callable,
serializer: _SerializerTypeDef | None = None,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
string_args: Iterable[str] | None = None,
templates_dict: dict | None = None,
templates_exts: list[str] | None = None,
expect_airflow: bool = True,
expect_pendulum: bool = False,
skip_on_exit_code: int | Container[int] | None = None,
env_vars: dict[str, str] | None = None,
inherit_env: bool = True,
**kwargs,
):
if not python:
raise ValueError("Python Path must be defined in ExternalPythonOperator")
self.python = python
self.expect_pendulum = expect_pendulum
super().__init__(
python_callable=python_callable,
serializer=serializer,
op_args=op_args,
op_kwargs=op_kwargs,
string_args=string_args,
templates_dict=templates_dict,
templates_exts=templates_exts,
expect_airflow=expect_airflow,
skip_on_exit_code=skip_on_exit_code,
env_vars=env_vars,
inherit_env=inherit_env,
**kwargs,
)
def execute_callable(self):
python_path = Path(self.python)
if not python_path.exists():
raise ValueError(f"Python Path '{python_path}' must exists")
if not python_path.is_file():
raise ValueError(f"Python Path '{python_path}' must be a file")
if not python_path.is_absolute():
raise ValueError(f"Python Path '{python_path}' must be an absolute path.")
python_version = _PythonVersionInfo.from_executable(self.python)
if python_version.major != sys.version_info.major and (self.op_args or self.op_kwargs):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for ExternalPythonOperator. Please use string_args."
f"Sys version: {sys.version_info}. "
f"Virtual environment version: {python_version}"
)
return self._execute_python_callable_in_subprocess(python_path)
def _iter_serializable_context_keys(self):
yield from self.BASE_SERIALIZABLE_CONTEXT_KEYS
if self.expect_airflow and self._get_airflow_version_from_target_env():
yield from self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
elif self._is_pendulum_installed_in_target_env():
yield from self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS
def _is_pendulum_installed_in_target_env(self) -> bool:
try:
subprocess.check_call([self.python, "-c", "import pendulum"])
return True
except Exception as e:
if self.expect_pendulum:
self.log.warning("When checking for Pendulum installed in virtual environment got %s", e)
self.log.warning(
"Pendulum is not properly installed in the virtual environment "
"Pendulum context keys will not be available. "
"Please Install Pendulum or Airflow in your virtual environment to access them."
)
return False
@property
def _external_airflow_version_script(self):
"""
Return a python script which determines the version of Apache Airflow installed in the target environment.
Importing airflow as a module might take a while; obtaining the version that way can take up to 1 second.
`importlib.metadata.version`, on the other hand, retrieves the package version quickly (well under 100ms,
including the overhead of the new subprocess).
Possible side effect: `importlib.metadata` might be unavailable (Python < 3.8), and the same goes for the
`importlib_metadata` backport; a failure here might also indicate that the venv does not contain
`apache-airflow` or that something is wrong with the environment.
"""
return textwrap.dedent(
"""
try:
from importlib.metadata import version
except ImportError:
from importlib_metadata import version
print(version("apache-airflow"))
"""
)
def _get_airflow_version_from_target_env(self) -> str | None:
from airflow import __version__ as airflow_version
try:
result = subprocess.check_output(
[self.python, "-c", self._external_airflow_version_script],
text=True,
)
target_airflow_version = result.strip()
if target_airflow_version != airflow_version:
raise AirflowConfigException(
f"The version of Airflow installed for the {self.python} "
f"({target_airflow_version}) is different than the runtime Airflow version: "
f"{airflow_version}. Make sure your environment has the same Airflow version "
f"installed as the Airflow runtime."
)
return target_airflow_version
except Exception as e:
if self.expect_airflow:
self.log.warning("When checking for Airflow installed in virtual environment got %s", e)
self.log.warning(
"This means that Airflow is not properly installed by %s. "
"Airflow context keys will not be available. "
"Please Install Airflow %s in your environment to access them.",
self.python,
airflow_version,
)
return None
|
ExternalPythonOperator
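A minimal usage sketch for the operator documented above. The interpreter path, DAG id, and callable are assumptions for illustration, not values taken from the source.

# Sketch only: assumes a pre-built virtual environment at /opt/venv.
from datetime import datetime

from airflow import DAG
from airflow.providers.standard.operators.python import ExternalPythonOperator


def count_lines():
    # All imports must happen inside the callable, per the operator docs above.
    import pathlib

    return len(pathlib.Path("/tmp/data.txt").read_text().splitlines())


with DAG(dag_id="external_python_example", start_date=datetime(2024, 1, 1), schedule=None):
    count = ExternalPythonOperator(
        task_id="count_lines",
        python="/opt/venv/bin/python",  # must be an absolute path to the venv interpreter
        python_callable=count_lines,
    )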
|
python
|
google__jax
|
tests/batching_test.py
|
{
"start": 1458,
"end": 49078
}
|
class ____(jtu.JaxTestCase):
def testConstantFunction(self):
ans = vmap(lambda x: 3)(np.ones(4))
expected = 3 * np.ones(4)
self.assertAllClose(ans, expected, check_dtypes=False)
@jax.default_matmul_precision("float32")
def testNestedBatchingMatMat(self):
matvec = vmap(jnp.vdot, in_axes=(0, None))
matmat = vmap(matvec, in_axes=(None, 1), out_axes=1)
R = self.rng().randn
A = R(4, 3)
B = R(3, 2)
ans = matmat(A, B)
expected = np.dot(A, B)
self.assertAllClose(ans, expected, check_dtypes=False)
jaxpr = make_jaxpr(matmat)(A, B)
self.assertLen(jaxpr.jaxpr.eqns, 1)
def testPerExampleGradients(self):
def predict(params, inputs):
for W, b in params:
outputs = jnp.dot(W, inputs) + b
inputs = jnp.tanh(outputs)
return outputs
def loss(params, data):
inputs, targets = data
predictions = predict(params, inputs)
return jnp.sum((predictions - targets)**2)
batch_size = 5
layer_sizes = [3, 2, 4]
R = self.rng().randn
params = [(R(m, n), R(m))
for m, n in zip(layer_sizes[1:], layer_sizes[:-1])]
input_batch = R(5, 3)
target_batch = R(5, 4)
batch = (input_batch, target_batch)
ans = vmap(partial(grad(loss), params))(batch)
for ans_pair, param_pair in zip(ans, params):
dW, db = ans_pair
W, b = param_pair
self.assertEqual(dW.shape, (batch_size,) + W.shape)
self.assertEqual(db.shape, (batch_size,) + b.shape)
@jax.default_matmul_precision("float32")
def testJacobians(self):
def jacbwd(f, x):
y, pullback = vjp(f, x)
std_basis = np.eye(np.size(y)).reshape((-1,) + np.shape(y))
jac_flat, = vmap(pullback, out_axes=np.ndim(y))(std_basis)
return jac_flat.reshape(np.shape(y) + np.shape(x))
def jacfwd(f, x):
pushfwd = lambda v: jvp(f, (x,), (v,))
std_basis = np.eye(np.size(x)).reshape((-1,) + np.shape(x))
y, jac_flat = vmap(pushfwd, out_axes=(None, 0))(std_basis)
return jac_flat.reshape(np.shape(y) + np.shape(x))
R = self.rng().randn
A = R(4, 3)
b = R(4)
f = lambda x: jnp.tanh(jnp.dot(A, x) + b)
x = R(3)
self.assertAllClose(jacfwd(f, x), jacbwd(f, x), check_dtypes=False)
def testBatchOfCompile(self):
side = []
@jit
def f(x):
side.append(None)
return x + x
g = jit(vmap(f))
self.assertAllClose(g(np.ones(2)), 2 * np.ones(2), check_dtypes=False)
self.assertEqual(len(side), 1)
self.assertAllClose(g(2 * np.ones(2)), 4 * np.ones(2),
check_dtypes=False)
self.assertEqual(len(side), 1)
def testSliceLax(self):
fun = lambda x: lax.slice(x, (2,), (4,))
R = self.rng().randn
x = R(5, 10)
ans = vmap(fun)(x)
expected_ans = x[:, 2:4]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testSliceNumpy(self):
fun = lambda x: x[:, 2]
R = self.rng().randn
x = R(10, 5, 3, 7)
ans = vmap(fun)(x)
expected_ans = x[:, :, 2]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testRevLax(self):
fun = lambda x: lax.rev(x, [0])
R = self.rng().randn
x = R(2, 3)
ans = vmap(fun)(x)
expected_ans = x[:, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (1,), 1)(x)
expected_ans = x[::-1, :]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testRevNumpy(self):
fun = lambda x: x[:, ::-1]
R = self.rng().randn
x = R(3, 2, 4)
ans = vmap(fun)(x)
expected_ans = x[:, :, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (1,), 1)(x)
expected_ans = x[:, :, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (2,), 2)(x)
expected_ans = x[:, ::-1, :]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testNpMaximum(self):
fun = lambda x: jnp.maximum(x, 0.0)
R = self.rng().randn
x = R(10, 5, 3, 7)
ans = vmap(fun)(x)
expected_ans = np.maximum(x, 0.0)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testNpGtrThan(self):
R = self.rng().randn
x = R(10, 5, 3, 7)
ans = vmap(lambda x: x > 1.0)(x)
expected_ans = x > 1.0
self.assertAllClose(ans, expected_ans)
@jax.default_matmul_precision("float32")
def testNpMaximumPerExampleGrad(self):
R = self.rng().randn
x = R(10, 5)
W = R(5, 5)
fun = lambda W, x: jnp.sum(jnp.maximum(jnp.dot(x, W), 0.0) ** 2)
ans = vmap(partial(grad(fun), W))(x)
W_t = jnp.transpose(W)
for i in range(10):
x_ex = x[i:i + 1]
expected_ans = 2.0 * jnp.dot(
jnp.maximum(jnp.dot(W_t, jnp.transpose(x_ex)), 0.0), x_ex)
expected_ans = jnp.transpose(expected_ans)
self.assertAllClose(ans[i], expected_ans, check_dtypes=False)
# Replace the default TF32 with float32 in order to make it pass on A100
@jax.default_matmul_precision("float32")
def testDotGeneral(self):
R = self.rng().randn
x = R(10, 3, 4, 5)
y = R(10, 3, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun)(x, y)
expected = lax.dot_general(x, y, [((3,), (2,)), ((0, 1), (0, 1))])
self.assertAllClose(ans, expected)
x = R(3, 4, 10, 5)
y = R(3, 10, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(2, 1))(x, y)
expected = np.stack([fun(x[..., i, :], y[:, i, ...]) for i in range(10)])
self.assertAllClose(ans, expected)
x = R(3, 4, 5, 10)
y = R(3, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(3, None))(x, y)
expected = np.stack([fun(x[..., i], y) for i in range(10)])
self.assertAllClose(ans, expected)
x = R(3, 4, 5)
y = R(3, 5, 10, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(None, 2))(x, y)
expected = np.stack([fun(x, y[..., i, :]) for i in range(10)])
self.assertAllClose(ans, expected)
x = R(4)
y = R(4, 10)
fun = lambda x, y: lax.dot_general(x, y, [((0,), (0,)), ((), ())])
ans = vmap(fun, in_axes=(None, 1))(x, y)
expected = np.stack([fun(x, y[..., i]) for i in range(10)])
self.assertAllClose(ans, expected)
def testDot(self):
# these tests are based on @shoyer's notebook studying gufuncs
def vecvec(a, b):
dot = jnp.dot
for ndim in range(1, max(a.ndim, b.ndim)):
a_ax = 0 if a.ndim > ndim else None
b_ax = 0 if b.ndim > ndim else None
dot = vmap(dot, in_axes=(a_ax, b_ax))
return dot(a, b)
assert vecvec(jnp.zeros((3,)), jnp.zeros((3,))).shape == ()
assert vecvec(jnp.zeros((2, 3)), jnp.zeros((3,))).shape == (2,)
assert vecvec(jnp.zeros((4, 2, 3)), jnp.zeros((3,))).shape == (4, 2)
def testDot2(self):
R = self.rng().randn
xs = R(10, 3)
ys = R(10, 3)
ans = vmap(jnp.dot)(xs, ys)
expected = np.einsum('ni,ni->n', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot3(self):
R = self.rng().randn
xs = R(5, 8, 10)
ys = R(10, 1)
ans = vmap(jnp.dot, in_axes=(1, None))(xs, ys)
expected = np.einsum('inj,jk->nik', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot4(self):
R = self.rng().randn
xs = R(3, 2)
ys = R(3)
ans = vmap(jnp.dot, in_axes=(1, None))(xs, ys)
expected = np.einsum('ij,i->j', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPad(self):
R = self.rng().randn
fun = lambda x: lax.pad(x, np.float32(0), [(1, 2, 1)])
x = R(5, 10).astype(np.float32)
ans = vmap(fun)(x)
expected_ans = jnp.stack(list(map(fun, x)))
self.assertAllClose(ans, expected_ans, check_dtypes=False)
fun = lambda x: lax.pad(x, np.float32(0), [(1, 2, 1), (0, 1, 0)])
x = R(5, 10, 3).astype(np.float32)
ans = vmap(fun)(x)
expected_ans = jnp.stack(list(map(fun, x)))
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testConcatenate(self):
R = lambda *shape: self.rng().randn(*shape).astype(np.float32)
fun = lambda *args: lax.concatenate(args, dimension=0)
x, y, z = R(10, 2, 3), R(1, 10, 3), R(4, 3)
ans = vmap(fun, in_axes=(0, 1, None))(x, y, z)
expected_ans = np.concatenate([x, np.swapaxes(y, 0, 1),
np.broadcast_to(z, (10, 4, 3))], 1)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
fun = lambda *args: lax.concatenate(args, dimension=1)
x, y, z = R(10, 2, 1), R(2, 3), R(2, 4, 10)
ans = vmap(fun, in_axes=(0, None, 2))(x, y, z)
expected_ans = np.concatenate([x, np.broadcast_to(y, (10, 2, 3)),
np.moveaxis(z, 2, 0)], 2)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testJacobianIssue54(self):
# test modeling the code in https://github.com/jax-ml/jax/issues/54
def func(xs):
return jnp.array(list(xs))
xs = jnp.ones((5, 1))
jacrev(func)(xs) # don't crash
jacfwd(func)(xs) # don't crash
def testAny(self):
# test modeling the code in https://github.com/jax-ml/jax/issues/108
ans = vmap(jnp.any)(jnp.array([[True, False], [False, False]]))
expected = jnp.array([True, False])
self.assertAllClose(ans, expected)
def testHessian(self):
# test based on code from sindhwani@google
def fun(x, t):
return jnp.sum(jnp.power(jnp.maximum(x, 0.0), 2)) + t
x = np.array([-1., -0.5, 0., 0.5, 1.0])
ans = hessian(lambda x: fun(x, 0.0))(x)
expected = np.array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0.5, 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
self.assertAllClose(ans, expected, check_dtypes=False)
def testDynamicSlice(self):
# test dynamic_slice via numpy indexing syntax
# see https://github.com/jax-ml/jax/issues/1613 for an explanation of why we
# need to use jnp rather than np to create x and idx
x = jnp.arange(30).reshape((10, 3))
ans = vmap(lambda x, i: x[i], in_axes=(0, None))(x, 1)
expected = x[:, 1]
self.assertAllClose(ans, expected, check_dtypes=False)
idx = jnp.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, i: x[i], in_axes=(0, 0))(x, idx)
expected = x[np.arange(10), idx]
self.assertAllClose(ans, expected, check_dtypes=False)
x = jnp.arange(3)
idx = jnp.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, i: x[i], in_axes=(None, 0))(x, idx)
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testDynamicUpdateSlice(self):
x = self.rng().randn(10, 3)
y = self.rng().randn(10)
ans = vmap(lambda x, y, i: lax.dynamic_update_index_in_dim(x, y, i, axis=0),
in_axes=(0, 0, None))(x, y, 1)
expected = x.copy()
expected[:, 1] = y
self.assertAllClose(ans, expected, check_dtypes=False)
x = self.rng().randn(3)
idx = np.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, y, i: lax.dynamic_update_index_in_dim(x, y, i, axis=0),
in_axes=(None, 0, 0))(x, y, idx)
expected = np.broadcast_to(x, (10, 3)).copy()
expected[np.arange(10), idx] = y
self.assertAllClose(ans, expected, check_dtypes=False)
@jax.legacy_prng_key('allow')
def testRandom(self):
seeds = vmap(random.PRNGKey)(np.arange(10))
ans = vmap(partial(random.normal, shape=(3, 2)))(seeds)
expected = np.stack([random.normal(random.PRNGKey(seed), (3, 2))
for seed in np.arange(10)])
self.assertAllClose(ans, expected, check_dtypes=False)
assert len(np.unique(ans)) == 10 * 3 * 2
def testSort(self):
v = np.arange(12)[::-1].reshape(3, 4)
sv = vmap(partial(lax.sort, dimension=0), (0,))(v)
self.assertAllClose(sv, v[:, ::-1])
sv = vmap(partial(lax.sort, dimension=-1), (0,))(v)
self.assertAllClose(sv, v[:, ::-1])
sv = vmap(partial(lax.sort, dimension=0), (1,))(v)
self.assertAllClose(sv, v[::-1, :].T)
sv = vmap(partial(lax.sort, dimension=0), (1,), 1)(v)
self.assertAllClose(sv, v[::-1, :])
def testSortKeyVal(self):
k = np.arange(12)[::-1].reshape(3, 4)
v = self.rng().permutation(12).reshape(3, 4)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 0))(k, v)
self.assertAllClose(sk, k[:, ::-1])
self.assertAllClose(sv, v[:, ::-1])
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 1), 1)(k, v)
self.assertAllClose(sk, k[::-1, :])
self.assertAllClose(sv, v[::-1, :])
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 1))(k, v.T)
self.assertAllClose(sk, k[:, ::-1])
self.assertAllClose(sv, v[:, ::-1])
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 0))(k.T, v)
self.assertAllClose(sk, k[:, ::-1])
self.assertAllClose(sv, v[:, ::-1])
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (None, 0))(k[0], v)
self.assertAllClose(sk, np.broadcast_to(k[0, ::-1], (3, 4)))
self.assertAllClose(sv, v[:, ::-1])
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, None))(k.T, v[0])
self.assertAllClose(sk, k[:, ::-1])
self.assertAllClose(sv, np.broadcast_to(v[0, ::-1], (3, 4)))
def testConvGeneralDilated(self):
W = jnp.array(self.rng().randn(3, 3, 1, 5), dtype=np.float32)
X = jnp.array(self.rng().randn(10, 5, 5, 1), dtype=np.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
return y
grad_loss = grad(lambda params, x: jnp.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example = jnp.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, jnp.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
jnp.reshape(g, (1,) + g.shape)]
per_example_direct = jnp.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct,
rtol=2e-2, atol=2e-3)
def testConvGeneralDilatedBatchNotMajor(self):
W = jnp.array(self.rng().randn(3, 3, 1, 4), dtype=np.float32)
x = jnp.array(self.rng().randn(3, 5, 7, 5, 1), dtype=np.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('HNWC', 'HWIO', 'HWNC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
return y
per_example = vmap(partial(f, W))(x)
per_example = jnp.reshape(jnp.transpose(per_example, (1, 2, 0, 3, 4)),
(5, 5, 21, 4))
per_example_direct = f(W, jnp.reshape(jnp.transpose(x, (1, 0, 2, 3, 4)),
(5, 21, 5, 1)))
self.assertAllClose(per_example, per_example_direct)
@parameterized.named_parameters(
{"testcase_name": f"_op={name}", "op": op, "unit": unit}
for name, op, unit in [("max", lax.max, -jnp.inf), ("min", lax.min, jnp.inf)])
def testMinMaxPool(self, op, unit):
W = jnp.array(self.rng().randn(3, 3, 1, 5), dtype=np.float32)
X = jnp.array(self.rng().randn(10, 5, 5, 1), dtype=np.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
y = lax.reduce_window(
y, unit, op, (1, 2, 2, 1), (1, 1, 1, 1), 'SAME')
return y
grad_loss = grad(lambda params, x: jnp.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example = jnp.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, jnp.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
jnp.reshape(g, (1,) + g.shape)]
per_example_direct = jnp.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct, rtol=5e-2, atol=1e-3)
def testSumPool(self):
W = jnp.array(self.rng().randn(3, 3, 1, 5), dtype=np.float32)
X = jnp.array(self.rng().randn(10, 5, 5, 1), dtype=np.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
y = lax.reduce_window(
y, 0.0, lax.add, (1, 2, 2, 1), (1, 1, 1, 1), 'SAME')
return y
grad_loss = grad(lambda params, x: jnp.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example = jnp.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(jnp.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, jnp.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
jnp.reshape(g, (1,) + g.shape)]
per_example_direct = jnp.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct,
rtol=3e-2, atol=1e-3)
def testCumProd(self):
x = jnp.arange(9).reshape(3, 3) + 1
y = vmap(lambda x: jnp.cumprod(x, axis=-1))(x)
self.assertAllClose(jnp.cumprod(x, axis=1), y)
def testSelect(self):
pred = np.array([True, False])
on_true = np.array([0, 1])
on_false = np.array([2, 3])
ans = vmap(lax.select)(pred, on_true, on_false)
expected = np.array([0, 3])
self.assertAllClose(ans, expected)
pred = np.array([False, True])
on_true = np.array([0, 1])
on_false = np.array([2, 3])
ans = vmap(lax.select, (0, None, None))(pred, on_true, on_false)
expected = np.array([[2, 3],
[0, 1]])
self.assertAllClose(ans, expected)
pred = True
on_true = np.array([0, 1], np.float32)
on_false = np.array(3, np.float32)
ans = vmap(lax.select, (None, 0, None))(pred, on_true, on_false)
expected = np.array([0, 1], np.float32)
self.assertAllClose(ans, expected)
pred = np.array([False, True])
on_true = np.array([0, 1], np.float32)
on_false = np.array(3, np.float32)
ans = vmap(lax.select, (0, 0, None))(pred, on_true, on_false)
expected = np.array([3, 1], np.float32)
self.assertAllClose(ans, expected)
pred = np.array([False, True])
on_true = np.array([2], np.float32)
on_false = np.array([[3, 4]], np.float32)
ans = vmap(lax.select, (0, None, 1), 1)(pred, on_true, on_false)
expected = np.array([[3, 2]], np.float32)
self.assertAllClose(ans, expected)
def testLaxLinalgCholesky(self):
a = self.rng().randn(10, 5, 5).astype(np.float32)
a = np.matmul(a, np.conj(np.swapaxes(a, -1, -2)))
ans = vmap(lax.linalg.cholesky)(a)
expected = np.linalg.cholesky(a)
self.assertAllClose(ans, expected, check_dtypes=False, atol=1E-3)
b = self.rng().randn(10, 5, 5).astype(np.float32)
b = np.matmul(b, np.conj(np.swapaxes(b, -1, -2)))
b_trans = np.swapaxes(b, 0, 1) # shape is (5, 10, 5)
ans = vmap(lax.linalg.cholesky, in_axes=1, out_axes=0)(b_trans)
expected = np.linalg.cholesky(b)
self.assertAllClose(ans, expected, check_dtypes=False, rtol=1e-4)
def testLaxLinalgTriangularSolve(self):
a = self.rng().randn(4, 10, 4).astype(np.float32)
a += np.eye(4, dtype=jnp.float32)[:, None, :]
b = self.rng().randn(5, 4, 10).astype(np.float32)
ans = vmap(lax.linalg.triangular_solve, in_axes=(1, 2))(a, b)
expected = np.stack(
[lax.linalg.triangular_solve(a[:, i], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected, atol=1e-5, rtol=1e-5)
ans = vmap(lax.linalg.triangular_solve, in_axes=(None, 2))(a[:, 0], b)
expected = np.stack(
[lax.linalg.triangular_solve(a[:, 0], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected)
ans = vmap(lax.linalg.triangular_solve, in_axes=(1, None))(a, b[..., 0])
expected = np.stack(
[lax.linalg.triangular_solve(a[:, i], b[..., 0]) for i in range(10)])
self.assertAllClose(ans, expected, atol=1e-5, rtol=1e-5)
def testLaxLinalgTridiagonalSolve(self):
dl = self.rng().randn(4, 10).astype(np.float32)
d = self.rng().randn(4, 10).astype(np.float32) + 1.
du = self.rng().randn(4, 10).astype(np.float32)
b = self.rng().randn(4, 5, 10).astype(np.float32)
ans = vmap(lax.linalg.tridiagonal_solve, in_axes=(1, 1, 1, 2))(dl, d, du, b)
expected = np.stack(
[lax.linalg.tridiagonal_solve(
dl[:, i], d[:, i], du[:, i], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected, atol=1e-5, rtol=1e-5)
ans = vmap(lax.linalg.tridiagonal_solve, in_axes=(None, None, None, 2))(
dl[:, 0], d[:, 0], du[:, 0], b)
expected = np.stack(
[lax.linalg.tridiagonal_solve(
dl[:, 0], d[:, 0], du[:, 0], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected)
ans = vmap(lax.linalg.tridiagonal_solve, in_axes=(1, 1, 1, None))(
dl, d, du, b[..., 0])
expected = np.stack(
[lax.linalg.tridiagonal_solve(
dl[:, i], d[:, i], du[:, i], b[..., 0]) for i in range(10)])
self.assertAllClose(ans, expected, atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes}
for dtype in [np.float32, np.int32]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (3, 5), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, (10, 3), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(1, (10, 3, 5), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, (10, 5, 3), np.array([[0, 2], [1, 0]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,),
start_index_map=(0, 1)),
(1, 3)),
])
def testGatherBatchedOperand(self, axis, shape, dtype, idxs, dnums, slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
ans = vmap(fun, (axis, None))(operand, idxs)
expected = np.stack([fun(operand[(slice(None),) * axis + (i,)], idxs)
for i in range(operand.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes}
for dtype in [np.float32, np.float64]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (3, 5), np.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, (10, 3), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(1, (10, 3, 5), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, (10, 5, 3), np.array([[0, 2], [1, 0]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,),
start_index_map=(0, 1)),
(1, 3))
])
def testGatherGradBatchedOperand(self, axis, shape, dtype, idxs, dnums, slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: jnp.sum(jnp.sin(fun(x, idx))))
operand = rng(shape, dtype)
ans = vmap(gfun, (axis, None))(operand, idxs)
expected = np.stack([gfun(operand[(slice(None),) * axis + (i,)], idxs)
for i in range(operand.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes}
for dtype in [np.float32, np.int32]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (5,), np.array([[[0], [2]], [[1], [3]]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)), (1,)),
(1, (10,), np.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)), (2,)),
(1, (10, 5), np.array([[0, 2, 1], [0, 3, 3]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)), (1, 3)),
(0, (10, 5), np.array([[[0, 1], [2, 0]],
[[1, 0], [2, 3]]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)), (1, 3)),
])
def testGatherBatchedIndices(self, axis, shape, dtype, idxs, dnums, slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
ans = vmap(fun, (None, axis))(operand, idxs)
expected = np.stack([fun(operand, idxs[(slice(None),) * axis + (i,)])
for i in range(idxs.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes}
for dtype in [np.float32, np.float64]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (5,), np.array([[[0], [2]], [[1], [3]]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)), (1,)),
(1, (10,), np.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)), (2,)),
(1, (10, 5), np.array([[0, 2, 1], [0, 3, 3]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)), (1, 3)),
(0, (10, 5), np.array([[[0, 1], [2, 0]],
[[1, 0], [2, 3]]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)), (1, 3)),
])
def testGatherGradBatchedIndices(self, axis, shape, dtype, idxs, dnums, slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: jnp.sum(jnp.sin(fun(x, idx))))
operand = rng(shape, dtype)
ans = vmap(gfun, (None, axis))(operand, idxs)
expected = np.stack([gfun(operand, idxs[(slice(None),) * axis + (i,)])
for i in range(idxs.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,
dnums, slice_sizes),
"op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype":
dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes}
for dtype in [np.float32, np.int32]
for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [
(0, 0, (2, 5), np.array([[[0], [2]], [[1], [3]]]),
lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, 1, (10, 2), np.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(0, 1, (2, 10, 5,), np.array([[[0, 2, 1], [0, 3, 3]]]).T,
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, 0, (10, 5, 2), np.array([[[0, 2], [1, 0]],
[[1, 0], [2, 0]]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
])
def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums, slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
assert operand.shape[op_axis] == idxs.shape[idxs_axis]
ans = vmap(fun, (op_axis, idxs_axis))(operand, idxs)
expected = np.stack([fun(operand[(slice(None),) * op_axis + (i,)],
idxs[(slice(None),) * idxs_axis + (i,)])
for i in range(idxs.shape[idxs_axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,
dnums, slice_sizes),
"op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype":
dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes}
for dtype in [np.float32]
for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [
(0, 0, (2, 5), np.array([[[0], [2]], [[1], [3]]]),
lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, 1, (10, 2), np.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(0, 1, (2, 10, 5,), np.array([[[0, 2, 1], [0, 3, 3]]]).T,
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, 0, (10, 5, 2), np.array([[[0, 2], [1, 0]],
[[1, 0], [2, 0]]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
])
def testGatherGradBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,
slice_sizes):
rng = jtu.rand_default(self.rng())
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: jnp.sum(jnp.sin(fun(x, idx))))
operand = rng(shape, dtype)
assert operand.shape[op_axis] == idxs.shape[idxs_axis]
ans = vmap(gfun, (op_axis, idxs_axis))(operand, idxs)
expected = np.stack([gfun(operand[(slice(None),) * op_axis + (i,)],
idxs[(slice(None),) * idxs_axis + (i,)])
for i in range(idxs.shape[idxs_axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
def testNumpyIndexing1(self):
a = jnp.arange(2 * 3 * 4).reshape((2, 3, 4))
ind = np.array([[0, 1],
[2, 0]])
def f(a, ind):
return a[:, ind]
expected = np.stack([f(a, ind[i, :]) for i in range(ind.shape[0])])
ans = vmap(f, (None, 0))(a, ind)
assert np.all(ans == expected)
def testNumpyIndexing2(self):
a = jnp.arange(2 * 3 * 4).reshape((2, 3, 4))
def f(a):
inds = jnp.array([0, 2])
return a[:, inds]
ans = vmap(f)(a)
expected = np.stack([f(a[:, i, :]) for i in range(a.shape[1])], axis=1)
assert np.all(ans == expected)
def testTranspose(self):
x = np.arange(4 * 3 * 3).reshape((4, 3, 3))
ans = vmap(lambda x: x + x.T)(x)
expected = x + np.swapaxes(x, -1, -2)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposePermutation(self):
x = np.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))
ans = vmap(lambda x: jnp.transpose(x, (1, 0, 2)))(x)
expected = np.transpose(x, (0, 2, 1, 3))
self.assertAllClose(ans, expected, check_dtypes=False)
x = np.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))
ans = vmap(lambda x: jnp.transpose(x, (1, 2, 0)))(x)
expected = np.transpose(x, (0, 2, 3, 1))
self.assertAllClose(ans, expected, check_dtypes=False)
x = np.arange(6 * 3 * 4 * 5).reshape((3, 4, 6, 5))
ans = vmap(lambda x: jnp.transpose(x, (1, 2, 0)), in_axes=2)(x)
expected = np.transpose(x, (2, 1, 3, 0))
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue354(self):
psd_mat = self.rng().randn(20, 10)
psd_mat = psd_mat.T.dot(psd_mat)
vec = self.rng().randn(10)
def f(scale):
scaled_mat = scale[jnp.newaxis] * psd_mat
chol = jnp.linalg.cholesky(scaled_mat)
return -0.5 * jnp.sum((jnp.einsum('ij,j->i', chol, vec))**2)
vmapped_f = vmap(f)
vmapped_f_grad = grad(lambda x: jnp.sum(vmapped_f(x)))
scales = np.array([[0.1], [0.2], [0.3], [0.4], [0.5]])
ans = vmapped_f_grad(scales) # don't crash!
expected = np.stack([grad(f)(scale) for scale in scales])
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=jtu.default_gradient_tolerance)
@jax.legacy_prng_key('allow')
def testIssue489(self):
# https://github.com/jax-ml/jax/issues/489
def f(key):
def body_fn(uk):
key = uk[1]
u = random.uniform(key, ())
key, _ = random.split(key)
return u, key
u, _ = lax.while_loop(lambda uk: uk[0] > 0.5, body_fn, (1., key))
return u
with jax.debug_key_reuse(False):
print(vmap(f)(random.split(random.PRNGKey(0), 2))) # no crash
def testEmptyTuples(self):
# Ensure there is no crash when a vectorized input contains empty tuples.
result = vmap(lambda x, _: x + 1)(np.array([0, 1]), ())
self.assertAllClose(result, np.array([1, 2]), check_dtypes=False)
# Ensure there is no crash when a vectorized output contains empty tuples.
result, empty_tuple = vmap(lambda x: (x + 1, ()))(np.array([0, 1]))
self.assertAllClose(result, np.array([1, 2]), check_dtypes=False)
self.assertEqual((), empty_tuple)
def testIndexAddBatchedIndexesOnly(self):
f = lambda x, idx, y: jnp.asarray(x).at[idx].add(y)
result = vmap(f, (None, 0, None))(np.zeros((10,)), np.arange(10,), 1.)
self.assertAllClose(result, np.eye(10), check_dtypes=False)
def testIssue1170(self):
def f(index1, index2):
return jnp.arange(36).reshape(6, 6)[index1, index2]
g = jax.jit(jax.pmap(f))
ans = g(index1=np.asarray([1]), index2=np.asarray([2]))
expected = g(np.asarray([1]), np.asarray([2]))
self.assertAllClose(ans, expected)
def testIssue3883(self):
def scalar_f(x):
return lax.dynamic_slice(x, [], [])
xs = jnp.array([1, 2, 3, 4])
ans = vmap(scalar_f)(xs)
expected = jnp.array([scalar_f(x) for x in xs])
self.assertAllClose(ans, expected)
def scalar_f2(x):
return lax.dynamic_update_slice(x, 7, [])
xs = jnp.array([1, 2, 3, 4])
ans = vmap(scalar_f2)(xs)
expected = jnp.array([scalar_f2(x) for x in xs])
self.assertAllClose(ans, expected)
@parameterized.named_parameters(
{"testcase_name": "_{}_vmap_names={}_collective_names={}".format(
collective.__name__.replace(" ", ""),
"".join(vmap_names), "".join(collective_names)),
"collective": collective, "bulk_op": bulk_op, "vmap_names": vmap_names,
"collective_names": collective_names}
for collective, bulk_op in [(lax.psum, jnp.sum),
(lax.pmax, jnp.max),
(lax.pmin, jnp.min)]
for vmap_names in [('i',), ('i', 'j'), ('i', 'j', 'k')]
for subset_size in range(1, len(vmap_names) + 1)
for collective_subset in it.combinations(vmap_names, subset_size)
for collective_names in it.permutations(collective_subset))
def testCommAssocCollective(self, collective, bulk_op, vmap_names, collective_names):
shape = (2, 2, 2)
x = jnp.arange(np.prod(shape), dtype=jnp.float32).reshape(shape)
# To test relative permutations of the order in which the axis names appear
# in the primitive call versus the order the vmaps are applied, we always
# apply vmaps in the order of the `vmap_names` argument, and apply the
# collective with names according to the `collective_names` argument.
f = lambda x: x - collective(x, collective_names)
# Use non-zero in and out axes to improve the coverage
for i, axis_name in enumerate(vmap_names):
f = vmap(f, axis_name=axis_name, in_axes=i, out_axes=i)
pos_axis = [i for i, name in enumerate(vmap_names) if name in collective_names]
self.assertAllClose(f(x), x - bulk_op(x, axis=pos_axis, keepdims=True))
if collective is lax.psum:
jtu.check_grads(f, (x,), 2, eps=1)
def testPPermute(self):
nelem = 10
ntests = 10
x = np.arange(nelem)
rng = self.rng()
for i in range(ntests):
perm = np.arange(nelem)
rng.shuffle(perm)
perm_pairs = np.stack([np.arange(nelem), perm], axis=-1)
rng.shuffle(perm_pairs)
self.assertAllClose(
vmap(lambda x: x - lax.ppermute(x, 'i', perm_pairs), axis_name='i')(x),
x - x[np.argsort(perm)])
@parameterized.named_parameters(
{"testcase_name": f"_split={split_axis}_concat={concat_axis}_vmap={vmap_axis}",
"split_axis": split_axis, "concat_axis": concat_axis, "vmap_axis": vmap_axis}
for split_axis, concat_axis, vmap_axis in it.product(range(3), range(3), range(4)))
def testAllToAll(self, vmap_axis, split_axis, concat_axis):
shape = (4, 4, 4, 4)
x = np.arange(np.prod(shape)).reshape(shape)
f = vmap(lambda x: lax.all_to_all(x, 'i', split_axis, concat_axis),
in_axes=vmap_axis, axis_name='i')
y = f(x)
ref = jnp.moveaxis(x, (vmap_axis, split_axis + (vmap_axis <= split_axis)),
(concat_axis + 1, 0))
self.assertAllClose(y, ref)
@parameterized.named_parameters(
{"testcase_name": f"_split={split_axis}_concat={concat_axis}_vmap={vmap_axis}",
"split_axis": split_axis, "concat_axis": concat_axis, "vmap_axis": vmap_axis}
for split_axis, concat_axis, vmap_axis in it.product(range(2), range(2), range(3)))
def testAllToAllSplitAxis(self, vmap_axis, split_axis, concat_axis):
shape = (4, 4, 4)
x = np.arange(np.prod(shape)).reshape(shape)
@partial(vmap, in_axes=vmap_axis, axis_name='i')
@partial(vmap, in_axes=vmap_axis, axis_name='j')
def f(x):
return lax.all_to_all(x, ('i', 'j'), split_axis, concat_axis)
unroll_shape = (2, 2, *shape[1:])
unroll_shape = list(shape)
unroll_shape[vmap_axis:vmap_axis+1] = (2, 2)
x_unroll = x.reshape(unroll_shape)
y_unrolled = f(x_unroll)
y = y_unrolled.reshape(shape)
if vmap_axis <= split_axis:
split_axis += 1
ref = jnp.moveaxis(x, (vmap_axis, split_axis),
(concat_axis + 1, 0))
self.assertAllClose(y, ref)
def testNegativeAxes(self):
x = np.arange(3*4*5).reshape(3, 4, 5)
self.assertAllClose(jax.vmap(jnp.sum, in_axes=-3)(x),
jnp.sum(x, axis=(1, 2)))
self.assertAllClose(jax.vmap(jnp.sum, in_axes=-2)(x),
jnp.sum(x, axis=(0, 2)))
self.assertAllClose(jax.vmap(jnp.sum, in_axes=-1)(x),
jnp.sum(x, axis=(0, 1)))
error = (r"vmap was requested to map its argument along axis -4, which "
r"implies that its rank should be at least 4, but is only 3 "
r"\(its shape is \(3, 4, 5\)\)")
with self.assertRaisesRegex(ValueError, error):
jax.vmap(jnp.sum, in_axes=-4)(x)
id = lambda y: y
self.assertAllClose(x, jax.vmap(id, in_axes=0, out_axes=-3)(x))
self.assertAllClose(x.transpose(1, 0, 2),
jax.vmap(id, in_axes=0, out_axes=-2)(x))
self.assertAllClose(x.transpose(1, 2, 0),
jax.vmap(id, in_axes=0, out_axes=-1)(x))
with self.assertRaisesRegex(ValueError, "axis -4 is out of bounds.*"):
jax.vmap(id, in_axes=0, out_axes=-4)(x)
self.assertAllClose(
np.full((5,), 7),
jax.vmap(lambda *xs: xs, in_axes=(0, None), out_axes=(0, -1))(
np.arange(5), 7)[1])
with self.assertRaisesRegex(ValueError, "axis -2 is out of bounds.*"):
jax.vmap(lambda *xs: xs, in_axes=(0, None), out_axes=(0, -2))(
np.arange(5), 7)
def testAxisIndex(self):
x = np.arange(10, dtype='int32')
self.assertAllClose(
vmap(lambda x: x - lax.axis_index('i'), axis_name='i')(x),
x - np.arange(x.shape[0], dtype='int32'))
def testVmapKwargs(self):
# https://github.com/jax-ml/jax/issues/912
def f(a, b):
return (2*a, 3*b)
x = vmap(f)(jnp.array([1]), jnp.array([2])) # works
y = vmap(f)(a=jnp.array([1]), b=jnp.array([2]))  # kwargs call; used to fail before issue 912 was fixed
self.assertAllClose(x, y)
def testGradOfPsum(self):
a = jnp.ones(5)
f = vmap(jax.grad(lambda x: -lax.psum(x, 'i')), out_axes=None, axis_name='i')
self.assertEqual(
f(a),
core.jaxpr_as_fun(jax.make_jaxpr(f)(a))(a)[0])
def testAllGatherToUnmapped(self):
def f(x):
return lax.all_gather(x, axis_name='i')
x = jnp.arange(15).reshape((3, 5))
# Original mapped axis becomes first axis of unmapped return value.
self.assertAllClose(vmap(f, axis_name='i', in_axes=1, out_axes=None)(x), x.T)
def testBatchedAllGather(self):
def f(x):
return lax.all_gather(x, axis_name='i')
x = jnp.arange(15).reshape((3, 5))
res = vmap(vmap(f, axis_name='i', out_axes=None), axis_name='j')(x)
self.assertAllClose(res, x)
res = vmap(vmap(f, axis_name='j'), axis_name='i', out_axes=None)(x)
self.assertAllClose(res, x.T)
def testAllGatherTiled(self):
def f(x):
return lax.all_gather(x, axis_name='i', tiled=True)
x = jnp.arange(60).reshape((4, 3, 5))
res = vmap(f, axis_name='i', in_axes=(1,), out_axes=None)(x)
self.assertAllClose(res, x.transpose((1, 0, 2)).reshape(-1, 5))
def testBatchedAllGatherTiled(self):
def f(x):
return lax.all_gather(x, axis_name='i', tiled=True)
x = jnp.arange(60).reshape((4, 3, 5))
res = vmap(vmap(f, in_axes=1, out_axes=1), axis_name='i', in_axes=1, out_axes=None)(x)
self.assertAllClose(res, x.transpose((1, 0, 2)).reshape(-1, 5))
def testAllGatherVjp(self):
def f(x):
return lax.all_gather(x, axis_name='i')
rng = self.rng()
x = rng.randn(3, 4)
y_bar = rng.randn(3, 3, 4)
x_bar, = vmap(lambda x, y_bar: vjp(f, x)[1](y_bar), axis_name='i')(x, y_bar)
self.assertAllClose(x_bar, np.sum(y_bar, axis=0))
def testAllGatherOfConst(self):
def f(x):
a = lax.all_gather(jnp.ones_like(x), axis_name='i')
b = lax.all_gather(1, axis_name='i')
return a, b
x = jnp.arange(15).reshape((3, 5))
a, b = vmap(f, axis_name='i', in_axes=1, out_axes=None)(x)
self.assertAllClose(a, jnp.ones(shape=(5, 3), dtype=x.dtype))
self.assertAllClose(b, jnp.ones(shape=(5,), dtype=b.dtype))
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_collective={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, collective.__name__.replace(" ", "")),
"shape": shape, "dtype": dtype, "axis": axis,
"collective": collective, "bulk_op": bulk_op}
for collective, bulk_op in [(parallel.pargmax, jnp.argmax),
(parallel.pargmin, jnp.argmin)]
for dtype in [np.float32, np.int32]
for shape in [(7,), (5, 8)]
for axis in range(len(shape))
)
def testArgAllReduce(self, shape, dtype, axis, collective, bulk_op):
rng = jtu.rand_default(self.rng())
x = rng(shape, dtype)
ans = vmap(lambda x: collective(x, 'i'), in_axes=axis, out_axes=None,
axis_name='i')(x)
expected = bulk_op(x, axis=axis)
self.assertAllClose(ans, expected, check_dtypes=False)
def testReduceScatterAutodiff(self):
f = vmap(partial(lax.psum_scatter, axis_name='i'), axis_name='i')
x = self.rng().randn(3, 3, 4)
jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testNonJaxTypedOutput(self):
with self.assertRaisesRegex(
TypeError, "Output from batched function.*is not a valid JAX type"):
vmap(lambda x: "hello")(np.arange(5))
def testIssue6096(self):
def f(x):
return jsp.special.betainc(jnp.ones(3), 1., x)
self.assertEqual(f(jnp.ones(3)).shape, (3,))
self.assertEqual(jax.vmap(f)(jnp.ones((2, 3))).shape, (2, 3))
def testPpermuteBatcherTrivial(self):
# https://github.com/jax-ml/jax/issues/8688
def ppermute(input):
return jax.lax.ppermute(input, axis_name="i", perm=[[0, 1], [1, 0]])
grad_fn = jax.grad(ppermute)
vmapped_gradients_fn = jax.vmap(grad_fn, axis_name="i")
vector = jax.numpy.array([1., 2.])
ans = vmapped_gradients_fn(vector) # doesn't crash
self.assertAllClose(ans, jnp.ones(2), check_dtypes=False)
def testBatchingPreservesWeakType(self):
# Regression test for https://github.com/jax-ml/jax/issues/10025
x = jnp.ravel(1)
self.assertTrue(dtypes.is_weakly_typed(x))
@vmap
def f(x):
self.assertTrue(dtypes.is_weakly_typed(x), f"{x} is not weakly-typed")
return x
y = f(x)
self.assertTrue(dtypes.is_weakly_typed(y))
Array = Any
ArrayElt = Any
Int = Union[int, core.Tracer]
# Can't use NamedTuple here b/c those are pytrees
|
BatchingTest
|
python
|
google__jax
|
jax/_src/tree_util.py
|
{
"start": 14229,
"end": 15830
}
|
class ____:
pass
@export
def tree_reduce(function: Callable[[T, Any], T],
tree: Any,
initializer: T | Unspecified = Unspecified(),
is_leaf: Callable[[Any], bool] | None = None) -> T:
"""Alias of :func:`jax.tree.reduce`."""
if isinstance(initializer, Unspecified):
return functools.reduce(function, tree_leaves(tree, is_leaf=is_leaf))
else:
return functools.reduce(function, tree_leaves(tree, is_leaf=is_leaf), initializer)
def _parallel_reduce(
sequence: list[T],
operation: Callable[[T, T], T],
identity: T | Unspecified = Unspecified(),
) -> T:
length = len(sequence)
if length == 0:
if isinstance(identity, Unspecified):
raise TypeError("Must specify identity for parallel reduction of empty sequence.")
return identity
elif length == 1:
return sequence[0]
else:
index = length // 2
a = _parallel_reduce(sequence[:index], operation, identity)
b = _parallel_reduce(sequence[index:], operation, identity)
return operation(a, b)
@export
def tree_reduce_associative(
operation: Callable[[T, T], T],
tree: Any,
*,
identity: T | Unspecified = Unspecified(),
is_leaf: Callable[[Any], bool] | None = None,
) -> T:
"""Alias of :func:`jax.tree.reduce_associative`."""
sequence = tree_leaves(tree, is_leaf=is_leaf)
return _parallel_reduce(sequence, operation, identity)
@export
def tree_all(tree: Any, *, is_leaf: Callable[[Any], bool] | None = None) -> bool:
"""Alias of :func:`jax.tree.all`."""
return all(tree_leaves(tree, is_leaf=is_leaf))
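# Illustrative sketch (not from the original source): _parallel_reduce combines
# the leaf list pairwise in a balanced tree, so for an associative `operation`
# it matches a plain left fold. For example:
#   _parallel_reduce([1, 2, 3, 4], operator.add)    # (1 + 2) + (3 + 4) == 10
#   _parallel_reduce([], operator.add, identity=0)  # returns the identity, 0
#   _parallel_reduce([], operator.add)              # raises TypeError (no identity given)
# (operator.add stands in for any associative callable here.)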
|
Unspecified
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_object_position16.py
|
{
"start": 315,
"end": 933
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position16.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, None, None, {"hidden": 1})
worksheet.insert_image("A9", self.image_dir + "red.png", {"x_offset": 192})
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
walkccc__LeetCode
|
solutions/352. Data Stream as Disjoint Intervals/352.py
|
{
"start": 42,
"end": 1499
}
|
class ____:
def __init__(self):
self.intervals = SortedDict() # {start: (start, end)}
def addNum(self, val: int) -> None:
if val in self.intervals:
return
lo = self._lowerKey(val)
hi = self._higherKey(val)
# {lo, map[lo][1]} + val + {hi, map[hi][1]} = {lo, map[hi][1]}
if lo >= 0 and hi >= 0 and self.intervals[lo][1] + 1 == val and val + 1 == hi:
self.intervals[lo][1] = self.intervals[hi][1]
del self.intervals[hi]
# {lo, map[lo][1]} + val = {lo, val}
# Prevent adding duplicate entry by using '>=' instead of '=='.
elif lo >= 0 and self.intervals[lo][1] + 1 >= val:
self.intervals[lo][1] = max(self.intervals[lo][1], val)
elif hi >= 0 and val + 1 == hi:
# val + {hi, map[hi][1]} = {val, map[hi][1]}
self.intervals[val] = [val, self.intervals[hi][1]]
del self.intervals[hi]
else:
self.intervals[val] = [val, val]
def getIntervals(self) -> list[list[int]]:
return list(self.intervals.values())
def _lowerKey(self, key: int):
"""Returns the maximum key in `self.intervals` < `key`."""
i = self.intervals.bisect_left(key)
if i == 0:
return -1
return self.intervals.peekitem(i - 1)[0]
def _higherKey(self, key: int):
"""Returns the minimum key in `self.intervals` < `key`."""
i = self.intervals.bisect_right(key)
if i == len(self.intervals):
return -1
return self.intervals.peekitem(i)[0]
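# Illustrative sketch (not from the original source): assuming the masked class
# above is SummaryRanges (the target given below) and SortedDict comes from the
# sortedcontainers package:
#   sr = SummaryRanges()
#   for v in (1, 3, 7, 2, 6):
#       sr.addNum(v)
#   sr.getIntervals()  # -> [[1, 3], [6, 7]]  (2 bridges 1 and 3; 6 extends 7)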
|
SummaryRanges
|
python
|
google__pytype
|
pytype/overlays/fiddle_overlay.py
|
{
"start": 9789,
"end": 9976
}
|
class ____(Buildable):
"""An instantiation of a fiddle.Config with a particular template."""
def __init__(self, *args, **kwargs):
super().__init__("Config", *args, **kwargs)
|
Config
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cutedsl/cutedsl_template.py
|
{
"start": 4933,
"end": 7069
}
|
class ____(ChoiceCaller):
"""Caller for CuteDSL templates that integrates with the autotuning system."""
def __init__(
self,
name: str,
input_nodes: list[Buffer],
layout: Layout,
make_kernel_render: Any,
bmreq: CuteDSLBenchmarkRequest,
template: "CuteDSLTemplate",
mutated_inputs: Optional[Iterable[IRNode]] = None,
):
super().__init__(
name=name,
input_nodes=input_nodes,
layout=layout,
description=f"CuteDSL template {name}",
)
self.make_kernel_render = make_kernel_render
self.bmreq = bmreq
self.template = template
self.mutated_inputs = mutated_inputs
def __str__(self) -> str:
return f"CuteDSLTemplateCaller({self.name})"
def benchmark(self, *args, out) -> float:
"""Benchmark the kernel execution."""
return self.bmreq.benchmark(*args, out=out)
def output_node(self) -> Union[TensorBox, ShapeAsConstantBuffer]:
"""Create the output node for this template choice."""
return TensorBox.create(
CuteDSLTemplateBuffer(
layout=self.layout,
inputs=self.input_nodes,
make_kernel_render=self.make_kernel_render,
template=self.template,
mutated_inputs=self.mutated_inputs,
)
)
def call_name(self) -> str:
"""Return the kernel call name."""
return self.name
def to_callable(self) -> Any:
"""Return callable that can execute this kernel."""
return self.make_kernel_render
def hash_key(self) -> str:
"""Return unique hash key for this choice."""
return "-".join(
[
self.name.rsplit("_", 1)[0],
self.bmreq.module_cache_key,
]
)
def info_dict(self) -> dict[str, Any]:
"""Return information about this kernel."""
return {
"name": self.name,
"backend": "CuteDSL",
"template": self.template.name,
}
|
CuteDSLTemplateCaller
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/metaclass3.py
|
{
"start": 405,
"end": 446
}
|
class ____(metaclass=Meta3):
pass
|
Base4
|
python
|
pytorch__pytorch
|
test/inductor/test_control_deps.py
|
{
"start": 419,
"end": 2583
}
|
class ____(InductorTestCase):
@config.patch(reorder_for_locality=False)
@requires_gpu()
def test_control_deps_prevents_fusion(self):
def fn(a, b):
c = a + 1
d = b @ b
e = c * 2
return d, e
# Custom pass to add control dependency from d -> c
def add_control_deps(graph):
nodes = list(graph.nodes)
nodes = [n for n in graph.nodes if n.op == "call_function"]
assert len(nodes) == 3
c_node = nodes[0]
d_node = nodes[1]
e_node = nodes[2]
assert d_node.target == torch.ops.aten.mm.default
from torch.utils._ordered_set import OrderedSet
deps_map = {d_node: OrderedSet([c_node]), e_node: OrderedSet([d_node])}
torch._inductor.fx_passes.control_dependencies.preserve_node_ordering(
graph, deps_map
)
sub_g = graph.find_nodes(
op="call_function", target=torch.ops.higher_order.control_deps
)
assert len(sub_g) == 2
assert list(sub_g[0].meta["val"].shape) == [256, 256]
assert list(sub_g[1].meta["val"].shape) == [256, 256]
for attr in graph.find_nodes(op="get_attr"):
for n in getattr(graph.owning_module, attr.target).graph.nodes:
assert list(n.meta["val"].shape) == [256, 256]
return graph
with torch._inductor.config.patch(
post_grad_custom_post_pass=add_control_deps,
):
compiled_fn = torch.compile(fn)
a = torch.rand([256, 256], device=GPU_TYPE)
b = torch.rand([256, 256], device=GPU_TYPE)
_, code = run_and_get_code(torch.compile(fn), a, b)
result = compiled_fn(a, b)
FileCheck().check(".run(").check("extern_kernels.mm(").check(".run(").run(
code[0]
)
expected = fn(a, b)
torch.testing.assert_close(result, expected)
if __name__ == "__main__":
if IS_LINUX and HAS_CUDA_AND_TRITON:
run_tests(needs="filelock")
|
TestControlDeps
|
python
|
tensorflow__tensorflow
|
tensorflow/python/debug/lib/debug_events_writer_test.py
|
{
"start": 28784,
"end": 38569
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def jsonRoundTripCheck(self, obj):
self.assertEqual(
json_lib.dumps(json_lib.loads(json_lib.dumps(obj)), sort_keys=True),
json_lib.dumps(obj, sort_keys=True))
def testExecutionDigestWithNoOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=None)
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], None)
def testExecutionDigestWithTwoOutputsToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357, 2468])
json = execution_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357, 2468))
def testExecutionNoGraphNoInputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
graph_id=None,
input_tensor_ids=None,
output_tensor_ids=[2468],
debug_tensor_values=([1, 0],))
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertIsNone(json["graph_id"])
self.assertIsNone(json["input_tensor_ids"])
self.assertEqual(json["output_tensor_ids"], (2468,))
self.assertEqual(json["debug_tensor_values"], ([1, 0],))
def testExecutionNoGraphNoInputButWithOutputToJson(self):
execution_digest = debug_events_reader.ExecutionDigest(
1234, 5678, "FooOp", output_tensor_device_ids=[1357])
execution = debug_events_reader.Execution(
execution_digest,
"localhost",
("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=None,
debug_tensor_values=None)
json = execution.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["output_tensor_device_ids"], (1357,))
self.assertEqual(json["host_name"], "localhost")
self.assertEqual(json["stack_frame_ids"], ("a1", "b2"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.FULL_HEALTH)
self.assertEqual(json["graph_id"], "abcd")
self.assertEqual(json["input_tensor_ids"], (13, 37))
self.assertIsNone(json["output_tensor_ids"])
self.assertIsNone(json["debug_tensor_values"])
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testExecutionWithNoOutputTensorsReturnsZeroForNumOutputs(
self, output_tensor_ids):
execution = debug_events_reader.Execution(
debug_events_reader.ExecutionDigest(1234, 5678, "FooOp"),
"localhost", ("a1", "b2"),
debug_event_pb2.TensorDebugMode.FULL_HEALTH,
graph_id="abcd",
input_tensor_ids=[13, 37],
output_tensor_ids=output_tensor_ids,
debug_tensor_values=None)
self.assertEqual(execution.num_outputs, 0)
def testDebuggedDeviceToJons(self):
debugged_device = debug_events_reader.DebuggedDevice("/TPU:3", 4)
self.assertEqual(debugged_device.to_json(), {
"device_name": "/TPU:3",
"device_id": 4,
})
def testDebuggedGraphToJonsWitouthNameInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
None,
"b1c2",
outer_graph_id=None,
)
self.assertEqual(
debugged_graph.to_json(), {
"name": None,
"graph_id": "b1c2",
"outer_graph_id": None,
"inner_graph_ids": [],
})
def testDebuggedGraphToJonsWithNameAndInnerOuterGraphIds(self):
debugged_graph = debug_events_reader.DebuggedGraph(
"loss_function",
"b1c2",
outer_graph_id="a0b1",
)
debugged_graph.add_inner_graph_id("c2d3")
debugged_graph.add_inner_graph_id("c2d3e4")
self.assertEqual(
debugged_graph.to_json(), {
"name": "loss_function",
"graph_id": "b1c2",
"outer_graph_id": "a0b1",
"inner_graph_ids": ["c2d3", "c2d3e4"],
})
@parameterized.named_parameters(
("EmptyList", []),
("None", None),
)
def testGraphOpDigestWithNoOutpusReturnsNumOutputsZero(
self, output_tensor_ids):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2",
output_tensor_ids,
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
self.assertEqual(op_creation_digest.num_outputs, 0)
def testGraphOpCreationDigestNoInputNoDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=None,
device_name=None)
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertIsNone(json["input_names"])
self.assertIsNone(json["device_name"])
def testGraphOpCreationDigestWithInputsAndDeviceNameToJson(self):
op_creation_digest = debug_events_reader.GraphOpCreationDigest(
1234,
5678,
"deadbeef",
"FooOp",
"Model_1/Foo_2", [135],
"machine.cluster", ("a1", "a2"),
input_names=["Bar_1", "Qux_2"],
device_name="/device:GPU:0")
json = op_creation_digest.to_json()
self.jsonRoundTripCheck(json)
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_tensor_ids"], (135,))
self.assertEqual(json["host_name"], "machine.cluster")
self.assertEqual(json["stack_frame_ids"], ("a1", "a2"))
self.assertEqual(json["input_names"], ("Bar_1", "Qux_2"))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceDigestToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
json = trace_digest.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
def testGraphExecutionTraceWithTensorDebugValueAndDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH,
debug_tensor_value=[3, 1], device_name="/device:GPU:0")
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.CURT_HEALTH)
self.assertEqual(json["debug_tensor_value"], (3, 1))
self.assertEqual(json["device_name"], "/device:GPU:0")
def testGraphExecutionTraceNoTensorDebugValueNoDeviceNameToJson(self):
trace_digest = debug_events_reader.GraphExecutionTraceDigest(
1234, 5678, "FooOp", "Model_1/Foo_2", 1, "deadbeef")
trace = debug_events_reader.GraphExecutionTrace(
trace_digest, ["g1", "g2", "deadbeef"],
debug_event_pb2.TensorDebugMode.NO_TENSOR,
debug_tensor_value=None, device_name=None)
json = trace.to_json()
self.assertEqual(json["wall_time"], 1234)
self.assertEqual(json["op_type"], "FooOp")
self.assertEqual(json["op_name"], "Model_1/Foo_2")
self.assertEqual(json["output_slot"], 1)
self.assertEqual(json["graph_id"], "deadbeef")
self.assertEqual(json["graph_ids"], ("g1", "g2", "deadbeef"))
self.assertEqual(json["tensor_debug_mode"],
debug_event_pb2.TensorDebugMode.NO_TENSOR)
self.assertIsNone(json["debug_tensor_value"])
self.assertIsNone(json["device_name"])
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
|
DataObjectsTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/records/fields/dict_template_path.py
|
{
"start": 151,
"end": 426
}
|
class ____(Path):
def update(self, template: Dict[str, Any], value: Dict[str, Any]) -> None:
template.clear()
template.update(value)
def write(self, template: Dict[str, Any], value: Dict[str, Any]) -> None:
template.update(value)
|
DictTemplatePath
|
python
|
pypa__warehouse
|
warehouse/packaging/models.py
|
{
"start": 35655,
"end": 35833
}
|
class ____(db.ModelBase):
__tablename__ = "file_registry"
id: Mapped[int] = mapped_column(primary_key=True)
filename: Mapped[str] = mapped_column(unique=True)
|
Filename
|
python
|
joke2k__faker
|
tests/providers/test_job.py
|
{
"start": 3211,
"end": 3400
}
|
class ____:
"""Test hy_AM job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
assert faker.job() in HyAmJobProvider.jobs
|
TestHyAm
|
python
|
cython__cython
|
Cython/Compiler/MatchCaseNodes.py
|
{
"start": 4309,
"end": 5709
}
|
class ____(Node):
"""
pattern PatternNode
body StatListNode
guard ExprNode or None
"""
child_attrs = ["pattern", "body", "guard"]
def is_irrefutable(self):
if isinstance(self.pattern, ErrorNode):
return True # value doesn't really matter
return self.pattern.is_irrefutable() and not self.guard
def is_simple_value_comparison(self):
if self.guard:
return False
return self.pattern.is_simple_value_comparison()
def validate_targets(self):
if isinstance(self.pattern, ErrorNode):
return
self.pattern.get_targets()
def validate_irrefutable(self):
if isinstance(self.pattern, ErrorNode):
return
self.pattern.validate_irrefutable()
def analyse_declarations(self, env):
self.pattern.analyse_declarations(env)
if self.guard:
self.guard.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_case_expressions(self, subject_node, env):
if self.guard:
error(self.pos, "Cases with guards are currently not supported")
return self
error(self.pos, "This case statement is not yet supported")
return self
def generate_execution_code(self, code):
error(self.pos, "This case statement is not yet supported")
|
MatchCaseNode
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-operations-to-make-the-array-alternating.py
|
{
"start": 62,
"end": 742
}
|
class ____(object):
def minimumOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
even_top = collections.Counter(nums[i] for i in xrange(0, len(nums), 2)).most_common(2) # Time: O(nlogk)
odd_top = collections.Counter(nums[i] for i in xrange(1, len(nums), 2)).most_common(2) # Time: O(nlogk)
if not odd_top or even_top[0][0] != odd_top[0][0]:
return len(nums)-even_top[0][1]-(odd_top[0][1] if odd_top else 0)
return min(len(nums)-even_top[0][1]-(odd_top[1][1] if len(odd_top) == 2 else 0),
len(nums)-odd_top[0][1]-(even_top[1][1] if len(even_top) == 2 else 0))
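# Illustrative sketch (not from the original source): for nums = [3, 1, 3, 2, 4, 3]
# the even positions count {3: 2, 4: 1} and the odd positions count {1: 1, 2: 1, 3: 1}.
# The top even value (3) differs from the top odd value, so the result is
# len(nums) - 2 - 1 == 3 replacements. Note xrange/most_common above target Python 2.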
|
Solution
|
python
|
pytorch__pytorch
|
torch/profiler/_pattern_matcher.py
|
{
"start": 19294,
"end": 25016
}
|
class ____(Pattern):
def __init__(self, prof: profile, should_benchmark: bool = False) -> None:
super().__init__(prof, should_benchmark)
self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
self.description = "Detected matmul with dimension not aligned. Please use matmul with aligned dimension."
self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"
@property
def skip(self) -> bool:
return not self.prof.with_stack or not self.prof.record_shapes
def match(self, event: _ProfilerEvent) -> bool:
def mutiple_of(shapes, multiple):
return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])
if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"):
return False
if not input_dtypes(event):
return False
arg_dtype = input_dtypes(event)[0]
if arg_dtype in (torch.bfloat16, torch.half) and not mutiple_of(
input_shapes(event), 8
):
return True
return False
def benchmark(self, events: list[_ProfilerEvent]):
def closest_multiple(shapes, multiple):
return [multiple * math.ceil(shape / multiple) for shape in shapes]
shapes_factor_map = {input_shapes(event): 0.0 for event in events}
for shape in shapes_factor_map:
matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16)
matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16)
not_aligned_dim_timer = benchmark.Timer(
stmt="torch.mm(matrixA, matrixB)",
globals={"matrixA": matrixA, "matrixB": matrixB},
)
matrixA = torch.randn(
closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16
)
matrixB = torch.randn(
closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16
)
aligned_dim_timer = benchmark.Timer(
stmt="torch.mm(matrixA, matrixB)",
globals={"matrixA": matrixA, "matrixB": matrixB},
)
not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean
aligned_dim_time = aligned_dim_timer.timeit(10).mean
shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time
return shapes_factor_map
def source_code_location(event: Optional[_ProfilerEvent]) -> str:
while event:
if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
assert isinstance(
event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)
)
if not event.extra_fields.caller.file_name.startswith("torch" + os.sep):
return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}"
event = event.parent
return "No source code location found"
def input_shapes(event: _ProfilerEvent):
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs)
def input_dtypes(event: _ProfilerEvent):
assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs)
def report_all_anti_patterns(
prof,
should_benchmark: bool = False,
print_enable: bool = True,
json_report_dir: Optional[str] = None,
) -> None:
report_dict: dict = {}
anti_patterns = [
ExtraCUDACopyPattern(prof, should_benchmark),
# ForLoopIndexingPattern(prof, should_benchmark),
FP32MatMulPattern(prof, should_benchmark),
OptimizerSingleTensorPattern(prof, should_benchmark),
SynchronizedDataLoaderPattern(prof, should_benchmark),
GradNotSetToNonePattern(prof, should_benchmark),
Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark),
MatMulDimInFP16Pattern(prof, should_benchmark),
]
reported = set()
summaries = []
message_list = [f"{'-' * 40}TorchTidy Report{'-' * 40}"]
message_list.append("Matched Events:")
for anti_pattern in anti_patterns:
matched_events = anti_pattern.matched_events()
if not matched_events:
continue
summaries.append(anti_pattern.summary(matched_events))
for event in matched_events:
report_msg = anti_pattern.report(event)
if report_msg not in reported:
message_list.append(report_msg)
reported.add(report_msg)
src_location, line_no = source_code_location(event).split(":")
report_dict.setdefault(src_location, []).append(
{
"line_number": int(line_no),
"name": anti_pattern.name,
"url": anti_pattern.url,
"message": anti_pattern.description,
}
)
if json_report_dir is not None:
json_report_path = os.path.join(json_report_dir, "torchtidy_report.json")
if os.path.exists(json_report_path):
with open(json_report_path) as f:
exisiting_report = json.load(f)
exisiting_report.update(report_dict)
report_dict = exisiting_report
with open(json_report_path, "w") as f:
json.dump(report_dict, f, indent=4)
message_list.append("Summary:")
message_list += summaries
message_list.append(f"{'-' * 40}TorchTidy Report{'-' * 40}")
if print_enable:
print("\n".join(message_list))
|
MatMulDimInFP16Pattern
|
python
|
coleifer__peewee
|
tests/prefetch_tests.py
|
{
"start": 21411,
"end": 21508
}
|
class ____(TestModel):
name = TextField()
a = ForeignKeyField(A)
x = ForeignKeyField(X)
|
B
|
python
|
wandb__wandb
|
wandb/integration/dspy/dspy.py
|
{
"start": 1481,
"end": 15544
}
|
class ____(dspy.utils.BaseCallback):
"""W&B callback for tracking DSPy evaluation and optimization.
This callback logs evaluation scores, per-step predictions (optional), and
a table capturing the DSPy program signature over time. It can also save
the best program as a W&B Artifact for reproducibility.
Examples:
Basic usage within DSPy settings:
```python
import dspy
import wandb
from wandb.integration.dspy import WandbDSPyCallback
with wandb.init(project="dspy-optimization") as run:
dspy.settings.callbacks.append(WandbDSPyCallback(run=run))
# Run your DSPy optimization/evaluation
```
"""
def __init__(self, log_results: bool = True, run: Run | None = None) -> None:
"""Initialize the callback.
Args:
log_results (bool): Whether to log per-evaluation prediction tables.
run (Run | None): Optional W&B run to use. Defaults to the
current global run if available.
Raises:
wandb.Error: If no active run is provided or found.
"""
# If no run is provided, use the current global run if available.
if run is None:
if wandb.run is None:
raise wandb.Error(
"You must call `wandb.init()` before instantiating WandbDSPyCallback()."
)
run = wandb.run
self.log_results = log_results
with telemetry.context(run=run) as tel:
tel.feature.dspy_callback = True
self._run = run
self._did_log_config: bool = False
self._program_info: dict[str, Any] = {}
self._program_table: wandb.Table | None = None
self._row_idx: int = 0
def _flatten_dict(
self, nested: Any, parent_key: str = "", sep: str = "."
) -> dict[str, Any]:
"""Recursively flatten arbitrarily nested mappings and sequences.
Args:
nested (Any): Nested structure of mappings/lists to flatten.
parent_key (str): Prefix to prepend to keys in the flattened output.
sep (str): Key separator for nested fields.
Returns:
dict[str, Any]: Flattened dictionary representation.
"""
flat: dict[str, Any] = {}
def _walk(obj: Any, base: str) -> None:
if isinstance(obj, Mapping):
for k, v in obj.items():
new_key = f"{base}{sep}{k}" if base else str(k)
_walk(v, new_key)
elif isinstance(obj, Sequence) and not isinstance(
obj, (str, bytes, bytearray)
):
for idx, v in enumerate(obj):
new_key = f"{base}{sep}{idx}" if base else str(idx)
_walk(v, new_key)
else:
# Base can be empty only if the top-level is a scalar; guard against that.
key = base if base else ""
if key:
flat[key] = obj
_walk(nested, parent_key)
return flat
def _extract_fields(self, fields: list[dict[str, Any]]) -> dict[str, str]:
"""Convert signature fields to a flat mapping of strings.
Note:
The input is expected to be a dict-like mapping from field names to
field metadata. Values are stringified for logging.
Args:
fields (list[dict[str, Any]]): Mapping of field name to metadata.
Returns:
dict[str, str]: Mapping of field name to string value.
"""
return {k: str(v) for k, v in fields.items()}
def _extract_program_info(self, program_obj: Any) -> dict[str, Any]:
"""Extract signature-related info from a DSPy program.
Attempts to read the program signature, instructions, input and output
fields from a DSPy `Predict` parameter if available.
Args:
program_obj (Any): DSPy program/module instance.
Returns:
dict[str, Any]: Flattened dictionary of signature metadata.
"""
info_dict = {}
if program_obj is None:
return info_dict
try:
sig = next(
param.signature
for _, param in program_obj.named_parameters()
if isinstance(param, dspy.Predict)
)
if getattr(sig, "signature", None):
info_dict["signature"] = sig.signature
if getattr(sig, "instructions", None):
info_dict["instructions"] = sig.instructions
if getattr(sig, "input_fields", None):
input_fields = sig.input_fields
info_dict["input_fields"] = self._extract_fields(input_fields)
if getattr(sig, "output_fields", None):
output_fields = sig.output_fields
info_dict["output_fields"] = self._extract_fields(output_fields)
return self._flatten_dict(info_dict)
except Exception as e:
logger.warning(
"Failed to extract program info from Evaluate instance: %s", e
)
return info_dict
def on_evaluate_start(
self,
call_id: str,
instance: Any,
inputs: dict[str, Any],
) -> None:
"""Handle start of a DSPy evaluation call.
Logs non-private fields from the evaluator instance to W&B config and
captures program signature info for later logging.
Args:
call_id (str): Unique identifier for the evaluation call.
instance (Any): The evaluation instance (e.g., `dspy.Evaluate`).
inputs (dict[str, Any]): Inputs passed to the evaluation (may
include a `program` key with the DSPy program).
"""
if not self._did_log_config:
instance_vars = vars(instance) if hasattr(instance, "__dict__") else {}
serializable = {
k: v for k, v in instance_vars.items() if not k.startswith("_")
}
if "devset" in serializable:
# we don't want to log the devset in the config
del serializable["devset"]
self._run.config.update(serializable)
self._did_log_config = True
# 2) Build/append program signature tables from the 'program' inputs
if program_obj := inputs.get("program"):
self._program_info = self._extract_program_info(program_obj)
def on_evaluate_end(
self,
call_id: str,
outputs: Any | None,
exception: Exception | None = None,
) -> None:
"""Handle end of a DSPy evaluation call.
If available, logs a numeric `score` metric and (optionally) per-step
prediction tables. Always appends a row to the program-signature table.
Args:
call_id (str): Unique identifier for the evaluation call.
outputs (Any | None): Evaluation outputs; supports
`dspy.evaluate.evaluate.EvaluationResult`.
exception (Exception | None): Exception raised during evaluation, if any.
"""
# The `BaseCallback` does not define the interface for the `outputs` parameter,
# Currently, we know of `EvaluationResult` which is a subclass of `dspy.Prediction`.
# We currently support this type and will warn the user if a different type is passed.
score: float | None = None
if exception is None:
if isinstance(outputs, dspy.evaluate.evaluate.EvaluationResult):
# log the float score as a wandb metric
score = outputs.score
wandb.log({"score": float(score)}, step=self._row_idx)
# Log the predictions as a separate table for each eval end.
# We know that results is of type `list[tuple["dspy.Example", "dspy.Example", Any]]`
results = outputs.results
if self.log_results:
rows = self._parse_results(results)
if rows:
self._log_predictions_table(rows)
else:
wandb.termwarn(
f"on_evaluate_end received unexpected outputs type: {type(outputs)}. "
"Expected dspy.evaluate.evaluate.EvaluationResult; skipping logging score and `log_results`."
)
else:
wandb.termwarn(
f"on_evaluate_end received exception: {exception}. "
"Skipping logging score and `log_results`."
)
# Log the program signature iteratively
if self._program_table is None:
columns = ["step", *self._program_info.keys()]
if isinstance(score, float):
columns.append("score")
self._program_table = wandb.Table(columns=columns, log_mode="INCREMENTAL")
if self._program_table is not None:
values = list(self._program_info.values())
if isinstance(score, float):
values.append(score)
self._program_table.add_data(
self._row_idx,
*values,
)
self._run.log(
{"program_signature": self._program_table}, step=self._row_idx
)
self._row_idx += 1
def _parse_results(
self,
results: list[tuple[dspy.Example, dspy.Prediction | dspy.Completions, bool]],
) -> list[dict[str, Any]]:
"""Normalize evaluation results into serializable row dicts.
Args:
results (list[tuple]): Sequence of `(example, prediction, is_correct)`
tuples from DSPy evaluation.
Returns:
list[dict[str, Any]]: Rows with `example`, `prediction`, `is_correct`.
"""
_rows: list[dict[str, Any]] = []
for example, prediction, is_correct in results:
if isinstance(prediction, dspy.Prediction):
prediction_dict = prediction.toDict()
if isinstance(prediction, dspy.Completions):
prediction_dict = prediction.items()
row: dict[str, Any] = {
"example": example.toDict(),
"prediction": prediction_dict,
"is_correct": is_correct,
}
_rows.append(row)
return _rows
def _log_predictions_table(self, rows: list[dict[str, Any]]) -> None:
"""Log a W&B Table of predictions for the current evaluation step.
Args:
rows (list[dict[str, Any]]): Prediction rows to log.
"""
rows = _flatten_rows(rows)
columns = list(rows[0].keys())
data: list[list[Any]] = [list(row.values()) for row in rows]
preds_table = wandb.Table(columns=columns, data=data, log_mode="IMMUTABLE")
self._run.log({f"predictions_{self._row_idx}": preds_table}, step=self._row_idx)
def log_best_model(
self,
model: dspy.Module,
*,
save_program: bool = True,
save_dir: str | None = None,
filetype: Literal["json", "pkl"] = "json",
aliases: Sequence[str] = ("best", "latest"),
artifact_name: str = "dspy-program",
) -> None:
"""Save and log the best DSPy program as a W&B Artifact.
You can choose to save the full program (architecture + state) or only
the state to a single file (JSON or pickle).
Args:
model (dspy.Module): DSPy module to save.
save_program (bool): Save full program directory if True; otherwise
save only the state file. Defaults to `True`.
save_dir (str): Directory to store program files before logging. Defaults to a
subdirectory `dspy_program` within the active run's files directory
(i.e., `wandb.run.dir`).
filetype (Literal["json", "pkl"]): State file format when
`save_program` is False. Defaults to `json`.
aliases (Sequence[str]): Aliases for the logged Artifact version. Defaults to `("best", "latest")`.
artifact_name (str): Base name for the Artifact. Defaults to `dspy-program`.
Examples:
Save the complete program and add aliases:
```python
callback.log_best_model(
optimized_program, save_program=True, aliases=("best", "production")
)
```
Save only the state as JSON:
```python
callback.log_best_model(
optimized_program, save_program=False, filetype="json"
)
```
"""
# Derive metadata to help discoverability in the UI
info_dict = self._extract_program_info(model)
metadata = {
"dspy_version": getattr(dspy, "__version__", "unknown"),
"module_class": model.__class__.__name__,
**info_dict,
}
artifact = wandb.Artifact(
name=f"{artifact_name}-{self._run.id}",
type="model",
metadata=metadata,
)
# Resolve and normalize the save directory in a cross-platform way
if save_dir is None:
save_dir = os.path.join(self._run.dir, "dspy_program")
save_dir = os.path.normpath(save_dir)
try:
os.makedirs(save_dir, exist_ok=True)
except Exception as exc:
wandb.termwarn(
f"Could not create or access directory '{save_dir}': {exc}. Skipping artifact logging."
)
return
# Save per requested mode
if save_program:
model.save(save_dir, save_program=True)
artifact.add_dir(save_dir)
else:
filename = f"program.{filetype}"
file_path = os.path.join(save_dir, filename)
model.save(file_path, save_program=False)
artifact.add_file(file_path)
self._run.log_artifact(artifact, aliases=list(aliases))
|
WandbDSPyCallback
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/higher_order_ops.py
|
{
"start": 82326,
"end": 92046
}
|
class ____(TorchHigherOrderOperatorVariable):
supports_input_mutation = False
supports_aliasing = False
@raise_hard_error_if_graph_break(
reason="associative_scan must be captured completely with torch.compile."
)
def _call_function(
self,
tx: "InstructionTranslator",
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
from torch._higher_order_ops.utils import first_slice_copy
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
def arg_extractor(combine_fn, xs, additional_inputs):
return combine_fn, xs, additional_inputs
combine_fn, xs, additional_inputs = arg_extractor(*args, **kwargs)
if args[0].python_type() is functools.partial:
# This is the standard case when the user calls the frontend
# and the frontend invokes dynamo
if len(args) != 2:
unimplemented(
gb_type="torch.associative_scan: improper args",
context=f"args: {args}",
explanation=f"torch.associative_scan expects 2 positional arguments (got {len(args)}) "
"Usage: associative_scan(combine_fn, xs)",
hints=[
*graph_break_hints.USER_ERROR,
],
)
xs_treespec = args[0].keywords["spec"]
# combine_fn input check
# We need to get the pure combine_fn from the functools.partial
_check_supported_callable_arg(
tx, combine_fn.keywords["combine_fn"], "combine_fn"
)
else:
# This case is hit during re-tracing, for example in export tests
# In this case, the combine_fn is a callable and not a functools.partial
xs_treespec = _make_inlined(tx, pytree.tree_structure)(xs)
_check_supported_callable_arg(tx, combine_fn, "combine_fn")
# xs input check
if not isinstance(xs, (ListVariable, TupleVariable)):
unimplemented(
gb_type="torch.associative_scan: improper xs",
context=str(xs),
explanation=f"Expected xs to be a list/tuple but got {xs.python_type()}",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
xs_vars = xs.unpack_var_sequence(tx)
_check_all_tensorvariable(xs_vars)
# additional_inputs input check
if not isinstance(additional_inputs, (ListVariable, TupleVariable)):
unimplemented(
gb_type="torch.associative_scan: improper additional_inputs",
context=str(additional_inputs),
explanation=f"Expected additional_inputs to be a list/tuple but got {additional_inputs.python_type()}",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
additional_inputs_vars = additional_inputs.unpack_var_sequence(tx)
_check_all_tensorvariable(additional_inputs_vars)
scan_length = get_fake_value(xs_vars[0].as_proxy().node, tx).size()[0]
if scan_length == 0:
unimplemented(
gb_type="torch.associative_scan: zero-sized tensor",
context=str(xs_vars[0]),
explanation="associative_scan() operator doesn't support zero-sized tensors during tracing.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
# Trace the subgraph
# The sub_args is a slice of original input, e.g. if input.size is (3, 4), and scan dim=0
# the sub_args shape will be (4, ).
with discard_graph_changes(tx):
sub_args = [
_make_inlined(tx, first_slice_copy)(leaf)
for leaf in itertools.chain(xs_vars, xs_vars)
]
sub_args_additional_inputs = [
t.call_method(tx, "clone", args=(), kwargs={})
for t in additional_inputs_vars
]
sub_args = sub_args + sub_args_additional_inputs
(
(combine_result, _combine_spec),
combine_graph,
combine_lifted_freevars,
) = speculate_subgraph(
tx,
combine_fn,
sub_args,
sub_kwargs={},
description="associative_scan_combine_fn",
source_target=self.value,
set_subgraph_inputs="flatten_manual",
supports_input_mutation=self.supports_input_mutation,
supports_aliasing=self.supports_aliasing,
)
# Ensure that the output of scan is a flattened list of elements,
# because downstream operations assume that the output of HOPs
# is flattened
output_node = combine_graph.find_nodes(op="output")[0]
output_node.args = (pytree.tree_leaves(output_node.args),)
combine_graph.lint()
# Collect the results from the combine_fn
results, _combine_treespec = _make_inlined(tx, pytree.tree_flatten)(
combine_result
).unpack_var_sequence(tx)
# Check whether the combine_fn returns one child tree for the output.
if _combine_treespec.as_python_constant().num_leaves < 1:
unimplemented(
gb_type="torch.associative_scan: combine_fn improper number of leaves",
context=str(_combine_treespec.as_python_constant()),
explanation="combine_fn needs to produce one pytree for the output "
f"but combine_fn produces the pytree {_combine_treespec.as_python_constant()}.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
# Check whether the outs produced by combine_fn has the same treespec as xs
# We need to have this check this way, because in case init is a TreeSpec and carry
# but carry is only a LeafSpec, these two cannot be compared correctly.
if (
xs_treespec.as_python_constant().is_leaf()
!= _combine_treespec.as_python_constant().is_leaf()
) or not _make_inlined(tx, pytree.TreeSpec.__eq__)(
xs_treespec, _combine_treespec
).as_python_constant():
unimplemented(
gb_type="torch.associative_scan: mismatched input/output tree structure",
context=f"xs: {xs_treespec.as_python_constant()}, output: {_combine_treespec.as_python_constant()}",
explanation="The tree structure of the xs and the outs of the combine_fn are are expected to be identical, but got "
f"xs: {xs_treespec.as_python_constant()} vs output: {_combine_treespec.as_python_constant()}.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
# We set include contiguity=False because we have vmap x HOP tests, where if
# include_contiguity=True will call t.is_contiguous inside of vmap and get an error
# "querying is_contiguous inside of vmap for memory_format other than
# torch.contiguous_format is not yet implemented". This is okay because stride
# is still checked.
check_meta_consistency_vt(
[_make_inlined(tx, first_slice_copy)(t) for t in xs_vars],
results.items,
"initial_xs",
"combine_fn_output",
include_contiguity=False,
)
combine_gm = torch.fx.GraphModule(dict(tx.output.nn_modules), combine_graph)
combine_freevars_proxy = tuple(combine_lifted_freevars.keys())
# Compute the proxies for the input check
proxy_vars_inputcheck = (
tuple(sarg.as_proxy() for sarg in sub_args) + combine_freevars_proxy
)
from torch._higher_order_ops.utils import _maybe_fake_tracing
from torch._inductor.utils import is_pointwise_use
with tx.fake_mode:
sub_args_fake = [
(
leaf.node.meta["example_value"].clone()
if hasattr(leaf.node.meta["example_value"], "clone")
else leaf.node.meta["example_value"]
)
for leaf in pytree.tree_leaves(proxy_vars_inputcheck)
]
pre_dispatch = False
fx = _maybe_fake_tracing(
combine_gm, sub_args_fake, pre_dispatch=pre_dispatch
)
for node in fx.graph.nodes:
# Check that the combine_fn is pointwise, if combine_mode='pointwise'
if not all(
is_pointwise_use(use) or use.op == "output" for use in node.users
):
raise RuntimeError(
"For combine_mode='pointwise', the combine_fn needs to be pointwise"
)
combine_fn_name = tx.output.install_subgraph(
"associative_scan_combine_fn", combine_gm
)
# Compute the proxies
xs_proxy = xs.as_proxy()
combine_freevars_proxy = tuple(combine_lifted_freevars.keys())
additional_inputs_proxy = additional_inputs.as_proxy() + combine_freevars_proxy
p_args = (
make_attr(tx, combine_fn_name),
xs_proxy,
additional_inputs_proxy,
)
return _call_function_and_unflatten_output(
tx,
torch.ops.higher_order.associative_scan,
p_args,
{},
None,
OutputSpec(xs_treespec),
None,
)
|
AssociativeScanHigherOrderVariable
|
python
|
neetcode-gh__leetcode
|
python/0121-best-time-to-buy-and-sell-stock.py
|
{
"start": 0,
"end": 272
}
|
class ____:
def maxProfit(self, prices: List[int]) -> int:
res = 0
lowest = prices[0]
for price in prices:
if price < lowest:
lowest = price
res = max(res, price - lowest)
return res
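# Illustrative sketch (not from the original source): assuming the masked class
# above is the usual LeetCode Solution (the target given below):
#   Solution().maxProfit([7, 1, 5, 3, 6, 4])  # -> 5 (buy at 1, sell at 6)
#   Solution().maxProfit([7, 6, 4, 3, 1])     # -> 0 (prices only fall)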
|
Solution
|
python
|
google__flatbuffers
|
python/flatbuffers/flexbuffers.py
|
{
"start": 27553,
"end": 44454
}
|
class ____:
"""Helper class to encode structural data into flexbuffers format."""
def __init__(
self,
share_strings=False,
share_keys=True,
force_min_bit_width=BitWidth.W8,
):
self._share_strings = share_strings
self._share_keys = share_keys
self._force_min_bit_width = force_min_bit_width
self._string_pool = Pool()
self._key_pool = Pool()
self._finished = False
self._buf = bytearray()
self._stack = []
def __len__(self):
return len(self._buf)
@property
def StringPool(self):
return self._string_pool
@property
def KeyPool(self):
return self._key_pool
def Clear(self):
self._string_pool.Clear()
self._key_pool.Clear()
self._finished = False
self._buf = bytearray()
self._stack = []
def Finish(self):
"""Finishes encoding process and returns underlying buffer."""
if self._finished:
raise RuntimeError('builder has been already finished')
# If you hit this exception, you likely have objects that were never
# included in a parent. You need to have exactly one root to finish a
# buffer. Check your Start/End calls are matched, and all objects are inside
# some other object.
if len(self._stack) != 1:
raise RuntimeError('internal stack size must be one')
value = self._stack[0]
byte_width = self._Align(value.ElemWidth(len(self._buf)))
self._WriteAny(value, byte_width=byte_width) # Root value
self._Write(U, value.StoredPackedType(), byte_width=1) # Root type
self._Write(U, byte_width, byte_width=1) # Root size
self.finished = True
return self._buf
def _ReadKey(self, offset):
key = self._buf[offset:]
return key[: key.find(0)]
def _Align(self, alignment):
byte_width = 1 << alignment
self._buf.extend(b'\x00' * _PaddingBytes(len(self._buf), byte_width))
return byte_width
def _Write(self, fmt, value, byte_width):
self._buf.extend(_Pack(fmt, value, byte_width))
def _WriteVector(self, fmt, values, byte_width):
self._buf.extend(_PackVector(fmt, values, byte_width))
def _WriteOffset(self, offset, byte_width):
relative_offset = len(self._buf) - offset
assert byte_width == 8 or relative_offset < (1 << (8 * byte_width))
self._Write(U, relative_offset, byte_width)
def _WriteAny(self, value, byte_width):
fmt = {
Type.NULL: U,
Type.BOOL: U,
Type.INT: I,
Type.UINT: U,
Type.FLOAT: F,
}.get(value.Type)
if fmt:
self._Write(fmt, value.Value, byte_width)
else:
self._WriteOffset(value.Value, byte_width)
def _WriteBlob(self, data, append_zero, type_):
bit_width = BitWidth.U(len(data))
byte_width = self._Align(bit_width)
self._Write(U, len(data), byte_width)
loc = len(self._buf)
self._buf.extend(data)
if append_zero:
self._buf.append(0)
self._stack.append(Value(loc, type_, bit_width))
return loc
def _WriteScalarVector(self, element_type, byte_width, elements, fixed):
"""Writes scalar vector elements to the underlying buffer."""
bit_width = BitWidth.B(byte_width)
# If you get this exception, you're trying to write a vector with a size
# field that is bigger than the scalars you're trying to write (e.g. a
# byte vector > 255 elements). For such types, write a "blob" instead.
if BitWidth.U(len(elements)) > bit_width:
raise ValueError('too many elements for the given byte_width')
self._Align(bit_width)
if not fixed:
self._Write(U, len(elements), byte_width)
loc = len(self._buf)
fmt = {Type.INT: I, Type.UINT: U, Type.FLOAT: F}.get(element_type)
if not fmt:
raise TypeError('unsupported element_type')
self._WriteVector(fmt, elements, byte_width)
type_ = Type.ToTypedVector(element_type, len(elements) if fixed else 0)
self._stack.append(Value(loc, type_, bit_width))
return loc
def _CreateVector(self, elements, typed, fixed, keys=None):
"""Writes vector elements to the underlying buffer."""
length = len(elements)
if fixed and not typed:
raise ValueError('fixed vector must be typed')
# Figure out smallest bit width we can store this vector with.
bit_width = max(self._force_min_bit_width, BitWidth.U(length))
prefix_elems = 1 # Vector size
if keys:
bit_width = max(bit_width, keys.ElemWidth(len(self._buf)))
prefix_elems += 2 # Offset to the keys vector and its byte width.
vector_type = Type.KEY
# Check bit widths and types for all elements.
for i, e in enumerate(elements):
bit_width = max(bit_width, e.ElemWidth(len(self._buf), prefix_elems + i))
if typed:
if i == 0:
vector_type = e.Type
else:
if vector_type != e.Type:
raise RuntimeError('typed vector elements must be of the same type')
if fixed and not Type.IsFixedTypedVectorElementType(vector_type):
raise RuntimeError('must be fixed typed vector element type')
byte_width = self._Align(bit_width)
# Write vector. First the keys width/offset if available, and size.
if keys:
self._WriteOffset(keys.Value, byte_width)
self._Write(U, 1 << keys.MinBitWidth, byte_width)
if not fixed:
self._Write(U, length, byte_width)
# Then the actual data.
loc = len(self._buf)
for e in elements:
self._WriteAny(e, byte_width)
# Then the types.
if not typed:
for e in elements:
self._buf.append(e.StoredPackedType(bit_width))
if keys:
type_ = Type.MAP
else:
if typed:
type_ = Type.ToTypedVector(vector_type, length if fixed else 0)
else:
type_ = Type.VECTOR
return Value(loc, type_, bit_width)
def _PushIndirect(self, value, type_, bit_width):
byte_width = self._Align(bit_width)
loc = len(self._buf)
fmt = {Type.INDIRECT_INT: I, Type.INDIRECT_UINT: U, Type.INDIRECT_FLOAT: F}[
type_
]
self._Write(fmt, value, byte_width)
self._stack.append(Value(loc, type_, bit_width))
@InMapForString
def String(self, value):
"""Encodes string value."""
reset_to = len(self._buf)
encoded = value.encode('utf-8')
loc = self._WriteBlob(encoded, append_zero=True, type_=Type.STRING)
if self._share_strings:
prev_loc = self._string_pool.FindOrInsert(encoded, loc)
if prev_loc is not None:
del self._buf[reset_to:]
self._stack[-1]._value = loc = prev_loc # pylint: disable=protected-access
return loc
@InMap
def Blob(self, value):
"""Encodes binary blob value.
Args:
value: A byte/bytearray value to encode
Returns:
Offset of the encoded value in underlying the byte buffer.
"""
return self._WriteBlob(value, append_zero=False, type_=Type.BLOB)
def Key(self, value):
"""Encodes key value.
Args:
value: A byte/bytearray/str value to encode. Byte object must not contain
zero bytes. String object must be convertible to ASCII.
Returns:
Offset of the encoded value in the underlying byte buffer.
"""
if isinstance(value, (bytes, bytearray)):
encoded = value
else:
encoded = value.encode('ascii')
if 0 in encoded:
raise ValueError('key contains zero byte')
loc = len(self._buf)
self._buf.extend(encoded)
self._buf.append(0)
if self._share_keys:
prev_loc = self._key_pool.FindOrInsert(encoded, loc)
if prev_loc is not None:
del self._buf[loc:]
loc = prev_loc
self._stack.append(Value.Key(loc))
return loc
def Null(self, key=None):
"""Encodes None value."""
if key:
self.Key(key)
self._stack.append(Value.Null())
@InMap
def Bool(self, value):
"""Encodes boolean value.
Args:
value: A boolean value.
"""
self._stack.append(Value.Bool(value))
@InMap
def Int(self, value, byte_width=0):
"""Encodes signed integer value.
Args:
value: A signed integer value.
byte_width: Number of bytes to use: 1, 2, 4, or 8.
"""
bit_width = BitWidth.I(value) if byte_width == 0 else BitWidth.B(byte_width)
self._stack.append(Value.Int(value, bit_width))
@InMap
def IndirectInt(self, value, byte_width=0):
"""Encodes signed integer value indirectly.
Args:
value: A signed integer value.
byte_width: Number of bytes to use: 1, 2, 4, or 8.
"""
bit_width = BitWidth.I(value) if byte_width == 0 else BitWidth.B(byte_width)
self._PushIndirect(value, Type.INDIRECT_INT, bit_width)
@InMap
def UInt(self, value, byte_width=0):
"""Encodes unsigned integer value.
Args:
value: An unsigned integer value.
byte_width: Number of bytes to use: 1, 2, 4, or 8.
"""
bit_width = BitWidth.U(value) if byte_width == 0 else BitWidth.B(byte_width)
self._stack.append(Value.UInt(value, bit_width))
@InMap
def IndirectUInt(self, value, byte_width=0):
"""Encodes unsigned integer value indirectly.
Args:
value: An unsigned integer value.
byte_width: Number of bytes to use: 1, 2, 4, or 8.
"""
bit_width = BitWidth.U(value) if byte_width == 0 else BitWidth.B(byte_width)
self._PushIndirect(value, Type.INDIRECT_UINT, bit_width)
@InMap
def Float(self, value, byte_width=0):
"""Encodes floating point value.
Args:
value: A floating point value.
byte_width: Number of bytes to use: 4 or 8.
"""
bit_width = BitWidth.F(value) if byte_width == 0 else BitWidth.B(byte_width)
self._stack.append(Value.Float(value, bit_width))
@InMap
def IndirectFloat(self, value, byte_width=0):
"""Encodes floating point value indirectly.
Args:
value: A floating point value.
byte_width: Number of bytes to use: 4 or 8.
"""
bit_width = BitWidth.F(value) if byte_width == 0 else BitWidth.B(byte_width)
self._PushIndirect(value, Type.INDIRECT_FLOAT, bit_width)
def _StartVector(self):
"""Starts vector construction."""
return len(self._stack)
def _EndVector(self, start, typed, fixed):
"""Finishes vector construction by encodung its elements."""
vec = self._CreateVector(self._stack[start:], typed, fixed)
del self._stack[start:]
self._stack.append(vec)
return vec.Value
@contextlib.contextmanager
def Vector(self, key=None):
if key:
self.Key(key)
try:
start = self._StartVector()
yield self
finally:
self._EndVector(start, typed=False, fixed=False)
@InMap
def VectorFromElements(self, elements):
"""Encodes sequence of any elements as a vector.
Args:
elements: sequence of elements, they may have different types.
"""
with self.Vector():
for e in elements:
self.Add(e)
@contextlib.contextmanager
def TypedVector(self, key=None):
if key:
self.Key(key)
try:
start = self._StartVector()
yield self
finally:
self._EndVector(start, typed=True, fixed=False)
@InMap
def TypedVectorFromElements(self, elements, element_type=None):
"""Encodes sequence of elements of the same type as typed vector.
Args:
elements: Sequence of elements, they must be of the same type.
element_type: Suggested element type. Setting it to None means determining
correct value automatically based on the given elements.
"""
if isinstance(elements, array.array):
if elements.typecode == 'f':
self._WriteScalarVector(Type.FLOAT, 4, elements, fixed=False)
elif elements.typecode == 'd':
self._WriteScalarVector(Type.FLOAT, 8, elements, fixed=False)
elif elements.typecode in ('b', 'h', 'i', 'l', 'q'):
self._WriteScalarVector(
Type.INT, elements.itemsize, elements, fixed=False
)
elif elements.typecode in ('B', 'H', 'I', 'L', 'Q'):
self._WriteScalarVector(
Type.UINT, elements.itemsize, elements, fixed=False
)
else:
raise ValueError('unsupported array typecode: %s' % elements.typecode)
else:
add = self.Add if element_type is None else self.Adder(element_type)
with self.TypedVector():
for e in elements:
add(e)
@InMap
def FixedTypedVectorFromElements(
self, elements, element_type=None, byte_width=0
):
"""Encodes sequence of elements of the same type as fixed typed vector.
Args:
elements: Sequence of elements, they must be of the same type. Allowed
types are `Type.INT`, `Type.UINT`, `Type.FLOAT`. Allowed number of
elements are 2, 3, or 4.
element_type: Suggested element type. Setting it to None means determining
correct value automatically based on the given elements.
byte_width: Number of bytes to use per element. For `Type.INT` and
`Type.UINT`: 1, 2, 4, or 8. For `Type.FLOAT`: 4 or 8. Setting it to 0
means determining correct value automatically based on the given
elements.
"""
if not 2 <= len(elements) <= 4:
raise ValueError('only 2, 3, or 4 elements are supported')
types = {type(e) for e in elements}
if len(types) != 1:
raise TypeError('all elements must be of the same type')
(type_,) = types
if element_type is None:
element_type = {int: Type.INT, float: Type.FLOAT}.get(type_)
if not element_type:
raise TypeError('unsupported element_type: %s' % type_)
if byte_width == 0:
width = {
Type.UINT: BitWidth.U,
Type.INT: BitWidth.I,
Type.FLOAT: BitWidth.F,
}[element_type]
byte_width = 1 << max(width(e) for e in elements)
self._WriteScalarVector(element_type, byte_width, elements, fixed=True)
def _StartMap(self):
"""Starts map construction."""
return len(self._stack)
def _EndMap(self, start):
"""Finishes map construction by encodung its elements."""
# Interleaved keys and values on the stack.
stack = self._stack[start:]
if len(stack) % 2 != 0:
raise RuntimeError('must be even number of keys and values')
for key in stack[::2]:
if key.Type is not Type.KEY:
raise RuntimeError('all map keys must be of %s type' % Type.KEY)
pairs = zip(stack[::2], stack[1::2]) # [(key, value), ...]
pairs = sorted(pairs, key=lambda pair: self._ReadKey(pair[0].Value))
del self._stack[start:]
for pair in pairs:
self._stack.extend(pair)
keys = self._CreateVector(self._stack[start::2], typed=True, fixed=False)
values = self._CreateVector(
self._stack[start + 1 :: 2], typed=False, fixed=False, keys=keys
)
del self._stack[start:]
self._stack.append(values)
return values.Value
@contextlib.contextmanager
def Map(self, key=None):
if key:
self.Key(key)
try:
start = self._StartMap()
yield self
finally:
self._EndMap(start)
def MapFromElements(self, elements):
start = self._StartMap()
for k, v in elements.items():
self.Key(k)
self.Add(v)
self._EndMap(start)
def Adder(self, type_):
return {
Type.BOOL: self.Bool,
Type.INT: self.Int,
Type.INDIRECT_INT: self.IndirectInt,
Type.UINT: self.UInt,
Type.INDIRECT_UINT: self.IndirectUInt,
Type.FLOAT: self.Float,
Type.INDIRECT_FLOAT: self.IndirectFloat,
Type.KEY: self.Key,
Type.BLOB: self.Blob,
Type.STRING: self.String,
}[type_]
@InMapForString
def Add(self, value):
"""Encodes value of any supported type."""
if value is None:
self.Null()
elif isinstance(value, bool):
self.Bool(value)
elif isinstance(value, int):
self.Int(value)
elif isinstance(value, float):
self.Float(value)
elif isinstance(value, str):
self.String(value)
elif isinstance(value, (bytes, bytearray)):
self.Blob(value)
elif isinstance(value, dict):
with self.Map():
for k, v in value.items():
self.Key(k)
self.Add(v)
elif isinstance(value, array.array):
self.TypedVectorFromElements(value)
elif _IsIterable(value):
self.VectorFromElements(value)
else:
raise TypeError('unsupported python type: %s' % type(value))
@property
def LastValue(self):
return self._stack[-1]
@InMap
def ReuseValue(self, value):
self._stack.append(value)
def GetRoot(buf):
"""Returns root `Ref` object for the given buffer."""
if len(buf) < 3:
raise ValueError('buffer is too small')
byte_width = buf[-1]
return Ref.PackedType(
Buf(buf, -(2 + byte_width)), byte_width, packed_type=buf[-2]
)
def Dumps(obj):
"""Returns bytearray with the encoded python object."""
fbb = Builder()
fbb.Add(obj)
return fbb.Finish()
def Loads(buf):
"""Returns python object decoded from the buffer."""
return GetRoot(buf).Value
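# Illustrative sketch (not from the original source): Dumps/Loads round-trip
# plain Python values through the flexbuffers encoding, e.g.:
#   buf = Dumps({'name': 'hi', 'values': [1, 2, 3]})
#   Loads(buf)  # -> a dict equivalent to the input (exact container types may differ)
# The Builder can also be driven directly: fbb = Builder(); fbb.Add(obj); fbb.Finish().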
|
Builder
|
python
|
google__flatbuffers
|
python/flatbuffers/number_types.py
|
{
"start": 2637,
"end": 2680
}
|
class ____(Uint32Flags):
pass
|
UOffsetTFlags
|
python
|
falconry__falcon
|
tests/test_after_hooks.py
|
{
"start": 4354,
"end": 4541
}
|
class ____:
@falcon.after(fluffiness_in_the_head, 'fluffy')
async def on_get(self, req, resp, field1, field2):
self.fields = (field1, field2)
|
ClassResourceWithURIFieldsAsync
|
python
|
airbytehq__airbyte
|
airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/utils/backward_compatibility.py
|
{
"start": 11894,
"end": 14985
}
|
class ____(BaseDiffChecker):
"""A class to perform backward compatibility checks on a discoverd catalog diff"""
context = BackwardIncompatibilityContext.DISCOVER
def compute_diffs(self):
self.streams_json_schemas_diff = DeepDiff(
{stream_name: airbyte_stream.dict().pop("json_schema") for stream_name, airbyte_stream in self._previous.items()},
{stream_name: airbyte_stream.dict().pop("json_schema") for stream_name, airbyte_stream in self._current.items()},
view="tree",
ignore_order=True,
)
self.streams_cursor_fields_diff = DeepDiff(
{stream_name: airbyte_stream.dict().pop("default_cursor_field") for stream_name, airbyte_stream in self._previous.items()},
{stream_name: airbyte_stream.dict().pop("default_cursor_field") for stream_name, airbyte_stream in self._current.items()},
view="tree",
)
def assert_is_backward_compatible(self):
self.check_if_stream_was_removed(self.streams_json_schemas_diff)
self.check_if_type_of_type_field_changed(self.streams_json_schemas_diff, allow_type_widening=False)
self.check_if_value_of_a_field_changed(self.streams_json_schemas_diff, "type")
self.check_if_value_of_a_field_changed(self.streams_json_schemas_diff, "format")
self.check_if_value_of_a_field_changed(self.streams_json_schemas_diff, "airbyte_type")
self.check_if_field_removed(self.streams_json_schemas_diff)
self.check_if_cursor_field_was_changed(self.streams_cursor_fields_diff)
def check_if_field_removed(self, diff: DeepDiff):
"""Check if a property was removed from the catalog."""
removed_properties = []
for removal in diff.get("dictionary_item_removed", []):
removal_path_parts = removal.path(output_format="list")
if "properties" in removal_path_parts:
removal_path_human_readable = ".".join(removal_path_parts)
removed_properties.append(removal_path_human_readable)
if removed_properties:
self._raise_error(f"The following properties were removed: {', '.join(removed_properties)}", diff)
def check_if_stream_was_removed(self, diff: DeepDiff):
"""Check if a stream was removed from the catalog."""
removed_streams = []
for removal in diff.get("dictionary_item_removed", []):
if removal.path() != "root" and removal.up.path() == "root":
removed_streams.append(removal.path(output_format="list")[0])
if removed_streams:
self._raise_error(f"The following streams were removed: {','.join(removed_streams)}", diff)
def check_if_cursor_field_was_changed(self, diff: DeepDiff):
"""Check if a default cursor field value was changed."""
invalid_changes = {"values_changed", "iterable_item_added", "iterable_item_removed"}
if any([change in invalid_changes for change in diff.keys()]):
self._raise_error("The value of 'default_cursor_field' was changed", diff)
|
CatalogDiffChecker
|
python
|
numpy__numpy
|
numpy/f2py/tests/test_common.py
|
{
"start": 74,
"end": 459
}
|
class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "common", "block.f")]
def test_common_block(self):
self.module.initcb()
assert self.module.block.long_bn == np.array(1.0, dtype=np.float64)
assert self.module.block.string_bn == np.array("2", dtype="|S1")
assert self.module.block.ok == np.array(3, dtype=np.int32)
|
TestCommonBlock
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/datasets.py
|
{
"start": 21552,
"end": 29512
}
|
class ____(NonStrictDataModel):
"""
:param id: Frame id. Must be unique within the dataset's version. If already
exists, will cause existing frame to be updated
:type id: str
:param context_id: Context ID. Used for the default frames sorting. If not set
then it is filled from the uri of the first source.
:type context_id: str
:param timestamp: Frame's offset in milliseconds, used primarily for video
content. Used for the default frames sorting as the secondary key (with the
primary key being 'context_id'). For images, this value should typically be 0.
If not set, value is filled from the timestamp of the first source. We
recommend using this field only in cases concerning the default sorting
behavior.
:type timestamp: int
:param rois: Frame regions of interest
:type rois: Sequence[Roi]
:param meta: Additional metadata dictionary for the frame. Please note that
using this field effectively defines a schema (dictionary structure and types
used as values) - frames within the same dataset cannot use conflicting schemas
for this field (see documentation for more details).
:type meta: dict
:param meta_blob: Non searchable metadata dictionary for the frame. The fields
in this object cannot be searched by and are not added to the frame schema
:type meta_blob: dict
:param blob: Raw data (blob) for the frame
:type blob: str
:param sources: Sources of this frame
:type sources: Sequence[Source]
"""
_schema = {
"properties": {
"blob": {
"description": "Raw data (blob) for the frame",
"type": ["string", "null"],
},
"context_id": {
"description": (
"Context ID. Used for the default frames sorting. If not set then it is filled from the "
"uri of the first source."
),
"type": ["string", "null"],
},
"id": {
"description": (
"Frame id. Must be unique within the dataset's version. If already exists, "
"will cause existing frame to be updated"
),
"type": ["string", "null"],
},
"meta": {
"additionalProperties": True,
"description": (
"Additional metadata dictionary for the frame. Please note that using this field effectively"
" defines a schema (dictionary structure and types used as values) - frames within the same dataset"
" cannot use conflicting schemas for this field (see documentation for more details)."
),
"type": ["object", "null"],
},
"meta_blob": {
"additionalProperties": True,
"description": (
"Non searchable metadata dictionary for the frame. The fields in this object cannot be searched by"
" and are not added to the frame schema"
),
"type": ["object", "null"],
},
"rois": {
"description": "Frame regions of interest",
"items": {"$ref": "#/definitions/roi"},
"type": ["array", "null"],
},
"sources": {
"description": "Sources of this frame",
"items": {"$ref": "#/definitions/source"},
"type": "array",
},
"timestamp": {
"description": (
"Frame's offset in milliseconds, used primarily for video content. Used for the default frames"
" sorting as the secondary key (with the primary key being 'context_id'). For images, this value"
" should typically be 0. If not set, value is filled from the timestamp of the first source. We"
" recommend using this field only in cases concerning the default sorting behavior."
),
"type": ["integer", "null"],
},
},
"required": ["sources"],
"type": "object",
}
def __init__(
self,
sources,
id=None,
context_id=None,
timestamp=None,
rois=None,
meta=None,
meta_blob=None,
blob=None,
**kwargs
):
super(Frame, self).__init__(**kwargs)
self.id = id
self.context_id = context_id
self.timestamp = timestamp
self.rois = rois
self.meta = meta
self.meta_blob = meta_blob
self.blob = blob
self.sources = sources
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("context_id")
def context_id(self):
return self._property_context_id
@context_id.setter
def context_id(self, value):
if value is None:
self._property_context_id = None
return
self.assert_isinstance(value, "context_id", six.string_types)
self._property_context_id = value
@schema_property("timestamp")
def timestamp(self):
return self._property_timestamp
@timestamp.setter
def timestamp(self, value):
if value is None:
self._property_timestamp = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "timestamp", six.integer_types)
self._property_timestamp = value
@schema_property("rois")
def rois(self):
return self._property_rois
@rois.setter
def rois(self, value):
if value is None:
self._property_rois = None
return
self.assert_isinstance(value, "rois", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Roi.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "rois", Roi, is_array=True)
self._property_rois = value
@schema_property("meta")
def meta(self):
return self._property_meta
@meta.setter
def meta(self, value):
if value is None:
self._property_meta = None
return
self.assert_isinstance(value, "meta", (dict,))
self._property_meta = value
@schema_property("meta_blob")
def meta_blob(self):
return self._property_meta_blob
@meta_blob.setter
def meta_blob(self, value):
if value is None:
self._property_meta_blob = None
return
self.assert_isinstance(value, "meta_blob", (dict,))
self._property_meta_blob = value
@schema_property("blob")
def blob(self):
return self._property_blob
@blob.setter
def blob(self, value):
if value is None:
self._property_blob = None
return
self.assert_isinstance(value, "blob", six.string_types)
self._property_blob = value
@schema_property("sources")
def sources(self):
return self._property_sources
@sources.setter
def sources(self, value):
if value is None:
self._property_sources = None
return
self.assert_isinstance(value, "sources", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [Source.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "sources", Source, is_array=True)
self._property_sources = value
|
Frame
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/transfers/test_bigquery_to_mysql.py
|
{
"start": 1694,
"end": 6413
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_execute_good_request_to_bq(self, mock_hook):
destination_table = "table"
operator = BigQueryToMySqlOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=destination_table,
replace=False,
)
operator.execute(None)
mock_hook.return_value.list_rows.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=1000,
selected_fields=None,
start_index=0,
)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mysql.MySqlHook")
def test_get_openlineage_facets_on_complete_no_selected_fields(self, mock_mysql_hook, mock_bq_hook):
mock_bq_client = MagicMock()
mock_bq_client.get_table.return_value = _make_bq_table(["id", "name", "value"])
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="mysql", authority="localhost:3306", database="mydb")
mock_mysql_hook.get_openlineage_database_info.return_value = db_info
mock_mysql_hook.return_value = mock_mysql_hook
op = BigQueryToMySqlOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name="destination",
selected_fields=None,
database="mydb",
)
op.bigquery_hook = mock_bq_hook
op.bigquery_hook.project_id = TEST_PROJECT
op.mysql_hook = mock_mysql_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(None)
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert input_ds.name == f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name", "value"}
output_ds = result.outputs[0]
assert output_ds.namespace == "mysql://localhost:3306"
assert output_ds.name == "mydb.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name", "value"}
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mysql.MySqlHook")
def test_get_openlineage_facets_on_complete_selected_fields(self, mock_mysql_hook, mock_bq_hook):
mock_bq_client = MagicMock()
mock_bq_client.get_table.return_value = _make_bq_table(["id", "name", "value"])
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="mysql", authority="localhost:3306", database="mydb")
mock_mysql_hook.get_openlineage_database_info.return_value = db_info
mock_mysql_hook.return_value = mock_mysql_hook
op = BigQueryToMySqlOperator(
task_id=TASK_ID,
dataset_table=f"{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name="destination",
selected_fields=["id", "name"],
database="mydb",
)
op.bigquery_hook = mock_bq_hook
op.bigquery_hook.project_id = TEST_PROJECT
op.mysql_hook = mock_mysql_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(None)
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert input_ds.name == f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name"}
output_ds = result.outputs[0]
assert output_ds.namespace == "mysql://localhost:3306"
assert output_ds.name == "mydb.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name"}
|
TestBigQueryToMySqlOperator
|
python
|
huggingface__transformers
|
src/transformers/models/markuplm/modeling_markuplm.py
|
{
"start": 30422,
"end": 34640
}
|
class ____(MarkupLMPreTrainedModel):
# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with bert->markuplm, Bert->MarkupLM
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
xpath_tags_seq: Optional[torch.Tensor] = None,
xpath_subs_seq: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> processor.parse_html = False
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> nodes = ["hello", "world"]
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
>>> node_labels = [1, 2]
>>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = outputs[0]
prediction_scores = self.classifier(sequence_output) # (batch_size, seq_length, node_type_size)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(
prediction_scores.view(-1, self.config.num_labels),
labels.view(-1),
)
return TokenClassifierOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
|
MarkupLMForTokenClassification
|
python
|
huggingface__transformers
|
src/transformers/models/rt_detr/modeling_rt_detr_resnet.py
|
{
"start": 12508,
"end": 15247
}
|
class ____(RTDetrResNetPreTrainedModel, BackboneMixin):
has_attentions = False
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embedding_size] + config.hidden_sizes
self.embedder = RTDetrResNetEmbeddings(config)
self.encoder = RTDetrResNetEncoder(config)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import RTDetrResNetConfig, RTDetrResNetBackbone
>>> import torch
>>> config = RTDetrResNetConfig()
>>> model = RTDetrResNetBackbone(config)
>>> pixel_values = torch.randn(1, 3, 224, 224)
>>> with torch.no_grad():
... outputs = model(pixel_values)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = [
"RTDetrResNetBackbone",
"RTDetrResNetPreTrainedModel",
]
|
RTDetrResNetBackbone
|
python
|
plotly__plotly.py
|
plotly/graph_objs/sankey/_legendgrouptitle.py
|
{
"start": 233,
"end": 2932
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey"
_path_str = "sankey.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.sankey.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.sankey.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.sankey.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Legendgrouptitle
|
python
|
tensorflow__tensorflow
|
tensorflow/python/feature_column/feature_column_v2_test.py
|
{
"start": 142635,
"end": 158831
}
|
class ____(test.TestCase):
def test_defaults_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.string)
}, column.parse_example_spec)
self.assertTrue(column._is_v2_column)
def test_key_should_be_string(self):
with self.assertRaisesRegex(ValueError, 'key must be a string.'):
fc.categorical_column_with_vocabulary_list(
key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))
def test_defaults_int(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
self.assertEqual('aaa', column.name)
self.assertEqual('aaa', column.key)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int64)
}, column.parse_example_spec)
def test_all_constructor_args(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
dtype=dtypes.int32,
default_value=-99)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_deep_copy(self):
original = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
for column in (original, copy.deepcopy(original)):
self.assertEqual('aaa', column.name)
self.assertEqual(3, column.num_buckets)
self.assertEqual({
'aaa': parsing_ops.VarLenFeature(dtypes.int32)
}, column.parse_example_spec)
def test_invalid_dtype(self):
with self.assertRaisesRegex(ValueError, 'dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.float32)
def test_invalid_mapping_dtype(self):
with self.assertRaisesRegex(ValueError,
r'vocabulary dtype must be string or integer'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12., 24., 36.))
def test_mismatched_int_dtype(self):
with self.assertRaisesRegex(ValueError,
r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
dtype=dtypes.int32)
def test_mismatched_string_dtype(self):
with self.assertRaisesRegex(ValueError,
r'dtype.*and vocabulary dtype.*do not match'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)
def test_none_mapping(self):
with self.assertRaisesRegex(ValueError,
r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=None)
def test_empty_mapping(self):
with self.assertRaisesRegex(ValueError,
r'vocabulary_list.*must be non-empty'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=tuple([]))
def test_duplicate_mapping(self):
with self.assertRaisesRegex(ValueError, 'Duplicate keys'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 12))
def test_invalid_num_oov_buckets(self):
with self.assertRaisesRegex(ValueError, 'Invalid num_oov_buckets'):
fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)
def test_invalid_buckets_and_default_value(self):
with self.assertRaisesRegex(ValueError,
'both num_oov_buckets and default_value'):
fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=(12, 24, 36),
num_oov_buckets=100,
default_value=2)
def test_invalid_input_dtype_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(12, 24, 36),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_invalid_input_dtype_string(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(12, 24, 36))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('omar', 'stringer', 'marlo'),
dense_shape=(2, 2))
with self.assertRaisesRegex(ValueError, 'dtype must be compatible'):
column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
def test_parse_example_string(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
data = example_pb2.Example(
features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[b'omar', b'stringer']))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]],
values=np.array([b'omar', b'stringer'], dtype=np.object_),
dense_shape=[1, 2]), self.evaluate(features['aaa']))
def test_parse_example_int(self):
a = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=(11, 21, 31))
data = example_pb2.Example(
features=feature_pb2.Features(
feature={
'aaa':
feature_pb2.Feature(
int64_list=feature_pb2.Int64List(value=[11, 21]))
}))
features = parsing_ops.parse_example(
serialized=[data.SerializeToString()],
features=fc.make_parse_example_spec_v2([a]))
self.assertIn('aaa', features)
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1]], values=[11, 21], dense_shape=[1, 2]),
self.evaluate(features['aaa']))
def test_get_sparse_tensors(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
self.evaluate(id_weight_pair.id_tensor))
def test_transform_feature(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_tensor = fc._transform_features_v2({
'aaa': inputs
}, [column], None)[column]
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=inputs.dense_shape), self.evaluate(id_tensor))
def test_get_sparse_tensors_dense_input(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': (('marlo', ''), ('skywalker', 'omar'))
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, -1, 0), dtype=np.int64),
dense_shape=(2, 2)), self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_default_value_in_vocabulary(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
default_value=2)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 2, 0), dtype=np.int64),
dense_shape=inputs.dense_shape),
self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (1, 2)),
values=('marlo', 'skywalker', 'omar', 'heisenberg'),
dense_shape=(2, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 33, 0, 62), dtype=np.int64),
dense_shape=inputs.dense_shape),
self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_int32(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((11, 100, 30, 22), dtype=np.int32),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, -1, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_int32_dense_input(self):
default_value = -100
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
default_value=default_value)
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa':
np.array(((11, -1, -1), (100, 30, -1), (-1, -1, 22)),
dtype=np.int32)
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=np.array((2, default_value, 0, 4), dtype=np.int64),
dense_shape=(3, 3)), self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_int32_with_oov_buckets(self):
column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
dtype=dtypes.int32,
num_oov_buckets=100)
inputs = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1), (2, 2)),
values=(11, 100, 30, 22),
dense_shape=(3, 3))
id_weight_pair = column.get_sparse_tensors(
fc.FeatureTransformationCache({
'aaa': inputs
}), None)
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self,
sparse_tensor.SparseTensorValue(
indices=inputs.indices,
values=np.array((2, 60, 0, 4), dtype=np.int64),
dense_shape=inputs.dense_shape),
self.evaluate(id_weight_pair.id_tensor))
def test_old_linear_model(self):
wire_column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(4, wire_column.num_buckets)
with ops.Graph().as_default():
predictions = fc_old.linear_model({
wire_column.name:
sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=('marlo', 'skywalker', 'omar'),
dense_shape=(2, 2))
}, (wire_column,))
bias = get_linear_model_bias()
wire_var = get_linear_model_column_var(wire_column)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose((0.,), self.evaluate(bias))
self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var))
self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
# 'marlo' -> 2: wire_var[2] = 3
# 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))
def test_serialization(self):
wire_column = fc.categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'),
num_oov_buckets=1)
self.assertEqual(['aaa'], wire_column.parents)
config = wire_column.get_config()
self.assertEqual({
'default_value': -1,
'dtype': 'string',
'key': 'aaa',
'num_oov_buckets': 1,
'vocabulary_list': ('omar', 'stringer', 'marlo')
}, config)
self.assertEqual(wire_column,
fc.VocabularyListCategoricalColumn.from_config(config))
|
VocabularyListCategoricalColumnTest
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 64058,
"end": 66051
}
|
class ____(TypedDict, total=False):
type: Required[Literal['generator']]
items_schema: CoreSchema
min_length: int
max_length: int
ref: str
metadata: dict[str, Any]
serialization: IncExSeqOrElseSerSchema
def generator_schema(
items_schema: CoreSchema | None = None,
*,
min_length: int | None = None,
max_length: int | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: IncExSeqOrElseSerSchema | None = None,
) -> GeneratorSchema:
"""
Returns a schema that matches a generator value, e.g.:
```py
from typing import Iterator
from pydantic_core import SchemaValidator, core_schema
def gen() -> Iterator[int]:
yield 1
schema = core_schema.generator_schema(items_schema=core_schema.int_schema())
v = SchemaValidator(schema)
v.validate_python(gen())
```
Unlike other types, validated generators do not raise ValidationErrors eagerly,
but instead will raise a ValidationError when a violating value is actually read from the generator.
This is to ensure that "validated" generators retain the benefit of lazy evaluation.
Args:
items_schema: The value must be a generator with items that match this schema
min_length: The value must be a generator that yields at least this many items
max_length: The value must be a generator that yields at most this many items
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='generator',
items_schema=items_schema,
min_length=min_length,
max_length=max_length,
ref=ref,
metadata=metadata,
serialization=serialization,
)
IncExDict = set[Union[int, str]]
|
GeneratorSchema
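The `generator_schema` docstring stresses that validation is lazy: a bad item only raises when it is actually read from the generator. A small sketch of that behavior, reusing the imports from the docstring example above:
```python
# Lazy generator validation sketch (assumes pydantic_core is installed).
from pydantic_core import SchemaValidator, ValidationError, core_schema

def gen():
    yield 1
    yield "not an int"  # invalid, but not noticed until read

v = SchemaValidator(core_schema.generator_schema(core_schema.int_schema()))
validated = v.validate_python(gen())  # no error raised here

print(next(validated))  # 1
try:
    next(validated)     # the invalid item is read here
except ValidationError:
    print("ValidationError raised only when the bad item was read")
```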
|
python
|
getsentry__sentry
|
src/sentry/integrations/messaging/linkage.py
|
{
"start": 4037,
"end": 10490
}
|
class ____(LinkageView, ABC):
@property
@abstractmethod
def external_id_parameter(self) -> str:
raise NotImplementedError
    # TODO: Replace the two template properties below with base templates for all
# integrations to use. Add service-specific parts to the context as needed.
@property
@abstractmethod
def confirmation_template(self) -> str:
"""Path to the HTML template to render for a non-POST request."""
raise NotImplementedError
@property
@abstractmethod
def expired_link_template(self) -> str:
"""Path to the HTML template to show when a link is expired."""
raise NotImplementedError
@abstractmethod
def get_success_template_and_context(
self, params: Mapping[str, Any], integration: Integration | None
) -> tuple[str, dict[str, Any]]:
"""HTML content to show when the operation has been completed."""
raise NotImplementedError
@method_decorator(never_cache)
def dispatch(self, request: HttpRequest, signed_params: str) -> HttpResponseBase:
try:
params = unsign(signed_params, salt=self.salt)
except (SignatureExpired, BadSignature) as e:
logger.warning("dispatch.signature_error", exc_info=e)
self.capture_metric("failure", tags={"error": str(e)})
return render_to_response(
self.expired_link_template,
request=request,
)
organization: RpcOrganization | None = None
integration: Integration | None = None
idp: IdentityProvider | None = None
integration_id = params.get("integration_id")
try:
if integration_id:
organization, integration, idp = get_identity_or_404(
self.provider, request.user, integration_id=integration_id
)
except Http404:
logger.exception("get_identity_error", extra={"integration_id": integration_id})
self.capture_metric("failure.get_identity")
return self.render_error_page(
request,
status=404,
body_text="HTTP 404: Could not find the identity.",
)
logger.info(
"get_identity_success",
extra={"integration_id": integration_id, "provider": self.provider_slug},
)
self.capture_metric("success.get_identity")
params.update({"organization": organization, "integration": integration, "idp": idp})
dispatch_kwargs = dict(
organization=organization, integration=integration, idp=idp, params=params
)
dispatch_kwargs = {k: v for (k, v) in dispatch_kwargs.items() if v is not None}
return super().dispatch(request, **dispatch_kwargs)
def get(self, request: Request, *args, **kwargs) -> HttpResponse:
params = kwargs["params"]
context = {"organization": params["organization"]}
integration = params.get("integration")
if integration:
context["provider"] = integration.get_provider()
return render_to_response(self.confirmation_template, request=request, context=context)
def post(self, request: Request, *args: Any, **kwargs: Any) -> HttpResponse:
if isinstance(request.user, AnonymousUser):
return HttpResponse(status=401)
try:
organization: RpcOrganization | None = kwargs.get("organization")
integration: Integration | None = kwargs.get("integration")
idp: IdentityProvider | None = kwargs.get("idp")
params_dict: Mapping[str, Any] = kwargs["params"]
external_id: str = params_dict[self.external_id_parameter]
except KeyError as e:
event = self.capture_metric("failure.post.missing_params", tags={"error": str(e)})
logger.exception(event)
return self.render_error_page(
request,
status=400,
body_text="HTTP 400: Missing required parameters.",
)
exc_response = self.persist_identity(idp, external_id, request)
if exc_response is not None:
return exc_response
self.notify_on_success(external_id, params_dict, integration)
self.capture_metric("success.post")
self.record_analytic(request.user.id)
if organization is not None:
self._send_nudge_notification(organization, request)
success_template, success_context = self.get_success_template_and_context(
params_dict, integration
)
return render_to_response(success_template, request=request, context=success_context)
def _send_nudge_notification(self, organization: RpcOrganization, request: Request):
# TODO: Delete this if no longer needed
user: User = request.user # type: ignore[assignment]
controller = NotificationController(
recipients=[user],
organization_id=organization.id,
provider=self.external_provider_enum,
)
has_provider_settings = controller.user_has_any_provider_settings(
self.external_provider_enum
)
if not has_provider_settings:
# Expects Organization, not RpcOrganization. Suspect this to be a bug
# that isn't being hit because these notifications aren't being sent.
nudge_notification = IntegrationNudgeNotification(organization, user, self.provider) # type: ignore[arg-type]
nudge_notification.send()
@abstractmethod
def persist_identity(
self, idp: IdentityProvider | None, external_id: str, request: HttpRequest
) -> HttpResponse | None:
"""Execute the operation on the Identity table.
Return a response to trigger an early halt under exceptional conditions.
Return None if everything is normal.
"""
raise NotImplementedError
def notify_on_success(
self, external_id: str, params: Mapping[str, Any], integration: Integration | None
) -> None:
"""On success, notify the user through the messaging client.
No-op by default.
:param external_id: the `Identity.external_id` value (the messaging service's ID)
:param params: raw params from the incoming request
:param integration: affected Integration entity, if any
"""
|
IdentityLinkageView
|
python
|
huggingface__transformers
|
tests/models/umt5/test_modeling_umt5.py
|
{
"start": 18937,
"end": 20988
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (UMT5EncoderModel, UMT5ForTokenClassification) if is_torch_available() else ()
test_resize_embeddings = False
pipeline_model_mapping = (
{
"token-classification": UMT5ForTokenClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = UMT5EncoderOnlyModelTester(self)
self.config_tester = ConfigTester(self, config_class=UMT5Config, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def test_with_token_classification_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs)
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if tokenizer_name is None:
return True
# `UMT5EncoderOnlyModelTest` is not working well with slow tokenizers (for some models) and we don't want to touch the file
# `src/transformers/data/processors/squad.py` (where this test fails for this model)
if pipeline_test_case_name == "TokenClassificationPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
@require_torch
@require_sentencepiece
@require_tokenizers
|
UMT5EncoderOnlyModelTest
|
python
|
django__django
|
tests/async/models.py
|
{
"start": 300,
"end": 389
}
|
class ____(models.Model):
simples = models.ManyToManyField("SimpleModel")
|
ManyToManyModel
|
python
|
pypa__pip
|
src/pip/_vendor/pygments/lexer.py
|
{
"start": 12100,
"end": 12247
}
|
class ____(str): # pylint: disable=invalid-name
"""
Indicates that a state should include rules from another state.
"""
pass
|
include
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/reshape.py
|
{
"start": 5195,
"end": 6948
}
|
class ____:
def setup(self):
N = 100000
fac1 = np.array(["A", "B", "C"], dtype="O")
fac2 = np.array(["one", "two"], dtype="O")
ind1 = np.random.randint(0, 3, size=N)
ind2 = np.random.randint(0, 2, size=N)
self.df = DataFrame(
{
"key1": fac1.take(ind1),
"key2": fac2.take(ind2),
"key3": fac2.take(ind2),
"value1": np.random.randn(N),
"value2": np.random.randn(N),
"value3": np.random.randn(N),
}
)
self.df2 = DataFrame(
{"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]}
)
self.df2.col1 = self.df2.col1.astype("category")
self.df2.col2 = self.df2.col2.astype("category")
def time_pivot_table(self):
self.df.pivot_table(index="key1", columns=["key2", "key3"])
def time_pivot_table_agg(self):
self.df.pivot_table(
index="key1", columns=["key2", "key3"], aggfunc=["sum", "mean"]
)
def time_pivot_table_margins(self):
self.df.pivot_table(index="key1", columns=["key2", "key3"], margins=True)
def time_pivot_table_categorical(self):
self.df2.pivot_table(
index="col1", values="col3", columns="col2", aggfunc="sum", fill_value=0
)
def time_pivot_table_categorical_observed(self):
self.df2.pivot_table(
index="col1",
values="col3",
columns="col2",
aggfunc="sum",
fill_value=0,
observed=True,
)
def time_pivot_table_margins_only_column(self):
self.df.pivot_table(columns=["key1", "key2", "key3"], margins=True)
|
PivotTable
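The benchmark above exercises `pivot_table` on large random data; the same calls work on a toy frame, which makes the index/columns/aggfunc arguments easier to see. A small illustration (hypothetical data, not part of the benchmark):
```python
# Tiny pivot_table illustration mirroring the benchmark's arguments.
import pandas as pd

df = pd.DataFrame(
    {
        "key1": ["A", "B", "A", "C"],
        "key2": ["one", "two", "one", "two"],
        "value1": [1.0, 2.0, 3.0, 4.0],
    }
)
print(df.pivot_table(index="key1", columns="key2", values="value1", aggfunc="mean"))
```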
|
python
|
ray-project__ray
|
python/ray/autoscaler/_private/gcp/node.py
|
{
"start": 517,
"end": 3477
}
|
class ____ from ``GCPNode``. Those classes are essentially dicts
with some extra methods. The instances of those classes will be created
from API responses.
The ``GCPNodeType`` enum is a lightweight way to classify nodes.
Currently, Compute and TPU resources & nodes are supported.
In order to add support for new resources, create classes inheriting from
``GCPResource`` and ``GCPNode``, update the ``GCPNodeType`` enum,
update the ``_generate_node_name`` method and finally update the
node provider.
"""
import abc
import logging
import re
import time
from collections import UserDict
from copy import deepcopy
from enum import Enum
from functools import wraps
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
import httplib2
from google_auth_httplib2 import AuthorizedHttp
from googleapiclient.discovery import Resource
from googleapiclient.errors import HttpError
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME
logger = logging.getLogger(__name__)
INSTANCE_NAME_MAX_LEN = 64
INSTANCE_NAME_UUID_LEN = 8
MAX_POLLS = 12
# TPUs take a long while to respond, so we increase the MAX_POLLS
# considerably - this probably could be smaller
# TPU deletion uses MAX_POLLS
MAX_POLLS_TPU = MAX_POLLS * 8
POLL_INTERVAL = 5
def _retry_on_exception(
exception: Union[Exception, Tuple[Exception]],
regex: Optional[str] = None,
max_retries: int = MAX_POLLS,
retry_interval_s: int = POLL_INTERVAL,
):
"""Retry a function call n-times for as long as it throws an exception."""
def dec(func):
@wraps(func)
def wrapper(*args, **kwargs):
def try_catch_exc():
try:
value = func(*args, **kwargs)
return value
except Exception as e:
if not isinstance(e, exception) or (
regex and not re.search(regex, str(e))
):
raise e
return e
for _ in range(max_retries):
ret = try_catch_exc()
if not isinstance(ret, Exception):
break
time.sleep(retry_interval_s)
if isinstance(ret, Exception):
raise ret
return ret
return wrapper
return dec
def _generate_node_name(labels: dict, node_suffix: str) -> str:
"""Generate node name from labels and suffix.
This is required so that the correct resource can be selected
when the only information autoscaler has is the name of the node.
The suffix is expected to be one of 'compute' or 'tpu'
(as in ``GCPNodeType``).
"""
name_label = labels[TAG_RAY_NODE_NAME]
assert len(name_label) <= (INSTANCE_NAME_MAX_LEN - INSTANCE_NAME_UUID_LEN - 1), (
name_label,
len(name_label),
)
return f"{name_label}-{uuid4().hex[:INSTANCE_NAME_UUID_LEN]}-{node_suffix}"
|
inheriting
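The `_retry_on_exception` decorator above retries a call while it raises a matching exception, optionally filtered by a regex on the message. A usage sketch; `FlakyServiceError` and `flaky_call` are hypothetical names used only for illustration, and the helper is module-private in the real code:
```python
# Usage sketch for the _retry_on_exception decorator defined above.
class FlakyServiceError(Exception):
    pass

attempts = {"n": 0}

@_retry_on_exception(FlakyServiceError, regex="temporarily", max_retries=3, retry_interval_s=0)
def flaky_call():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise FlakyServiceError("service temporarily unavailable")
    return "ok"

print(flaky_call())  # fails twice, succeeds on the third attempt -> "ok"
```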
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/rust_tracing.py
|
{
"start": 1392,
"end": 4050
}
|
class ____(Enum):
Ignore = auto()
Exc = auto()
Breadcrumb = auto()
Event = auto()
def tracing_level_to_sentry_level(level):
# type: (str) -> sentry_sdk._types.LogLevelStr
level = RustTracingLevel(level)
if level in (RustTracingLevel.Trace, RustTracingLevel.Debug):
return "debug"
elif level == RustTracingLevel.Info:
return "info"
elif level == RustTracingLevel.Warn:
return "warning"
elif level == RustTracingLevel.Error:
return "error"
else:
# Better this than crashing
return "info"
def extract_contexts(event: Dict[str, Any]) -> Dict[str, Any]:
metadata = event.get("metadata", {})
contexts = {}
location = {}
for field in ["module_path", "file", "line"]:
if field in metadata:
location[field] = metadata[field]
if len(location) > 0:
contexts["rust_tracing_location"] = location
fields = {}
for field in metadata.get("fields", []):
fields[field] = event.get(field)
if len(fields) > 0:
contexts["rust_tracing_fields"] = fields
return contexts
def process_event(event: Dict[str, Any]) -> None:
metadata = event.get("metadata", {})
logger = metadata.get("target")
level = tracing_level_to_sentry_level(metadata.get("level"))
message = event.get("message") # type: sentry_sdk._types.Any
contexts = extract_contexts(event)
sentry_event = {
"logger": logger,
"level": level,
"message": message,
"contexts": contexts,
} # type: sentry_sdk._types.Event
sentry_sdk.capture_event(sentry_event)
def process_exception(event: Dict[str, Any]) -> None:
process_event(event)
def process_breadcrumb(event: Dict[str, Any]) -> None:
level = tracing_level_to_sentry_level(event.get("metadata", {}).get("level"))
message = event.get("message")
sentry_sdk.add_breadcrumb(level=level, message=message)
def default_span_filter(metadata: Dict[str, Any]) -> bool:
return RustTracingLevel(metadata.get("level")) in (
RustTracingLevel.Error,
RustTracingLevel.Warn,
RustTracingLevel.Info,
)
def default_event_type_mapping(metadata: Dict[str, Any]) -> EventTypeMapping:
level = RustTracingLevel(metadata.get("level"))
if level == RustTracingLevel.Error:
return EventTypeMapping.Exc
elif level in (RustTracingLevel.Warn, RustTracingLevel.Info):
return EventTypeMapping.Breadcrumb
elif level in (RustTracingLevel.Debug, RustTracingLevel.Trace):
return EventTypeMapping.Ignore
else:
return EventTypeMapping.Ignore
|
EventTypeMapping
|
python
|
lepture__authlib
|
authlib/oidc/core/claims.py
|
{
"start": 6849,
"end": 6953
}
|
class ____(IDToken):
RESPONSE_TYPES = ("code",)
REGISTERED_CLAIMS = _REGISTERED_CLAIMS
|
CodeIDToken
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_operators.py
|
{
"start": 155494,
"end": 157239
}
|
class ____(fixtures.TestBase):
def _assert_types(self, expr):
eq_(expr[0]._type_affinity, Integer)
eq_(expr[1]._type_affinity, String)
eq_(expr[2]._type_affinity, LargeBinary()._type_affinity)
def test_type_coercion_on_eq(self):
a, b, c = (
column("a", Integer),
column("b", String),
column("c", LargeBinary),
)
t1 = tuple_(a, b, c)
expr = t1 == (3, "hi", "there")
self._assert_types([bind.type for bind in expr.right.clauses])
def test_type_coercion_on_in(self):
a, b, c = (
column("a", Integer),
column("b", String),
column("c", LargeBinary),
)
t1 = tuple_(a, b, c)
expr = t1.in_([(3, "hi", "there"), (4, "Q", "P")])
eq_(len(expr.right.value), 2)
self._assert_types(expr.right.type.types)
# since we want to infer "binary"
def test_tuple_type_expanding_inference(self):
a, b, c = column("a"), column("b"), column("c")
t1 = tuple_(a, b, c)
expr = t1.in_([(3, "hi", b"there"), (4, "Q", b"P")])
eq_(len(expr.right.value), 2)
self._assert_types(expr.right.type.types)
def test_tuple_type_plain_inference(self):
a, b, c = column("a"), column("b"), column("c")
t1 = tuple_(a, b, c)
expr = t1 == (3, "hi", b"there")
self._assert_types(expr.right.type.types)
def test_tuple_type_left_type_ignored(self):
a, b = column("a", sqltypes.Date), column("b", sqltypes.DateTime)
c = column("c", sqltypes.Float)
t1 = tuple_(a, b, c)
expr = t1.in_([(3, "hi", b"there")])
self._assert_types(expr.right.type.types)
|
TupleTypingTest
|
python
|
sympy__sympy
|
sympy/polys/polyoptions.py
|
{
"start": 18337,
"end": 18545
}
|
class ____(BooleanOption, Flag, metaclass=OptionType):
"""``formal`` flag to polynomial manipulation functions. """
option = 'formal'
@classmethod
def default(cls):
return False
|
Formal
|
python
|
sanic-org__sanic
|
sanic/http/http3.py
|
{
"start": 2549,
"end": 8428
}
|
class ____(Receiver, Stream):
"""HTTP/3 receiver implementation."""
stage: Stage
request: Request
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.request_body = None
self.stage = Stage.IDLE
self.headers_sent = False
self.response: Optional[BaseHTTPResponse] = None
self.request_max_size = self.protocol.request_max_size
self.request_bytes = 0
async def run(self, exception: Optional[Exception] = None):
"""Handle the request and response cycle."""
self.stage = Stage.HANDLER
self.head_only = self.request.method.upper() == "HEAD"
if exception:
logger.info( # no cov
f"{Colors.BLUE}[exception]: "
f"{Colors.RED}{exception}{Colors.END}",
exc_info=True,
extra={"verbosity": 1},
)
await self.error_response(exception)
else:
try:
logger.info( # no cov
f"{Colors.BLUE}[request]:{Colors.END} {self.request}",
extra={"verbosity": 1},
)
await self.protocol.request_handler(self.request)
except Exception as e: # no cov
# This should largely be handled within the request handler.
# But, just in case...
await self.run(e)
self.stage = Stage.IDLE
async def error_response(self, exception: Exception) -> None:
"""Handle response when exception encountered"""
# From request and handler states we can respond, otherwise be silent
app = self.protocol.app
await app.handle_exception(self.request, exception)
def _prepare_headers(
self, response: BaseHTTPResponse
) -> list[tuple[bytes, bytes]]:
size = len(response.body) if response.body else 0
headers = response.headers
status = response.status
if not has_message_body(status) and (
size
or "content-length" in headers
or "transfer-encoding" in headers
):
headers.pop("content-length", None)
headers.pop("transfer-encoding", None)
logger.warning( # no cov
f"Message body set in response on {self.request.path}. "
f"A {status} response may only have headers, no body."
)
elif "content-length" not in headers:
if size:
headers["content-length"] = size
else:
headers["transfer-encoding"] = "chunked"
headers = [
(b":status", str(response.status).encode()),
*response.processed_headers,
]
return headers
def send_headers(self) -> None:
"""Send response headers to client"""
logger.debug( # no cov
f"{Colors.BLUE}[send]: {Colors.GREEN}HEADERS{Colors.END}",
extra={"verbosity": 2},
)
if not self.response:
raise RuntimeError("no response")
response = self.response
headers = self._prepare_headers(response)
self.protocol.connection.send_headers(
stream_id=self.request.stream_id,
headers=headers,
)
self.headers_sent = True
self.stage = Stage.RESPONSE
if self.response.body and not self.head_only:
self._send(self.response.body, False)
elif self.head_only:
self.future.cancel()
def respond(self, response: BaseHTTPResponse) -> BaseHTTPResponse:
"""Prepare response to client"""
logger.debug( # no cov
f"{Colors.BLUE}[respond]:{Colors.END} {response}",
extra={"verbosity": 2},
)
if self.stage is not Stage.HANDLER:
self.stage = Stage.FAILED
raise RuntimeError("Response already started")
# Disconnect any earlier but unused response object
if self.response is not None:
self.response.stream = None
self.response, response.stream = response, self
return response
def receive_body(self, data: bytes) -> None:
"""Receive request body from client"""
self.request_bytes += len(data)
if self.request_bytes > self.request_max_size:
raise PayloadTooLarge("Request body exceeds the size limit")
self.request.body += data
async def send(self, data: bytes, end_stream: bool) -> None:
"""Send data to client"""
logger.debug( # no cov
f"{Colors.BLUE}[send]: {Colors.GREEN}data={data.decode()} "
f"end_stream={end_stream}{Colors.END}",
extra={"verbosity": 2},
)
self._send(data, end_stream)
def _send(self, data: bytes, end_stream: bool) -> None:
if not self.headers_sent:
self.send_headers()
if self.stage is not Stage.RESPONSE:
raise ServerError(f"not ready to send: {self.stage}")
# Chunked
if (
self.response
and self.response.headers.get("transfer-encoding") == "chunked"
):
size = len(data)
if end_stream:
data = (
b"%x\r\n%b\r\n0\r\n\r\n" % (size, data)
if size
else b"0\r\n\r\n"
)
elif size:
data = b"%x\r\n%b\r\n" % (size, data)
logger.debug( # no cov
f"{Colors.BLUE}[transmitting]{Colors.END}",
extra={"verbosity": 2},
)
self.protocol.connection.send_data(
stream_id=self.request.stream_id,
data=data,
end_stream=end_stream,
)
self.transmit()
if end_stream:
self.stage = Stage.IDLE
|
HTTPReceiver
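`HTTPReceiver._send` frames chunked responses by hand: each chunk is the hex length, CRLF, the payload, CRLF, and the stream ends with `0\r\n\r\n`. A standalone sketch of just that framing logic (a simplified extract, not Sanic's API):
```python
# Standalone sketch of the chunked framing used in HTTPReceiver._send.
def frame_chunk(data: bytes, end_stream: bool) -> bytes:
    size = len(data)
    if end_stream:
        # final chunk plus the terminating zero-length chunk
        return b"%x\r\n%b\r\n0\r\n\r\n" % (size, data) if size else b"0\r\n\r\n"
    return b"%x\r\n%b\r\n" % (size, data) if size else b""

print(frame_chunk(b"hello", end_stream=False))  # b'5\r\nhello\r\n'
print(frame_chunk(b"", end_stream=True))        # b'0\r\n\r\n'
```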
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 92425,
"end": 93095
}
|
class ____(Elemwise):
_parameters = ["frame"]
def __str__(self):
return f"{self._operator_repr} {self.frame}"
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
if isinstance(self.frame, Expr):
return plain_column_projection(self, parent, dependents)
else:
frame = self.frame
return type(self)(frame)
def _node_label_args(self):
return [self.frame]
def _divisions(self):
if is_index_like(self._meta):
return (None,) * (self.frame.npartitions + 1)
else:
return super()._divisions()
|
Unaryop
|
python
|
apache__airflow
|
providers/teradata/tests/unit/teradata/hooks/test_tpt.py
|
{
"start": 954,
"end": 9965
}
|
class ____:
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.models.Connection")
def test_init_with_ssh(self, mock_conn, mock_ssh_hook):
hook = TptHook(ssh_conn_id="ssh_default")
assert hook.ssh_conn_id == "ssh_default"
assert hook.ssh_hook is not None
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.models.Connection")
def test_init_without_ssh(self, mock_conn, mock_ssh_hook):
hook = TptHook()
assert hook.ssh_conn_id is None
assert hook.ssh_hook is None
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.models.Connection")
@patch("airflow.providers.teradata.hooks.tpt.TptHook._execute_tbuild_via_ssh")
@patch("airflow.providers.teradata.hooks.tpt.TptHook._execute_tbuild_locally")
def test_execute_ddl_dispatch(self, mock_local, mock_ssh, mock_conn, mock_ssh_hook):
# Local execution
hook = TptHook()
mock_local.return_value = 0
assert hook.execute_ddl("SOME DDL", "/tmp") == 0
mock_local.assert_called_once()
# SSH execution
hook = TptHook(ssh_conn_id="ssh_default")
hook.ssh_hook = MagicMock()
mock_ssh.return_value = 0
assert hook.execute_ddl("SOME DDL", "/tmp") == 0
mock_ssh.assert_called_once()
def test_execute_ddl_empty_script(self):
hook = TptHook()
with pytest.raises(ValueError, match="TPT script must not be empty"):
hook.execute_ddl("", "/tmp")
def test_execute_ddl_empty_script_content(self):
hook = TptHook()
with pytest.raises(ValueError, match="TPT script content must not be empty after processing"):
hook.execute_ddl(" ", "/tmp") # Only whitespace
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.providers.teradata.hooks.tpt.terminate_subprocess")
@patch("airflow.providers.teradata.hooks.tpt.secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.set_local_file_permissions")
@patch("airflow.providers.teradata.hooks.tpt.subprocess.Popen")
@patch("airflow.providers.teradata.hooks.tpt.shutil.which", return_value="/usr/bin/tbuild")
def test_execute_tbuild_locally_success(
self, mock_which, mock_popen, mock_set_permissions, mock_secure_delete, mock_terminate, mock_ssh_hook
):
hook = TptHook()
process = MagicMock()
process.stdout.readline.side_effect = [b"All good\n", b""]
process.wait.return_value = None
process.returncode = 0
mock_popen.return_value = process
result = hook._execute_tbuild_locally("CREATE TABLE test (id INT);")
assert result == 0
mock_set_permissions.assert_called_once()
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.providers.teradata.hooks.tpt.terminate_subprocess")
@patch("airflow.providers.teradata.hooks.tpt.secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.set_local_file_permissions")
@patch("airflow.providers.teradata.hooks.tpt.subprocess.Popen")
@patch("airflow.providers.teradata.hooks.tpt.shutil.which", return_value="/usr/bin/tbuild")
def test_execute_tbuild_locally_failure(
self, mock_which, mock_popen, mock_set_permissions, mock_secure_delete, mock_terminate, mock_ssh_hook
):
hook = TptHook()
process = MagicMock()
process.stdout.readline.side_effect = [b"error: failed\n", b""]
process.wait.return_value = None
process.returncode = 1
mock_popen.return_value = process
with pytest.raises(RuntimeError):
hook._execute_tbuild_locally("CREATE TABLE test (id INT);")
mock_set_permissions.assert_called_once()
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.providers.teradata.hooks.tpt.execute_remote_command")
@patch("airflow.providers.teradata.hooks.tpt.remote_secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.set_remote_file_permissions")
@patch("airflow.providers.teradata.hooks.tpt.decrypt_remote_file")
@patch("airflow.providers.teradata.hooks.tpt.transfer_file_sftp")
@patch("airflow.providers.teradata.hooks.tpt.generate_encrypted_file_with_openssl")
@patch("airflow.providers.teradata.hooks.tpt.generate_random_password")
@patch("airflow.providers.teradata.hooks.tpt.verify_tpt_utility_on_remote_host")
@patch("airflow.providers.teradata.hooks.tpt.write_file")
def test_execute_tbuild_via_ssh_success(
self,
mock_write_file,
mock_verify_tpt,
mock_gen_password,
mock_encrypt_file,
mock_transfer_file,
mock_decrypt_file,
mock_set_permissions,
mock_secure_delete,
mock_remote_secure_delete,
mock_execute_remote_command,
mock_ssh_hook,
):
"""Test successful execution of tbuild via SSH"""
# Setup hook with SSH
hook = TptHook(ssh_conn_id="ssh_default")
hook.ssh_hook = MagicMock()
# Mock SSH client
mock_ssh_client = MagicMock()
hook.ssh_hook.get_conn.return_value.__enter__.return_value = mock_ssh_client
# Mock execute_remote_command
mock_execute_remote_command.return_value = (0, "DDL executed successfully", "")
# Mock password generation
mock_gen_password.return_value = "test_password"
# Execute the method
result = hook._execute_tbuild_via_ssh("CREATE TABLE test (id INT);", "/tmp")
# Assertions
assert result == 0
mock_verify_tpt.assert_called_once_with(
mock_ssh_client, "tbuild", logging.getLogger("airflow.providers.teradata.hooks.tpt")
)
mock_write_file.assert_called_once()
mock_gen_password.assert_called_once()
mock_encrypt_file.assert_called_once()
mock_transfer_file.assert_called_once()
mock_decrypt_file.assert_called_once()
mock_set_permissions.assert_called_once()
mock_execute_remote_command.assert_called_once()
mock_remote_secure_delete.assert_called_once()
mock_secure_delete.assert_called()
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
@patch("airflow.providers.teradata.hooks.tpt.execute_remote_command")
@patch("airflow.providers.teradata.hooks.tpt.remote_secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.secure_delete")
@patch("airflow.providers.teradata.hooks.tpt.set_remote_file_permissions")
@patch("airflow.providers.teradata.hooks.tpt.decrypt_remote_file")
@patch("airflow.providers.teradata.hooks.tpt.transfer_file_sftp")
@patch("airflow.providers.teradata.hooks.tpt.generate_encrypted_file_with_openssl")
@patch("airflow.providers.teradata.hooks.tpt.generate_random_password")
@patch("airflow.providers.teradata.hooks.tpt.verify_tpt_utility_on_remote_host")
@patch("airflow.providers.teradata.hooks.tpt.write_file")
def test_execute_tbuild_via_ssh_failure(
self,
mock_write_file,
mock_verify_tpt,
mock_gen_password,
mock_encrypt_file,
mock_transfer_file,
mock_decrypt_file,
mock_set_permissions,
mock_secure_delete,
mock_remote_secure_delete,
mock_execute_remote_command,
mock_ssh_hook,
):
"""Test failed execution of tbuild via SSH"""
# Setup hook with SSH
hook = TptHook(ssh_conn_id="ssh_default")
hook.ssh_hook = MagicMock()
# Mock SSH client
mock_ssh_client = MagicMock()
hook.ssh_hook.get_conn.return_value.__enter__.return_value = mock_ssh_client
# Mock execute_remote_command with failure
mock_execute_remote_command.return_value = (1, "DDL failed", "Syntax error")
# Mock password generation
mock_gen_password.return_value = "test_password"
# Execute the method and expect failure
with pytest.raises(RuntimeError, match="tbuild command failed with exit code 1"):
hook._execute_tbuild_via_ssh("CREATE TABLE test (id INT);", "/tmp")
# Verify cleanup was called even on failure
mock_remote_secure_delete.assert_called_once()
mock_secure_delete.assert_called()
@patch("airflow.providers.teradata.hooks.tpt.SSHHook")
def test_execute_tbuild_via_ssh_no_ssh_hook(self, mock_ssh_hook):
"""Test tbuild via SSH when SSH hook is not initialized"""
hook = TptHook(ssh_conn_id="ssh_default")
hook.ssh_hook = None # Simulate uninitialized SSH hook
with pytest.raises(ConnectionError, match="SSH connection is not established"):
hook._execute_tbuild_via_ssh("CREATE TABLE test (id INT);", "/tmp")
def test_on_kill(self):
"""Test on_kill method"""
hook = TptHook()
# Should not raise any exception
hook.on_kill()
|
TestTptHook
|
python
|
walkccc__LeetCode
|
solutions/1780. Check if Number is a Sum of Powers of Three/1780.py
|
{
"start": 0,
"end": 161
}
|
class ____:
def checkPowersOfThree(self, n: int) -> bool:
while n > 1:
n, r = divmod(n, 3)
if r == 2:
return False
return True
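# Illustrative aside (editorial, not part of the original snippet): the loop
# above succeeds exactly when n's base-3 digits are only 0s and 1s, i.e. when n
# is a sum of *distinct* powers of three. A standalone cross-check of that idea:
def is_sum_of_distinct_powers_of_three(n: int) -> bool:
    while n > 1:
        n, r = divmod(n, 3)
        if r == 2:
            return False
    return True

assert is_sum_of_distinct_powers_of_three(91)      # 91 = 81 + 9 + 1
assert not is_sum_of_distinct_powers_of_three(21)  # 21 = 9 + 9 + 3 repeats a power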
|
Solution
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scattercarpet/_unselected.py
|
{
"start": 233,
"end": 3432
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet"
_path_str = "scattercarpet.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scattercarpet.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scattercarpet.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scattercarpet.unselected.M
arker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.unselected.T
extfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Unselected`
marker
:class:`plotly.graph_objects.scattercarpet.unselected.M
arker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scattercarpet.unselected.T
extfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Unselected
|
python
|
numba__llvmlite
|
versioneer.py
|
{
"start": 34312,
"end": 35895
}
|
class ____(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
if versionfile_build:
target_versionfile = os.path.join(self.build_lib,
versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
|
cmd_build
|
python
|
ethereum__web3.py
|
web3/contract/contract.py
|
{
"start": 1909,
"end": 6859
}
|
class ____(BaseContractEvent):
# mypy types
w3: "Web3"
@combomethod
def get_logs(
self,
argument_filters: dict[str, Any] | None = None,
from_block: BlockIdentifier | None = None,
to_block: BlockIdentifier | None = None,
block_hash: HexBytes | None = None,
) -> Iterable[EventData]:
"""
Get events for this contract instance using eth_getLogs API.
This is a stateless method, as opposed to create_filter.
It can be safely called against nodes which do not provide
eth_newFilter API, like Infura nodes.
If there are many events,
like ``Transfer`` events for a popular token,
        the Ethereum node might be overloaded and time out
on the underlying JSON-RPC call.
Example - how to get all ERC-20 token transactions
for the latest 10 blocks:
.. code-block:: python
            from_block = max(my_contract.web3.eth.block_number - 10, 1)
            to_block = my_contract.web3.eth.block_number
            events = my_contract.events.Transfer.get_logs(from_block=from_block, to_block=to_block)
for e in events:
print(e["args"]["from"],
e["args"]["to"],
e["args"]["value"])
The returned processed log values will look like:
.. code-block:: python
(
AttributeDict({
'args': AttributeDict({}),
'event': 'LogNoArguments',
'logIndex': 0,
'transactionIndex': 0,
'transactionHash': HexBytes('...'),
'address': '0xF2E246BB76DF876Cef8b38ae84130F4F55De395b',
'blockHash': HexBytes('...'),
'blockNumber': 3
}),
AttributeDict(...),
...
)
See also: :func:`web3.middleware.filter.LocalFilterMiddleware`.
:param argument_filters: Filter by argument values. Indexed arguments are
filtered by the node while non-indexed arguments are filtered by the library.
:param from_block: block number or "latest", defaults to "latest"
:param to_block: block number or "latest". Defaults to "latest"
:param block_hash: block hash. block_hash cannot be set at the
same time as ``from_block`` or ``to_block``
:yield: Tuple of :class:`AttributeDict` instances
"""
event_abi = self._get_event_abi()
# validate ``argument_filters`` if present
if argument_filters is not None:
event_arg_names = get_abi_input_names(event_abi)
if not all(arg in event_arg_names for arg in argument_filters.keys()):
raise Web3ValidationError(
"When filtering by argument names, all argument names must be "
"present in the contract's event ABI."
)
_filter_params = self._get_event_filter_params(
event_abi, argument_filters, from_block, to_block, block_hash
)
# call JSON-RPC API
logs = self.w3.eth.get_logs(_filter_params)
# convert raw binary data to Python proxy objects as described by ABI:
all_event_logs = tuple(
get_event_data(self.w3.codec, event_abi, entry) for entry in logs
)
filtered_logs = self._process_get_logs_argument_filters(
event_abi,
all_event_logs,
argument_filters,
)
sorted_logs = sorted(filtered_logs, key=lambda e: e["logIndex"])
sorted_logs = sorted(sorted_logs, key=lambda e: e["blockNumber"])
return sorted_logs
@combomethod
def create_filter(
self,
*, # PEP 3102
argument_filters: dict[str, Any] | None = None,
from_block: BlockIdentifier | None = None,
to_block: BlockIdentifier = "latest",
address: ChecksumAddress | None = None,
topics: Sequence[Any] | None = None,
) -> LogFilter:
"""
Create filter object that tracks logs emitted by this contract event.
"""
abi = self._get_event_abi()
filter_builder = EventFilterBuilder(abi, self.w3.codec)
self._set_up_filter_builder(
argument_filters,
from_block,
to_block,
address,
topics,
filter_builder,
)
log_filter = filter_builder.deploy(self.w3)
log_filter.log_entry_formatter = get_event_data(self.w3.codec, abi)
log_filter.builder = filter_builder
return log_filter
@combomethod
def build_filter(self) -> EventFilterBuilder:
abi = self._get_event_abi()
builder = EventFilterBuilder(
abi,
self.w3.codec,
formatter=get_event_data(self.w3.codec, abi),
)
builder.address = self.address
return builder
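# Illustrative aside (editorial, not part of the original snippet): the two
# stable sorts at the end of get_logs (first by logIndex, then by blockNumber)
# yield the same ordering as a single sort on the (blockNumber, logIndex) pair:
logs = [
    {"blockNumber": 2, "logIndex": 1},
    {"blockNumber": 1, "logIndex": 3},
    {"blockNumber": 2, "logIndex": 0},
]
two_pass = sorted(sorted(logs, key=lambda e: e["logIndex"]), key=lambda e: e["blockNumber"])
one_pass = sorted(logs, key=lambda e: (e["blockNumber"], e["logIndex"]))
assert two_pass == one_pass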
|
ContractEvent
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
|
{
"start": 12646,
"end": 14704
}
|
class ____(IncrementalRkiCovidStream):
"""Docs: https://api.corona-zahlen.org/germany/germany/history/frozen-incidence/:days"""
primary_key = None
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.start_date = config.get("start_date")
@property
def source_defined_cursor(self) -> bool:
return False
@property
def cursor_field(self) -> str:
return "date"
def date_to_int(self, start_date) -> int:
diff = datetime.now() - datetime.strptime(start_date, "%Y-%m-%d")
if diff.days <= 0:
return 1
return diff.days
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
current_stream_state = {self.cursor_field: self.start_date}
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
records = super().read_records(stream_state=stream_state, **kwargs)
if stream_state:
for record in records:
if record[self.cursor_field] > stream_state.get(self.cursor_field):
yield record
else:
yield from records
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.json().get("data"):
return response.json().get("data").get("history")
return [{}]
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if self.start_date:
return "germany/history/frozen-incidence/" + str(self.date_to_int(self.start_date))
return "germany/history/frozen-incidence/"
# source: germany/history/hospitalization/:days | Incremental
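# Illustrative aside (editorial, not part of the original snippet): the cursor
# comparison in get_updated_state uses plain string max(), which is safe here
# because ISO-formatted dates sort lexicographically in chronological order:
assert max("2022-01-31", "2022-02-01") == "2022-02-01"
assert "2021-12-31" < "2022-01-01"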
|
GermanHistoryFrozenIncidence
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
|
{
"start": 36945,
"end": 41468
}
|
class ____(quantize_model_test_base.QuantizedModelTest):
"""Test cases regarding the use of CalibrationOptions proto.
  Run all test cases in both the graph mode (default in TF1) and the eager mode
(default in TF2) to ensure support for when TF2 is disabled.
"""
@parameterized.parameters(
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX
)
},
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX
),
},
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE,
calibration_parameters=qc.CalibrationOptions.CalibrationParameters(
num_bins=10,
),
),
},
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE,
calibration_parameters=qc.CalibrationOptions.CalibrationParameters(
num_bins=10,
),
),
},
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY,
calibration_parameters=qc.CalibrationOptions.CalibrationParameters(
num_bins=10,
),
),
},
{
'calibration_options': qc.CalibrationOptions(
calibration_method=_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC,
calibration_parameters=qc.CalibrationOptions.CalibrationParameters(
num_bins=10,
),
),
},
)
@test_util.run_in_graph_and_eager_modes
def test_conv_ptq_model_by_calibration_options(
self,
calibration_options: qc.CalibrationOptions,
):
bias_fn = nn_ops.bias_add
activation_fn = nn_ops.relu6
enable_per_channel_quantized_weight = False
has_batch_norm = True
dilations = None
input_shape = (1, 3, 4, 3)
filter_shape = (2, 3, 3, 2)
strides = (1, 1, 1, 1)
model = self._create_conv2d_model(
input_shape,
filter_shape,
self._input_saved_model_path,
bias_fn,
activation_fn,
has_batch_norm,
strides,
dilations,
)
# Generate model input data.
input_data = ops.convert_to_tensor(
np.random.uniform(low=0.0, high=10, size=input_shape).astype('f4')
)
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(100):
yield {
'input_tensor': ops.convert_to_tensor(
np.random.uniform(low=0, high=10, size=input_shape).astype('f4')
),
}
dataset_path = self.create_tempfile('tfrecord').full_path
path_map = {'serving_default': dataset_path}
repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save(
{'serving_default': data_gen()}
)
config = qc.QuantizationConfig(
static_range_ptq_preset=qc.StaticRangePtqPreset(
representative_datasets=[
qc.RepresentativeDatasetConfig(
tf_record=qc.TfRecordFile(path=dataset_path)
)
],
enable_per_channel_quantized_weight=enable_per_channel_quantized_weight,
),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
calibration_options=calibration_options,
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
expected_outputs = model.conv2d(input_data)
root = load.load(self._output_saved_model_path)
self.assertCountEqual(root.signatures.keys(), {'serving_default'})
new_outputs = root.signatures['serving_default'](
input_tensor=ops.convert_to_tensor(input_data)
)
# Tests that the quantized graph outputs similar values. The rtol and atol
# values are arbitrary.
self.assertAllClose(new_outputs, expected_outputs, rtol=0.02, atol=0.5)
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.46,
)
|
CalibrationOptionsTest
|
python
|
huggingface__transformers
|
src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py
|
{
"start": 1543,
"end": 2004
}
|
class ____(LlamaMLP):
def __init__(self, config: HunYuanDenseV1Config, layer_idx=None, is_shared_mlp=False):
super().__init__(config)
self.layer_idx = layer_idx
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
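# Illustrative aside (editorial, not part of the original snippet): the forward
# pass is inherited from LlamaMLP, which computes a gated (SwiGLU-style) MLP of
# roughly the form down_proj(act_fn(gate_proj(x)) * up_proj(x)). A toy,
# self-contained version of that computation, with made-up sizes and SiLU as
# the assumed activation:
import torch
from torch import nn

hidden_size, intermediate_size = 8, 16
gate = nn.Linear(hidden_size, intermediate_size, bias=False)
up = nn.Linear(hidden_size, intermediate_size, bias=False)
down = nn.Linear(intermediate_size, hidden_size, bias=False)
x = torch.randn(2, 4, hidden_size)              # (batch, seq, hidden)
y = down(nn.functional.silu(gate(x)) * up(x))   # gated MLP
assert y.shape == x.shape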
|
HunYuanDenseV1MLP
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/engine/interfaces.py
|
{
"start": 108531,
"end": 113680
}
|
class ____:
"""Encapsulate information about an error condition in progress.
This object exists solely to be passed to the
:meth:`_events.DialectEvents.handle_error` event,
supporting an interface that
can be extended without backwards-incompatibility.
"""
__slots__ = ()
dialect: Dialect
"""The :class:`_engine.Dialect` in use.
This member is present for all invocations of the event hook.
.. versionadded:: 2.0
"""
connection: Optional[Connection]
"""The :class:`_engine.Connection` in use during the exception.
This member is present, except in the case of a failure when
first connecting.
.. seealso::
:attr:`.ExceptionContext.engine`
"""
engine: Optional[Engine]
"""The :class:`_engine.Engine` in use during the exception.
This member is present in all cases except for when handling an error
within the connection pool "pre-ping" process.
"""
cursor: Optional[DBAPICursor]
"""The DBAPI cursor object.
May be None.
"""
statement: Optional[str]
"""String SQL statement that was emitted directly to the DBAPI.
May be None.
"""
parameters: Optional[_DBAPIAnyExecuteParams]
"""Parameter collection that was emitted directly to the DBAPI.
May be None.
"""
original_exception: BaseException
"""The exception object which was caught.
This member is always present.
"""
sqlalchemy_exception: Optional[StatementError]
"""The :class:`sqlalchemy.exc.StatementError` which wraps the original,
and will be raised if exception handling is not circumvented by the event.
May be None, as not all exception types are wrapped by SQLAlchemy.
For DBAPI-level exceptions that subclass the dbapi's Error class, this
field will always be present.
"""
chained_exception: Optional[BaseException]
"""The exception that was returned by the previous handler in the
exception chain, if any.
If present, this exception will be the one ultimately raised by
SQLAlchemy unless a subsequent handler replaces it.
May be None.
"""
execution_context: Optional[ExecutionContext]
"""The :class:`.ExecutionContext` corresponding to the execution
operation in progress.
This is present for statement execution operations, but not for
operations such as transaction begin/end. It also is not present when
the exception was raised before the :class:`.ExecutionContext`
could be constructed.
Note that the :attr:`.ExceptionContext.statement` and
:attr:`.ExceptionContext.parameters` members may represent a
different value than that of the :class:`.ExecutionContext`,
potentially in the case where a
:meth:`_events.ConnectionEvents.before_cursor_execute` event or similar
modified the statement/parameters to be sent.
May be None.
"""
is_disconnect: bool
"""Represent whether the exception as occurred represents a "disconnect"
condition.
This flag will always be True or False within the scope of the
:meth:`_events.DialectEvents.handle_error` handler.
SQLAlchemy will defer to this flag in order to determine whether or not
the connection should be invalidated subsequently. That is, by
assigning to this flag, a "disconnect" event which then results in
a connection and pool invalidation can be invoked or prevented by
changing this flag.
.. note:: The pool "pre_ping" handler enabled using the
:paramref:`_sa.create_engine.pool_pre_ping` parameter does **not**
consult this event before deciding if the "ping" returned false,
as opposed to receiving an unhandled error. For this use case, the
:ref:`legacy recipe based on engine_connect() may be used
       <pool_disconnects_pessimistic_custom>`. A future API will allow more
comprehensive customization of the "disconnect" detection mechanism
across all functions.
"""
invalidate_pool_on_disconnect: bool
"""Represent whether all connections in the pool should be invalidated
when a "disconnect" condition is in effect.
Setting this flag to False within the scope of the
:meth:`_events.DialectEvents.handle_error`
event will have the effect such
that the full collection of connections in the pool will not be
invalidated during a disconnect; only the current connection that is the
subject of the error will actually be invalidated.
The purpose of this flag is for custom disconnect-handling schemes where
the invalidation of other connections in the pool is to be performed
based on other conditions, or even on a per-connection basis.
"""
is_pre_ping: bool
"""Indicates if this error is occurring within the "pre-ping" step
performed when :paramref:`_sa.create_engine.pool_pre_ping` is set to
``True``. In this mode, the :attr:`.ExceptionContext.engine` attribute
will be ``None``. The dialect in use is accessible via the
:attr:`.ExceptionContext.dialect` attribute.
.. versionadded:: 2.0.5
"""
|
ExceptionContext
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
contents/5_Deep_Q_Network/DQN_modified.py
|
{
"start": 434,
"end": 6566
}
|
class ____:
def __init__(
self,
n_actions,
n_features,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.9,
replace_target_iter=300,
memory_size=500,
batch_size=32,
e_greedy_increment=None,
output_graph=False,
):
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
self.memory = np.zeros((self.memory_size, n_features * 2 + 2))
# consist of [target_net, evaluate_net]
self._build_net()
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')
with tf.variable_scope('hard_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
if output_graph:
# $ tensorboard --logdir=logs
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = []
def _build_net(self):
# ------------------ all inputs ------------------------
self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # input State
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input Next State
self.r = tf.placeholder(tf.float32, [None, ], name='r') # input Reward
self.a = tf.placeholder(tf.int32, [None, ], name='a') # input Action
w_initializer, b_initializer = tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)
# ------------------ build evaluate_net ------------------
with tf.variable_scope('eval_net'):
e1 = tf.layers.dense(self.s, 20, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='e1')
self.q_eval = tf.layers.dense(e1, self.n_actions, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q')
# ------------------ build target_net ------------------
with tf.variable_scope('target_net'):
t1 = tf.layers.dense(self.s_, 20, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='t1')
self.q_next = tf.layers.dense(t1, self.n_actions, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='t2')
with tf.variable_scope('q_target'):
q_target = self.r + self.gamma * tf.reduce_max(self.q_next, axis=1, name='Qmax_s_') # shape=(None, )
self.q_target = tf.stop_gradient(q_target)
with tf.variable_scope('q_eval'):
a_indices = tf.stack([tf.range(tf.shape(self.a)[0], dtype=tf.int32), self.a], axis=1)
self.q_eval_wrt_a = tf.gather_nd(params=self.q_eval, indices=a_indices) # shape=(None, )
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval_wrt_a, name='TD_error'))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
def store_transition(self, s, a, r, s_):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
transition = np.hstack((s, [a, r], s_))
# replace the old memory with new memory
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def choose_action(self, observation):
# to have batch dimension when feed into tf placeholder
observation = observation[np.newaxis, :]
if np.random.uniform() < self.epsilon:
# forward feed the observation and get q value for every actions
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions)
return action
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.target_replace_op)
print('\ntarget_params_replaced\n')
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
_, cost = self.sess.run(
[self._train_op, self.loss],
feed_dict={
self.s: batch_memory[:, :self.n_features],
self.a: batch_memory[:, self.n_features],
self.r: batch_memory[:, self.n_features + 1],
self.s_: batch_memory[:, -self.n_features:],
})
self.cost_his.append(cost)
# increasing epsilon
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
def plot_cost(self):
import matplotlib.pyplot as plt
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('training steps')
plt.show()
if __name__ == '__main__':
    DQN = DeepQNetwork(3, 4, output_graph=True)
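    # Illustrative aside (editorial, not part of the original snippet): a hedged
    # sketch of the interaction loop this class is built for, reusing the `DQN`
    # instance created above (3 actions, 4 features). `ToyEnv` is a made-up
    # Gym-style stand-in, not something defined in the original file.
    class ToyEnv:
        def reset(self):
            return np.zeros(4, dtype=np.float32)

        def step(self, action):
            next_state = np.random.rand(4).astype(np.float32)
            reward = 1.0 if action == 0 else 0.0
            done = np.random.rand() < 0.05
            return next_state, reward, done, {}

    env = ToyEnv()
    for episode in range(10):
        state = env.reset()
        done = False
        while not done:
            action = DQN.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            DQN.store_transition(state, action, reward, next_state)
            if DQN.memory_counter > DQN.batch_size:  # learn once enough samples exist
                DQN.learn()
            state = next_state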
|
DeepQNetwork
|
python
|
huggingface__transformers
|
src/transformers/models/parakeet/modeling_parakeet.py
|
{
"start": 11138,
"end": 16119
}
|
class ____(nn.Module):
"""Multi-head attention with relative positional encoding. See section 3.3 of https://huggingface.co/papers/1901.02860."""
def __init__(self, config: ParakeetEncoderConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = False
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
# W_{k,R} projection
self.relative_k_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
# global content bias
self.bias_u = nn.Parameter(torch.zeros(config.num_attention_heads, self.head_dim))
# global positional bias
self.bias_v = nn.Parameter(torch.zeros(config.num_attention_heads, self.head_dim))
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
batch_size, seq_length = input_shape
hidden_shape = (batch_size, seq_length, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
query_states_with_bias_u = query_states + self.bias_u.view(
1, self.config.num_attention_heads, 1, self.head_dim
)
query_states_with_bias_v = query_states + self.bias_v.view(
1, self.config.num_attention_heads, 1, self.head_dim
)
relative_key_states = self.relative_k_proj(position_embeddings)
relative_key_states = relative_key_states.view(batch_size, -1, self.config.num_attention_heads, self.head_dim)
# terms (b) and (d)
matrix_bd = query_states_with_bias_v @ relative_key_states.permute(0, 2, 3, 1)
matrix_bd = self._rel_shift(matrix_bd)
matrix_bd = matrix_bd[..., :seq_length]
matrix_bd = matrix_bd * self.scaling
if attention_mask is not None:
# here the original codebase uses -10000.0 rather than float("-inf") and then manual masked fill with 0.0s
# see: https://github.com/NVIDIA-NeMo/NeMo/blob/8cfedd7203462cb251a914e700e5605444277561/nemo/collections/asr/parts/submodules/multi_head_attention.py#L320-L340
# we rather went for a straight-forward approach with float("-inf")
matrix_bd = matrix_bd.masked_fill_(attention_mask.logical_not(), float("-inf"))
# will compute matrix_ac - terms (a) and (c) - and add matrix_bd
attn_output, attn_weights = attention_interface(
self,
query=query_states_with_bias_u,
key=key_states,
value=value_states,
attention_mask=matrix_bd,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
def _rel_shift(self, attention_scores):
"""Relative position shift for Shaw et al. style attention. See appendix B of https://huggingface.co/papers/1901.02860."""
batch_size, num_heads, query_length, position_length = attention_scores.shape
attention_scores = nn.functional.pad(attention_scores, pad=(1, 0))
attention_scores = attention_scores.view(batch_size, num_heads, -1, query_length)
attention_scores = attention_scores[:, :, 1:].view(batch_size, num_heads, query_length, position_length)
return attention_scores
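# Illustrative aside (editorial, not part of the original snippet): a tiny
# demonstration of the pad/reshape/slice trick in _rel_shift on a
# (batch=1, heads=1, q=3, pos=6) score matrix. After the shift, query row i is
# offset by (q - 1 - i) positions, which is how the per-query relative-distance
# scores are realigned before the [..., :seq_length] slice in forward().
import torch

scores = torch.arange(6.0).repeat(1, 1, 3, 1)         # every row is [0, 1, 2, 3, 4, 5]
padded = torch.nn.functional.pad(scores, pad=(1, 0))  # prepend one zero column
shifted = padded.view(1, 1, -1, 3)[:, :, 1:].reshape(1, 1, 3, 6)
# rows of shifted[0, 0]: [2, 3, 4, 5, 0, 0], [1, 2, 3, 4, 5, 0], [0, 1, 2, 3, 4, 5]
print(shifted[0, 0])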
|
ParakeetEncoderAttention
|
python
|
gabrielfalcao__HTTPretty
|
httpretty/core.py
|
{
"start": 13979,
"end": 14323
}
|
class ____(object):
def __init__(self, fakesocket):
self.fakesocket = fakesocket
def __getitem__(self, *args, **kw):
raise AssertionError('socket {} is not connected'.format(self.fakesocket.truesock))
def fake_socketpair(*args, **kw):
with restored_libs():
return old_socketpair(*args, **kw)
|
FakeAddressTuple
|
python
|
getsentry__sentry
|
src/sentry/models/release.py
|
{
"start": 2042,
"end": 2155
}
|
class ____(TypedDict, total=False):
author: CommitAuthor
message: str
date_added: str
|
_CommitDataKwargs
|
python
|
cython__cython
|
tests/run/ext_auto_richcmp.py
|
{
"start": 71,
"end": 307
}
|
class ____(object):
x = cython.declare(cython.int)
def __init__(self, x):
self.x = x
def __repr__(self):
return "<%d>" % self.x
@cython.cfunc
@cython.locals(x=X)
def x_of(x):
return x.x
@cython.cclass
|
X
|
python
|
google__pytype
|
pytype/tests/test_builtins2.py
|
{
"start": 211,
"end": 20575
}
|
class ____(test_base.BaseTest):
"""Tests for builtin methods and classes."""
def test_div_mod_with_unknown(self):
ty = self.Infer("""
def f(x, y):
divmod(x, __any_object__)
return divmod(3, y)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Tuple
def f(x, y) -> Tuple[Any, Any]: ...
""",
)
def test_defaultdict(self):
ty = self.Infer("""
import collections
r = collections.defaultdict()
r[3] = 3
""")
self.assertTypesMatchPytd(
ty,
"""
import collections
r = ... # type: collections.defaultdict[int, int]
""",
)
def test_dict_update(self):
ty = self.Infer("""
x = {}
x.update(a=3, b=4)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict
x = ... # type: Dict[str, int]
""",
)
def test_import_lib(self):
ty = self.Infer("""
import importlib
""")
self.assertTypesMatchPytd(
ty,
"""
import importlib
""",
)
def test_set_union(self):
ty = self.Infer("""
def f(y):
return set.union(*y)
def g(y):
return set.intersection(*y)
def h(y):
return set.difference(*y)
""")
self.assertTypesMatchPytd(
ty,
"""
def f(y) -> set: ...
def g(y) -> set: ...
def h(y) -> set: ...
""",
)
def test_set_init(self):
ty = self.Infer("""
data = set(x for x in [""])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Set
data = ... # type: Set[str]
""",
)
def test_frozenset_inheritance(self):
ty = self.Infer("""
class Foo(frozenset):
pass
Foo([])
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo(frozenset):
pass
""",
)
def test_old_style_class(self):
ty = self.Infer("""
class Foo:
def get_dict(self):
return self.__dict__
def get_name(self):
return self.__name__
def get_class(self):
return self.__class__
def get_doc(self):
return self.__doc__
def get_module(self):
return self.__module__
def get_bases(self):
return self.__bases__
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict, Type
class Foo:
def get_dict(self) -> Dict[str, Any]: ...
def get_name(self) -> str: ...
def get_class(self) -> Type[Foo]: ...
def get_doc(self) -> str: ...
def get_module(self) -> str: ...
def get_bases(self) -> tuple: ...
""",
)
def test_new_style_class(self):
ty = self.Infer("""
class Foo:
def get_dict(self):
return self.__dict__
def get_name(self):
return self.__name__
def get_class(self):
return self.__class__
def get_doc(self):
return self.__doc__
def get_module(self):
return self.__module__
def get_bases(self):
return self.__bases__
def get_hash(self):
return self.__hash__()
def get_mro(self):
return self.__mro__
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict, Type
class Foo:
def get_dict(self) -> Dict[str, Any]: ...
def get_name(self) -> str: ...
def get_class(self) -> Type[Foo]: ...
def get_doc(self) -> str: ...
def get_module(self) -> str: ...
def get_hash(self) -> int: ...
def get_mro(self) -> list: ...
def get_bases(self) -> tuple: ...
""",
)
def test_dict_init(self):
ty = self.Infer("""
x1 = dict(u=3, v=4, w=5)
x2 = dict([(3, "")])
x3 = dict(((3, ""),))
x4 = dict({(3, "")})
x5 = dict({})
x6 = dict({3: ""})
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
x1 = ... # type: Dict[str, int]
x2 = ... # type: Dict[int, str]
x3 = ... # type: Dict[int, str]
x4 = ... # type: Dict[int, str]
x5 = ... # type: Dict[nothing, nothing]
x6 = ... # type: Dict[int, str]
""",
)
def test_dict(self):
ty = self.Infer("""
x = dict(u=3, v=4, w=5)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
x: Dict[str, int]
""",
)
def test_module(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
x = ... # type: module
""",
)
ty = self.Infer(
"""
import foo
foo.x.bar()
x = foo.__name__
y = foo.x.baz
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
x = ... # type: str
y = ... # type: Any
""",
)
def test_classmethod(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
x = ... # type: classmethod
""",
)
ty = self.Infer(
"""
from foo import A
y = A.x()
z = A().x()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Type
A = ... # type: Type[foo.A]
y = ... # type: Any
z = ... # type: Any
""",
)
def test_staticmethod(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
x = ... # type: staticmethod
""",
)
ty = self.Infer(
"""
from foo import A
y = A.x()
z = A().x()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Type
A = ... # type: Type[foo.A]
y = ... # type: Any
z = ... # type: Any
""",
)
def test_min_max(self):
ty = self.Infer("""
x1 = min(x for x in range(3))
x2 = min([3.1, 4.1], key=lambda n: n)
x3 = min((1, 2, 3), key=int)
y1 = max(x for x in range(3))
y2 = max([3.1, 4.1], key=lambda n: n)
y3 = max((1, 2, 3), key=int)
""")
self.assertTypesMatchPytd(
ty,
"""
x1 = ... # type: int
x2 = ... # type: float
x3 = ... # type: int
y1 = ... # type: int
y2 = ... # type: float
y3 = ... # type: int
""",
)
def test_max_different_types(self):
ty = self.Infer("""
a = max(1, None)
b = max(1, None, 3j)
c = max(1, None, 3j, "str")
d = max(1, 2, 3, 4, 5, 6, 7)
e = max(1, None, key=int)
f = max(1, None, 3j, key=int)
g = max(1, None, 3j, "str", key=int)
h = max(1, 2, 3, 4, 5, 6, 7, key=int)
i = max([1,2,3,4])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, Union
a = ... # type: Optional[int]
b = ... # type: Optional[Union[complex, int]]
c = ... # type: Optional[Union[complex, int, str]]
d = ... # type: int
e = ... # type: Optional[int]
f = ... # type: Optional[Union[complex, int]]
g = ... # type: Optional[Union[complex, int, str]]
h = ... # type: int
i = ... # type: int
""",
)
def test_min_different_types(self):
ty = self.Infer("""
a = min(1, None)
b = min(1, None, 3j)
c = min(1, None, 3j, "str")
d = min(1, 2, 3, 4, 5, 6, 7)
e = min(1, None, key=int)
f = min(1, None, 3j, key=int)
g = min(1, None, 3j, "str", key=int)
h = min(1, 2, 3, 4, 5, 6, 7, key=int)
i = min([1,2,3,4])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Optional, Union
a = ... # type: Optional[int]
b = ... # type: Optional[Union[complex, int]]
c = ... # type: Optional[Union[complex, int, str]]
d = ... # type: int
e = ... # type: Optional[int]
f = ... # type: Optional[Union[complex, int]]
g = ... # type: Optional[Union[complex, int, str]]
h = ... # type: int
i = ... # type: int
""",
)
def test_from_keys(self):
ty = self.Infer("""
d1 = dict.fromkeys([1])
d2 = dict.fromkeys([1], 0)
d3 = dict.fromkeys(bytearray("x"))
d4 = dict.fromkeys({True: False})
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict
d1 = ... # type: Dict[int, None]
d2 = ... # type: Dict[int, int]
d3 = ... # type: Dict[int, None]
d4 = ... # type: Dict[bool, None]
""",
)
def test_redefined_builtin(self):
ty = self.Infer("""
class BaseException(Exception): pass
class CryptoException(BaseException, ValueError): pass
""")
p1, p2 = ty.Lookup("CryptoException").bases
self.assertEqual(p1.name, "BaseException")
self.assertEqual(p2.name, "builtins.ValueError")
self.assertTypesMatchPytd(
ty,
"""
class BaseException(Exception): ...
class CryptoException(BaseException, ValueError): ...
""",
)
def test_sum(self):
ty = self.Infer("""
x1 = sum([1, 2])
x2 = sum([1, 2], 0)
x3 = sum([1.0, 3j])
x4 = sum([1.0, 3j], 0)
x5 = sum([[1], ["2"]], [])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Union
x1 = ... # type: int
x2 = ... # type: int
x3 = ... # type: Union[float, complex]
x4 = ... # type: Union[int, float, complex]
x5 = ... # type: List[Union[int, str]]
""",
)
def test_reversed(self):
ty, errors = self.InferWithErrors("""
x1 = reversed(range(42))
x2 = reversed([42])
x3 = reversed((4, 2))
x4 = reversed("hello")
x5 = reversed({42}) # wrong-arg-types[e1]
x6 = reversed(frozenset([42])) # wrong-arg-types[e2]
x7 = reversed({True: 42}) # wrong-arg-types[e3]
x8 = next(reversed([42]))
x9 = list(reversed([42]))
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
x1 = ... # type: reversed[int]
x2 = ... # type: reversed[int]
x3 = ... # type: reversed[int]
x4 = ... # type: reversed[str]
x5 = ... # type: reversed[nothing]
x6 = ... # type: reversed[nothing]
x7 = ... # type: reversed[nothing]
x8 = ... # type: int
x9 = ... # type: List[int]
""",
)
self.assertErrorRegexes(
errors,
{
"e1": r"set\[int\]",
"e2": r"frozenset\[int\]",
"e3": r"dict\[bool, int\]",
},
)
def test_str_join(self):
ty = self.Infer("""
a = ",".join([])
c = ",".join(["foo"])
""")
self.assertTypesMatchPytd(
ty,
"""
a = ... # type: str
c = ... # type: str
""",
)
def test_bytearray_join(self):
ty = self.Infer("""
b = bytearray()
x1 = b.join([])
x3 = b.join([b])
""")
self.assertTypesMatchPytd(
ty,
"""
b = ... # type: bytearray
x1 = ... # type: bytearray
x3 = ... # type: bytearray
""",
)
def test_reduce(self):
self.Check("""
reduce(lambda x, y: x+y, [1,2,3]).real
reduce(lambda x, y: x+y, ["foo"]).upper()
reduce(lambda x, y: 4, [], "foo").upper()
reduce(lambda x, y: "s", [1,2,3], 0).upper()
""")
def test_dict_pop_item(self):
ty = self.Infer("""
v = {"a": 1}.popitem()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple
v = ... # type: Tuple[str, int]
""",
)
def test_long_constant(self):
ty = self.Infer("""
MAX_VALUE = 2**64
""")
self.assertTypesMatchPytd(
ty,
"""
MAX_VALUE = ... # type: int
""",
)
def test_iter(self):
ty = self.Infer("""
x3 = iter(bytearray(42))
x4 = iter(x for x in [42])
x5 = iter([42])
x6 = iter((42,))
x7 = iter({42})
x8 = iter({"a": 1})
x9 = iter(int, 42)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Generator, Iterator
x3 = ... # type: bytearray_iterator
x4 = ... # type: Generator[int, Any, Any]
x5 = ... # type: listiterator[int]
x6 = ... # type: tupleiterator[int]
x7 = ... # type: setiterator[int]
x8 = ... # type: Iterator[str]
# The "nothing" is due to pytype ignoring Callable parameters and
# therefore not seeing the type parameter value tucked away in _RET.
x9 = ... # type: Iterator[int]
""",
)
def test_iter_with_sentinel(self):
ty = self.Infer("""
from typing import Union
i = 0
def it1() -> str:
global i
i += 1
return str(i)
def it2() -> Union[int, None]:
global i
i += 1
return None if i == 5 else i
def it3() -> Union[int, str]:
global i
i += 1
return str(i) if i == 5 else i
x1 = iter(it1, '3')
x2 = iter(it2, None)
x2b = iter(it2, 9)
x3 = iter(it3, 9)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Iterator, Optional, Union
i: int
x1: Iterator[str]
x2: Iterator[int]
x2b: Iterator[Optional[int]]
x3: Iterator[Union[int, str]]
def it1() -> str: ...
def it2() -> Optional[int]: ...
def it3() -> Union[int, str]: ...
""",
)
def test_list_init(self):
ty = self.Infer("""
l1 = list()
l2 = list([42])
l5 = list(iter([42]))
l6 = list(reversed([42]))
l7 = list(iter((42,)))
l8 = list(iter({42}))
l9 = list((42,))
l10 = list({42})
l11 = list("hello")
l12 = list(iter(bytearray(42)))
l13 = list(iter(range(42)))
l14 = list(x for x in [42])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
l1 = ... # type: List[nothing]
l2 = ... # type: List[int]
l5 = ... # type: List[int]
l6 = ... # type: List[int]
l7 = ... # type: List[int]
l8 = ... # type: List[int]
l9 = ... # type: List[int]
l10 = ... # type: List[int]
l11 = ... # type: List[str]
l12 = ... # type: List[int]
l13 = ... # type: List[int]
l14 = ... # type: List[int]
""",
)
def test_tuple_init(self):
ty = self.Infer("""
t1 = tuple()
t2 = tuple([42])
t5 = tuple(iter([42]))
t6 = tuple(reversed([42]))
t7 = tuple(iter((42,)))
t8 = tuple(iter({42}))
t9 = tuple((42,))
t10 = tuple({42})
t12 = tuple(iter(bytearray(42)))
t13 = tuple(iter(range(42)))
t14 = tuple(x for x in [42])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Tuple
t1 = ... # type: Tuple[()]
t2 = ... # type: Tuple[int, ...]
t5 = ... # type: Tuple[int, ...]
t6 = ... # type: Tuple[int, ...]
t7 = ... # type: Tuple[int, ...]
t8 = ... # type: Tuple[int, ...]
t9 = ... # type: Tuple[int, ...]
t10 = ... # type: Tuple[int, ...]
t12 = ... # type: Tuple[int, ...]
t13 = ... # type: Tuple[int, ...]
t14 = ... # type: Tuple[int, ...]
""",
)
def test_empty_tuple(self):
self.Check("""
isinstance(42, ())
issubclass(int, ())
type("X", (), {"foo": 42})
type("X", (), {})
""")
def test_list_extend(self):
ty = self.Infer("""
x1 = [42]
x1.extend([""])
x2 = [42]
x2.extend(("",))
x3 = [42]
x3.extend({""})
x4 = [42]
x4.extend(frozenset({""}))
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Union
x1 = ... # type: List[Union[int, str]]
x2 = ... # type: List[Union[int, str]]
x3 = ... # type: List[Union[int, str]]
x4 = ... # type: List[Union[int, str]]
""",
)
def test_sorted(self):
ty = self.Infer("""
x3 = sorted(bytearray("hello"))
x4 = sorted([])
      x5 = sorted([42], reverse=True)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
x3 = ... # type: List[int]
x4 = ... # type: List[nothing]
x5 = ... # type: List[int]
""",
)
def test_enumerate(self):
ty = self.Infer("""
x1 = enumerate([42])
x2 = enumerate((42,))
x3 = enumerate(x for x in range(5))
x4 = list(enumerate(['']))
""")
self.assertTypesMatchPytd(
ty,
"""
x1 = ... # type: enumerate[int]
x2 = ... # type: enumerate[int]
x3 = ... # type: enumerate[int]
x4 = ... # type: list[tuple[int, str]]
""",
)
def test_frozenset_init(self):
ty = self.Infer("""
x1 = frozenset([42])
x2 = frozenset({42})
""")
self.assertTypesMatchPytd(
ty,
"""
x1 = ... # type: frozenset[int]
x2 = ... # type: frozenset[int]
""",
)
def test_frozenset_literal(self):
# In python2 this calls LOAD_CONST 'foo'; BUILD_SET, but python3 calls
# LOAD_CONST frozenset(['foo']) directly. Test that both versions work.
ty = self.Infer("""
a = "foo" in {"foo"}
""")
self.assertTypesMatchPytd(
ty,
"""
a = ... # type: bool
""",
)
def test_func_tools(self):
self.Check("""
import functools
""")
def test_abc(self):
self.Check("""
import abc
""")
def test_set_default(self):
ty = self.Infer("""
x = {}
x['bar'] = 3
y = x.setdefault('foo', 3.14)
z = x['foo']
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Union
x = ... # type: Dict[str, Union[float, int]]
y = ... # type: Union[float, int]
z = ... # type: float
""",
)
def test_set_default_one_arg(self):
ty = self.Infer("""
x = {}
x['bar'] = 3
y = x.setdefault('foo')
z = x['foo']
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Dict, Optional
x = ... # type: Dict[str, Optional[int]]
y = ... # type: Optional[int]
z = ... # type: None
""",
)
def test_set_default_varargs(self):
ty = self.Infer("""
x1 = {}
y1 = x1.setdefault(*("foo", 42))
x2 = {}
y2 = x2.setdefault(*["foo", 42])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Dict
x1 = ... # type: Dict[str, int]
y1 = ... # type: int
x2 = ... # type: Dict[str, int]
y2 = ... # type: int
""",
)
def test_redefine_next(self):
ty = self.Infer("""
next = 42
""")
self.assertTypesMatchPytd(
ty,
"""
next = ... # type: int
""",
)
def test_os_environ_copy(self):
self.Check("""
import os
os.environ.copy()["foo"] = "bar"
""")
def test_bytearray_init(self):
self.Check("""
bytearray(42)
bytearray([42])
bytearray(u"hello", "utf-8")
bytearray(u"hello", "utf-8", "")
""")
def test_compile(self):
self.Check("""
code = compile("1 + 2", "foo.py", "single")
""")
def test_int_init(self):
self.Check("""
int(42)
int(42.0)
int("42")
int(u"42")
int()
""")
def test_exec(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
x = exec
""",
)
self.Check(
"""
import foo
foo.x("a = 2")
""",
pythonpath=[d.path],
)
def test_format_string(self):
self.Check("""
class A:
def __format__(self, format_spec):
return "hard_coded".__format__(format_spec)
a = A()
print(f"{a}")
""")
if __name__ == "__main__":
test_base.main()
|
BuiltinTests2
|
python
|
realpython__materials
|
python-property/users.py
|
{
"start": 27,
"end": 444
}
|
class ____:
def __init__(self, name, password):
self.name = name
self.password = password
@property
def password(self):
raise AttributeError("Password is write-only")
@password.setter
def password(self, plaintext):
salt = os.urandom(32)
self._hashed_password = hashlib.pbkdf2_hmac(
"sha256", plaintext.encode("utf-8"), salt, 100_000
)
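# Illustrative aside (editorial, not part of the original snippet): the setter
# stores a PBKDF2 hash instead of the plaintext. PBKDF2 is deterministic for a
# given (password, salt, iterations) triple, so a stored hash can later be
# compared against a re-derived one -- provided the salt is also kept, which
# this excerpt does not show.
import hashlib
import os

salt = os.urandom(32)
first = hashlib.pbkdf2_hmac("sha256", b"s3cret", salt, 100_000)
again = hashlib.pbkdf2_hmac("sha256", b"s3cret", salt, 100_000)
other = hashlib.pbkdf2_hmac("sha256", b"hunter2", salt, 100_000)
assert first == again and first != other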
|
User
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/team.py
|
{
"start": 4264,
"end": 4471
}
|
class ____(TypedDict, total=False):
externalTeams: list[ExternalActorResponse]
organization: OrganizationSerializerResponse
projects: list[ProjectSerializerResponse]
|
_TeamSerializerResponseOptional
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_mbti.py
|
{
"start": 1567,
"end": 3750
}
|
class ____(ColumnMapExpectation):
"""Expect column values to conform to the valid Myers-Briggs Type Indicator (MBTI)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_mbti": [
"INTP",
"intp",
"ESFJ",
"INFP",
],
"malformed_mbti": [
"",
"INTP-",
"IETP",
"This is not a valid MBTI",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_mbti"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_mbti"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_mbti"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMbti().print_diagnostic_checklist()
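# Illustrative aside (editorial, not part of the original snippet): the
# `column_values.valid_mbti` metric is defined elsewhere in this file; a
# plausible (assumed, not authoritative) validity check that is consistent with
# the gallery examples above is a case-insensitive match of the four MBTI slots:
import re

MBTI_PATTERN = re.compile(r"^[EI][SN][TF][JP]$", re.IGNORECASE)
assert all(MBTI_PATTERN.match(v) for v in ["INTP", "intp", "ESFJ", "INFP"])
assert not any(MBTI_PATTERN.match(v) for v in ["", "INTP-", "IETP", "This is not a valid MBTI"])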
|
ExpectColumnValuesToBeValidMbti
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/trainer/connectors/callback_connector.py
|
{
"start": 1690,
"end": 11886
}
|
class ____:
def __init__(self, trainer: "pl.Trainer"):
self.trainer = trainer
def on_trainer_init(
self,
callbacks: Optional[Union[list[Callback], Callback]],
enable_checkpointing: bool,
enable_progress_bar: bool,
default_root_dir: Optional[str],
enable_model_summary: bool,
max_time: Optional[Union[str, timedelta, dict[str, int]]] = None,
) -> None:
# init folder paths for checkpoint + weights save callbacks
self.trainer._default_root_dir = default_root_dir or os.getcwd()
# init callbacks
if isinstance(callbacks, Callback):
callbacks = [callbacks]
self.trainer.callbacks = callbacks or []
# configure checkpoint callback
# pass through the required args to figure out defaults
self._configure_checkpoint_callbacks(enable_checkpointing)
# configure the timer callback.
# responsible to stop the training when max_time is reached.
self._configure_timer_callback(max_time)
# init progress bar
self._configure_progress_bar(enable_progress_bar)
# configure the ModelSummary callback
self._configure_model_summary_callback(enable_model_summary)
self.trainer.callbacks.extend(_load_external_callbacks("lightning.pytorch.callbacks_factory"))
_validate_callbacks_list(self.trainer.callbacks)
# push all model checkpoint callbacks to the end
# it is important that these are the last callbacks to run
self.trainer.callbacks = self._reorder_callbacks(self.trainer.callbacks)
def _configure_checkpoint_callbacks(self, enable_checkpointing: bool) -> None:
if self.trainer.checkpoint_callbacks:
if not enable_checkpointing:
raise MisconfigurationException(
"Trainer was configured with `enable_checkpointing=False`"
" but found `ModelCheckpoint` in callbacks list."
)
elif enable_checkpointing:
if RequirementCache("litmodels >=0.1.7") and self.trainer._model_registry:
trainer_source = inspect.getmodule(self.trainer)
if trainer_source is None or not isinstance(trainer_source.__package__, str):
raise RuntimeError("Unable to determine the source of the trainer.")
# this need to imported based on the actual package lightning/pytorch_lightning
if "pytorch_lightning" in trainer_source.__package__:
from litmodels.integrations.checkpoints import PytorchLightningModelCheckpoint as LitModelCheckpoint
else:
from litmodels.integrations.checkpoints import LightningModelCheckpoint as LitModelCheckpoint
model_checkpoint = LitModelCheckpoint(model_registry=self.trainer._model_registry)
else:
rank_zero_info(
"💡 Tip: For seamless cloud uploads and versioning,"
" try installing [litmodels](https://pypi.org/project/litmodels/) to enable LitModelCheckpoint,"
" which syncs automatically with the Lightning model registry."
)
model_checkpoint = ModelCheckpoint()
self.trainer.callbacks.append(model_checkpoint)
def _configure_model_summary_callback(self, enable_model_summary: bool) -> None:
if not enable_model_summary:
return
model_summary_cbs = [type(cb) for cb in self.trainer.callbacks if isinstance(cb, ModelSummary)]
if model_summary_cbs:
rank_zero_info(
f"Trainer already configured with model summary callbacks: {model_summary_cbs}."
" Skipping setting a default `ModelSummary` callback."
)
return
model_summary: ModelSummary
model_summary = RichModelSummary() if _RICH_AVAILABLE else ModelSummary()
self.trainer.callbacks.append(model_summary)
def _configure_progress_bar(self, enable_progress_bar: bool = True) -> None:
progress_bars = [c for c in self.trainer.callbacks if isinstance(c, ProgressBar)]
if len(progress_bars) > 1:
raise MisconfigurationException(
"You added multiple progress bar callbacks to the Trainer, but currently only one"
" progress bar is supported."
)
if len(progress_bars) == 1:
# the user specified the progress bar in the callbacks list
# so the trainer doesn't need to provide a default one
if enable_progress_bar:
return
# otherwise the user specified a progress bar callback but also
# elected to disable the progress bar with the trainer flag
progress_bar_callback = progress_bars[0]
raise MisconfigurationException(
"Trainer was configured with `enable_progress_bar=False`"
f" but found `{progress_bar_callback.__class__.__name__}` in callbacks list."
)
if enable_progress_bar:
progress_bar_callback = RichProgressBar() if _RICH_AVAILABLE else TQDMProgressBar()
self.trainer.callbacks.append(progress_bar_callback)
def _configure_timer_callback(self, max_time: Optional[Union[str, timedelta, dict[str, int]]] = None) -> None:
if max_time is None:
return
if any(isinstance(cb, Timer) for cb in self.trainer.callbacks):
rank_zero_info("Ignoring `Trainer(max_time=...)`, callbacks list already contains a Timer.")
return
timer = Timer(duration=max_time, interval="step")
self.trainer.callbacks.append(timer)
def _attach_model_logging_functions(self) -> None:
lightning_module = self.trainer.lightning_module
for callback in self.trainer.callbacks:
callback.log = lightning_module.log
callback.log_dict = lightning_module.log_dict
def _attach_model_callbacks(self) -> None:
"""Attaches the callbacks defined in the model.
If a callback returned by the model's configure_callback method has the same type as one or several
callbacks already present in the trainer callbacks list, it will replace them.
In addition, all :class:`~lightning.pytorch.callbacks.model_checkpoint.ModelCheckpoint` callbacks
will be pushed to the end of the list, ensuring they run last.
"""
trainer = self.trainer
model_callbacks = call._call_lightning_module_hook(trainer, "configure_callbacks")
if not model_callbacks:
return
model_callbacks = [model_callbacks] if not isinstance(model_callbacks, Sequence) else model_callbacks
model_callback_types = {type(c) for c in model_callbacks}
trainer_callback_types = {type(c) for c in trainer.callbacks}
# edge case: if an unmodified callback was added, the logic below would filter it
trainer_callback_types.discard(Callback)
# exclude trainer callbacks of the same class or subclass
override_types = set()
for model_cb in model_callback_types:
for trainer_cb in trainer_callback_types:
if issubclass(model_cb, trainer_cb):
override_types.add(trainer_cb)
break
if override_types:
rank_zero_info(
"The following callbacks returned in `LightningModule.configure_callbacks` will override"
" existing callbacks passed to Trainer:"
f" {', '.join(sorted(t.__name__ for t in override_types))}"
)
# remove all callbacks with a type that occurs in model callbacks
all_callbacks = [c for c in trainer.callbacks if type(c) not in override_types]
all_callbacks.extend(model_callbacks)
all_callbacks = _CallbackConnector._reorder_callbacks(all_callbacks)
# TODO: connectors refactor: move callbacks list to connector and do not write Trainer state
trainer.callbacks = all_callbacks
@staticmethod
def _reorder_callbacks(callbacks: list[Callback]) -> list[Callback]:
"""Moves all the tuner specific callbacks at the beginning of the list and all the `ModelCheckpoint` callbacks
to the end of the list. The sequential order within the group of checkpoint callbacks is preserved, as well as
the order of all other callbacks.
Args:
callbacks: A list of callbacks.
Return:
A new list in which the first elements are tuner specific callbacks and last elements are ModelCheckpoints
if there were any present in the input.
"""
tuner_callbacks: list[Callback] = []
other_callbacks: list[Callback] = []
checkpoint_callbacks: list[Callback] = []
for cb in callbacks:
if isinstance(cb, (BatchSizeFinder, LearningRateFinder)):
tuner_callbacks.append(cb)
elif isinstance(cb, Checkpoint):
checkpoint_callbacks.append(cb)
else:
other_callbacks.append(cb)
return tuner_callbacks + other_callbacks + checkpoint_callbacks
def _validate_callbacks_list(callbacks: list[Callback]) -> None:
stateful_callbacks = [cb for cb in callbacks if is_overridden("state_dict", instance=cb, parent=Callback)]
seen_callbacks = set()
for callback in stateful_callbacks:
if callback.state_key in seen_callbacks:
raise RuntimeError(
f"Found more than one stateful callback of type `{type(callback).__name__}`. In the current"
" configuration, this callback does not support being saved alongside other instances of the same type."
f" Please consult the documentation of `{type(callback).__name__}` regarding valid settings for"
" the callback state to be checkpointable."
" HINT: The `callback.state_key` must be unique among all callbacks in the Trainer."
)
seen_callbacks.add(callback.state_key)
|
_CallbackConnector
|
python
|
google__jax
|
jax/_src/interpreters/ad.py
|
{
"start": 66249,
"end": 66957
}
|
class ____(Exception):
def __init__(self):
# TODO(mattjj): track source provenance on AD tracers, improve error
msg = ("Detected differentiation of a custom_vjp function with respect to "
"a closed-over value. That isn't supported because the custom VJP "
"rule only specifies how to differentiate the custom_vjp function "
"with respect to explicit input parameters. Try passing the "
"closed-over value into the custom_vjp function as an argument, and "
"adapting the custom_vjp fwd and bwd rules.")
super().__init__(msg)
# TODO(mattjj): remove this vestigial dict
reducing_transposes: dict[core.Primitive, Callable] = {}
|
CustomVJPException
|
python
|
getsentry__sentry
|
tests/sentry/notifications/api/endpoints/test_user_notification_email.py
|
{
"start": 208,
"end": 632
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-user-notifications-email"
def setUp(self) -> None:
self.organization2 = self.create_organization(name="Another Org", owner=self.user)
self.project2 = self.create_project(
organization=self.organization, teams=[self.team], name="Another Name"
)
self.login_as(user=self.user)
@control_silo_test
|
UserNotificationEmailTestBase
|
python
|
kamyu104__LeetCode-Solutions
|
Python/check-if-the-number-is-fascinating.py
|
{
"start": 51,
"end": 518
}
|
class ____(object):
def isFascinating(self, n):
"""
:type n: int
:rtype: bool
"""
lookup = [0]
def check(x):
while x:
x, d = divmod(x, 10)
if d == 0 or lookup[0]&(1<<d):
return False
lookup[0] |= (1<<d)
return True
return check(n) and check(2*n) and check(3*n)
# Time: O(logn)
# Space: O(logn)
# string
|
Solution
|
python
|
pytorch__pytorch
|
test/dynamo/test_autograd_function.py
|
{
"start": 4922,
"end": 5231
}
|
class ____(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
with torch.no_grad():
ctx.save_for_backward(x)
ctx.mark_non_differentiable(x)
return x
@staticmethod
def backward(ctx, grad_output):
return grad_output
|
ContextSaveAndMark
|
python
|
huggingface__transformers
|
src/transformers/models/bert_generation/modeling_bert_generation.py
|
{
"start": 26468,
"end": 30836
}
|
class ____(BertGenerationPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"lm_head.decoder.weight": "bert.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True.`")
self.bert = BertGenerationEncoder(config)
self.lm_head = BertGenerationOnlyLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, BertGenerationDecoder, BertGenerationConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
>>> config.is_decoder = True
>>> model = BertGenerationDecoder.from_pretrained(
... "google/bert_for_seq_generation_L-24_bbc_encoder", config=config
... )
>>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs: BaseModelOutputWithPastAndCrossAttentions = self.bert(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
return_dict=True,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
__all__ = [
"BertGenerationDecoder",
"BertGenerationEncoder",
"BertGenerationPreTrainedModel",
]
|
BertGenerationDecoder
|
python
|
facebookresearch__faiss
|
tests/test_search_params.py
|
{
"start": 18219,
"end": 19570
}
|
class ____(unittest.TestCase):
""" to test the sorted id bounds, there are a few cases to consider """
def do_test_sorted(self, imin, imax, n=100):
selr = faiss.IDSelectorRange(imin, imax, True)
sp = faiss.swig_ptr
for seed in range(10):
rs = np.random.RandomState(seed)
ids = rs.choice(30, n).astype('int64')
ids.sort()
j01 = np.zeros(2, dtype='uint64')
selr.find_sorted_ids_bounds(
len(ids), sp(ids), sp(j01[:1]), sp(j01[1:]))
j0, j1 = j01.astype(int)
ref_idx, = np.where((ids >= imin) & (ids < imax))
np.testing.assert_array_equal(ref_idx, np.arange(j0, j1))
def test_sorted_in_range(self):
self.do_test_sorted(10, 20)
def test_sorted_out_0(self):
self.do_test_sorted(-10, 20)
def test_sorted_out_1(self):
self.do_test_sorted(10, 40)
def test_sorted_in_range_smalln(self):
self.do_test_sorted(10, 20, n=5)
def test_12_92(self):
selr = faiss.IDSelectorRange(30, 80, True)
ids = np.array([12, 92], dtype='int64')
j01 = np.zeros(2, dtype='uint64')
sp = faiss.swig_ptr
selr.find_sorted_ids_bounds(
len(ids), sp(ids), sp(j01[:1]), sp(j01[1:]))
assert j01[0] >= j01[1]
|
TestSortedIDSelectorRange
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 13597,
"end": 14547
}
|
class ____(BaseModel):
"""
Event Log Response.
"""
event_log_id: Annotated[int, Field(title="Event Log Id")]
when: Annotated[datetime, Field(title="When")]
dag_id: Annotated[str | None, Field(title="Dag Id")] = None
task_id: Annotated[str | None, Field(title="Task Id")] = None
run_id: Annotated[str | None, Field(title="Run Id")] = None
map_index: Annotated[int | None, Field(title="Map Index")] = None
try_number: Annotated[int | None, Field(title="Try Number")] = None
event: Annotated[str, Field(title="Event")]
logical_date: Annotated[datetime | None, Field(title="Logical Date")] = None
owner: Annotated[str | None, Field(title="Owner")] = None
extra: Annotated[str | None, Field(title="Extra")] = None
dag_display_name: Annotated[str | None, Field(title="Dag Display Name")] = None
task_display_name: Annotated[str | None, Field(title="Task Display Name")] = None
|
EventLogResponse
|
python
|
apache__airflow
|
providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_adx.py
|
{
"start": 2000,
"end": 2058
}
|
class ____:
primary_results = [MOCK_RESULT]
|
MockResponse
|
python
|
spack__spack
|
lib/spack/spack/llnl/util/filesystem.py
|
{
"start": 95079,
"end": 110618
}
|
class ____:
"""Uses hybrid iterative deepening to locate the first matching
file. Up to depth ``bfs_depth`` it uses iterative deepening, which
mimics breadth-first with the same memory footprint as depth-first
search, after which it switches to ordinary depth-first search using
``os.walk``."""
def __init__(self, root: str, *file_patterns: str, bfs_depth: int = 2):
"""Create a small summary of the given file. Does not error
when file does not exist.
Args:
root (str): directory in which to recursively search
file_patterns (str): glob file patterns understood by fnmatch
bfs_depth (int): until this depth breadth-first traversal is used,
when no match is found, the mode is switched to depth-first search.
"""
self.root = root
self.bfs_depth = bfs_depth
self.match: Callable
# normcase is trivial on posix
regex = re.compile("|".join(fnmatch.translate(os.path.normcase(p)) for p in file_patterns))
# On case sensitive filesystems match against normcase'd paths.
if os.path is posixpath:
self.match = regex.match
else:
self.match = lambda p: regex.match(os.path.normcase(p))
def find(self) -> Optional[str]:
"""Run the file search
Returns:
str or None: path of the matching file
"""
self.file = None
# First do iterative deepening (i.e. bfs through limited depth dfs)
for i in range(self.bfs_depth + 1):
if self._find_at_depth(self.root, i):
return self.file
# Then fall back to depth-first search
return self._find_dfs()
def _find_at_depth(self, path, max_depth, depth=0) -> bool:
"""Returns True when done. Notice search can be done
either because a file was found, or because it recursed
through all directories."""
try:
entries = os.scandir(path)
except OSError:
return True
done = True
with entries:
# At max depth we look for matching files.
if depth == max_depth:
for f in entries:
# Exit on match
if self.match(f.name):
self.file = os.path.join(path, f.name)
return True
# is_dir should not require a stat call, so it's a good optimization.
if self._is_dir(f):
done = False
return done
# At lower depth only recurse into subdirs
for f in entries:
if not self._is_dir(f):
continue
# If any subdir is not fully traversed, we're not done yet.
if not self._find_at_depth(os.path.join(path, f.name), max_depth, depth + 1):
done = False
# Early exit when we've found something.
if self.file:
return True
return done
def _is_dir(self, f: os.DirEntry) -> bool:
"""Returns True when f is dir we can enter (and not a symlink)."""
try:
return f.is_dir(follow_symlinks=False)
except OSError:
return False
def _find_dfs(self) -> Optional[str]:
"""Returns match or None"""
for dirpath, _, filenames in os.walk(self.root):
for file in filenames:
if self.match(file):
return os.path.join(dirpath, file)
return None
def _windows_symlink(
src: str, dst: str, target_is_directory: bool = False, *, dir_fd: Union[int, None] = None
):
"""On Windows with System Administrator privileges this will be a normal symbolic link via
os.symlink. On Windows without privledges the link will be a junction for a directory and a
hardlink for a file. On Windows the various link types are:
Symbolic Link: A link to a file or directory on the same or different volume (drive letter) or
even to a remote file or directory (using UNC in its path). Need System Administrator
privileges to make these.
Hard Link: A link to a file on the same volume (drive letter) only. Every file (file's data)
has at least 1 hard link (file's name). But when this method creates a new hard link there will
be 2. Deleting all hard links effectively deletes the file. Don't need System Administrator
privileges.
Junction: A link to a directory on the same or different volume (drive letter) but not to a
remote directory. Don't need System Administrator privileges."""
source_path = os.path.normpath(src)
win_source_path = source_path
link_path = os.path.normpath(dst)
# Perform basic checks to make sure symlinking will succeed
if os.path.lexists(link_path):
raise AlreadyExistsError(f"Link path ({link_path}) already exists. Cannot create link.")
if not os.path.exists(source_path):
if os.path.isabs(source_path):
# An absolute source path that does not exist will result in a broken link.
raise SymlinkError(
f"Source path ({source_path}) is absolute but does not exist. Resulting "
f"link would be broken so not making link."
)
else:
# os.symlink can create a link when the given source path is relative to
# the link path. Emulate this behavior and check to see if the source exists
# relative to the link path ahead of link creation to prevent broken
# links from being made.
link_parent_dir = os.path.dirname(link_path)
relative_path = os.path.join(link_parent_dir, source_path)
if os.path.exists(relative_path):
# In order to work on windows, the source path needs to be modified to be
# relative because hardlink/junction dont resolve relative paths the same
# way as os.symlink. This is ignored on other operating systems.
win_source_path = relative_path
else:
raise SymlinkError(
f"The source path ({source_path}) is not relative to the link path "
f"({link_path}). Resulting link would be broken so not making link."
)
# Create the symlink
if not _windows_can_symlink():
_windows_create_link(win_source_path, link_path)
else:
os.symlink(source_path, link_path, target_is_directory=os.path.isdir(source_path))
def _windows_islink(path: str) -> bool:
"""Override os.islink to give correct answer for spack logic.
For Non-Windows: a link can be determined with the os.path.islink method.
Windows-only methods will return false for other operating systems.
For Windows: spack considers symlinks, hard links, and junctions to
all be links, so if any of those are True, return True.
Args:
path (str): path to check if it is a link.
Returns:
bool - whether the path is any kind link or not.
"""
return any([os.path.islink(path), _windows_is_junction(path), _windows_is_hardlink(path)])
def _windows_is_hardlink(path: str) -> bool:
"""Determines if a path is a windows hard link. This is accomplished
by looking at the number of links using os.stat. A non-hard-linked file
will have a st_nlink value of 1, whereas a hard link will have a value
larger than 1. Note that both the original and hard-linked file will
return True because they share the same inode.
Args:
path (str): Windows path to check for a hard link
Returns:
bool - Whether the path is a hard link or not.
"""
if sys.platform != "win32" or os.path.islink(path) or not os.path.exists(path):
return False
return os.stat(path).st_nlink > 1
def _windows_is_junction(path: str) -> bool:
"""Determines if a path is a windows junction. A junction can be
determined using a bitwise AND operation between the file's
attribute bitmask and the known junction bitmask (0x400).
Args:
path (str): A non-file path
Returns:
bool - whether the path is a junction or not.
"""
if sys.platform != "win32" or os.path.islink(path) or os.path.isfile(path):
return False
import ctypes.wintypes
get_file_attributes = ctypes.windll.kernel32.GetFileAttributesW # type: ignore[attr-defined]
get_file_attributes.argtypes = (ctypes.wintypes.LPWSTR,)
get_file_attributes.restype = ctypes.wintypes.DWORD
invalid_file_attributes = 0xFFFFFFFF
reparse_point = 0x400
file_attr = get_file_attributes(str(path))
if file_attr == invalid_file_attributes:
return False
return file_attr & reparse_point > 0
@lang.memoized
def _windows_can_symlink() -> bool:
"""
Determines if windows is able to make a symlink depending on
the system configuration and the level of the user's permissions.
"""
if sys.platform != "win32":
tty.warn("windows_can_symlink method can't be used on non-Windows OS.")
return False
tempdir = tempfile.mkdtemp()
dpath = os.path.join(tempdir, "dpath")
fpath = os.path.join(tempdir, "fpath.txt")
dlink = os.path.join(tempdir, "dlink")
flink = os.path.join(tempdir, "flink.txt")
touchp(fpath)
mkdirp(dpath)
try:
os.symlink(dpath, dlink)
can_symlink_directories = os.path.islink(dlink)
except OSError:
can_symlink_directories = False
try:
os.symlink(fpath, flink)
can_symlink_files = os.path.islink(flink)
except OSError:
can_symlink_files = False
# Cleanup the test directory
shutil.rmtree(tempdir)
return can_symlink_directories and can_symlink_files
def _windows_create_link(source: str, link: str):
"""
Attempts to create a Hard Link or Junction as an alternative
to a symbolic link. This is called when symbolic links cannot
be created.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_link method can't be used on non-Windows OS.")
elif os.path.isdir(source):
_windows_create_junction(source=source, link=link)
elif os.path.isfile(source):
_windows_create_hard_link(path=source, link=link)
else:
raise SymlinkError(
f"Cannot create link from {source}. It is neither a file nor a directory."
)
def _windows_create_junction(source: str, link: str):
"""Duly verify that the path and link are eligible to create a junction,
then create the junction.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_junction method can't be used on non-Windows OS.")
elif not os.path.exists(source):
raise SymlinkError("Source path does not exist, cannot create a junction.")
elif os.path.lexists(link):
raise AlreadyExistsError("Link path already exists, cannot create a junction.")
elif not os.path.isdir(source):
raise SymlinkError("Source path is not a directory, cannot create a junction.")
import subprocess
cmd = ["cmd", "/C", "mklink", "/J", link, source]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
tty.debug(out.decode())
if proc.returncode != 0:
err_str = err.decode()
tty.error(err_str)
raise SymlinkError("Make junction command returned a non-zero return code.", err_str)
def _windows_create_hard_link(path: str, link: str):
"""Duly verify that the path and link are eligible to create a hard
link, then create the hard link.
"""
if sys.platform != "win32":
raise SymlinkError("windows_create_hard_link method can't be used on non-Windows OS.")
elif not os.path.exists(path):
raise SymlinkError(f"File path {path} does not exist. Cannot create hard link.")
elif os.path.lexists(link):
raise AlreadyExistsError(f"Link path ({link}) already exists. Cannot create hard link.")
elif not os.path.isfile(path):
raise SymlinkError(f"File path ({link}) is not a file. Cannot create hard link.")
else:
tty.debug(f"Creating hard link {link} pointing to {path}")
CreateHardLink(link, path)
def _windows_readlink(path: str, *, dir_fd=None):
"""Spack utility to override of os.readlink method to work cross platform"""
if _windows_is_hardlink(path):
return _windows_read_hard_link(path)
elif _windows_is_junction(path):
return _windows_read_junction(path)
else:
return sanitize_win_longpath(os.readlink(path, dir_fd=dir_fd))
def _windows_read_hard_link(link: str) -> str:
"""Find all of the files that point to the same inode as the link"""
if sys.platform != "win32":
raise SymlinkError("Can't read hard link on non-Windows OS.")
link = os.path.abspath(link)
fsutil_cmd = ["fsutil", "hardlink", "list", link]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading hard link: {err.decode()}")
# fsutil response does not include the drive name, so append it back to each linked file.
drive, link_tail = os.path.splitdrive(os.path.abspath(link))
links = set([os.path.join(drive, p) for p in out.decode().splitlines()])
links.remove(link)
if len(links) == 1:
return links.pop()
elif len(links) > 1:
# TODO: How best to handle the case where 3 or more paths point to a single inode?
raise SymlinkError(f"Found multiple paths pointing to the same inode {links}")
else:
raise SymlinkError("Cannot determine hard link source path.")
def _windows_read_junction(link: str):
"""Find the path that a junction points to."""
if sys.platform != "win32":
raise SymlinkError("Can't read junction on non-Windows OS.")
link = os.path.abspath(link)
link_basename = os.path.basename(link)
link_parent = os.path.dirname(link)
fsutil_cmd = ["dir", "/a:l", link_parent]
proc = subprocess.Popen(fsutil_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = proc.communicate()
if proc.returncode != 0:
raise SymlinkError(f"An error occurred while reading junction: {err.decode()}")
matches = re.search(rf"<JUNCTION>\s+{link_basename} \[(.*)]", out.decode())
if matches:
return matches.group(1)
else:
raise SymlinkError("Could not find junction path.")
@system_path_filter
def resolve_link_target_relative_to_the_link(link):
"""
os.path.isdir uses os.path.exists, which for links will check
the existence of the link target. If the link target is relative to
the link, we need to construct a pathname that is valid from
our cwd (which may not be the same as the link's directory)
"""
target = readlink(link)
if os.path.isabs(target):
return target
link_dir = os.path.dirname(os.path.abspath(link))
return os.path.join(link_dir, target)
if sys.platform == "win32":
symlink = _windows_symlink
readlink = _windows_readlink
islink = _windows_islink
rename = _win_rename
else:
symlink = os.symlink
readlink = os.readlink
islink = os.path.islink
rename = os.rename
|
FindFirstFile
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_mime.py
|
{
"start": 1619,
"end": 4091
}
|
class ____(ColumnMapExpectation):
"""Expect column values to be valid MIME types."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_mime": [
"https://docs.python.org/3/library/platform.html",
"Deep Learning.pdf",
"docs.zip",
"secret_data.csv",
"sitemap.xml",
"Videos/awesome_presentation.mp4",
],
"malformed_mime": [
"https://docs.python.org/3/library/platform",
"Deep Learning.",
"zip.docs",
"secret_data.undefined",
"Videos/awesome_presentation.mp8",
"This is not a mime type resource.",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_mime"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_mime"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_mime"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMIME().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidMIME
|
python
|
bokeh__bokeh
|
src/bokeh/protocol/message.py
|
{
"start": 3111,
"end": 3229
}
|
class ____(TypedDict):
msgid: ID
msgtype: str
reqid: NotRequired[ID]
num_buffers: NotRequired[int]
|
Header
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.