| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
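Each row pairs a Python class definition whose name is masked with `____` (the `source` column, cut from the file at `path` over the offsets in `class_span`) with the original class name (the `target` column). The sketch below is a minimal illustration of how such a row could be rebuilt from a checked-out repository; the reading of `class_span` as character offsets into the file, the literal `____` mask token, and the helper name `build_row` are assumptions for illustration, not part of the dataset's own tooling.

```python
import re

# Minimal sketch, assuming class_span holds character offsets into the file
# at `path` and that the mask token is the literal "____". build_row is a
# hypothetical helper, not taken from the dataset documentation.
def build_row(repo: str, path: str, start: int, end: int) -> dict:
    with open(path, encoding="utf-8") as f:
        text = f.read()
    snippet = text[start:end]                    # the class definition named by class_span
    match = re.match(r"class\s+(\w+)", snippet)  # recover the original class name
    if match is None:
        raise ValueError("span does not start with a class definition")
    target = match.group(1)
    source = snippet.replace(target, "____", 1)  # mask only the definition site
    return {
        "language": "python",
        "repo": repo,
        "path": path,
        "class_span": {"start": start, "end": end},
        "source": source,
        "target": target,
    }

# Example against the first row below (requires a sqlalchemy checkout):
# row = build_row("sqlalchemy__sqlalchemy",
#                 "lib/sqlalchemy/dialects/mssql/base.py", 41011, 41077)
# assert row["target"] == "_BASETIMEIMPL"
```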
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/base.py
|
{
"start": 41011,
"end": 41077
}
|
class ____(TIME):
__visit_name__ = "_BASETIMEIMPL"
|
_BASETIMEIMPL
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 607952,
"end": 608589
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("ReviewRequestEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("ReviewRequest"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
ReviewRequestConnection
|
python
|
sphinx-doc__sphinx
|
sphinx/util/_inventory_file_reader.py
|
{
"start": 442,
"end": 2027
}
|
class ____:
"""A file reader for an inventory file.
This reader supports mixture of texts and compressed texts.
"""
def __init__(self, stream: _SupportsRead) -> None:
self.stream = stream
self.buffer = b''
self.eof = False
def read_buffer(self) -> None:
chunk = self.stream.read(BUFSIZE)
if chunk == b'':
self.eof = True
self.buffer += chunk
def readline(self) -> str:
pos = self.buffer.find(b'\n')
if pos != -1:
line = self.buffer[:pos].decode()
self.buffer = self.buffer[pos + 1 :]
elif self.eof:
line = self.buffer.decode()
self.buffer = b''
else:
self.read_buffer()
line = self.readline()
return line
def readlines(self) -> Iterator[str]:
while not self.eof:
line = self.readline()
if line:
yield line
def read_compressed_chunks(self) -> Iterator[bytes]:
decompressor = zlib.decompressobj()
while not self.eof:
self.read_buffer()
yield decompressor.decompress(self.buffer)
self.buffer = b''
yield decompressor.flush()
def read_compressed_lines(self) -> Iterator[str]:
buf = b''
for chunk in self.read_compressed_chunks():
buf += chunk
pos = buf.find(b'\n')
while pos != -1:
yield buf[:pos].decode()
buf = buf[pos + 1 :]
pos = buf.find(b'\n')
|
InventoryFileReader
|
python
|
django__django
|
tests/test_utils/tests.py
|
{
"start": 8952,
"end": 13087
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name="p1")
cls.p2 = Person.objects.create(name="p2")
def test_empty(self):
self.assertQuerySetEqual(Person.objects.filter(name="p3"), [])
def test_ordered(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[self.p1, self.p2],
)
def test_unordered(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"), [self.p2, self.p1], ordered=False
)
def test_queryset(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
Person.objects.order_by("name"),
)
def test_flat_values_list(self):
self.assertQuerySetEqual(
Person.objects.order_by("name").values_list("name", flat=True),
["p1", "p2"],
)
def test_transform(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk,
)
def test_repr_transform(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[repr(self.p1), repr(self.p2)],
transform=repr,
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = (
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerySetEqual(
Person.objects.all(),
[self.p1, self.p2],
)
# No error for one value.
self.assertQuerySetEqual(Person.objects.filter(name="p1"), [self.p1])
def test_repeated_values(self):
"""
assertQuerySetEqual checks the number of appearance of each item
when used with option ordered=False.
"""
batmobile = Car.objects.create(name="Batmobile")
k2000 = Car.objects.create(name="K 2000")
PossessedCar.objects.bulk_create(
[
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
]
)
with self.assertRaises(AssertionError):
self.assertQuerySetEqual(
self.p1.cars.all(), [batmobile, k2000], ordered=False
)
self.assertQuerySetEqual(
self.p1.cars.all(), [batmobile] * 2 + [k2000] * 4, ordered=False
)
def test_maxdiff(self):
names = ["Joe Smith %s" % i for i in range(20)]
Person.objects.bulk_create([Person(name=name) for name in names])
names.append("Extra Person")
with self.assertRaises(AssertionError) as ctx:
self.assertQuerySetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
self.assertIn("Set self.maxDiff to None to see it.", str(ctx.exception))
original = self.maxDiff
self.maxDiff = None
try:
with self.assertRaises(AssertionError) as ctx:
self.assertQuerySetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
finally:
self.maxDiff = original
exception_msg = str(ctx.exception)
self.assertNotIn("Set self.maxDiff to None to see it.", exception_msg)
for name in names:
self.assertIn(name, exception_msg)
@override_settings(ROOT_URLCONF="test_utils.urls")
|
AssertQuerySetEqualTests
|
python
|
apache__airflow
|
airflow-core/src/airflow/executors/executor_utils.py
|
{
"start": 966,
"end": 2417
}
|
class ____(LoggingMixin):
"""Representation of an executor config/name."""
def __init__(self, module_path: str, alias: str | None = None, team_name: str | None = None) -> None:
self.module_path: str = module_path
self.alias: str | None = alias
self.team_name: str | None = team_name
self.set_connector_source()
def set_connector_source(self) -> None:
if self.alias in CORE_EXECUTOR_NAMES:
self.connector_source = ConnectorSource.CORE
else:
# Executor must be a module
self.connector_source = ConnectorSource.CUSTOM_PATH
def __repr__(self) -> str:
"""Implement repr."""
if self.alias in CORE_EXECUTOR_NAMES:
# This is a "core executor" we can refer to it by its known short name
return f"{self.team_name if self.team_name else ''}:{self.alias}:"
return f"{self.team_name if self.team_name else ''}:{self.alias if self.alias else ''}:{self.module_path}"
def __eq__(self, other) -> bool:
"""Implement eq."""
if (
self.alias == other.alias
and self.module_path == other.module_path
and self.connector_source == other.connector_source
and self.team_name == other.team_name
):
return True
return False
def __hash__(self) -> int:
"""Implement hash."""
return hash(self.__repr__())
|
ExecutorName
|
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_project_transaction_threshold.py
|
{
"start": 177,
"end": 6641
}
|
class ____(APITestCase):
feature_name = "organizations:performance-view"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.project = self.create_project()
self.url = reverse(
"sentry-api-0-project-transaction-threshold",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
def test_get_for_project_with_custom_threshold(self) -> None:
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=500,
metric=TransactionMetric.LCP.value,
)
with self.feature(self.feature_name):
response = self.client.get(self.url, format="json")
assert response.status_code == 200, response.content
assert response.data["threshold"] == "500"
assert response.data["metric"] == "lcp"
def test_get_for_project_without_custom_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.get(self.url, format="json")
assert response.status_code == 200, response.content
assert response.data["threshold"] == "300"
assert response.data["metric"] == "duration"
def test_get_returns_error_without_feature_enabled(self) -> None:
with self.feature({self.feature_name: False}):
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=300,
metric=TransactionMetric.DURATION.value,
)
response = self.client.get(self.url, format="json")
assert response.status_code == 404
def test_create_project_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"metric": "duration",
"threshold": "300",
},
)
assert response.status_code == 201, response.content
assert response.data["threshold"] == "300"
assert response.data["metric"] == "duration"
assert response.data["editedBy"] == str(self.user.id)
assert ProjectTransactionThreshold.objects.filter(
project=self.project, organization=self.project.organization
).exists()
def test_update_single_field_project_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"metric": "lcp",
"threshold": "300",
},
)
assert response.status_code == 201, response.content
assert response.data["threshold"] == "300"
assert response.data["metric"] == "lcp"
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"threshold": "400",
},
)
assert response.status_code == 200, response.content
assert response.data["threshold"] == "400"
assert response.data["metric"] == "lcp"
def test_project_threshold_permissions(self) -> None:
user = self.create_user()
# user without project-write permissions
self.create_member(user=user, organization=self.organization, role="member")
self.login_as(user=user)
team = self.create_team()
project = self.create_project(teams=[team], name="foo")
url = reverse(
"sentry-api-0-project-transaction-threshold",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
ProjectTransactionThreshold.objects.create(
project=project,
organization=project.organization,
threshold=300,
metric=TransactionMetric.DURATION.value,
)
with self.feature(self.feature_name):
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
with self.feature(self.feature_name):
response = self.client.post(
url,
data={
"metric": "lcp",
"threshold": "400",
},
)
assert response.status_code == 403
with self.feature(self.feature_name):
response = self.client.delete(url)
assert response.status_code == 403
def test_update_project_threshold(self) -> None:
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"metric": "duration",
"threshold": "300",
},
)
assert response.status_code == 201, response.content
assert response.data["threshold"] == "300"
assert response.data["metric"] == "duration"
with self.feature(self.feature_name):
response = self.client.post(
self.url,
data={
"metric": "lcp",
"threshold": "400",
},
)
assert response.status_code == 200, response.content
assert response.data["threshold"] == "400"
assert response.data["metric"] == "lcp"
def test_clear_project_threshold(self) -> None:
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=300,
metric=TransactionMetric.DURATION.value,
)
assert ProjectTransactionThreshold.objects.filter(
project=self.project, organization=self.project.organization
).exists()
with self.feature(self.feature_name):
response = self.client.delete(self.url)
assert response.status_code == 204
assert not ProjectTransactionThreshold.objects.filter(
project=self.project, organization=self.project.organization
).exists()
|
ProjectTransactionThresholdTest
|
python
|
doocs__leetcode
|
solution/1700-1799/1703.Minimum Adjacent Swaps for K Consecutive Ones/Solution.py
|
{
"start": 0,
"end": 524
}
|
class ____:
def minMoves(self, nums: List[int], k: int) -> int:
arr = [i for i, x in enumerate(nums) if x]
s = list(accumulate(arr, initial=0))
ans = inf
x = (k + 1) // 2
y = k - x
for i in range(x - 1, len(arr) - y):
j = arr[i]
ls = s[i + 1] - s[i + 1 - x]
rs = s[i + 1 + y] - s[i + 1]
a = (j + j - x + 1) * x // 2 - ls
b = rs - (j + 1 + j + y) * y // 2
ans = min(ans, a + b)
return ans
|
Solution
|
python
|
pytorch__pytorch
|
test/jit/test_backends.py
|
{
"start": 17803,
"end": 18198
}
|
class ____(torch.nn.Module):
"""
A simple add Module used to test to_backend lowering machinery.
"""
def forward(self, x, h):
return x + h
# This is ignored in IS_WINDOWS or IS_MACOS cases. Hence we need the one in TestBackends.
@unittest.skipIf(
IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
"Non-portable load_library call used in test",
)
|
BasicModuleAdd
|
python
|
chroma-core__chroma
|
chromadb/proto/convert.py
|
{
"start": 894,
"end": 1036
}
|
class ____(TypedDict):
id: str
document: Optional[str]
embedding: Optional[Vector]
metadata: Optional[Metadata]
|
ProjectionRecord
|
python
|
huggingface__transformers
|
src/transformers/models/shieldgemma2/processing_shieldgemma2.py
|
{
"start": 2207,
"end": 8484
}
|
class ____(Gemma3Processor):
def __init__(
self, image_processor, tokenizer, chat_template=None, image_seq_length=256, policy_definitions=None, **kwargs
):
"""A processor for the ShieldGemma 2 model.
Args:
image_processor: The image processor to use, typically a `Gemma3ImageProcessorFast` instance.
tokenizer: The tokenizer to use, typically a `GemmaTokenizerFast` instance.
chat_template: The chat template to use with this processor. Typically, this is unset as the processor
configuration on Hugging Face Hub includes this value already.
image_seq_length: The number of soft tokens per image. Typically, this is unset as the processor
configuration on Hugging Face Hub includes this value already.
policy_definitions: A mapping from policy name to its description in text used as the default policies to
classify images against. The policy descriptions are included in the text of the prompts generated by
this processor. Typically, this is unset as the processor configuration on Hugging Face Hub includes
the base policies ShieldGemma was trained on.
"""
super().__init__(image_processor, tokenizer, chat_template, image_seq_length, **kwargs)
if policy_definitions is None:
self.policy_definitions = DEFAULT_SHIELDGEMMA2_POLICIES
else:
self.policy_definitions = policy_definitions
def __call__(
self,
images: Optional[ImageInput] = None,
text=None,
**kwargs: Unpack[ShieldGemma2ProcessorKwargs],
) -> BatchFeature:
"""Generates a batch of inputs from the provided images.
ShieldGemma was trained to classify image content for policy compliance using a specific prompt construction.
This processor generates a batch of such prompts from the provided images by:
1. Creating a list of conversations, one for each `<image, policy>` pair;
2. Converting these conversations to text using `self.apply_chat_template()`; and
3. Encoding the conversations and images using the same techniques as `Gemma3Processor`.
Args:
images: A single image or a list of images to include in the batch.
text: Not supported.
videos: Not supported.
audio: Not supported.
kwargs: An optional dictionary of keyword arguments to configure the
processor. Possible values include:
* `custom_policies`: Additional policy definitions that augment the `self.policy_definitions` passed
into the constructor. Note that `custom_policies` that share a key with `self.policy_definitions`
will override the policy description
* `policies`: (Optional) a list of keys in the joint `self.policy_definitions | custom_policies`
dictionary of specific interest for the provided images. If empty or None, prompts will be
generated for every key in the joint dictionary.
Returns:
A `BatchFeature` containing `input_ids`, `pixel_values`, etc. where each Tensor is of shape
`(len(images) * len(policies), )`, and the order within the batch will be
img1_policy1, ... img1_policyN, ... imgM_policyN.
"""
if not images:
raise ValueError("ShieldGemma 2 needs images to classify")
elif not isinstance(images, Sequence):
images = [images]
if not self.chat_template:
raise ValueError("ShieldGemma 2 requires the use of a specific chat template")
common_kwargs = kwargs.setdefault("common_kwargs", {})
if "return_tensors" in kwargs:
common_kwargs["return_tensors"] = kwargs.pop("return_tensors")
# Disable pan and scan
images_kwargs = kwargs.setdefault("images_kwargs", {})
if images_kwargs.get("do_pan_and_scan") is True:
logger.warning_once("ShieldGemma2 does not support pan and scan.")
images_kwargs["do_pan_and_scan"] = False
# Enable padding on the batch during tokenization
text_kwargs = kwargs.setdefault("text_kwargs", {})
if "padding" not in text_kwargs:
text_kwargs["padding"] = kwargs.pop("padding", True)
text_kwargs["padding_side"] = kwargs.pop("padding_side", "left")
policy_definitions: Mapping[str, str] = {
**self.policy_definitions,
**kwargs.get("custom_policies", {}),
}
if (policies := kwargs.get("policies")) is None:
policies = list(policy_definitions.keys())
# TODO(ryanmullins): Support images from PIL or URLs.
messages = []
expanded_images = []
for img in images:
if not isinstance(img, list):
img = [img]
elif len(img) > 1:
raise ValueError(f"SheildGemma can process at most one image per sample, but got {len(img)} images")
for policy in policies:
if img:
messages.append(
[
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": policy_definitions[policy]},
],
}
]
)
else:
messages.append(
[
{
"role": "user",
"content": [
{"type": "text", "text": policy_definitions[policy]},
],
}
]
)
expanded_images.append(img)
text = self.apply_chat_template(messages, tokenize=False)
return super().__call__(images=expanded_images, text=text, **kwargs)
__all__ = ["ShieldGemma2Processor"]
|
ShieldGemma2Processor
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/math_ops/matmul_op_test.py
|
{
"start": 3556,
"end": 5162
}
|
class ____(test_lib.TestCase):
pass # Filled in below
def _GetMatMulTest(a_np_, b_np_, use_static_shape_, **kwargs_):
@test_util.run_without_tensor_float_32("Tests matmul")
def Test(self):
np_val = np.matrix(a_np_) * np.matrix(b_np_)
use_gpu = True
if a_np_.dtype is np.float16 and (
not test_util.GpuSupportsHalfMatMulAndConv()):
use_gpu = False
print("Built without fp16 matmul support for Cuda, running test on CPU.")
# Transpose and possibly conjugate a_np_ and b_np_ according to the
# attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
# results in a valid matrix multiplication and produces the same result as
# np.matrix(a_np_) * np.matrix(b_np_)
effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
with self.cached_session() as sess, test_util.device(use_gpu):
if use_static_shape_:
a = constant_op.constant(effective_a_np)
b = constant_op.constant(effective_b_np)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = self.evaluate(res)
else:
a = array_ops.placeholder(a_np_.dtype)
b = array_ops.placeholder(b_np_.dtype)
res = math_ops.matmul(a, b, **kwargs_)
tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})
self.assertAllCloseAccordingToType(
tf_val,
np_val,
float_rtol=3e-5,
float_atol=3e-5,
half_rtol=0.2,
half_atol=0.2)
return Test
@test_util.with_eager_op_as_function
|
MatMulTest
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/dataset.py
|
{
"start": 14524,
"end": 18827
}
|
class ____(GoogleCloudBaseOperator, DatasetImportDataResultsCheckHelper):
"""
Imports data into a Dataset.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param dataset_id: Required. The ID of the Dataset to delete.
:param import_configs: Required. The desired input locations. The contents of all input locations will be
imported in one batch.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param raise_for_empty_result: Raise an error if no additional data has been populated after the import.
"""
template_fields = ("region", "dataset_id", "project_id", "impersonation_chain")
def __init__(
self,
*,
region: str,
project_id: str,
dataset_id: str,
import_configs: Sequence[ImportDataConfig] | list,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
raise_for_empty_result: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.dataset_id = dataset_id
self.import_configs = import_configs
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.raise_for_empty_result = raise_for_empty_result
def execute(self, context: Context):
hook = DatasetHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
initial_dataset_size = self._get_number_of_ds_items(
dataset=hook.get_dataset(
dataset=self.dataset_id,
project_id=self.project_id,
region=self.region,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
),
total_key_name="data_item_count",
)
self.log.info("Importing data: %s", self.dataset_id)
operation = hook.import_data(
project_id=self.project_id,
region=self.region,
dataset=self.dataset_id,
import_configs=self.import_configs,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
result_dataset_size = self._get_number_of_ds_items(
dataset=hook.get_dataset(
dataset=self.dataset_id,
project_id=self.project_id,
region=self.region,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
),
total_key_name="data_item_count",
)
if self.raise_for_empty_result:
self._raise_for_empty_import_result(self.dataset_id, initial_dataset_size, result_dataset_size)
self.log.info("Import was done successfully")
return {"total_data_items_imported": int(result_dataset_size) - int(initial_dataset_size)}
|
ImportDataOperator
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/dacite/exceptions.py
|
{
"start": 2275,
"end": 2587
}
|
class ____(DaciteError):
def __init__(self, keys: Set[str]) -> None:
super().__init__()
self.keys = keys
def __str__(self) -> str:
formatted_keys = ", ".join(f'"{key}"' for key in self.keys)
return f"can not match {formatted_keys} to any data class field"
|
UnexpectedDataError
|
python
|
facebook__pyre-check
|
client/commands/tests/check_test.py
|
{
"start": 476,
"end": 1857
}
|
class ____(testslide.TestCase):
def test_serialize_arguments(self) -> None:
def assert_serialized(
arguments: check.Arguments, items: Iterable[Tuple[str, object]]
) -> None:
serialized = arguments.serialize()
for key, value in items:
if key not in serialized:
self.fail(f"Cannot find key `{key}` in serialized arguments")
else:
self.assertEqual(value, serialized[key])
assert_serialized(
check.Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path="/log",
global_root="/project",
source_paths=backend_arguments.SimpleSourcePath(
[configuration.search_path.SimpleElement("source")]
),
),
additional_logging_sections=["foo", "bar"],
show_error_traces=True,
strict=True,
),
[
("log_path", "/log"),
("global_root", "/project"),
("source_paths", {"kind": "simple", "paths": ["source"]}),
("additional_logging_sections", ["foo", "bar"]),
("show_error_traces", True),
("strict", True),
],
)
|
ArgumentTest
|
python
|
getsentry__sentry
|
src/sentry/plugins/bases/issue.py
|
{
"start": 731,
"end": 942
}
|
class ____(forms.Form):
title = forms.CharField(max_length=200, widget=forms.TextInput(attrs={"class": "span9"}))
description = forms.CharField(widget=forms.Textarea(attrs={"class": "span9"}))
|
NewIssueForm
|
python
|
django__django
|
tests/check_framework/test_security.py
|
{
"start": 19689,
"end": 20042
}
|
class ____(SimpleTestCase):
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(base.check_allowed_hosts(None), [base.W020])
@override_settings(ALLOWED_HOSTS=[".example.com"])
def test_allowed_hosts_set(self):
self.assertEqual(base.check_allowed_hosts(None), [])
|
CheckAllowedHostsTest
|
python
|
walkccc__LeetCode
|
solutions/62. Unique Paths/62-2.py
|
{
"start": 0,
"end": 185
}
|
class ____:
def uniquePaths(self, m: int, n: int) -> int:
dp = [1] * n
for _ in range(1, m):
for j in range(1, n):
dp[j] += dp[j - 1]
return dp[n - 1]
|
Solution
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/index/package_finder.py
|
{
"start": 12144,
"end": 12365
}
|
class ____:
"""
Encapsulates some of the preferences for filtering and sorting
InstallationCandidate objects.
"""
prefer_binary: bool = False
allow_all_prereleases: bool = False
|
CandidatePreferences
|
python
|
gevent__gevent
|
src/gevent/tests/test__pywsgi.py
|
{
"start": 3026,
"end": 6582
}
|
class ____(object):
def __init__(self, status_line, headers):
self.status_line = status_line
self.headers = headers
self.body = None
self.chunks = False
try:
version, code, self.reason = status_line[:-2].split(' ', 2)
self.code = int(code)
HTTP, self.version = version.split('/')
assert HTTP == 'HTTP', repr(HTTP)
assert self.version in ('1.0', '1.1'), repr(self.version)
except Exception:
print('Error: %r' % status_line)
raise
def __iter__(self):
yield self.status_line
yield self.headers
yield self.body
def __str__(self):
args = (self.__class__.__name__, self.status_line, self.headers, self.body, self.chunks)
return '<%s status_line=%r headers=%r body=%r chunks=%r>' % args
def assertCode(self, code):
if hasattr(code, '__contains__'):
assert self.code in code, 'Unexpected code: %r (expected %r)\n%s' % (self.code, code, self)
else:
assert self.code == code, 'Unexpected code: %r (expected %r)\n%s' % (self.code, code, self)
def assertReason(self, reason):
assert self.reason == reason, 'Unexpected reason: %r (expected %r)\n%s' % (self.reason, reason, self)
def assertVersion(self, version):
assert self.version == version, 'Unexpected version: %r (expected %r)\n%s' % (self.version, version, self)
def assertHeader(self, header, value):
real_value = self.headers.get(header, False)
assert real_value == value, \
'Unexpected header %r: %r (expected %r)\n%s' % (header, real_value, value, self)
def assertBody(self, body):
if isinstance(body, str) and PY3:
body = body.encode("ascii")
assert self.body == body, 'Unexpected body: %r (expected %r)\n%s' % (self.body, body, self)
@classmethod
def read(cls, fd, code=200, reason='default', version='1.1',
body=None, chunks=None, content_length=None):
"""
Read an HTTP response, optionally perform assertions,
and return the Response object.
"""
# pylint:disable=too-many-branches
_status_line, headers = read_headers(fd)
self = cls(_status_line, headers)
if code is not None:
self.assertCode(code)
if reason == 'default':
reason = REASONS.get(code)
if reason is not None:
self.assertReason(reason)
if version is not None:
self.assertVersion(version)
if self.code == 100:
return self
if content_length is not None:
if isinstance(content_length, int):
content_length = str(content_length)
self.assertHeader('Content-Length', content_length)
if 'chunked' in headers.get('Transfer-Encoding', ''):
if CONTENT_LENGTH in headers:
print("WARNING: server used chunked transfer-encoding despite having Content-Length header (libevent 1.x's bug)")
self.chunks = list(iread_chunks(fd))
self.body = b''.join(self.chunks)
elif CONTENT_LENGTH in headers:
num = int(headers[CONTENT_LENGTH])
self.body = fd.read(num)
else:
self.body = fd.read()
if body is not None:
self.assertBody(body)
if chunks is not None:
assert chunks == self.chunks, (chunks, self.chunks)
return self
read_http = Response.read
|
Response
|
python
|
vyperlang__vyper
|
vyper/exceptions.py
|
{
"start": 7712,
"end": 7806
}
|
class ____(VyperException):
"""Invalid variable declaration."""
|
VariableDeclarationException
|
python
|
django__django
|
django/template/library.py
|
{
"start": 340,
"end": 9345
}
|
class ____:
"""
A class for registering template tags and filters. Compiled filter and
template tag functions are stored in the filters and tags attributes.
The filter, simple_tag, and inclusion_tag methods provide a convenient
way to register callables as tags.
"""
def __init__(self):
self.filters = {}
self.tags = {}
def tag(self, name=None, compile_function=None):
if name is None and compile_function is None:
# @register.tag()
return self.tag_function
elif name is not None and compile_function is None:
if callable(name):
# @register.tag
return self.tag_function(name)
else:
# @register.tag('somename') or @register.tag(name='somename')
def dec(func):
return self.tag(name, func)
return dec
elif name is not None and compile_function is not None:
# register.tag('somename', somefunc)
self.tags[name] = compile_function
return compile_function
else:
raise ValueError(
"Unsupported arguments to Library.tag: (%r, %r)"
% (name, compile_function),
)
def tag_function(self, func):
self.tags[func.__name__] = func
return func
def filter(self, name=None, filter_func=None, **flags):
"""
Register a callable as a template filter. Example:
@register.filter
def lower(value):
return value.lower()
"""
if name is None and filter_func is None:
# @register.filter()
def dec(func):
return self.filter_function(func, **flags)
return dec
elif name is not None and filter_func is None:
if callable(name):
# @register.filter
return self.filter_function(name, **flags)
else:
# @register.filter('somename') or
# @register.filter(name='somename')
def dec(func):
return self.filter(name, func, **flags)
return dec
elif name is not None and filter_func is not None:
# register.filter('somename', somefunc)
self.filters[name] = filter_func
for attr in ("expects_localtime", "is_safe", "needs_autoescape"):
if attr in flags:
value = flags[attr]
# set the flag on the filter for FilterExpression.resolve
setattr(filter_func, attr, value)
# set the flag on the innermost decorated function
# for decorators that need it, e.g. stringfilter
setattr(unwrap(filter_func), attr, value)
filter_func._filter_name = name
return filter_func
else:
raise ValueError(
"Unsupported arguments to Library.filter: (%r, %r)"
% (name, filter_func),
)
def filter_function(self, func, **flags):
return self.filter(func.__name__, func, **flags)
def simple_tag(self, func=None, takes_context=None, name=None):
"""
Register a callable as a compiled template tag. Example:
@register.simple_tag
def hello(*args, **kwargs):
return 'world'
"""
def dec(func):
(
params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
_,
) = getfullargspec(unwrap(func))
function_name = name or func.__name__
@wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == "as":
target_var = bits[-1]
bits = bits[:-2]
args, kwargs = parse_bits(
parser,
bits,
params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
takes_context,
function_name,
)
return SimpleNode(func, takes_context, args, kwargs, target_var)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_tag(...)
return dec
elif callable(func):
# @register.simple_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_tag")
def simple_block_tag(self, func=None, takes_context=None, name=None, end_name=None):
"""
Register a callable as a compiled block template tag. Example:
@register.simple_block_tag
def hello(content):
return 'world'
"""
def dec(func):
nonlocal end_name
(
params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
_,
) = getfullargspec(unwrap(func))
function_name = name or func.__name__
if end_name is None:
end_name = f"end{function_name}"
@wraps(func)
def compile_func(parser, token):
tag_params = params.copy()
if takes_context:
if len(tag_params) >= 2 and tag_params[1] == "content":
del tag_params[1]
else:
raise TemplateSyntaxError(
f"{function_name!r} is decorated with takes_context=True so"
" it must have a first argument of 'context' and a second "
"argument of 'content'"
)
elif tag_params and tag_params[0] == "content":
del tag_params[0]
else:
raise TemplateSyntaxError(
f"'{function_name}' must have a first argument of 'content'"
)
bits = token.split_contents()[1:]
target_var = None
if len(bits) >= 2 and bits[-2] == "as":
target_var = bits[-1]
bits = bits[:-2]
nodelist = parser.parse((end_name,))
parser.delete_first_token()
args, kwargs = parse_bits(
parser,
bits,
tag_params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
takes_context,
function_name,
)
return SimpleBlockNode(
nodelist, func, takes_context, args, kwargs, target_var
)
self.tag(function_name, compile_func)
return func
if func is None:
# @register.simple_block_tag(...)
return dec
elif callable(func):
# @register.simple_block_tag
return dec(func)
else:
raise ValueError("Invalid arguments provided to simple_block_tag")
def inclusion_tag(self, filename, func=None, takes_context=None, name=None):
"""
Register a callable as an inclusion tag:
@register.inclusion_tag('results.html')
def show_results(poll):
choices = poll.choice_set.all()
return {'choices': choices}
"""
def dec(func):
(
params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
_,
) = getfullargspec(unwrap(func))
function_name = name or func.__name__
@wraps(func)
def compile_func(parser, token):
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser,
bits,
params,
varargs,
varkw,
defaults,
kwonly,
kwonly_defaults,
takes_context,
function_name,
)
return InclusionNode(
func,
takes_context,
args,
kwargs,
filename,
)
self.tag(function_name, compile_func)
return func
return dec
|
Library
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/jvm.py
|
{
"start": 38639,
"end": 40417
}
|
class ____(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
.. versionadded:: 1.5
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][\w\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][\w\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\'(\\\\|\\\'|[^\'])*\'', String),
(r'(\.)([a-zA-Z_]\w*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_]\w*:', Name.Label),
(r'[a-zA-Z_\$]\w*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'template': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'import': [
(r'[\w.]+\*?', Name.Namespace, '#pop')
],
}
|
TeaLangLexer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 339134,
"end": 339497
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("EnterpriseServerUserAccountsUpload", graphql_name="node")
|
EnterpriseServerUserAccountsUploadEdge
|
python
|
getsentry__sentry
|
src/sentry/users/api/serializers/userrole.py
|
{
"start": 352,
"end": 538
}
|
class ____(TypedDict):
id: str
name: str
permissions: list[str]
dateCreated: datetime | None
dateUpdated: datetime | None
@register(UserRole)
|
UserRoleSerializerResponse
|
python
|
django__django
|
tests/ordering/models.py
|
{
"start": 1733,
"end": 1888
}
|
class ____(models.Model):
name = models.CharField(max_length=30)
class Meta:
ordering = [models.functions.Lower("name")]
|
OrderedByExpression
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/session_create_params.py
|
{
"start": 6612,
"end": 6918
}
|
class ____(TypedDict, total=False):
type: Literal["near_field", "far_field"]
"""Type of noise reduction.
`near_field` is for close-talking microphones such as headphones, `far_field` is
for far-field microphones such as laptop or conference room microphones.
"""
|
InputAudioNoiseReduction
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/javascript.py
|
{
"start": 32547,
"end": 41335
}
|
class ____(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
.. versionadded:: 1.3
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[+-]' + _ws + r')([(a-zA-Z_].*?[^(])(' + _ws + r'\{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
include('statements'),
('[{()}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop'),
],
'badregex': [
(r'\n', Text, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'([$a-zA-Z_]\w*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_]\w*', Name),
],
'classname': [
# interface definition that inherits
(r'([a-zA-Z_]\w*)(' + _ws + r':' + _ws +
r')([a-zA-Z_]\w*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_]\w*)(' + _ws + r'\()([a-zA-Z_]\w*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_]\w*)', Name.Class, '#pop'),
],
'forward_classname': [
(r'([a-zA-Z_]\w*)(\s*,\s*)',
bygroups(Name.Class, Text), '#push'),
(r'([a-zA-Z_]\w*)(\s*;?)',
bygroups(Name.Class, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_]\w+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
bygroups(Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_]\w+)', # function name
bygroups(Name.Function), "#pop"),
default('#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_]\w+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_]\w+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_]\w+)', Text),
],
'expression': [
(r'([$a-zA-Z_]\w*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
|
ObjectiveJLexer
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_cython_lapack.py
|
{
"start": 132,
"end": 796
}
|
class ____:
def test_slamch(self):
for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
assert_allclose(cython_lapack._test_slamch(c),
lapack.slamch(c))
def test_dlamch(self):
for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
assert_allclose(cython_lapack._test_dlamch(c),
lapack.dlamch(c))
def test_complex_ladiv(self):
cx = .5 + 1.j
cy = .875 + 2.j
assert_allclose(cython_lapack._test_zladiv(cy, cx), 1.95+0.1j)
assert_allclose(cython_lapack._test_cladiv(cy, cx), 1.95+0.1j)
|
TestLamch
|
python
|
getsentry__sentry-python
|
tests/integrations/litellm/test_litellm.py
|
{
"start": 2649,
"end": 2827
}
|
class ____:
def __init__(self, embedding=None):
self.embedding = embedding or [0.1, 0.2, 0.3]
self.index = 0
self.object = "embedding"
|
MockEmbeddingData
|
python
|
ray-project__ray
|
rllib/core/rl_module/torch/tests/test_torch_rl_module.py
|
{
"start": 453,
"end": 3732
}
|
class ____(unittest.TestCase):
def test_compilation(self):
env = gym.make("CartPole-v1")
module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
self.assertIsInstance(module, TorchRLModule)
def test_forward_train(self):
bsize = 1024
env = gym.make("CartPole-v1")
module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
obs_shape = env.observation_space.shape
obs = torch.randn((bsize,) + obs_shape)
actions = torch.stack(
[torch.tensor(env.action_space.sample()) for _ in range(bsize)]
)
output = module.forward_train({"obs": obs})
self.assertIsInstance(output, dict)
self.assertIn(Columns.ACTION_DIST_INPUTS, output)
action_dist_inputs = output[Columns.ACTION_DIST_INPUTS]
action_dist_class = module.get_train_action_dist_cls()
action_dist = action_dist_class.from_logits(action_dist_inputs)
loss = -action_dist.logp(actions.view(-1)).mean()
loss.backward()
# check that all neural net parameters have gradients
for param in module.parameters():
self.assertIsNotNone(param.grad)
def test_forward(self):
"""Test forward inference and exploration of"""
env = gym.make("CartPole-v1")
module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
obs_shape = env.observation_space.shape
obs = torch.randn((1,) + obs_shape)
# just test if the forward pass runs fine
module.forward_inference({"obs": obs})
module.forward_exploration({"obs": obs})
def test_get_set_state(self):
env = gym.make("CartPole-v1")
module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
state = module.get_state()
self.assertIsInstance(state, dict)
module2 = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
state2 = module2.get_state()
check(state, state2, false=True)
module2.set_state(state)
state2_after = module2.get_state()
check(state, state2_after)
def test_checkpointing(self):
env = gym.make("CartPole-v1")
module = VPGTorchRLModule(
observation_space=env.observation_space,
action_space=env.action_space,
model_config={"hidden_dim": 32},
)
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = "/tmp/rl_module_test"
module.save_to_path(tmpdir)
new_module = VPGTorchRLModule.from_checkpoint(tmpdir)
check(module.get_state(), new_module.get_state())
self.assertNotEqual(id(module), id(new_module))
|
TestRLModule
|
python
|
vyperlang__vyper
|
vyper/ast/nodes.py
|
{
"start": 23840,
"end": 24624
}
|
class ____(VyperNode):
__slots__ = ("_expr_info",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._expr_info = None
def to_dict(self):
ret = super().to_dict()
if self.has_folded_value and self.get_folded_value() != self:
ret["folded_value"] = self.get_folded_value().to_dict()
if self._expr_info is None:
return ret
reads = [s.to_dict() for s in self._expr_info._reads]
reads = [s for s in reads if s]
if reads:
ret["variable_reads"] = reads
writes = [s.to_dict() for s in self._expr_info._writes]
writes = [s for s in writes if s]
if writes:
ret["variable_writes"] = writes
return ret
|
ExprNode
|
python
|
fastai__fastai
|
fastai/tabular/core.py
|
{
"start": 15915,
"end": 16587
}
|
class ____(TfmdDL):
"A transformed `DataLoader` for Tabular data"
def __init__(self, dataset, bs=16, shuffle=False, after_batch=None, num_workers=0, **kwargs):
if after_batch is None: after_batch = L(TransformBlock().batch_tfms)+ReadTabBatch(dataset)
super().__init__(dataset, bs=bs, shuffle=shuffle, after_batch=after_batch, num_workers=num_workers, **kwargs)
def create_item(self, s): return self.dataset.iloc[s or 0]
def create_batch(self, b): return self.dataset.iloc[b]
def do_item(self, s): return 0 if s is None else s
TabularPandas._dl_type = TabDataLoader
# %% ../../nbs/40_tabular.core.ipynb 95
@delegates()
|
TabDataLoader
|
python
|
python-visualization__folium
|
folium/map.py
|
{
"start": 562,
"end": 768
}
|
class ____:
def __init__(self, f):
self.f = f
def __get__(self, obj, owner):
return self.f(owner)
if TYPE_CHECKING:
from folium.features import CustomIcon, DivIcon
|
classproperty
|
python
|
keras-team__keras
|
keras/src/saving/saving_lib_test.py
|
{
"start": 3569,
"end": 3776
}
|
class ____(keras.Sequential):
def compile(self, *args, **kwargs):
super().compile(*args, **kwargs)
@keras.saving.register_keras_serializable(package="my_custom_package")
|
CompileOverridingSequential
|
python
|
PrefectHQ__prefect
|
src/prefect/client/orchestration/_logs/client.py
|
{
"start": 1654,
"end": 2996
}
|
class ____(BaseAsyncClient):
async def create_logs(
self, logs: Iterable[Union["LogCreate", dict[str, Any]]]
) -> None:
"""
Create logs for a flow or task run
Args:
logs: An iterable of `LogCreate` objects or already json-compatible dicts
"""
from prefect.client.schemas.actions import LogCreate
serialized_logs = [
log.model_dump(mode="json") if isinstance(log, LogCreate) else log
for log in logs
]
await self.request("POST", "/logs/", json=serialized_logs)
async def read_logs(
self,
log_filter: "LogFilter | None" = None,
limit: int | None = None,
offset: int | None = None,
sort: "LogSort | None" = None,
) -> list[Log]:
"""
Read flow and task run logs.
"""
from prefect.client.schemas.sorting import LogSort
body: dict[str, Any] = {
"logs": log_filter.model_dump(mode="json") if log_filter else None,
"limit": limit,
"offset": offset,
"sort": sort or LogSort.TIMESTAMP_ASC,
}
response = await self.request("POST", "/logs/filter", json=body)
from prefect.client.schemas.objects import Log
return Log.model_validate_list(response.json())
|
LogAsyncClient
|
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_partial.py
|
{
"start": 24131,
"end": 25040
}
|
class ____:
def test_slice_irregular_datetime_index_with_nan(self):
# GH36953
index = pd.to_datetime(["2012-01-01", "2012-01-02", "2012-01-03", None])
df = DataFrame(range(len(index)), index=index)
expected = DataFrame(range(len(index[:3])), index=index[:3])
with pytest.raises(KeyError, match="non-existing keys is not allowed"):
# Upper bound is not in index (which is unordered)
# GH53983
# GH37819
df["2012-01-01":"2012-01-04"]
# Need this precision for right bound since the right slice
# bound is "rounded" up to the largest timepoint smaller than
# the next "resolution"-step of the provided point.
# e.g. 2012-01-03 is rounded up to 2012-01-04 - 1ns
result = df["2012-01-01":"2012-01-03 00:00:00.000000000"]
tm.assert_frame_equal(result, expected)
|
TestStringSlicing
|
python
|
plotly__plotly.py
|
plotly/graph_objs/bar/_marker.py
|
{
"start": 233,
"end": 25123
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar"
_path_str = "bar.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"cornerradius",
"line",
"opacity",
"opacitysrc",
"pattern",
"reversescale",
"showscale",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to bar.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.bar.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Blackbody,
Bluered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,
Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def cornerradius(self):
"""
Sets the rounding of corners. May be an integer number of
pixels, or a percentage of bar width (as a string ending in %).
Defaults to `layout.barcornerradius`. In stack or relative
barmode, the first trace to set cornerradius is used for the
whole stack.
The 'cornerradius' property accepts values of any type
Returns
-------
Any
"""
return self["cornerradius"]
@cornerradius.setter
def cornerradius(self, val):
self["cornerradius"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.bar.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def opacity(self):
"""
Sets the opacity of the bars.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def pattern(self):
"""
Sets the pattern within the marker.
The 'pattern' property is an instance of Pattern
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.marker.Pattern`
- A dict of string/value properties that will be passed
to the Pattern constructor
Returns
-------
plotly.graph_objs.bar.marker.Pattern
"""
return self["pattern"]
@pattern.setter
def pattern(self, val):
self["pattern"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.bar.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,
Electric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,
RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
cornerradius
Sets the rounding of corners. May be an integer number
of pixels, or a percentage of bar width (as a string
ending in %). Defaults to `layout.barcornerradius`. In
stack or relative barmode, the first trace to set
cornerradius is used for the whole stack.
line
:class:`plotly.graph_objects.bar.marker.Line` instance
or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
pattern
Sets the pattern within the marker.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
cornerradius=None,
line=None,
opacity=None,
opacitysrc=None,
pattern=None,
reversescale=None,
showscale=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.bar.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,
Electric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,
RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
cornerradius
Sets the rounding of corners. May be an integer number
of pixels, or a percentage of bar width (as a string
ending in %). Defaults to `layout.barcornerradius`. In
stack or relative barmode, the first trace to set
cornerradius is used for the whole stack.
line
:class:`plotly.graph_objects.bar.marker.Line` instance
or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
pattern
Sets the pattern within the marker.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("cornerradius", arg, cornerradius)
self._set_property("line", arg, line)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("pattern", arg, pattern)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Marker
|
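A minimal usage sketch for the Marker class above, with made-up data: a numerical `color` array is mapped through a named colorscale, and `cmin`/`cmax` pin the color domain instead of the data's own min/max.

import plotly.graph_objects as go

values = [3, 7, 1, 9]  # illustrative data
fig = go.Figure(
    go.Bar(
        x=["a", "b", "c", "d"],
        y=values,
        marker=dict(
            color=values,          # numerical array -> colorscale is applied
            colorscale="Viridis",  # one of the named colorscales listed above
            cmin=0,                # lower bound of the color domain
            cmax=10,               # upper bound of the color domain
            showscale=True,        # display the colorbar for this trace
        ),
    )
)
fig.show()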
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 135656,
"end": 135848
}
|
class ____(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet, **self.options)
|
WaitressServer
|
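A brief usage sketch for the adapter above: Bottle resolves the string "waitress" to this server class, so an application only needs to name it (host and port here are illustrative).

import bottle

@bottle.route("/")
def index():
    return "hello"

# "waitress" selects the WaitressServer adapter shown above.
bottle.run(host="localhost", port=8080, server="waitress")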
python
|
PyCQA__pylint
|
doc/data/messages/i/invalid-format-returned/good.py
|
{
"start": 0,
"end": 126
}
|
class ____:
"""__format__ returns <type 'str'>"""
def __format__(self, format_spec):
return "hello!"
|
CustomFormat
|
python
|
pypa__pip
|
src/pip/_internal/models/pylock.py
|
{
"start": 893,
"end": 1083
}
|
class ____:
type: str
url: str | None
# (not supported) path: Optional[str]
requested_revision: str | None
commit_id: str
subdirectory: str | None
@dataclass
|
PackageVcs
|
python
|
zostera__django-bootstrap4
|
example/app/views.py
|
{
"start": 476,
"end": 579
}
|
class ____:
storage = default_storage
fieldfile = FieldFile(None, FakeField, "dummy.txt")
|
FakeField
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/test/models.py
|
{
"start": 155,
"end": 1835
}
|
class ____(BaseModel):
poe_tasks: Set[str] = Field(..., description="List of unique poe tasks to run")
required_environment_variables: Set[str] = Field(
set(), description="List of unique required environment variables to pass to the container running the poe task"
)
poetry_extras: Set[str] = Field(set(), description="List of unique poetry extras to install")
optional_poetry_groups: Set[str] = Field(set(), description="List of unique poetry groups to install")
side_car_docker_engine: bool = Field(
False, description="Flag indicating the use of a sidecar Docker engine during the poe task executions"
)
mount_docker_socket: bool = Field(
False,
description="Flag indicating the mount of the host docker socket to the container running the poe task, useful when the package under test is using dagger",
)
python_versions: List[str] = Field(description="List of unique python versions to run the poe tasks on")
@validator("required_environment_variables")
def check_required_environment_variables_are_set(cls, value: Set) -> Set:
for required_env_var in value:
if required_env_var not in os.environ:
raise ValueError(f"Environment variable {required_env_var} is not set.")
return value
def deserialize_airbyte_ci_config(pyproject_toml: Dict) -> AirbyteCiPackageConfiguration:
try:
airbyte_ci_config = pyproject_toml["tool"]["airbyte_ci"]
except KeyError:
raise ValueError("Missing tool.airbyte_ci configuration in pyproject.toml")
return AirbyteCiPackageConfiguration.parse_obj(airbyte_ci_config)
|
AirbyteCiPackageConfiguration
|
python
|
Netflix__metaflow
|
metaflow/events.py
|
{
"start": 673,
"end": 5300
}
|
class ____(object):
"""
Defines a container of event triggers' metadata.
"""
def __init__(self, _meta=None):
if _meta is None:
_meta = []
_meta.sort(key=lambda x: x.get("timestamp") or float("-inf"), reverse=True)
self._runs = None
self._events = [
MetaflowEvent(
**{
**obj,
# Add timestamp as datetime. Guaranteed to exist for Metaflow
# events - best effort for everything else.
**(
{"timestamp": datetime.fromtimestamp(obj["timestamp"])}
if obj.get("timestamp")
and isinstance(obj.get("timestamp"), int)
else {}
),
}
)
for obj in _meta
]
@classmethod
def from_runs(cls, run_objs: List["metaflow.Run"]):
run_objs.sort(key=lambda x: x.finished_at, reverse=True)
trigger = Trigger(
[
{
"type": "run",
"timestamp": run_obj.finished_at,
"name": "metaflow.%s.%s" % (run_obj.parent.id, run_obj["end"].id),
"id": run_obj.end_task.pathspec,
}
for run_obj in run_objs
]
)
trigger._runs = run_objs
return trigger
@property
def event(self) -> Optional[MetaflowEvent]:
"""
The `MetaflowEvent` object corresponding to the triggering event.
If multiple events triggered the run, this property is the latest event.
Returns
-------
MetaflowEvent, optional
The latest event that triggered the run, if applicable.
"""
return next(iter(self._events), None)
@property
def events(self) -> Optional[List[MetaflowEvent]]:
"""
The list of `MetaflowEvent` objects corresponding to all the triggering events.
Returns
-------
List[MetaflowEvent], optional
List of all events that triggered the run
"""
return list(self._events) or None
@property
def run(self) -> Optional["metaflow.Run"]:
"""
The corresponding `Run` object if the triggering event is a Metaflow run.
In case multiple runs triggered the run, this property is the latest run.
Returns `None` if none of the triggering events are a `Run`.
Returns
-------
Run, optional
Latest Run that triggered this run, if applicable.
"""
if self._runs is None:
self.runs
return next(iter(self._runs), None)
@property
def runs(self) -> Optional[List["metaflow.Run"]]:
"""
The list of `Run` objects in the triggering events.
Returns `None` if none of the triggering events are `Run` objects.
Returns
-------
List[Run], optional
List of runs that triggered this run, if applicable.
"""
if self._runs is None:
# to avoid circular import
from metaflow import Run
self._runs = [
Run(
# object id is the task pathspec for events that map to run
obj.id[: obj.id.index("/", obj.id.index("/") + 1)],
_namespace_check=False,
)
for obj in self._events
if obj.type == "run"
]
return list(self._runs) or None
def __getitem__(self, key: str) -> Union["metaflow.Run", MetaflowEvent]:
"""
If triggering events are runs, `key` corresponds to the flow name of the triggering run.
Otherwise, `key` corresponds to the event name and a `MetaflowEvent` object is returned.
Returns
-------
Union[Run, MetaflowEvent]
`Run` object if triggered by a run. Otherwise returns a `MetaflowEvent`.
"""
if self.runs:
for run in self.runs:
if run.path_components[0] == key:
return run
elif self.events:
for event in self.events:
if event.name == key:
return event
raise KeyError(key)
def __iter__(self):
if self.events:
return iter(self.events)
return iter([])
def __contains__(self, ident: str) -> bool:
try:
return bool(self.__getitem__(ident))
except KeyError:
return False
|
Trigger
|
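A small sketch of how the Trigger constructor above consumes its `_meta` list: each dict becomes a MetaflowEvent, integer timestamps are converted to datetimes, and entries are sorted newest-first. The event payloads below are invented; only the keys the code shown forwards (type, name, timestamp, id) are used.

# hypothetical event metadata
meta = [
    {"type": "event", "name": "data_updated", "timestamp": 1700000000, "id": "evt-1"},
    {"type": "event", "name": "model_ready", "timestamp": 1700000500, "id": "evt-2"},
]
trigger = Trigger(_meta=meta)

print(trigger.event.name)         # "model_ready" -- the latest event
print([e.name for e in trigger])  # iterates over all MetaflowEvent objects
print("data_updated" in trigger)  # __contains__ looks events up by name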
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_project.py
|
{
"start": 1381,
"end": 9605
}
|
class ____(ProjectMixin, TestCase):
def test_subprojects(self):
r = self.client.get("/api/v2/project/6/subprojects/", {})
resp = json.loads(r.content)
self.assertEqual(r.status_code, 200)
self.assertEqual(resp["subprojects"][0]["id"], 23)
@patch("readthedocs.projects.models.Project.find")
def test_conf_file_found(self, find_method):
find_method.return_value = [
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/latest/src/conf.py",
]
self.assertEqual(
self.pip.conf_file(),
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/latest/src/conf.py",
)
@patch("readthedocs.projects.models.Project.find")
def test_multiple_conf_file_one_doc_in_path(self, find_method):
find_method.return_value = [
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/latest/src/conf.py",
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/latest/docs/conf.py",
]
self.assertEqual(
self.pip.conf_file(),
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/latest/docs/conf.py",
)
@patch("readthedocs.projects.models.Project.find")
@patch("readthedocs.projects.models.Project.full_find")
def test_conf_file_not_found(self, find_method, full_find_method):
find_method.return_value = []
full_find_method.return_value = []
with self.assertRaises(ProjectConfigurationError) as e:
self.pip.conf_file()
self.assertEqual(
e.exception.message_id,
ProjectConfigurationError.NOT_FOUND,
)
@patch("readthedocs.projects.models.Project.find")
def test_multiple_conf_files(self, find_method):
find_method.return_value = [
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/multi-conf.py/src/conf.py",
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/multi-conf.py/src/sub/conf.py",
"/home/docs/rtfd/code/readthedocs.org/user_builds/pip/checkouts/multi-conf.py/src/sub/src/conf.py",
]
with self.assertRaises(ProjectConfigurationError) as e:
self.pip.conf_file()
self.assertEqual(
e.exception.message_id,
ProjectConfigurationError.MULTIPLE_CONF_FILES,
)
def test_get_storage_path(self):
for type_ in MEDIA_TYPES:
self.assertEqual(
self.pip.get_storage_path(type_, LATEST, include_file=False),
f"{type_}/pip/latest",
)
self.assertEqual(
self.pip.get_storage_path(MEDIA_TYPE_PDF, LATEST),
"pdf/pip/latest/pip.pdf",
)
self.assertEqual(
self.pip.get_storage_path(MEDIA_TYPE_EPUB, LATEST),
"epub/pip/latest/pip.epub",
)
self.assertEqual(
self.pip.get_storage_path(MEDIA_TYPE_HTMLZIP, LATEST),
"htmlzip/pip/latest/pip.zip",
)
def test_get_storage_path_invalid_inputs(self):
# Invalid type.
with pytest.raises(ValueError):
self.pip.get_storage_path("foo")
# Trying to get a file from a non-downloadable type.
with pytest.raises(ValueError):
self.pip.get_storage_path(MEDIA_TYPE_HTML, include_file=True)
# Trying path traversal.
with pytest.raises(ValueError):
self.pip.get_storage_path(
MEDIA_TYPE_HTML, version_slug="../sneaky/index.html", include_file=False
)
def test_get_storage_path_for_external_versions(self):
self.assertEqual(
self.pip.get_storage_path(
"pdf",
self.external_version.slug,
version_type=self.external_version.type,
),
"external/pdf/pip/99/pip.pdf",
)
self.assertEqual(
self.pip.get_storage_path(
"epub",
self.external_version.slug,
version_type=self.external_version.type,
),
"external/epub/pip/99/pip.epub",
)
self.assertEqual(
self.pip.get_storage_path(
"htmlzip",
self.external_version.slug,
version_type=self.external_version.type,
),
"external/htmlzip/pip/99/pip.zip",
)
def test_active_versions_excludes_external_versions(self):
self.assertNotIn(self.external_version, self.pip.active_versions())
def test_all_active_versions_excludes_external_versions(self):
self.assertNotIn(self.external_version, self.pip.all_active_versions())
def test_update_stable_version_excludes_external_versions(self):
# Delete all versions excluding External Versions.
self.pip.versions.exclude(type=EXTERNAL).delete()
# Test that External Version is not considered for stable.
self.assertEqual(self.pip.update_stable_version(), None)
def test_update_stable_version_machine_false(self):
# Initial stable version from fixture
self.assertEqual(self.pip.update_stable_version().slug, "0.8.1")
# None, when there is no stable to promote
self.assertEqual(self.pip.update_stable_version(), None)
get(
Version,
identifier="9.0",
verbose_name="9.0",
slug="9.0",
type=TAG,
project=self.pip,
active=True,
)
# New stable now is the newly created version
self.assertEqual(self.pip.update_stable_version().slug, "9.0")
# Make stable version machine=False
stable = self.pip.get_stable_version()
stable.machine = False
stable.save()
get(
Version,
identifier="10.0",
verbose_name="10.0",
slug="10.0",
type=TAG,
project=self.pip,
active=True,
)
# None, since the stable version is marked as machine=False and Read
# the Docs does not have control over it
with patch("readthedocs.projects.models.determine_stable_version") as m:
self.assertEqual(self.pip.update_stable_version(), None)
m.assert_not_called()
def test_has_good_build_excludes_external_versions(self):
# Delete all versions excluding External Versions.
self.pip.versions.exclude(type=EXTERNAL).delete()
# Test that External Version is not considered for has_good_build.
self.assertFalse(self.pip.has_good_build)
def test_latest_internal_build_excludes_external_versions(self):
# Delete all versions excluding External Versions.
self.pip.versions.exclude(type=EXTERNAL).delete()
# Test that External Version is not considered for latest_internal_build.
self.assertEqual(self.pip.latest_internal_build, None)
def test_git_provider_github(self):
self.pip.repo = "https://github.com/pypa/pip"
self.pip.save()
assert self.pip.is_github_project
assert not self.pip.is_gitlab_project
assert not self.pip.is_bitbucket_project
def test_git_service_class_github(self):
self.pip.repo = "https://github.com/pypa/pip"
self.pip.save()
self.assertEqual(self.pip.get_git_service_class(), None)
self.assertEqual(
self.pip.get_git_service_class(fallback_to_clone_url=True), GitHubService
)
def test_git_provider_gitlab(self):
self.pip.repo = "https://gitlab.com/pypa/pip"
self.pip.save()
assert self.pip.is_gitlab_project
assert not self.pip.is_github_project
assert not self.pip.is_bitbucket_project
def test_git_service_class_gitlab(self):
self.pip.repo = "https://gitlab.com/pypa/pip"
self.pip.save()
self.assertEqual(self.pip.get_git_service_class(), None)
self.assertEqual(
self.pip.get_git_service_class(fallback_to_clone_url=True), GitLabService
)
@mock.patch("readthedocs.projects.forms.trigger_build", mock.MagicMock())
|
TestProject
|
python
|
pikepdf__pikepdf
|
src/pikepdf/canvas.py
|
{
"start": 4270,
"end": 15411
}
|
class ____(DimensionedFont):
"""Font implementation designed to work with Type 1 Fonts and TrueType fonts.
As described in section 9.6 of the PDF spec.
See also section 9.8: Font Descriptors.
The PDF spec also considers Type3 fonts to be "Simple Fonts", but Type3 fonts are
not implemented here.
"""
data: Dictionary
_diffmap_cache = None
def __init__(self, data: Dictionary):
"""Create a SimpleFont instance from a font resource dictionary."""
if Name.Subtype not in data or data.Subtype not in (
Name.Type1,
Name.MMType1,
Name.TrueType,
):
raise ValueError(
'Font resource dictionary does not describe a Type1 or TrueType font:',
data,
)
self.data = data
@classmethod
def load(cls, name: Name, resource_dict: Dictionary) -> SimpleFont:
"""Load a font from the specified resource dictionary."""
if name not in resource_dict.Font:
raise LookupError(
f'Cannot find font information for {name} '
f'(Available fonts: {", ".join(resource_dict.Font.keys())})'
)
font_data = resource_dict.Font[name]
if not isinstance(font_data, Dictionary):
raise TypeError(
f'Font data for {name} is not a dictionary, but a {type(font_data)}'
)
return cls(font_data)
def register(self, pdf: Pdf) -> Dictionary:
"""Register the font."""
return pdf.make_indirect(self.data)
@property
def leading(self) -> int | Decimal:
"""Returns leading for a SimpleFont."""
if Name.Leading in self.data.FontDescriptor:
return self.data.FontDescriptor.Leading
else:
return 0
@property
def ascent(self) -> Decimal:
"""Returns ascent for a SimpleFont."""
# Required for all but Type 3 fonts, so should be present
return self.data.FontDescriptor.Ascent
@property
def descent(self) -> Decimal:
"""Returns descent for a SimpleFont."""
# Required for all but Type 3 fonts, so should be present
return self.data.FontDescriptor.Descent
def unscaled_char_width(self, char: int | bytes | str) -> Decimal:
"""Get the (unscaled) width of the character, in glyph-space units.
Args:
char: The character to check. May be a char code, or a string containing a
single character.
"""
if isinstance(char, str):
char = self.encode(char)
if isinstance(char, bytes):
# Simple fonts always use single-byte encodings, so this is safe
char = char[0]
char_code = char - int(self.data.get(Name.FirstChar, 0))
if Name.Widths in self.data and len(self.data.Widths) > char_code:
width = self.data.Widths[char_code]
elif Name.MissingWidth in self.data.FontDescriptor:
width = self.data.FontDescriptor.MissingWidth
else:
width = Decimal(0)
return width
def convert_width(
self, width: int | Decimal, fontsize: int | Decimal = 1
) -> int | Decimal:
"""Convert width from glyph space to text space, scaling by font size.
Scaling based on the nominal height (see 9.2.2):
"This standard is arranged so that the nominal height of tightly spaced lines of
text is 1 unit. ... The standard-size font shall then be scaled to be usable."
This means, essentially, that a font size of 1 means a character is 1 text-space
unit high, and a font size of 12 is 12 text-space units high. Assuming no text
scaling is in place (such as via the text matrix), and the PDF has not set a
user-defined unit in the page dictionary, then text space units will be points
(defined as 1/72 of an inch).
"""
# For all but Type3 fonts, the ratio of text-space units to glyph-space units is
# a fixed ratio of 1 to 1000 (See 9.2.4: Glyph Positioning and Metrics)
glyph_space_ratio = Decimal(1000)
return (width / glyph_space_ratio) * fontsize
def convert_width_reverse(
self, width: int | Decimal, fontsize: int | Decimal = 1
) -> int | Decimal:
"""Convert width from text space back to glyph space, scaling by font size."""
# For all but Type3 fonts, the ratio of text-space units to glyph-space units is
# a fixed ratio of 1 to 1000 (See 9.2.4: Glyph Positioning and Metrics)
glyph_space_ratio = Decimal(1000)
return (width * glyph_space_ratio) / fontsize
def encode(self, text: str) -> bytes:
"""Encode a string in the encoding used by this font.
This currently only works with fonts that use the WinAnsiEncoding or the
MacRomanEncoding. Differences maps are supported, though with a limited
set of recognized character names.
"""
if Name.Encoding not in self.data:
# This is allowed by the spec, and if I understand correctly has the same
# meaning as StandardEncoding.
raise NotImplementedError(
'Cannot encode without explicitly defined encoding'
)
if isinstance(self.data.Encoding, Name):
return self._encode_named(text, self.data.Encoding)
if isinstance(self.data.Encoding, Dictionary):
if Name.Differences in self.data.Encoding:
return self._encode_diffmap(
text,
self.data.Encoding.Differences,
self.data.Encoding.get(Name.BaseEncoding),
)
if Name.BaseEncoding not in self.data.Encoding:
raise NotImplementedError(
'Cannot encode without explicitly defined encoding'
)
return self._encode_named(text, self.data.Encoding.BaseEncoding)
raise TypeError(f'Unsupported encoding type: {type(self.data.Encoding)}')
def _encode_named(self, text: str, encoding: Name):
if encoding == Name.StandardEncoding:
# Standard encoding is defined as "whatever the underlying font uses by
# default", but we have no good way to detect that.
raise NotImplementedError('Cannot encode to StandardEncoding')
if encoding == Name.WinAnsiEncoding:
return text.encode('cp1252')
if encoding == Name.MacRomanEncoding:
return text.encode('mac_roman')
if encoding == Name.MacExpertEncoding:
# D.4 describes this character set if we want to implement a codec. However,
# it doesn't seem actually useful to me.
raise NotImplementedError('Cannot encode to MacExpertEncoding')
if encoding == Name.PDFDocEncoding:
# The spec says this is generally not used to show text, but includes it as
# an option anyway, so we'll do the same.
return text.encode('pdfdoc_pikepdf')
raise ValueError('Unknown encoding:', encoding)
def _encode_diffmap(
self, text: str, diffmap: Array, base_encoding: Name | None = None
):
if self._diffmap_cache is None:
self._diffmap_cache = _differences_map_lookup(diffmap)
result = bytearray()
for char in text:
if char in self._diffmap_cache:
result.append(self._diffmap_cache[char])
elif base_encoding is not None:
result.extend(self._encode_named(char, base_encoding))
elif char.isascii():
result.append(ord(char))
else:
# Can't map character
log.warning(f"No mapping for {repr(char)} in current encoding; skipped")
def text_width(
self,
text: str | bytes,
fontsize: int | Decimal = 1,
*,
char_spacing: int | Decimal = 0,
word_spacing: int | Decimal = 0,
) -> int | Decimal:
"""Get the width of the string.
This is the width of the string when rendered with the current font, scaled by
the given font size.
Args:
text: The string to check
fontsize: The target font size in text-space units. (Assuming text space
isn't being scaled, this means the font size in points.)
char_spacing: Additional space that will be added between each character.
May be negative.
word_spacing: Additional space that will be added after each ASCII space
character (' '). May be negative.
"""
width = 0
ascii_space = ord(' ')
if isinstance(text, str):
text = self.encode(text)
for byte in text:
# It may seem like we are ignoring the possibility for multi-byte encodings
# here. However, Simple Fonts are explicitly defined as using only
# single-byte encodings (See 9.2.2), so this is safe. Composite fonts will
# obviously require a more sophisticated implementation.
width += self.unscaled_char_width(byte) + char_spacing
if byte == ascii_space:
width += word_spacing
return self.convert_width(width, fontsize)
def _parse_differences_map(diffmap: Array):
"""Parses a Differences map to ``(char_code, char_name)`` pairs.
This procedure is as described in 9.6.5.1.
Here, ``char_code`` refers to the byte value of the character as it would appear in
a text content stream using this font; it is the PDF encoding, not the true unicode
character code. The corresponding ``char_name`` refers to the name of the glyph. The
name is used by Type1 and Type3 fonts to look up the actual glyph used from the
font.
A partial mapping of glyph names to true unicode characters is available at
``pikepdf._data.CHARNAMES_TO_UNICODE``.
"""
counter = 0
for value in diffmap:
if isinstance(value, Name):
yield counter, value
counter += 1
else:
# An index
counter = value
# pdfminer.six has some closely related code:
# https://github.com/pdfminer/pdfminer.six/blob/master/pdfminer/encodingdb.py
# It works exactly opposite of what we would need here, but still could be interesting
# to adapt.
def _differences_map_lookup(diffmap: Array) -> dict:
"""Convert a Differences map (See 9.6.5.1) to a Python dict.
The Python dict maps unicode characters to the character index value.
The character index values are the byte values used in actual text content streams.
If the difference map encodes characters whose names aren't recognized, they will be
omitted from the final map, and a warning emitted.
"""
diff = {}
for index, name in _parse_differences_map(diffmap):
try:
diff[CHARNAMES_TO_UNICODE[str(name)]] = index
except KeyError:
log.warning(f"Unknown character name in difference map: {str(name)}")
return diff
|
SimpleFont
|
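A short arithmetic sketch of the glyph-space scaling used by convert_width above: simple-font widths are stored in thousandths of an em, so a 500-unit glyph at a 12 pt font size occupies 6 text-space units.

from decimal import Decimal

glyph_width = Decimal(500)   # e.g. a typical letter width from /Widths
fontsize = Decimal(12)

# convert_width: glyph space -> text space
text_width = (glyph_width / Decimal(1000)) * fontsize
print(text_width)            # 6

# convert_width_reverse undoes the scaling
assert (text_width * Decimal(1000)) / fontsize == glyph_width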
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_attributes.py
|
{
"start": 91649,
"end": 97728
}
|
class ____(fixtures.ORMTest):
@testing.fixture
def dict_collection(self):
class Foo(BasicEntity):
pass
class Bar(BasicEntity):
def __init__(self, name):
self.name = name
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(
Foo,
"someattr",
uselist=True,
useobject=True,
typecallable=attribute_keyed_dict("name"),
)
_register_attribute(
Bar,
"name",
uselist=False,
useobject=False,
)
return Foo, Bar
@testing.fixture
def list_collection(self):
class Foo(BasicEntity):
pass
class Bar(BasicEntity):
pass
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(
Foo,
"someattr",
uselist=True,
useobject=True,
)
return Foo, Bar
def test_listen_w_list_key(self, list_collection):
Foo, Bar = list_collection
m1 = Mock()
event.listen(Foo.someattr, "append", m1, include_key=True)
event.listen(Foo.someattr, "remove", m1, include_key=True)
f1 = Foo()
b1, b2, b3 = Bar(), Bar(), Bar()
f1.someattr.append(b1)
f1.someattr.append(b2)
f1.someattr[1] = b3
del f1.someattr[0]
append_token, remove_token = (
Foo.someattr.impl._append_token,
Foo.someattr.impl._remove_token,
)
eq_(
m1.mock_calls,
[
call(
f1,
b1,
append_token,
key=NO_KEY,
),
call(
f1,
b2,
append_token,
key=NO_KEY,
),
call(
f1,
b2,
remove_token,
key=1,
),
call(
f1,
b3,
append_token,
key=1,
),
call(
f1,
b1,
remove_token,
key=0,
),
],
)
def test_listen_w_dict_key(self, dict_collection):
Foo, Bar = dict_collection
m1 = Mock()
event.listen(Foo.someattr, "append", m1, include_key=True)
event.listen(Foo.someattr, "remove", m1, include_key=True)
f1 = Foo()
b1, b2, b3 = Bar("b1"), Bar("b2"), Bar("b3")
f1.someattr["k1"] = b1
f1.someattr.update({"k2": b2, "k3": b3})
del f1.someattr["k2"]
append_token, remove_token = (
Foo.someattr.impl._append_token,
Foo.someattr.impl._remove_token,
)
eq_(
m1.mock_calls,
[
call(
f1,
b1,
append_token,
key="k1",
),
call(
f1,
b2,
append_token,
key="k2",
),
call(
f1,
b3,
append_token,
key="k3",
),
call(
f1,
b2,
remove_token,
key="k2",
),
],
)
def test_dict_bulk_replace_w_key(self, dict_collection):
Foo, Bar = dict_collection
m1 = Mock()
event.listen(Foo.someattr, "bulk_replace", m1, include_key=True)
event.listen(Foo.someattr, "append", m1, include_key=True)
event.listen(Foo.someattr, "remove", m1, include_key=True)
f1 = Foo()
b1, b2, b3, b4 = Bar("b1"), Bar("b2"), Bar("b3"), Bar("b4")
f1.someattr = {"b1": b1, "b3": b3}
f1.someattr = {"b2": b2, "b3": b3, "b4": b4}
bulk_replace_token = Foo.someattr.impl._bulk_replace_token
eq_(
m1.mock_calls,
[
call(f1, [b1, b3], bulk_replace_token, keys=["b1", "b3"]),
call(f1, b1, bulk_replace_token, key="b1"),
call(f1, b3, bulk_replace_token, key="b3"),
call(
f1,
[b2, b3, b4],
bulk_replace_token,
keys=["b2", "b3", "b4"],
),
call(f1, b2, bulk_replace_token, key="b2"),
call(f1, b4, bulk_replace_token, key="b4"),
call(f1, b1, bulk_replace_token, key=NO_KEY),
],
)
def test_listen_wo_dict_key(self, dict_collection):
Foo, Bar = dict_collection
m1 = Mock()
event.listen(Foo.someattr, "append", m1)
event.listen(Foo.someattr, "remove", m1)
f1 = Foo()
b1, b2, b3 = Bar("b1"), Bar("b2"), Bar("b3")
f1.someattr["k1"] = b1
f1.someattr.update({"k2": b2, "k3": b3})
del f1.someattr["k2"]
append_token, remove_token = (
Foo.someattr.impl._append_token,
Foo.someattr.impl._remove_token,
)
eq_(
m1.mock_calls,
[
call(
f1,
b1,
append_token,
),
call(
f1,
b2,
append_token,
),
call(
f1,
b3,
append_token,
),
call(
f1,
b2,
remove_token,
),
],
)
|
CollectionKeyTest
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_success_policy_rule.py
|
{
"start": 383,
"end": 7177
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'succeeded_count': 'int',
'succeeded_indexes': 'str'
}
attribute_map = {
'succeeded_count': 'succeededCount',
'succeeded_indexes': 'succeededIndexes'
}
def __init__(self, succeeded_count=None, succeeded_indexes=None, local_vars_configuration=None): # noqa: E501
"""V1SuccessPolicyRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._succeeded_count = None
self._succeeded_indexes = None
self.discriminator = None
if succeeded_count is not None:
self.succeeded_count = succeeded_count
if succeeded_indexes is not None:
self.succeeded_indexes = succeeded_indexes
@property
def succeeded_count(self):
"""Gets the succeeded_count of this V1SuccessPolicyRule. # noqa: E501
succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rule. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer. # noqa: E501
:return: The succeeded_count of this V1SuccessPolicyRule. # noqa: E501
:rtype: int
"""
return self._succeeded_count
@succeeded_count.setter
def succeeded_count(self, succeeded_count):
"""Sets the succeeded_count of this V1SuccessPolicyRule.
succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rule. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer. # noqa: E501
:param succeeded_count: The succeeded_count of this V1SuccessPolicyRule. # noqa: E501
:type: int
"""
self._succeeded_count = succeeded_count
@property
def succeeded_indexes(self):
"""Gets the succeeded_indexes of this V1SuccessPolicyRule. # noqa: E501
succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The numbers are listed in increasing order, and ranges are represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time. # noqa: E501
:return: The succeeded_indexes of this V1SuccessPolicyRule. # noqa: E501
:rtype: str
"""
return self._succeeded_indexes
@succeeded_indexes.setter
def succeeded_indexes(self, succeeded_indexes):
"""Sets the succeeded_indexes of this V1SuccessPolicyRule.
succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The numbers are listed in increasing order, and ranges are represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time. # noqa: E501
:param succeeded_indexes: The succeeded_indexes of this V1SuccessPolicyRule. # noqa: E501
:type: str
"""
self._succeeded_indexes = succeeded_indexes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SuccessPolicyRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SuccessPolicyRule):
return True
return self.to_dict() != other.to_dict()
|
V1SuccessPolicyRule
|
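A hypothetical construction of the generated model above, mirroring the example in its docstrings; to_dict() serializes the attributes declared in openapi_types.

from kubernetes.client import V1SuccessPolicyRule  # import path may vary by client version

# the Job succeeds once any three of indexes 1-4 have succeeded
rule = V1SuccessPolicyRule(succeeded_count=3, succeeded_indexes="1-4")

print(rule.succeeded_count)    # 3
print(rule.succeeded_indexes)  # "1-4"
print(rule.to_dict())          # {'succeeded_count': 3, 'succeeded_indexes': '1-4'}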
python
|
huggingface__transformers
|
tests/models/clip/test_modeling_clip.py
|
{
"start": 23324,
"end": 24176
}
|
class ____(CLIPModelTester):
def __init__(self, parent):
super().__init__(parent)
self.batch_size = self.vision_model_tester.batch_size
self.num_hidden_layers = self.vision_model_tester.num_hidden_layers
self.hidden_size = self.vision_model_tester.hidden_size
self.seq_length = self.vision_model_tester.seq_length
def prepare_config_and_inputs(self):
_, pixel_values = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
|
CLIPForImageClassificationModelTester
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/solver30.py
|
{
"start": 409,
"end": 776
}
|
class ____(Iterator[Z]):
def __init__(self, a: Callable[[Z], Any], b: Iterable[Z]) -> None: ...
def __next__(self) -> Z: ...
def func4(a: Callable[[Z], Any], b: Iterable[Z]) -> Iterator[Z]: ...
func1(func2(func3(lambda x: reveal_type(x.foo, expected_text="bool"), items)))
func1(func2(func4(lambda x: reveal_type(x.foo, expected_text="bool"), items)))
|
func3
|
python
|
Netflix__metaflow
|
test/core/tests/timeout_decorator.py
|
{
"start": 72,
"end": 1328
}
|
class ____(MetaflowTest):
"""
Test that checks that the timeout decorator works as intended.
"""
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('catch(var="ex", print_exception=False)')
@tag("timeout(seconds=1)")
@steps(0, ["singleton-start", "foreach-inner"], required=True)
def step_sleep(self):
self.check = True
import time
time.sleep(5)
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
run = checker.get_run()
if run:
timeout_raised = False
for step in run:
for task in step:
if "check" in task.data:
extype = (
"metaflow.plugins.timeout_decorator." "TimeoutException"
)
assert_equals(extype, str(task.data.ex.type))
timeout_raised = True
assert_equals(True, timeout_raised)
|
TimeoutDecoratorTest
|
python
|
python-poetry__poetry
|
src/poetry/inspection/lazy_wheel.py
|
{
"start": 3663,
"end": 5741
}
|
class ____:
"""Stateful bookkeeping to merge interval graphs."""
def __init__(self, *, left: Iterable[int] = (), right: Iterable[int] = ()) -> None:
self._left = list(left)
self._right = list(right)
def __repr__(self) -> str:
return (
f"{type(self).__name__}"
f"(left={tuple(self._left)}, right={tuple(self._right)})"
)
def _merge(
self, start: int, end: int, left: int, right: int
) -> Iterator[tuple[int, int]]:
"""Return an iterator of intervals to be fetched.
Args:
start: Start of needed interval
end: End of needed interval
left: Index of first overlapping downloaded data
right: Index after last overlapping downloaded data
"""
lslice, rslice = self._left[left:right], self._right[left:right]
i = start = min([start, *lslice[:1]])
end = max([end, *rslice[-1:]])
for j, k in zip(lslice, rslice):
if j > i:
yield i, j - 1
i = k + 1
if i <= end:
yield i, end
self._left[left:right], self._right[left:right] = [start], [end]
def minimal_intervals_covering(
self, start: int, end: int
) -> Iterator[tuple[int, int]]:
"""Provide the intervals needed to cover from ``start <= x <= end``.
This method mutates internal state so that later calls only return intervals not
covered by prior calls. The first call to this method will always return exactly
one interval, which was exactly the one requested. Later requests for
intervals overlapping that first requested interval will yield only the ranges
not previously covered (which may be empty, e.g. if the same interval is
requested twice).
This may be used e.g. to download substrings of remote files on demand.
"""
left = bisect_left(self._right, start)
right = bisect_right(self._left, end)
yield from self._merge(start, end, left, right)
|
MergeIntervals
|
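A usage sketch of the interval bookkeeping above: repeated calls to minimal_intervals_covering yield only the ranges not returned before, which is what makes it suitable for fetching remote byte ranges on demand.

merger = MergeIntervals()

print(list(merger.minimal_intervals_covering(0, 9)))   # [(0, 9)]   nothing cached yet
print(list(merger.minimal_intervals_covering(5, 15)))  # [(10, 15)] only the uncovered tail
print(list(merger.minimal_intervals_covering(2, 8)))   # []         already fully covered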
python
|
allegroai__clearml
|
examples/frameworks/keras/keras_tensorboard.py
|
{
"start": 744,
"end": 4356
}
|
class ____(TensorBoard):
@staticmethod
def make_image(tensor):
from PIL import Image
import io
tensor = np.stack((tensor, tensor, tensor), axis=2)
height, width, channels = tensor.shape
image = Image.fromarray(tensor)
output = io.BytesIO()
image.save(output, format='PNG')
image_string = output.getvalue()
output.close()
return tf.Summary.Image(height=height,
width=width,
colorspace=channels,
encoded_image_string=image_string)
def on_epoch_end(self, epoch, logs=None):
if logs is None:
logs = {}
super(TensorBoardImage, self).on_epoch_end(epoch, logs)
images = self.validation_data[0] # 0 - data; 1 - labels
img = (255 * images[0].reshape(28, 28)).astype('uint8')
image = self.make_image(img)
summary = tf.Summary(value=[tf.Summary.Value(tag='image', image=image)])
self.writer.add_summary(summary, epoch)
parser = argparse.ArgumentParser(description='Keras MNIST Example')
parser.add_argument('--batch-size', type=int, default=128, help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=6, help='number of epochs to train (default: 6)')
args = parser.parse_args()
# the data, shuffled and split between train and test sets
nb_classes = 10
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784).astype('float32')/255.
X_test = X_test.reshape(10000, 784).astype('float32')/255.
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
# model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
# model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model2 = Sequential()
model2.add(Dense(512, input_shape=(784,)))
model2.add(Activation('relu'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='examples', task_name='Keras with TensorBoard example')
# To set your own configuration:
# task.connect_configuration(
# name="MyConfig",
# configuration={'test': 1337, 'nested': {'key': 'value', 'number': 1}}
# )
# Advanced: setting model class enumeration
labels = dict(('digit_%d' % i, i) for i in range(10))
task.set_model_label_enumeration(labels)
output_folder = os.path.join(tempfile.gettempdir(), 'keras_example')
board = TensorBoard(histogram_freq=1, log_dir=output_folder, write_images=False)
model_store = ModelCheckpoint(filepath=os.path.join(output_folder, 'weight.{epoch}.keras'))
# load previous model, if it is there
# noinspection PyBroadException
try:
model.load_weights(os.path.join(output_folder, 'weight.1.keras'))
except Exception:
pass
history = model.fit(X_train, Y_train,
batch_size=args.batch_size, epochs=args.epochs,
callbacks=[board, model_store],
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
TensorBoardImage
|
python
|
keras-team__keras
|
keras/src/layers/normalization/rms_normalization_test.py
|
{
"start": 121,
"end": 1982
}
|
class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_ln_basics(self):
self.run_layer_test(
layers.RMSNormalization,
init_kwargs={},
input_shape=(4, 2),
expected_output_shape=(4, 2),
expected_num_trainable_weights=1,
expected_num_seed_generators=0,
)
self.run_layer_test(
layers.RMSNormalization,
init_kwargs={
"axis": -1,
},
input_shape=(4, 2),
expected_output_shape=(4, 2),
expected_num_trainable_weights=1,
expected_num_seed_generators=0,
)
def test_correctness(self):
layer = layers.RMSNormalization()
layer.build(input_shape=(2, 2, 2))
inputs = np.random.normal(
loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
).astype("float32")
inputs = ops.convert_to_tensor(inputs)
out = layer(inputs)
expected = ops.multiply(
ops.multiply(
inputs,
ops.rsqrt(ops.mean(ops.square(inputs), axis=-1, keepdims=True)),
),
layer.scale,
)
self.assertAllClose(out, expected, atol=1e-1)
def test_output(self):
layer = layers.RMSNormalization()
inputs = np.arange(10).astype("float32")[None, :]
out = layer(inputs)
self.assertAllClose(
out,
[
[
0.0,
0.18731716,
0.37463433,
0.5619515,
0.74926865,
0.9365858,
1.123903,
1.3112202,
1.4985373,
1.6858544,
]
],
)
|
RMSNormalizationTest
|
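A plain-NumPy sketch of the formula the test_correctness case above checks: each vector is divided by its root mean square over the last axis and multiplied by the learned scale (taken here as 1, its initial value).

import numpy as np

x = np.arange(10, dtype="float32")[None, :]  # same input as test_output
rms = np.sqrt(np.mean(np.square(x), axis=-1, keepdims=True))
out = x / rms * 1.0                          # scale assumed to be ones

print(np.round(out, 4))  # ~[0. 0.1873 0.3746 0.562 0.7493 0.9366 1.1239 1.3112 1.4985 1.6859]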
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0140_addons_options_base_version.py
|
{
"start": 183,
"end": 1310
}
|
class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("builds", "0059_add_version_date_index"),
("projects", "0139_addons_filetreediff_field"),
]
operations = [
migrations.AddField(
model_name="addonsconfig",
name="options_base_version",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="builds.version",
verbose_name="Base version to compare against (eg. DocDiff, File Tree Diff)",
),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="options_base_version",
field=models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="builds.version",
verbose_name="Base version to compare against (eg. DocDiff, File Tree Diff)",
),
),
]
|
Migration
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/elements.py
|
{
"start": 178068,
"end": 178186
}
|
class ____(_IdentifiedClause):
__visit_name__ = "release_savepoint"
inherit_cache = False
|
ReleaseSavepointClause
|
python
|
pypa__pip
|
tests/unit/test_resolution_legacy_resolver.py
|
{
"start": 2441,
"end": 4732
}
|
class ____:
"""
Test _add_requirement_to_set().
"""
def test_unsupported_wheel_link_requirement_raises(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
# GIVEN
resolver = make_test_resolver(monkeypatch, [])
requirement_set = RequirementSet(check_supported_wheels=True)
install_req = install_req_from_line(
"https://whatever.com/peppercorn-0.4-py2.py3-bogus-any.whl",
)
assert install_req.link is not None
assert install_req.link.is_wheel
assert install_req.link.scheme == "https"
# WHEN / THEN
with pytest.raises(InstallationError):
resolver._add_requirement_to_set(requirement_set, install_req)
def test_unsupported_wheel_local_file_requirement_raises(
self, data: TestData, monkeypatch: pytest.MonkeyPatch
) -> None:
# GIVEN
resolver = make_test_resolver(monkeypatch, [])
requirement_set = RequirementSet(check_supported_wheels=True)
install_req = install_req_from_line(
os.fspath(data.packages.joinpath("simple.dist-0.1-py1-none-invalid.whl")),
)
assert install_req.link is not None
assert install_req.link.is_wheel
assert install_req.link.scheme == "file"
# WHEN / THEN
with pytest.raises(InstallationError):
resolver._add_requirement_to_set(requirement_set, install_req)
def test_exclusive_environment_markers(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
"""Make sure excluding environment markers are handled correctly."""
# GIVEN
resolver = make_test_resolver(monkeypatch, [])
requirement_set = RequirementSet(check_supported_wheels=True)
eq36 = install_req_from_line("Django>=1.6.10,<1.7 ; python_version == '3.6'")
eq36.user_supplied = True
ne36 = install_req_from_line("Django>=1.6.10,<1.8 ; python_version != '3.6'")
ne36.user_supplied = True
# WHEN
resolver._add_requirement_to_set(requirement_set, eq36)
resolver._add_requirement_to_set(requirement_set, ne36)
# THEN
assert requirement_set.has_requirement("Django")
assert len(requirement_set.all_requirements) == 1
|
TestAddRequirement
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 22217,
"end": 22382
}
|
class ____(AttributeTemplate):
key = types.NPDatetime
def resolve___class__(self, ty):
return types.NumberClass(ty)
@infer_getattr
|
NPDatetimeAttribute
|
python
|
spack__spack
|
lib/spack/spack/dependency.py
|
{
"start": 362,
"end": 2851
}
|
class ____:
"""Class representing metadata for a dependency on a package.
This class differs from ``spack.spec.DependencySpec`` because it
represents metadata at the ``Package`` level.
``spack.spec.DependencySpec`` is a descriptor for an actual package
configuration, while ``Dependency`` is a descriptor for a package's
dependency *requirements*.
A dependency is a requirement for a configuration of another package
that satisfies a particular spec. The dependency can have *types*,
which determine *how* that package configuration is required,
e.g. whether it is required for building the package, whether it
needs to be linked to, or whether it is needed at runtime so that
Spack can call commands from it.
A package can also depend on another package with *patches*. This is
for cases where the maintainers of one package also maintain special
patches for their dependencies. If one package depends on another
with patches, a special version of that dependency with patches
applied will be built for use by the dependent package. The patches
are included in the new version's spec hash to differentiate it from
unpatched versions of the same package, so that unpatched versions of
the dependency package can coexist with the patched version.
"""
__slots__ = "pkg", "spec", "patches", "depflag"
def __init__(
self,
pkg: Type["spack.package_base.PackageBase"],
spec: spack.spec.Spec,
depflag: dt.DepFlag = dt.DEFAULT,
):
"""Create a new Dependency.
Args:
pkg: Package that has this dependency
spec: Spec indicating dependency requirements
depflag: flags describing the dependency relationship
"""
self.pkg = pkg
self.spec = spec
# This dict maps condition specs to lists of Patch objects, just
# as the patches dict on packages does.
self.patches: Dict[spack.spec.Spec, List["spack.patch.Patch"]] = {}
self.depflag = depflag
@property
def name(self) -> str:
"""Get the name of the dependency package."""
return self.spec.name
def __repr__(self) -> str:
types = dt.flag_to_chars(self.depflag)
if self.patches:
return f"<Dependency: {self.pkg.name} -> {self.spec} [{types}, {self.patches}]>"
else:
return f"<Dependency: {self.pkg.name} -> {self.spec} [{types}]>"
|
Dependency
|
python
|
encode__django-rest-framework
|
tests/test_bound_fields.py
|
{
"start": 76,
"end": 3660
}
|
class ____:
def test_empty_bound_field(self):
class ExampleSerializer(serializers.Serializer):
text = serializers.CharField(max_length=100)
amount = serializers.IntegerField()
serializer = ExampleSerializer()
assert serializer['text'].value == ''
assert serializer['text'].errors is None
assert serializer['text'].name == 'text'
assert serializer['amount'].value is None
assert serializer['amount'].errors is None
assert serializer['amount'].name == 'amount'
def test_populated_bound_field(self):
class ExampleSerializer(serializers.Serializer):
text = serializers.CharField(max_length=100)
amount = serializers.IntegerField()
serializer = ExampleSerializer(data={'text': 'abc', 'amount': 123})
assert serializer.is_valid()
assert serializer['text'].value == 'abc'
assert serializer['text'].errors is None
assert serializer['text'].name == 'text'
assert serializer['amount'].value == 123
assert serializer['amount'].errors is None
assert serializer['amount'].name == 'amount'
def test_error_bound_field(self):
class ExampleSerializer(serializers.Serializer):
text = serializers.CharField(max_length=100)
amount = serializers.IntegerField()
serializer = ExampleSerializer(data={'text': 'x' * 1000, 'amount': 123})
serializer.is_valid()
assert serializer['text'].value == 'x' * 1000
assert serializer['text'].errors == ['Ensure this field has no more than 100 characters.']
assert serializer['text'].name == 'text'
assert serializer['amount'].value == 123
assert serializer['amount'].errors is None
assert serializer['amount'].name == 'amount'
def test_delete_field(self):
class ExampleSerializer(serializers.Serializer):
text = serializers.CharField(max_length=100)
amount = serializers.IntegerField()
serializer = ExampleSerializer()
del serializer.fields['text']
assert 'text' not in serializer.fields
def test_as_form_fields(self):
class ExampleSerializer(serializers.Serializer):
bool_field = serializers.BooleanField()
null_field = serializers.IntegerField(allow_null=True)
serializer = ExampleSerializer(data={'bool_field': False, 'null_field': None})
assert serializer.is_valid()
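# as_form_field() renders None and False as empty strings so the HTML form
# inputs do not show the literal Python values.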
assert serializer['bool_field'].as_form_field().value == ''
assert serializer['null_field'].as_form_field().value == ''
def test_rendering_boolean_field(self):
from rest_framework.renderers import HTMLFormRenderer
class ExampleSerializer(serializers.Serializer):
bool_field = serializers.BooleanField(
style={'base_template': 'checkbox.html', 'template_pack': 'rest_framework/vertical'})
serializer = ExampleSerializer(data={'bool_field': True})
assert serializer.is_valid()
renderer = HTMLFormRenderer()
rendered = renderer.render_field(serializer['bool_field'], {})
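# All whitespace is stripped from both strings before comparison, which is
# why the expected markup below is written without spaces.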
expected_packed = (
'<divclass="form-group">'
'<divclass="checkbox">'
'<label>'
'<inputtype="checkbox"name="bool_field"value="true"checked>'
'Boolfield'
'</label>'
'</div>'
'</div>'
)
rendered_packed = ''.join(rendered.split())
assert rendered_packed == expected_packed
|
TestSimpleBoundField
|
python
|
pennersr__django-allauth
|
allauth/headless/mfa/inputs.py
|
{
"start": 633,
"end": 719
}
|
class ____(GenerateRecoveryCodesForm, inputs.Input):
pass
|
GenerateRecoveryCodesInput
|
python
|
sympy__sympy
|
sympy/physics/quantum/tests/test_state.py
|
{
"start": 1198,
"end": 6748
}
|
class ____(TimeDepKet):
@classmethod
def default_args(self):
return ("r", "theta", "phi", "t")
def test_ket():
k = Ket('0')
assert isinstance(k, Ket)
assert isinstance(k, KetBase)
assert isinstance(k, StateBase)
assert isinstance(k, QExpr)
assert k.label == (Symbol('0'),)
assert k.hilbert_space == HilbertSpace()
assert k.is_commutative is False
# Make sure this doesn't get converted to the number pi.
k = Ket('pi')
assert k.label == (Symbol('pi'),)
k = Ket(x, y)
assert k.label == (x, y)
assert k.hilbert_space == HilbertSpace()
assert k.is_commutative is False
assert k.dual_class() == Bra
assert k.dual == Bra(x, y)
assert k.subs(x, y) == Ket(y, y)
k = CustomKet()
assert k == CustomKet("test")
k = CustomKetMultipleLabels()
assert k == CustomKetMultipleLabels("r", "theta", "phi")
assert Ket() == Ket('psi')
def test_bra():
b = Bra('0')
assert isinstance(b, Bra)
assert isinstance(b, BraBase)
assert isinstance(b, StateBase)
assert isinstance(b, QExpr)
assert b.label == (Symbol('0'),)
assert b.hilbert_space == HilbertSpace()
assert b.is_commutative is False
# Make sure this doesn't get converted to the number pi.
b = Bra('pi')
assert b.label == (Symbol('pi'),)
b = Bra(x, y)
assert b.label == (x, y)
assert b.hilbert_space == HilbertSpace()
assert b.is_commutative is False
assert b.dual_class() == Ket
assert b.dual == Ket(x, y)
assert b.subs(x, y) == Bra(y, y)
assert Bra() == Bra('psi')
def test_ops():
k0 = Ket(0)
k1 = Ket(1)
k = 2*I*k0 - (x/sqrt(2))*k1
assert k == Add(Mul(2, I, k0),
Mul(Rational(-1, 2), x, Pow(2, S.Half), k1))
def test_time_dep_ket():
k = TimeDepKet(0, t)
assert isinstance(k, TimeDepKet)
assert isinstance(k, KetBase)
assert isinstance(k, StateBase)
assert isinstance(k, QExpr)
assert k.label == (Integer(0),)
assert k.args == (Integer(0), t)
assert k.time == t
assert k.dual_class() == TimeDepBra
assert k.dual == TimeDepBra(0, t)
assert k.subs(t, 2) == TimeDepKet(0, 2)
k = TimeDepKet(x, 0.5)
assert k.label == (x,)
assert k.args == (x, sympify(0.5))
k = CustomTimeDepKet()
assert k.label == (Symbol("test"),)
assert k.time == Symbol("t")
assert k == CustomTimeDepKet("test", "t")
k = CustomTimeDepKetMultipleLabels()
assert k.label == (Symbol("r"), Symbol("theta"), Symbol("phi"))
assert k.time == Symbol("t")
assert k == CustomTimeDepKetMultipleLabels("r", "theta", "phi", "t")
assert TimeDepKet() == TimeDepKet("psi", "t")
def test_time_dep_bra():
b = TimeDepBra(0, t)
assert isinstance(b, TimeDepBra)
assert isinstance(b, BraBase)
assert isinstance(b, StateBase)
assert isinstance(b, QExpr)
assert b.label == (Integer(0),)
assert b.args == (Integer(0), t)
assert b.time == t
assert b.dual_class() == TimeDepKet
assert b.dual == TimeDepKet(0, t)
k = TimeDepBra(x, 0.5)
assert k.label == (x,)
assert k.args == (x, sympify(0.5))
assert TimeDepBra() == TimeDepBra("psi", "t")
def test_bra_ket_dagger():
x = symbols('x', complex=True)
k = Ket('k')
b = Bra('b')
assert Dagger(k) == Bra('k')
assert Dagger(b) == Ket('b')
assert Dagger(k).is_commutative is False
k2 = Ket('k2')
e = 2*I*k + x*k2
assert Dagger(e) == conjugate(x)*Dagger(k2) - 2*I*Dagger(k)
def test_wavefunction():
x, y = symbols('x y', real=True)
L = symbols('L', positive=True)
n = symbols('n', integer=True, positive=True)
f = Wavefunction(x**2, x)
p = f.prob()
lims = f.limits
assert f.is_normalized is False
assert f.norm is oo
assert f(10) == 100
assert p(10) == 10000
assert lims[x] == (-oo, oo)
assert diff(f, x) == Wavefunction(2*x, x)
raises(NotImplementedError, lambda: f.normalize())
assert conjugate(f) == Wavefunction(conjugate(f.expr), x)
assert conjugate(f) == Dagger(f)
g = Wavefunction(x**2*y + y**2*x, (x, 0, 1), (y, 0, 2))
lims_g = g.limits
assert lims_g[x] == (0, 1)
assert lims_g[y] == (0, 2)
assert g.is_normalized is False
assert g.norm == sqrt(42)/3
assert g(2, 4) == 0
assert g(1, 1) == 2
assert diff(diff(g, x), y) == Wavefunction(2*x + 2*y, (x, 0, 1), (y, 0, 2))
assert conjugate(g) == Wavefunction(conjugate(g.expr), *g.args[1:])
assert conjugate(g) == Dagger(g)
h = Wavefunction(sqrt(5)*x**2, (x, 0, 1))
assert h.is_normalized is True
assert h.normalize() == h
assert conjugate(h) == Wavefunction(conjugate(h.expr), (x, 0, 1))
assert conjugate(h) == Dagger(h)
piab = Wavefunction(sin(n*pi*x/L), (x, 0, L))
assert piab.norm == sqrt(L/2)
assert piab(L + 1) == 0
assert piab(0.5) == sin(0.5*n*pi/L)
assert piab(0.5, n=1, L=1) == sin(0.5*pi)
assert piab.normalize() == \
Wavefunction(sqrt(2)/sqrt(L)*sin(n*pi*x/L), (x, 0, L))
assert conjugate(piab) == Wavefunction(conjugate(piab.expr), (x, 0, L))
assert conjugate(piab) == Dagger(piab)
k = Wavefunction(x**2, 'x')
assert type(k.variables[0]) == Symbol
def test_orthogonal_states():
bracket = OrthogonalBra(x) * OrthogonalKet(x)
assert bracket.doit() == 1
bracket = OrthogonalBra(x) * OrthogonalKet(x+1)
assert bracket.doit() == 0
bracket = OrthogonalBra(x) * OrthogonalKet(y)
assert bracket.doit() == bracket
|
CustomTimeDepKetMultipleLabels
|
python
|
fastai__fastai
|
fastai/torch_core.py
|
{
"start": 23540,
"end": 23749
}
|
class ____(Int, ShowTitle):
_show_args = {'label': 'text'}
def show(self, ctx=None, **kwargs):
"Show self"
return show_title(str(self), ctx=ctx, **merge(self._show_args, kwargs))
|
TitledInt
|
python
|
numpy__numpy
|
tools/swig/test/testSuperTensor.py
|
{
"start": 13937,
"end": 14252
}
|
class ____(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
#self.result = int(self.result)
######################################################################
|
longTestCase
|
python
|
mlflow__mlflow
|
tests/langgraph/sample_code/langgraph_diy.py
|
{
"start": 581,
"end": 1104
}
|
class ____(TypedDict):
# The add_messages function defines how an update should be processed
# Default is to replace. add_messages says "append"
messages: Annotated[Sequence[BaseMessage], add_messages]
workflow = StateGraph(AgentState)
workflow.add_node("generate", generate)
workflow.add_edge(START, "generate")
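# Route back into "generate" while should_continue returns "yes"; finish at END on "no".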
workflow.add_conditional_edges(
"generate",
should_continue,
{
"yes": "generate",
"no": END,
},
)
graph = workflow.compile()
mlflow.models.set_model(graph)
|
AgentState
|
python
|
getsentry__sentry
|
src/sentry/organizations/services/organization/model.py
|
{
"start": 3675,
"end": 3947
}
|
class ____(RpcModel):
id: int = -1
organization_id: int = -1
user_id: int | None = None # This can be null when the user is deleted.
flags: RpcOrganizationMemberFlags = Field(default_factory=lambda: RpcOrganizationMemberFlags())
|
RpcOrganizationMemberSummary
|
python
|
Netflix__metaflow
|
test/unit/inheritance/test_inheritance.py
|
{
"start": 8705,
"end": 12561
}
|
class ____:
"""Test comprehensive multiple inheritance from independent hierarchies"""
def test_flow_completes(self, comprehensive_multi_hierarchy_run):
"""Test that multi-hierarchy flow completes"""
assert comprehensive_multi_hierarchy_run.successful
assert comprehensive_multi_hierarchy_run.finished
def test_parameters_from_first_hierarchy(self, comprehensive_multi_hierarchy_run):
"""Test parameters from first hierarchy are accessible"""
end_task = comprehensive_multi_hierarchy_run["end"].task
assert end_task["result_param_a"].data == 10
assert end_task["result_param_b"].data == 20
def test_parameters_from_second_hierarchy(self, comprehensive_multi_hierarchy_run):
"""Test parameters from second hierarchy are accessible"""
end_task = comprehensive_multi_hierarchy_run["end"].task
assert end_task["result_param_x"].data == 30
assert end_task["result_param_y"].data == 40
def test_merge_point_parameters(self, comprehensive_multi_hierarchy_run):
"""Test parameters from merge point are accessible"""
end_task = comprehensive_multi_hierarchy_run["end"].task
assert end_task["result_param_c"].data == 5
assert end_task["result_final_param"].data == "merged"
def test_configs_from_both_hierarchies(self, comprehensive_multi_hierarchy_run):
"""Test configs from both hierarchies are accessible"""
end_task = comprehensive_multi_hierarchy_run["end"].task
# First hierarchy
config_a = end_task["result_config_a"].data
assert config_a["source"] == "hierarchy_a"
assert config_a["value"] == 100
# Second hierarchy
config_x = end_task["result_config_x"].data
assert config_x["source"] == "hierarchy_x"
assert config_x["multiplier"] == 2
config_y = end_task["result_config_y"].data
assert config_y["enabled"] is True
assert config_y["threshold"] == 50
# Merge point
config_c = end_task["result_config_c"].data
assert config_c["merge"] is True
assert config_c["offset"] == 200
def test_step_override_from_merge_point(self, comprehensive_multi_hierarchy_run):
"""Test that BaseC's process step overrides BaseY's process step"""
# If the computation matches BaseC's logic (not BaseY's), override worked
end_task = comprehensive_multi_hierarchy_run["end"].task
# hierarchy_a_result = param_a + param_b + config_a.value = 10 + 20 + 100 = 130
# base_value = hierarchy_a_result * multiplier = 130 * 2 = 260
# Since base_value (260) > threshold (50):
# processed_value = base_value + offset + param_c = 260 + 200 + 5 = 465
assert end_task["result_final"].data == 465
def test_cross_hierarchy_computation(self, comprehensive_multi_hierarchy_run):
"""Test computation using values from both hierarchies"""
end_task = comprehensive_multi_hierarchy_run["end"].task
# Cross-hierarchy sum = param_a + param_b + param_x + param_y + param_c
# = 10 + 20 + 30 + 40 + 5 = 105
assert end_task["result_cross_hierarchy"].data == 105
def test_mutator_from_first_hierarchy_executes(
self, comprehensive_multi_hierarchy_run
):
end_task = comprehensive_multi_hierarchy_run["end"].task
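# Six parameters (a, b, x, y, c, final_param) and four configs (a, x, y, c)
# are visible to the logging mutator across both hierarchies.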
assert end_task["logging_param_count"].data == 6
assert end_task["logging_config_count"].data == 4
def test_decorated_step_from_first_hierarchy(
self, comprehensive_multi_hierarchy_run
):
"""Test that decorated step from first hierarchy works"""
end_task = comprehensive_multi_hierarchy_run["end"].task
assert end_task["source_from_var"].data == "hierarchy_x"
# Integration tests
|
TestComprehensiveMultiHierarchy
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/test_system_message.py
|
{
"start": 31828,
"end": 37638
}
|
class ____:
"""Test integration of SystemMessage with middleware chain."""
def test_multiple_middleware_can_modify_system_message(self) -> None:
"""Test that multiple middleware can modify system message in sequence."""
def first_middleware(request: ModelRequest) -> ModelRequest:
"""First middleware adds base system message."""
new_message = SystemMessage(
content="You are an assistant.",
additional_kwargs={"middleware_1": "applied"},
)
return request.override(system_message=new_message)
def second_middleware(request: ModelRequest) -> ModelRequest:
"""Second middleware appends to system message."""
current_content = request.system_message.text
new_content = current_content + " Be helpful."
merged_kwargs = {
**request.system_message.additional_kwargs,
"middleware_2": "applied",
}
new_message = SystemMessage(
content=new_content,
additional_kwargs=merged_kwargs,
)
return request.override(system_message=new_message)
request = _make_request(system_message=None)
# Apply middleware in sequence
request = first_middleware(request)
assert len(request.system_message.content_blocks) == 1
assert request.system_message.content_blocks[0]["text"] == "You are an assistant."
assert request.system_message.additional_kwargs["middleware_1"] == "applied"
request = second_middleware(request)
assert len(request.system_message.content_blocks) == 1
assert (
request.system_message.content_blocks[0]["text"] == "You are an assistant. Be helpful."
)
assert request.system_message.additional_kwargs["middleware_1"] == "applied"
assert request.system_message.additional_kwargs["middleware_2"] == "applied"
def test_middleware_preserves_system_message_metadata(self) -> None:
"""Test that metadata is preserved when middleware modifies system message."""
base_message = SystemMessage(
content="Base prompt",
additional_kwargs={"key1": "value1", "key2": "value2"},
response_metadata={"model": "gpt-4"},
)
def preserving_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware that preserves existing metadata."""
new_message = SystemMessage(
content=request.system_message.text + " Extended.",
additional_kwargs=request.system_message.additional_kwargs,
response_metadata=request.system_message.response_metadata,
)
return request.override(system_message=new_message)
request = _make_request(system_message=base_message)
new_request = preserving_middleware(request)
assert len(new_request.system_message.content_blocks) == 1
assert new_request.system_message.content_blocks[0]["text"] == "Base prompt Extended."
assert new_request.system_message.additional_kwargs == {
"key1": "value1",
"key2": "value2",
}
assert new_request.system_message.response_metadata == {"model": "gpt-4"}
def test_backward_compatibility_with_string_system_prompt(self) -> None:
"""Test that middleware still works with string system prompts."""
def string_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware using string system prompt (backward compatible)."""
current_prompt = request.system_prompt or ""
new_prompt = current_prompt + " Additional instructions."
return request.override(system_prompt=new_prompt.strip())
request = _make_request(system_prompt="Base prompt")
new_request = string_middleware(request)
assert new_request.system_prompt == "Base prompt Additional instructions."
assert isinstance(new_request.system_message, SystemMessage)
@pytest.mark.parametrize(
"initial_value",
[
SystemMessage(content="Hello"),
"Hello",
None,
],
ids=["system_message", "string", "none"],
)
def test_middleware_can_switch_between_formats(self, initial_value) -> None:
"""Test middleware can work with SystemMessage, string, or None."""
def flexible_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware that works with various formats."""
if request.system_message:
new_message = SystemMessage(content=request.system_message.text + " [modified]")
return request.override(system_message=new_message)
else:
new_message = SystemMessage(content="[created]")
return request.override(system_message=new_message)
if isinstance(initial_value, SystemMessage):
request = _make_request(system_message=initial_value)
expected_text = "Hello [modified]"
elif isinstance(initial_value, str):
request = _make_request(system_prompt=initial_value)
expected_text = "Hello [modified]"
else: # None
request = _make_request(system_message=None)
expected_text = "[created]"
result = flexible_middleware(request)
assert len(result.system_message.content_blocks) == 1
assert result.system_message.content_blocks[0]["text"] == expected_text
# =============================================================================
# Edge Cases and Error Handling
# =============================================================================
|
TestSystemMessageMiddlewareIntegration
|
python
|
pytorch__pytorch
|
test/functorch/test_control_flow.py
|
{
"start": 319718,
"end": 323125
}
|
class ____(torch.nn.Module):
def forward(self, x):
x: "f32[s77, 3]";
x, = fx_pytree.tree_flatten_spec(([x], {}), self._in_spec)
_guards_fn = self._guards_fn(x); _guards_fn = None
sym_size_int_1: "Sym(s77)" = torch.ops.aten.sym_size.int(x, 0)
while_loop_cond_graph_0 = self.while_loop_cond_graph_0
while_loop_body_graph_0 = self.while_loop_body_graph_0
while_loop = torch.ops.higher_order.while_loop(while_loop_cond_graph_0, while_loop_body_graph_0, (0, x), ()); while_loop_cond_graph_0 = while_loop_body_graph_0 = x = None
getitem_2: "Sym(u1)" = while_loop[0]
ge: "Sym(u1 >= 1)" = getitem_2 >= 1
_assert_scalar_default = torch.ops.aten._assert_scalar.default(ge, "Runtime assertion failed for expression u1 >= 1 on node 'ge'"); ge = _assert_scalar_default = None
gt_1: "Sym(u1 > 0)" = getitem_2 > 0
_assert_scalar_default_1 = torch.ops.aten._assert_scalar.default(gt_1, "Runtime assertion failed for expression 0 < u1 on node 'gt_1'"); gt_1 = _assert_scalar_default_1 = None
getitem_1: "f32[s77, 3]" = while_loop[1]; while_loop = None
add: "Sym(u1 + 1)" = getitem_2 + 1
add_1: "f32[s77, 3]" = torch.ops.aten.add.Tensor(getitem_1, getitem_2); getitem_1 = None
lt: "Sym(u1 < s77)" = getitem_2 < sym_size_int_1; sym_size_int_1 = None
mul: "Sym(2*u1)" = getitem_2 * 2; getitem_2 = None
ones: "f32[2*u1]" = torch.ops.aten.ones.default([mul], device = device(type='cpu'), pin_memory = False); mul = None
return pytree.tree_unflatten((add, add_1, lt, ones), self._out_spec)
class while_loop_cond_graph_0(torch.nn.Module):
def forward(self, it_1: "Sym(u0)", x_1: "f32[s77, 3]"):
sym_size_int_1: "Sym(s77)" = torch.ops.aten.sym_size.int(x_1, 0); x_1 = None
lt: "Sym(u0 < s77)" = it_1 < sym_size_int_1; it_1 = sym_size_int_1 = None
return lt
class while_loop_body_graph_0(torch.nn.Module):
def forward(self, it_1: "Sym(u0)", x_1: "f32[s77, 3]"):
clone: "f32[s77, 3]" = torch.ops.aten.clone.default(x_1); x_1 = None
select: "f32[3]" = torch.ops.aten.select.int(clone, 0, it_1)
select_1: "f32[3]" = torch.ops.aten.select.int(clone, 0, it_1)
add: "f32[3]" = torch.ops.aten.add.Tensor(select_1, it_1); select_1 = None
copy_: "f32[3]" = torch.ops.aten.copy_.default(select, add); select = add = copy_ = None
add_1: "Sym(u0 + 1)" = it_1 + 1; it_1 = None
return (add_1, clone)
""", # noqa: B950
)
@skipIfTorchDynamo("Graph is not captured correctly when test with dynamo")
@parametrize("dynamic", [True, False])
@parametrize("backend", ["eager", "aot_eager"])
def test_while_loop_op_int_carry_compile(self, dynamic, backend):
m, args = WHILE_LOOP_TESTS["int_carry"]
if backend == "eager":
backend = EagerAndRecordGraphs()
self._check_compile(m, args, dynamic=dynamic, backend=backend)
if (
isinstance(backend, EagerAndRecordGraphs)
and dynamic
and not TEST_WITH_CROSSREF
):
self.assertEqual(len(backend.graphs), 1)
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/endpoints/validators/incident_groupopenperiod.py
|
{
"start": 41,
"end": 816
}
|
class ____(serializers.Serializer):
incident_id = serializers.IntegerField(required=False)
incident_identifier = serializers.IntegerField(required=False)
group_id = serializers.IntegerField(required=False)
open_period_id = serializers.IntegerField(required=False)
def validate(self, attrs):
super().validate(attrs)
if (
not attrs.get("incident_id")
and not attrs.get("incident_identifier")
and not attrs.get("group_id")
and not attrs.get("open_period_id")
):
raise serializers.ValidationError(
"One of 'incident_id', 'incident_identifier', 'group_id', or 'open_period_id' must be provided."
)
return attrs
|
IncidentGroupOpenPeriodValidator
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 38066,
"end": 38260
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("CRITICAL", "HIGH", "LOW", "MODERATE")
|
SecurityAdvisorySeverity
|
python
|
mlflow__mlflow
|
mlflow/types/responses.py
|
{
"start": 1763,
"end": 2369
}
|
class ____(Response):
"""Response object for ResponsesAgent.
Args:
output: List of output items. See examples at
https://mlflow.org/docs/latest/genai/flavors/responses-agent-intro#creating-agent-output.
reasoning: Reasoning parameters
usage: Usage information
custom_outputs (Dict[str, Any]): An optional param to provide arbitrary additional context
from the model. The dictionary values must be JSON-serializable. **Optional**, defaults
to ``None``
"""
custom_outputs: dict[str, Any] | None = None
|
ResponsesAgentResponse
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/base.py
|
{
"start": 14655,
"end": 15554
}
|
class ____(LazyValueWrapper):
def __init__(self, parent_context, class_value, tree_name, generics_manager):
self.inference_state = class_value.inference_state
self.parent_context = parent_context
self._class_value = class_value
self._tree_name = tree_name
self._generics_manager = generics_manager
def py__class__(self):
return self._class_value
def get_annotated_class_object(self):
return self._class_value
def get_qualified_names(self):
return (self.py__name__(),)
@property
def name(self):
return ValueName(self, self._tree_name)
def _get_wrapped_value(self):
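# The wrapped value is a plain ``object`` instance; the annotated class itself
# stays reachable through ``py__class__``.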
object_, = builtin_from_name(self.inference_state, 'object').execute_annotation()
return object_
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._generics_manager)
|
BaseTypingInstance
|
python
|
weaviate__weaviate-python-client
|
weaviate/proto/v1/v5261/v1/weaviate_pb2_grpc.py
|
{
"start": 8597,
"end": 14420
}
|
class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/Search',
v1_dot_search__get__pb2.SearchRequest.SerializeToString,
v1_dot_search__get__pb2.SearchReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def BatchObjects(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/BatchObjects',
v1_dot_batch__pb2.BatchObjectsRequest.SerializeToString,
v1_dot_batch__pb2.BatchObjectsReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def BatchReferences(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/BatchReferences',
v1_dot_batch__pb2.BatchReferencesRequest.SerializeToString,
v1_dot_batch__pb2.BatchReferencesReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def BatchDelete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/BatchDelete',
v1_dot_batch__delete__pb2.BatchDeleteRequest.SerializeToString,
v1_dot_batch__delete__pb2.BatchDeleteReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def TenantsGet(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/TenantsGet',
v1_dot_tenants__pb2.TenantsGetRequest.SerializeToString,
v1_dot_tenants__pb2.TenantsGetReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def Aggregate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/weaviate.v1.Weaviate/Aggregate',
v1_dot_aggregate__pb2.AggregateRequest.SerializeToString,
v1_dot_aggregate__pb2.AggregateReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def BatchStream(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_stream(
request_iterator,
target,
'/weaviate.v1.Weaviate/BatchStream',
v1_dot_batch__pb2.BatchStreamRequest.SerializeToString,
v1_dot_batch__pb2.BatchStreamReply.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
|
Weaviate
|
python
|
sympy__sympy
|
sympy/integrals/meijerint.py
|
{
"start": 12436,
"end": 80775
}
|
class ____(ValueError):
"""
Exception raised by _get_coeff_exp, for internal use only.
"""
pass
def _get_coeff_exp(expr, x):
"""
When expr is known to be of the form c*x**b, with c and/or b possibly 1,
return c, b.
Examples
========
>>> from sympy.abc import x, a, b
>>> from sympy.integrals.meijerint import _get_coeff_exp
>>> _get_coeff_exp(a*x**b, x)
(a, b)
>>> _get_coeff_exp(x, x)
(1, 1)
>>> _get_coeff_exp(2*x, x)
(2, 1)
>>> _get_coeff_exp(x**3, x)
(1, 3)
"""
from sympy.simplify import powsimp
(c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)
if not m:
return c, S.Zero
[m] = m
if m.is_Pow:
if m.base != x:
raise _CoeffExpValueError('expr not of form a*x**b')
return c, m.exp
elif m == x:
return c, S.One
else:
raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
def _exponents(expr, x):
"""
Find the exponents of ``x`` (not including zero) in ``expr``.
Examples
========
>>> from sympy.integrals.meijerint import _exponents
>>> from sympy.abc import x, y
>>> from sympy import sin
>>> _exponents(x, x)
{1}
>>> _exponents(x**2, x)
{2}
>>> _exponents(x**2 + x, x)
{1, 2}
>>> _exponents(x**3*sin(x + x**y) + 1/x, x)
{-1, 1, 3, y}
"""
def _exponents_(expr, x, res):
if expr == x:
res.update([1])
return
if expr.is_Pow and expr.base == x:
res.update([expr.exp])
return
for argument in expr.args:
_exponents_(argument, x, res)
res = set()
_exponents_(expr, x, res)
return res
def _functions(expr, x):
""" Find the types of functions in expr, to estimate the complexity. """
return {e.func for e in expr.atoms(Function) if x in e.free_symbols}
def _find_splitting_points(expr, x):
"""
Find numbers a such that a linear substitution x -> x + a would
(hopefully) simplify expr.
Examples
========
>>> from sympy.integrals.meijerint import _find_splitting_points as fsp
>>> from sympy import sin
>>> from sympy.abc import x
>>> fsp(x, x)
{0}
>>> fsp((x-1)**3, x)
{1}
>>> fsp(sin(x+3)*x, x)
{-3, 0}
"""
p, q = [Wild(n, exclude=[x]) for n in 'pq']
def compute_innermost(expr, res):
if not isinstance(expr, Expr):
return
m = expr.match(p*x + q)
if m and m[p] != 0:
res.add(-m[q]/m[p])
return
if expr.is_Atom:
return
for argument in expr.args:
compute_innermost(argument, res)
innermost = set()
compute_innermost(expr, innermost)
return innermost
def _split_mul(f, x):
"""
Split expression ``f`` into fac, po, g, where fac is a constant factor,
po = x**s for some s independent of x, and g is "the rest".
Examples
========
>>> from sympy.integrals.meijerint import _split_mul
>>> from sympy import sin
>>> from sympy.abc import s, x
>>> _split_mul((3*x)**s*sin(x**2)*x, x)
(3**s, x*x**s, sin(x**2))
"""
fac = S.One
po = S.One
g = S.One
f = expand_power_base(f)
args = Mul.make_args(f)
for a in args:
if a == x:
po *= x
elif x not in a.free_symbols:
fac *= a
else:
if a.is_Pow and x not in a.exp.free_symbols:
c, t = a.base.as_coeff_mul(x)
if t != (x,):
c, t = expand_mul(a.base).as_coeff_mul(x)
if t == (x,):
po *= x**a.exp
fac *= unpolarify(polarify(c**a.exp, subs=False))
continue
g *= a
return fac, po, g
def _mul_args(f):
"""
Return a list ``L`` such that ``Mul(*L) == f``.
If ``f`` is not a ``Mul`` or ``Pow``, ``L=[f]``.
If ``f=g**n`` for an integer ``n``, ``L=[g]*n``.
If ``f`` is a ``Mul``, ``L`` comes from applying ``_mul_args`` to all factors of ``f``.
"""
args = Mul.make_args(f)
gs = []
for g in args:
if g.is_Pow and g.exp.is_Integer:
n = g.exp
base = g.base
if n < 0:
n = -n
base = 1/base
gs += [base]*n
else:
gs.append(g)
return gs
def _mul_as_two_parts(f):
"""
Find all the ways to split ``f`` into a product of two terms.
Return None on failure.
Explanation
===========
Although the order is canonical from multiset_partitions, this is
not necessarily the best order to process the terms. For example,
if the case of len(gs) == 2 is removed and multiset is allowed to
sort the terms, some tests fail.
Examples
========
>>> from sympy.integrals.meijerint import _mul_as_two_parts
>>> from sympy import sin, exp, ordered
>>> from sympy.abc import x
>>> list(ordered(_mul_as_two_parts(x*sin(x)*exp(x))))
[(x, exp(x)*sin(x)), (x*exp(x), sin(x)), (x*sin(x), exp(x))]
"""
gs = _mul_args(f)
if len(gs) < 2:
return None
if len(gs) == 2:
return [tuple(gs)]
return [(Mul(*x), Mul(*y)) for (x, y) in multiset_partitions(gs, 2)]
def _inflate_g(g, n):
""" Return C, h such that h is a G function of argument z**n and
g = C*h. """
# TODO should this be a method of meijerg?
# See: [L, page 150, equation (5)]
def inflate(params, n):
""" (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) """
return [(a + i)/n for a, i in itertools.product(params, range(n))]
v = S(len(g.ap) - len(g.bq))
C = n**(1 + g.nu + v/2)
C /= (2*pi)**((n - 1)*g.delta)
return C, meijerg(inflate(g.an, n), inflate(g.aother, n),
inflate(g.bm, n), inflate(g.bother, n),
g.argument**n * n**(n*v))
def _flip_g(g):
""" Turn the G function into one of inverse argument
(i.e. G(1/x) -> G'(x)) """
# See [L], section 5.2
def tr(l):
return [1 - a for a in l]
return meijerg(tr(g.bm), tr(g.bother), tr(g.an), tr(g.aother), 1/g.argument)
def _inflate_fox_h(g, a):
r"""
Let d denote the integrand in the definition of the G function ``g``.
Consider the function H which is defined in the same way, but with
integrand d/Gamma(a*s) (contour conventions as usual).
If ``a`` is rational, the function H can be written as C*G, for a constant C
and a G-function G.
This function returns C, G.
"""
if a < 0:
return _inflate_fox_h(_flip_g(g), -a)
p = S(a.p)
q = S(a.q)
# We use the substitution s->qs, i.e. inflate g by q. We are left with an
# extra factor of Gamma(p*s), for which we use Gauss' multiplication
# theorem.
D, g = _inflate_g(g, q)
z = g.argument
D /= (2*pi)**((1 - p)/2)*p**Rational(-1, 2)
z /= p**p
bs = [(n + 1)/p for n in range(p)]
return D, meijerg(g.an, g.aother, g.bm, list(g.bother) + bs, z)
_dummies: dict[tuple[str, str], Dummy] = {}
def _dummy(name, token, expr, **kwargs):
"""
Return a dummy. This will return the same dummy if the same token+name is
requested more than once, and it is not already in expr.
This is for being cache-friendly.
"""
d = _dummy_(name, token, **kwargs)
if d in expr.free_symbols:
return Dummy(name, **kwargs)
return d
def _dummy_(name, token, **kwargs):
"""
Return a dummy associated to name and token. Same effect as declaring
it globally.
"""
if not (name, token) in _dummies:
_dummies[(name, token)] = Dummy(name, **kwargs)
return _dummies[(name, token)]
def _is_analytic(f, x):
""" Check if f(x), when expressed using G functions on the positive reals,
will in fact agree with the G functions almost everywhere """
return not any(x in expr.free_symbols for expr in f.atoms(Heaviside, Abs))
def _condsimp(cond, first=True):
"""
Do naive simplifications on ``cond``.
Explanation
===========
Note that this routine is completely ad-hoc, simplification rules being
added as need arises rather than following any logical pattern.
Examples
========
>>> from sympy.integrals.meijerint import _condsimp as simp
>>> from sympy import Or, Eq
>>> from sympy.abc import x, y
>>> simp(Or(x < y, Eq(x, y)))
x <= y
"""
if first:
cond = cond.replace(lambda _: _.is_Relational, _canonical_coeff)
first = False
if not isinstance(cond, BooleanFunction):
return cond
p, q, r = symbols('p q r', cls=Wild)
# transforms tests use 0, 4, 5 and 11-14
# meijer tests use 0, 2, 11, 14
# joint_rv uses 6, 7
rules = [
(Or(p < q, Eq(p, q)), p <= q), # 0
# The next two obviously are instances of a general pattern, but it is
# easier to spell out the few cases we care about.
(And(Abs(arg(p)) <= pi, Abs(arg(p) - 2*pi) <= pi),
Eq(arg(p) - pi, 0)), # 1
(And(Abs(2*arg(p) + pi) <= pi, Abs(2*arg(p) - pi) <= pi),
Eq(arg(p), 0)), # 2
(And(Abs(2*arg(p) + pi) < pi, Abs(2*arg(p) - pi) <= pi),
S.false), # 3
(And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) <= pi/2),
Eq(arg(p), 0)), # 4
(And(Abs(arg(p) - pi/2) <= pi/2, Abs(arg(p) + pi/2) < pi/2),
S.false), # 5
(And(Abs(arg(p**2/2 + 1)) < pi, Ne(Abs(arg(p**2/2 + 1)), pi)),
S.true), # 6
(Or(Abs(arg(p**2/2 + 1)) < pi, Ne(1/(p**2/2 + 1), 0)),
S.true), # 7
(And(Abs(unbranched_argument(p)) <= pi,
Abs(unbranched_argument(exp_polar(-2*pi*S.ImaginaryUnit)*p)) <= pi),
Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi)*p), 0)), # 8
(And(Abs(unbranched_argument(p)) <= pi/2,
Abs(unbranched_argument(exp_polar(-pi*S.ImaginaryUnit)*p)) <= pi/2),
Eq(unbranched_argument(exp_polar(-S.ImaginaryUnit*pi/2)*p), 0)), # 9
(Or(p <= q, And(p < q, r)), p <= q), # 10
(Ne(p**2, 1) & (p**2 > 1), p**2 > 1), # 11
(Ne(1/p, 1) & (cos(Abs(arg(p)))*Abs(p) > 1), Abs(p) > 1), # 12
(Ne(p, 2) & (cos(Abs(arg(p)))*Abs(p) > 2), Abs(p) > 2), # 13
((Abs(arg(p)) < pi/2) & (cos(Abs(arg(p)))*sqrt(Abs(p**2)) > 1), p**2 > 1), # 14
]
cond = cond.func(*[_condsimp(_, first) for _ in cond.args])
change = True
while change:
change = False
for irule, (fro, to) in enumerate(rules):
if fro.func != cond.func:
continue
for n, arg1 in enumerate(cond.args):
if r in fro.args[0].free_symbols:
m = arg1.match(fro.args[1])
num = 1
else:
num = 0
m = arg1.match(fro.args[0])
if not m:
continue
otherargs = [x.subs(m) for x in fro.args[:num] + fro.args[num + 1:]]
otherlist = [n]
for arg2 in otherargs:
for k, arg3 in enumerate(cond.args):
if k in otherlist:
continue
if arg2 == arg3:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[1] == r and \
isinstance(arg2, And) and arg2.args[0] in arg3.args:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[0] == r and \
isinstance(arg2, And) and arg2.args[1] in arg3.args:
otherlist += [k]
break
if len(otherlist) != len(otherargs) + 1:
continue
newargs = [arg_ for (k, arg_) in enumerate(cond.args)
if k not in otherlist] + [to.subs(m)]
if SYMPY_DEBUG:
if irule not in (0, 2, 4, 5, 6, 7, 11, 12, 13, 14):
print('used new rule:', irule)
cond = cond.func(*newargs)
change = True
break
# final tweak
def rel_touchup(rel):
if rel.rel_op != '==' or rel.rhs != 0:
return rel
# handle Eq(*, 0)
LHS = rel.lhs
m = LHS.match(arg(p)**q)
if not m:
m = LHS.match(unbranched_argument(polar_lift(p)**q))
if not m:
if isinstance(LHS, periodic_argument) and not LHS.args[0].is_polar \
and LHS.args[1] is S.Infinity:
return (LHS.args[0] > 0)
return rel
return (m[p] > 0)
cond = cond.replace(lambda _: _.is_Relational, rel_touchup)
if SYMPY_DEBUG:
print('_condsimp: ', cond)
return cond
def _eval_cond(cond):
""" Re-evaluate the conditions. """
if isinstance(cond, bool):
return cond
return _condsimp(cond.doit())
####################################################################
# Now the "backbone" functions to do actual integration.
####################################################################
def _my_principal_branch(expr, period, full_pb=False):
""" Bring expr nearer to its principal branch by removing superfluous
factors.
This function does *not* guarantee to yield the principal branch,
to avoid introducing opaque principal_branch() objects,
unless full_pb=True. """
res = principal_branch(expr, period)
if not full_pb:
res = res.replace(principal_branch, lambda x, y: x)
return res
def _rewrite_saxena_1(fac, po, g, x):
"""
Rewrite the integral fac*po*g dx, from zero to infinity, as
integral fac*G, where G has argument a*x. Note po=x**s.
Return fac, G.
"""
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
period = g.get_period()
a = _my_principal_branch(a, period)
# We substitute t = x**b.
C = fac/(Abs(b)*a**((s + 1)/b - 1))
# Absorb a factor of (at)**((1 + s)/b - 1).
def tr(l):
return [a + (1 + s)/b - 1 for a in l]
return C, meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother),
a*x)
def _check_antecedents_1(g, x, helper=False):
r"""
Return a condition under which the mellin transform of g exists.
Any power of x has already been absorbed into the G function,
so this is just $\int_0^\infty g\, dx$.
See [L, section 5.6.1]. (Note that s=1.)
If ``helper`` is True, only check if the MT exists at infinity, i.e. if
$\int_1^\infty g\, dx$ exists.
"""
# NOTE if you update these conditions, please update the documentation as well
delta = g.delta
eta, _ = _get_coeff_exp(g.argument, x)
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
if p > q:
def tr(l):
return [1 - x for x in l]
return _check_antecedents_1(meijerg(tr(g.bm), tr(g.bother),
tr(g.an), tr(g.aother), x/eta),
x)
tmp = [-re(b) < 1 for b in g.bm] + [1 < 1 - re(a) for a in g.an]
cond_3 = And(*tmp)
tmp += [-re(b) < 1 for b in g.bother]
tmp += [1 < 1 - re(a) for a in g.aother]
cond_3_star = And(*tmp)
cond_4 = (-re(g.nu) + (q + 1 - p)/2 > q - p)
def debug(*msg):
_debug(*msg)
def debugf(string, arg):
_debugf(string, arg)
debug('Checking antecedents for 1 function:')
debugf(' delta=%s, eta=%s, m=%s, n=%s, p=%s, q=%s',
(delta, eta, m, n, p, q))
debugf(' ap = %s, %s', (list(g.an), list(g.aother)))
debugf(' bq = %s, %s', (list(g.bm), list(g.bother)))
debugf(' cond_3=%s, cond_3*=%s, cond_4=%s', (cond_3, cond_3_star, cond_4))
conds = []
# case 1
case1 = []
tmp1 = [1 <= n, p < q, 1 <= m]
tmp2 = [1 <= p, 1 <= m, Eq(q, p + 1), Not(And(Eq(n, 0), Eq(m, p + 1)))]
tmp3 = [1 <= p, Eq(q, p)]
for k in range(ceiling(delta/2) + 1):
tmp3 += [Ne(Abs(unbranched_argument(eta)), (delta - 2*k)*pi)]
tmp = [delta > 0, Abs(unbranched_argument(eta)) < delta*pi]
extra = [Ne(eta, 0), cond_3]
if helper:
extra = []
for t in [tmp1, tmp2, tmp3]:
case1 += [And(*(t + tmp + extra))]
conds += case1
debug(' case 1:', case1)
# case 2
extra = [cond_3]
if helper:
extra = []
case2 = [And(Eq(n, 0), p + 1 <= m, m <= q,
Abs(unbranched_argument(eta)) < delta*pi, *extra)]
conds += case2
debug(' case 2:', case2)
# case 3
extra = [cond_3, cond_4]
if helper:
extra = []
case3 = [And(p < q, 1 <= m, delta > 0, Eq(Abs(unbranched_argument(eta)), delta*pi),
*extra)]
case3 += [And(p <= q - 2, Eq(delta, 0), Eq(Abs(unbranched_argument(eta)), 0), *extra)]
conds += case3
debug(' case 3:', case3)
# TODO altered cases 4-7
# extra case from wofram functions site:
# (reproduced verbatim from Prudnikov, section 2.24.2)
# https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/01/
case_extra = []
case_extra += [Eq(p, q), Eq(delta, 0), Eq(unbranched_argument(eta), 0), Ne(eta, 0)]
if not helper:
case_extra += [cond_3]
s = []
for a, b in zip(g.ap, g.bq):
s += [b - a]
case_extra += [re(Add(*s)) < 0]
case_extra = And(*case_extra)
conds += [case_extra]
debug(' extra case:', [case_extra])
case_extra_2 = [And(delta > 0, Abs(unbranched_argument(eta)) < delta*pi)]
if not helper:
case_extra_2 += [cond_3]
case_extra_2 = And(*case_extra_2)
conds += [case_extra_2]
debug(' second extra case:', [case_extra_2])
# TODO This leaves only one case from the three listed by Prudnikov.
# Investigate if these indeed cover everything; if so, remove the rest.
return Or(*conds)
def _int0oo_1(g, x):
r"""
Evaluate $\int_0^\infty g\, dx$ using G functions,
assuming the necessary conditions are fulfilled.
Examples
========
>>> from sympy.abc import a, b, c, d, x, y
>>> from sympy import meijerg
>>> from sympy.integrals.meijerint import _int0oo_1
>>> _int0oo_1(meijerg([a], [b], [c], [d], x*y), x)
gamma(-a)*gamma(c + 1)/(y*gamma(-d)*gamma(b + 1))
"""
from sympy.simplify import gammasimp
# See [L, section 5.6.1]. Note that s=1.
eta, _ = _get_coeff_exp(g.argument, x)
res = 1/eta
# XXX TODO we should reduce order first
for b in g.bm:
res *= gamma(b + 1)
for a in g.an:
res *= gamma(1 - a - 1)
for b in g.bother:
res /= gamma(1 - b - 1)
for a in g.aother:
res /= gamma(a + 1)
return gammasimp(unpolarify(res))
def _rewrite_saxena(fac, po, g1, g2, x, full_pb=False):
"""
Rewrite the integral ``fac*po*g1*g2`` from 0 to oo in terms of G
functions with argument ``c*x``.
Explanation
===========
Return C, f1, f2 such that integral C f1 f2 from 0 to infinity equals
integral fac ``po``, ``g1``, ``g2`` from 0 to infinity.
Examples
========
>>> from sympy.integrals.meijerint import _rewrite_saxena
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg
>>> g1 = meijerg([], [], [0], [], s*t)
>>> g2 = meijerg([], [], [m/2], [-m/2], t**2/4)
>>> r = _rewrite_saxena(1, t**0, g1, g2, t)
>>> r[0]
s/(4*sqrt(pi))
>>> r[1]
meijerg(((), ()), ((-1/2, 0), ()), s**2*t/4)
>>> r[2]
meijerg(((), ()), ((m/2,), (-m/2,)), t/4)
"""
def pb(g):
a, b = _get_coeff_exp(g.argument, x)
per = g.get_period()
return meijerg(g.an, g.aother, g.bm, g.bother,
_my_principal_branch(a, per, full_pb)*x**b)
_, s = _get_coeff_exp(po, x)
_, b1 = _get_coeff_exp(g1.argument, x)
_, b2 = _get_coeff_exp(g2.argument, x)
if (b1 < 0) == True:
b1 = -b1
g1 = _flip_g(g1)
if (b2 < 0) == True:
b2 = -b2
g2 = _flip_g(g2)
if not b1.is_Rational or not b2.is_Rational:
return
m1, n1 = b1.p, b1.q
m2, n2 = b2.p, b2.q
tau = ilcm(m1*n2, m2*n1)
r1 = tau//(m1*n2)
r2 = tau//(m2*n1)
C1, g1 = _inflate_g(g1, r1)
C2, g2 = _inflate_g(g2, r2)
g1 = pb(g1)
g2 = pb(g2)
fac *= C1*C2
a1, b = _get_coeff_exp(g1.argument, x)
a2, _ = _get_coeff_exp(g2.argument, x)
# arbitrarily tack on the x**s part to g1
# TODO should we try both?
exp = (s + 1)/b - 1
fac = fac/(Abs(b) * a1**exp)
def tr(l):
return [a + exp for a in l]
g1 = meijerg(tr(g1.an), tr(g1.aother), tr(g1.bm), tr(g1.bother), a1*x)
g2 = meijerg(g2.an, g2.aother, g2.bm, g2.bother, a2*x)
from sympy.simplify import powdenest
return powdenest(fac, polar=True), g1, g2
def _check_antecedents(g1, g2, x):
""" Return a condition under which the integral theorem applies. """
# Yes, this is madness.
# XXX TODO this is a testing *nightmare*
# NOTE if you update these conditions, please update the documentation as well
# The following conditions are found in
# [P], Section 2.24.1
#
# They are also reproduced (verbatim!) at
# https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/
#
# Note: k=l=r=alpha=1
sigma, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
s, t, u, v = S([len(g1.bm), len(g1.an), len(g1.ap), len(g1.bq)])
m, n, p, q = S([len(g2.bm), len(g2.an), len(g2.ap), len(g2.bq)])
bstar = s + t - (u + v)/2
cstar = m + n - (p + q)/2
rho = g1.nu + (u - v)/2 + 1
mu = g2.nu + (p - q)/2 + 1
phi = q - p - (v - u)
eta = 1 - (v - u) - mu - rho
psi = (pi*(q - m - n) + Abs(unbranched_argument(omega)))/(q - p)
theta = (pi*(v - s - t) + Abs(unbranched_argument(sigma)))/(v - u)
_debug('Checking antecedents:')
_debugf(' sigma=%s, s=%s, t=%s, u=%s, v=%s, b*=%s, rho=%s',
(sigma, s, t, u, v, bstar, rho))
_debugf(' omega=%s, m=%s, n=%s, p=%s, q=%s, c*=%s, mu=%s,',
(omega, m, n, p, q, cstar, mu))
_debugf(' phi=%s, eta=%s, psi=%s, theta=%s', (phi, eta, psi, theta))
def _c1():
for g in [g1, g2]:
for i, j in itertools.product(g.an, g.bm):
diff = i - j
if diff.is_integer and diff.is_positive:
return False
return True
c1 = _c1()
c2 = And(*[re(1 + i + j) > 0 for i in g1.bm for j in g2.bm])
c3 = And(*[re(1 + i + j) < 1 + 1 for i in g1.an for j in g2.an])
c4 = And(*[(p - q)*re(1 + i - 1) - re(mu) > Rational(-3, 2) for i in g1.an])
c5 = And(*[(p - q)*re(1 + i) - re(mu) > Rational(-3, 2) for i in g1.bm])
c6 = And(*[(u - v)*re(1 + i - 1) - re(rho) > Rational(-3, 2) for i in g2.an])
c7 = And(*[(u - v)*re(1 + i) - re(rho) > Rational(-3, 2) for i in g2.bm])
c8 = (Abs(phi) + 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c9 = (Abs(phi) - 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c10 = (Abs(unbranched_argument(sigma)) < bstar*pi)
c11 = Eq(Abs(unbranched_argument(sigma)), bstar*pi)
c12 = (Abs(unbranched_argument(omega)) < cstar*pi)
c13 = Eq(Abs(unbranched_argument(omega)), cstar*pi)
# The following condition is *not* implemented as stated on the wolfram
# function site. In the book of Prudnikov there is an additional part
# (the And involving re()). However, I only have this book in russian, and
# I don't read any russian. The following condition is what other people
# have told me it means.
# Worryingly, it is different from the condition implemented in REDUCE.
# The REDUCE implementation:
# https://reduce-algebra.svn.sourceforge.net/svnroot/reduce-algebra/trunk/packages/defint/definta.red
# (search for tst14)
# The Wolfram alpha version:
# https://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/03/0014/
z0 = exp(-(bstar + cstar)*pi*S.ImaginaryUnit)
zos = unpolarify(z0*omega/sigma)
zso = unpolarify(z0*sigma/omega)
if zos == 1/zso:
c14 = And(Eq(phi, 0), bstar + cstar <= 1,
Or(Ne(zos, 1), re(mu + rho + v - u) < 1,
re(mu + rho + q - p) < 1))
else:
def _cond(z):
'''Returns True if abs(arg(1-z)) < pi, avoiding arg(0).
Explanation
===========
If ``z`` is 1 then arg is NaN. This raises a
TypeError on `NaN < pi`. Previously this gave `False` so
this behavior has been hardcoded here but someone should
check if this NaN is more serious! This NaN is triggered by
test_meijerint() in test_meijerint.py:
`meijerint_definite(exp(x), x, 0, I)`
'''
return z != 1 and Abs(arg(1 - z)) < pi
c14 = And(Eq(phi, 0), bstar - 1 + cstar <= 0,
Or(And(Ne(zos, 1), _cond(zos)),
And(re(mu + rho + v - u) < 1, Eq(zos, 1))))
c14_alt = And(Eq(phi, 0), cstar - 1 + bstar <= 0,
Or(And(Ne(zso, 1), _cond(zso)),
And(re(mu + rho + q - p) < 1, Eq(zso, 1))))
# Since r=k=l=1, in our case there is c14_alt which is the same as calling
# us with (g1, g2) = (g2, g1). The conditions below enumerate all cases
# (i.e. we don't have to try arguments reversed by hand), and indeed try
# all symmetric cases. (i.e. whenever there is a condition involving c14,
# there is also a dual condition which is exactly what we would get when g1,
# g2 were interchanged, *but c14 was unaltered*).
# Hence the following seems correct:
c14 = Or(c14, c14_alt)
'''
When `c15` is NaN (e.g. from `psi` being NaN as happens during
'test_issue_4992' and/or `theta` is NaN as in 'test_issue_6253',
both in `test_integrals.py`) the comparison to 0 formerly gave False
whereas now an error is raised. To keep the old behavior, the value
of NaN is replaced with False but perhaps a closer look at this condition
should be made: XXX how should conditions leading to c15=NaN be handled?
'''
try:
lambda_c = (q - p)*Abs(omega)**(1/(q - p))*cos(psi) \
+ (v - u)*Abs(sigma)**(1/(v - u))*cos(theta)
# the TypeError might be raised here, e.g. if lambda_c is NaN
if _eval_cond(lambda_c > 0) != False:
c15 = (lambda_c > 0)
else:
def lambda_s0(c1, c2):
return c1*(q - p)*Abs(omega)**(1/(q - p))*sin(psi) \
+ c2*(v - u)*Abs(sigma)**(1/(v - u))*sin(theta)
lambda_s = Piecewise(
((lambda_s0(+1, +1)*lambda_s0(-1, -1)),
And(Eq(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))),
(lambda_s0(sign(unbranched_argument(omega)), +1)*lambda_s0(sign(unbranched_argument(omega)), -1),
And(Eq(unbranched_argument(sigma), 0), Ne(unbranched_argument(omega), 0))),
(lambda_s0(+1, sign(unbranched_argument(sigma)))*lambda_s0(-1, sign(unbranched_argument(sigma))),
And(Ne(unbranched_argument(sigma), 0), Eq(unbranched_argument(omega), 0))),
(lambda_s0(sign(unbranched_argument(omega)), sign(unbranched_argument(sigma))), True))
tmp = [lambda_c > 0,
And(Eq(lambda_c, 0), Ne(lambda_s, 0), re(eta) > -1),
And(Eq(lambda_c, 0), Eq(lambda_s, 0), re(eta) > 0)]
c15 = Or(*tmp)
except TypeError:
c15 = False
for cond, i in [(c1, 1), (c2, 2), (c3, 3), (c4, 4), (c5, 5), (c6, 6),
(c7, 7), (c8, 8), (c9, 9), (c10, 10), (c11, 11),
(c12, 12), (c13, 13), (c14, 14), (c15, 15)]:
_debugf(' c%s: %s', (i, cond))
# We will return Or(*conds)
conds = []
def pr(count):
_debugf(' case %s: %s', (count, conds[-1]))
conds += [And(m*n*s*t != 0, bstar.is_positive is True, cstar.is_positive is True, c1, c2, c3, c10,
c12)] # 1
pr(1)
conds += [And(Eq(u, v), Eq(bstar, 0), cstar.is_positive is True, sigma.is_positive is True, re(rho) < 1,
c1, c2, c3, c12)] # 2
pr(2)
conds += [And(Eq(p, q), Eq(cstar, 0), bstar.is_positive is True, omega.is_positive is True, re(mu) < 1,
c1, c2, c3, c10)] # 3
pr(3)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu) < 1, re(rho) < 1,
Ne(sigma, omega), c1, c2, c3)] # 4
pr(4)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu + rho) < 1,
Ne(omega, sigma), c1, c2, c3)] # 5
pr(5)
conds += [And(p > q, s.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c5, c10, c13)] # 6
pr(6)
conds += [And(p < q, t.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c4, c10, c13)] # 7
pr(7)
conds += [And(u > v, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c7, c11, c12)] # 8
pr(8)
conds += [And(u < v, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c6, c11, c12)] # 9
pr(9)
conds += [And(p > q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c5, c13)] # 10
pr(10)
conds += [And(p < q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c4, c13)] # 11
pr(11)
conds += [And(Eq(p, q), u > v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c7, c11)] # 12
pr(12)
conds += [And(Eq(p, q), u < v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c6, c11)] # 13
pr(13)
conds += [And(p < q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c7, c11, c13)] # 14
pr(14)
conds += [And(p > q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c6, c11, c13)] # 15
pr(15)
conds += [And(p > q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c7, c8, c11, c13, c14)] # 16
pr(16)
conds += [And(p < q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c6, c9, c11, c13, c14)] # 17
pr(17)
conds += [And(Eq(t, 0), s.is_positive is True, bstar.is_positive is True, phi.is_positive is True, c1, c2, c10)] # 18
pr(18)
conds += [And(Eq(s, 0), t.is_positive is True, bstar.is_positive is True, phi.is_negative is True, c1, c3, c10)] # 19
pr(19)
conds += [And(Eq(n, 0), m.is_positive is True, cstar.is_positive is True, phi.is_negative is True, c1, c2, c12)] # 20
pr(20)
conds += [And(Eq(m, 0), n.is_positive is True, cstar.is_positive is True, phi.is_positive is True, c1, c3, c12)] # 21
pr(21)
conds += [And(Eq(s*t, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 22
pr(22)
conds += [And(Eq(m*n, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 23
pr(23)
# The following case is from [Luke1969]. As far as I can tell, it is *not*
# covered by Prudnikov's.
# Let G1 and G2 be the two G-functions. Suppose the integral exists from
# 0 to a > 0 (this is the easy part), that G1 decays exponentially at
# infinity, and that the Mellin transform of G2 exists.
# Then the integral exists.
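# Roughly speaking, each of the four conditions below (labelled E1-E4 in the
# debug output) pairs the existence of the Mellin transform of one factor with
# decay conditions on the other.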
mt1_exists = _check_antecedents_1(g1, x, helper=True)
mt2_exists = _check_antecedents_1(g2, x, helper=True)
conds += [And(mt2_exists, Eq(t, 0), u < s, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E1')
conds += [And(mt2_exists, Eq(s, 0), v < t, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E2')
conds += [And(mt1_exists, Eq(n, 0), p < m, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E3')
conds += [And(mt1_exists, Eq(m, 0), q < n, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E4')
# Let's short-circuit if this worked ...
# the rest is corner-cases and terrible to read.
r = Or(*conds)
if _eval_cond(r) != False:
return r
conds += [And(m + n > p, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 24
pr(24)
conds += [And(m + n > q, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 25
pr(25)
conds += [And(Eq(p, q - 1), Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
c1, c2, c10, c14, c15)] # 26
pr(26)
conds += [And(Eq(p, q + 1), Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
c1, c3, c10, c14, c15)] # 27
pr(27)
conds += [And(p < q - 1, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < Abs(unbranched_argument(omega)),
Abs(unbranched_argument(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 28
pr(28)
conds += [And(
p > q + 1, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar >= 0,
cstar*pi < Abs(unbranched_argument(omega)),
Abs(unbranched_argument(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 29
pr(29)
conds += [And(Eq(n, 0), Eq(phi, 0), s + t > 0, m.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 30
pr(30)
conds += [And(Eq(m, 0), Eq(phi, 0), s + t > v, n.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 31
pr(31)
conds += [And(Eq(n, 0), Eq(phi, 0), Eq(u, v - 1), m.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (bstar + 1)*pi,
c1, c2, c12, c14, c15)] # 32
pr(32)
conds += [And(Eq(m, 0), Eq(phi, 0), Eq(u, v + 1), n.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (bstar + 1)*pi,
c1, c3, c12, c14, c15)] # 33
pr(33)
conds += [And(
Eq(n, 0), Eq(phi, 0), u < v - 1, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 34
pr(34)
conds += [And(
Eq(m, 0), Eq(phi, 0), u > v + 1, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < Abs(unbranched_argument(sigma)),
Abs(unbranched_argument(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 35
pr(35)
return Or(*conds)
# NOTE An alternative, but as far as I can tell weaker, set of conditions
# can be found in [L, section 5.6.2].
def _int0oo(g1, g2, x):
"""
Express integral from zero to infinity g1*g2 using a G function,
assuming the necessary conditions are fulfilled.
Examples
========
>>> from sympy.integrals.meijerint import _int0oo
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg, S
>>> g1 = meijerg([], [], [-S(1)/2, 0], [], s**2*t/4)
>>> g2 = meijerg([], [], [m/2], [-m/2], t/4)
>>> _int0oo(g1, g2, t)
4*meijerg(((0, 1/2), ()), ((m/2,), (-m/2,)), s**(-2))/s**2
"""
# See: [L, section 5.6.2, equation (1)]
eta, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
def neg(l):
return [-x for x in l]
a1 = neg(g1.bm) + list(g2.an)
a2 = list(g2.aother) + neg(g1.bother)
b1 = neg(g1.an) + list(g2.bm)
b2 = list(g2.bother) + neg(g1.aother)
return meijerg(a1, a2, b1, b2, omega/eta)/eta
def _rewrite_inversion(fac, po, g, x):
""" Absorb ``po`` == x**s into g. """
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
def tr(l):
return [t + s/b for t in l]
from sympy.simplify import powdenest
return (powdenest(fac/a**(s/b), polar=True),
meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument))
def _check_antecedents_inversion(g, x):
""" Check antecedents for the laplace inversion integral. """
_debug('Checking antecedents for inversion:')
z = g.argument
_, e = _get_coeff_exp(z, x)
if e < 0:
_debug(' Flipping G.')
# We want to assume that argument gets large as |x| -> oo
return _check_antecedents_inversion(_flip_g(g), x)
def statement_half(a, b, c, z, plus):
coeff, exponent = _get_coeff_exp(z, x)
a *= exponent
b *= coeff**c
c *= exponent
conds = []
wp = b*exp(S.ImaginaryUnit*re(c)*pi/2)
wm = b*exp(-S.ImaginaryUnit*re(c)*pi/2)
if plus:
w = wp
else:
w = wm
conds += [And(Or(Eq(b, 0), re(c) <= 0), re(a) <= -1)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) < 0)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) <= 0,
re(a) <= -1)]
return Or(*conds)
def statement(a, b, c, z):
""" Provide a convergence statement for z**a * exp(b*z**c),
c/f sphinx docs. """
return And(statement_half(a, b, c, z, True),
statement_half(a, b, c, z, False))
# Notations from [L], section 5.7-10
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
tau = m + n - p
nu = q - m - n
rho = (tau - nu)/2
sigma = q - p
if sigma == 1:
epsilon = S.Half
elif sigma > 1:
epsilon = 1
else:
epsilon = S.NaN
theta = ((1 - sigma)/2 + Add(*g.bq) - Add(*g.ap))/sigma
delta = g.delta
_debugf(' m=%s, n=%s, p=%s, q=%s, tau=%s, nu=%s, rho=%s, sigma=%s',
(m, n, p, q, tau, nu, rho, sigma))
_debugf(' epsilon=%s, theta=%s, delta=%s', (epsilon, theta, delta))
# First check if the computation is valid.
if not (g.delta >= e/2 or (p >= 1 and p >= q)):
_debug(' Computation not valid for these parameters.')
return False
# Now check if the inversion integral exists.
# Test "condition A"
for a, b in itertools.product(g.an, g.bm):
if (a - b).is_integer and a > b:
_debug(' Not a valid G function.')
return False
# There are two cases. If p >= q, we can directly use a Slater expansion
# like [L], 5.2 (11). Note in particular that the asymptotics of such an
# expansion even hold when some of the parameters differ by integers, i.e.
# when the formula itself would not be valid! (This works because G functions
# are continuous in their parameters.)
# When p < q, we need to use the theorems of [L], 5.10.
if p >= q:
_debug(' Using asymptotic Slater expansion.')
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def E(z):
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def H(z):
return statement(theta, -sigma, 1/sigma, z)
def Hp(z):
return statement_half(theta, -sigma, 1/sigma, z, True)
def Hm(z):
return statement_half(theta, -sigma, 1/sigma, z, False)
# [L], section 5.10
conds = []
# Theorem 1 -- p < q from test above
conds += [And(1 <= n, 1 <= m, rho*pi - delta >= pi/2, delta > 0,
E(z*exp(S.ImaginaryUnit*pi*(nu + 1))))]
# Theorem 2, statements (2) and (3)
conds += [And(p + 1 <= m, m + 1 <= q, delta > 0, delta < pi/2, n == 0,
(m - p + 1)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*(q - m))),
Hm(z*exp(-S.ImaginaryUnit*pi*(q - m))))]
# Theorem 2, statement (5) -- p < q from test above
conds += [And(m == q, n == 0, delta > 0,
(sigma + epsilon)*pi - delta >= pi/2, H(z))]
# Theorem 3, statements (6) and (7)
conds += [And(Or(And(p <= q - 2, 1 <= tau, tau <= sigma/2),
And(p + 1 <= m + n, m + n <= (p + q)/2)),
delta > 0, delta < pi/2, (tau + 1)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*nu)),
Hm(z*exp(-S.ImaginaryUnit*pi*nu)))]
# Theorem 4, statements (10) and (11) -- p < q from test above
conds += [And(1 <= m, rho > 0, delta > 0, delta + rho*pi < pi/2,
(tau + epsilon)*pi - delta >= pi/2,
Hp(z*exp(S.ImaginaryUnit*pi*nu)),
Hm(z*exp(-S.ImaginaryUnit*pi*nu)))]
# Trivial case
conds += [m == 0]
# TODO
# Theorem 5 is quite general
# Theorem 6 contains special cases for q=p+1
return Or(*conds)
def _int_inversion(g, x, t):
"""
Compute the Laplace inversion integral, assuming the formula applies.
"""
b, a = _get_coeff_exp(g.argument, x)
C, g = _inflate_fox_h(meijerg(g.an, g.aother, g.bm, g.bother, b/t**a), -a)
return C/t*g
####################################################################
# Finally, the real meat.
####################################################################
_lookup_table = None
@cacheit
@timeit
def _rewrite_single(f, x, recursive=True):
"""
Try to rewrite f as a sum of single G functions of the form
C*x**s*G(a*x**b), where b is a rational number and C is independent of x.
We guarantee that result.argument.as_coeff_mul(x) returns (a, (x**b,))
or (a, ()).
Returns a list of tuples (C, s, G) and a condition cond.
Returns None on failure.
"""
from .transforms import (mellin_transform, inverse_mellin_transform,
IntegralTransformError, MellinTransformStripError)
global _lookup_table
if not _lookup_table:
_lookup_table = {}
_create_lookup_table(_lookup_table)
if isinstance(f, meijerg):
coeff, m = factor(f.argument, x).as_coeff_mul(x)
if len(m) > 1:
return None
m = m[0]
if m.is_Pow:
if m.base != x or not m.exp.is_Rational:
return None
elif m != x:
return None
return [(1, 0, meijerg(f.an, f.aother, f.bm, f.bother, coeff*m))], True
f_ = f
f = f.subs(x, z)
t = _mytype(f, z)
if t in _lookup_table:
l = _lookup_table[t]
for formula, terms, cond, hint in l:
subs = f.match(formula, old=True)
if subs:
subs_ = {}
for fro, to in subs.items():
subs_[fro] = unpolarify(polarify(to, lift=True),
exponents_only=True)
subs = subs_
if not isinstance(hint, bool):
hint = hint.subs(subs)
if hint == False:
continue
if not isinstance(cond, (bool, BooleanAtom)):
cond = unpolarify(cond.subs(subs))
if _eval_cond(cond) == False:
continue
if not isinstance(terms, list):
terms = terms(subs)
res = []
for fac, g in terms:
r1 = _get_coeff_exp(unpolarify(fac.subs(subs).subs(z, x),
exponents_only=True), x)
try:
g = g.subs(subs).subs(z, x)
except ValueError:
continue
# NOTE these substitutions can in principle introduce oo,
# zoo and other absurdities. It shouldn't matter,
# but better be safe.
if Tuple(*(r1 + (g,))).has(S.Infinity, S.ComplexInfinity, S.NegativeInfinity):
continue
g = meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(g.argument, exponents_only=True))
res.append(r1 + (g,))
if res:
return res, cond
# try recursive mellin transform
if not recursive:
return None
_debug('Trying recursive Mellin transform method.')
def my_imt(F, s, x, strip):
""" Calling simplify() all the time is slow and not helpful, since
most of the time it only factors things in a way that has to be
un-done anyway. But sometimes it can remove apparent poles. """
# XXX should this be in inverse_mellin_transform?
try:
return inverse_mellin_transform(F, s, x, strip,
as_meijerg=True, needeval=True)
except MellinTransformStripError:
from sympy.simplify import simplify
return inverse_mellin_transform(
simplify(cancel(expand(F))), s, x, strip,
as_meijerg=True, needeval=True)
f = f_
s = _dummy('s', 'rewrite-single', f)
# to avoid infinite recursion, we have to force the two g functions case
def my_integrator(f, x):
r = _meijerint_definite_4(f, x, only_double=True)
if r is not None:
from sympy.simplify import hyperexpand
res, cond = r
res = _my_unpolarify(hyperexpand(res, rewrite='nonrepsmall'))
return Piecewise((res, cond),
(Integral(f, (x, S.Zero, S.Infinity)), True))
return Integral(f, (x, S.Zero, S.Infinity))
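# Compute the Mellin transform of f using the restricted integrator above,
# then invert it to recover a representation in terms of G functions.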
try:
F, strip, _ = mellin_transform(f, x, s, integrator=my_integrator,
simplify=False, needeval=True)
g = my_imt(F, s, x, strip)
except IntegralTransformError:
g = None
if g is None:
# We try to find an expression by analytic continuation.
# (also if the dummy is already in the expression, there is no point in
# putting in another one)
a = _dummy_('a', 'rewrite-single')
if a not in f.free_symbols and _is_analytic(f, x):
try:
F, strip, _ = mellin_transform(f.subs(x, a*x), x, s,
integrator=my_integrator,
needeval=True, simplify=False)
g = my_imt(F, s, x, strip).subs(a, 1)
except IntegralTransformError:
g = None
if g is None or g.has(S.Infinity, S.NaN, S.ComplexInfinity):
_debug('Recursive Mellin transform failed.')
return None
args = Add.make_args(g)
res = []
for f in args:
c, m = f.as_coeff_mul(x)
if len(m) > 1:
raise NotImplementedError('Unexpected form...')
g = m[0]
a, b = _get_coeff_exp(g.argument, x)
res += [(c, 0, meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(polarify(
a, lift=True), exponents_only=True)
*x**b))]
_debug('Recursive Mellin transform worked:', g)
return res, True
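# For a sense of the output shape (illustrative, not verified): since exp(-x)
# equals the G function meijerg([], [], [0], [], x), a call such as
# _rewrite_single(exp(-x), x) should yield something like
# ([(1, 0, meijerg([], [], [0], [], x))], True), i.e. one (C, s, G) triple
# together with a trivially true condition.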
def _rewrite1(f, x, recursive=True):
"""
Try to rewrite ``f`` using a (sum of) single G functions with argument a*x**b.
Return fac, po, g such that f = fac*po*g, where fac is independent of ``x``
and po = x**s.
Here g is a result from _rewrite_single.
Return None on failure.
"""
fac, po, g = _split_mul(f, x)
g = _rewrite_single(g, x, recursive)
if g:
return fac, po, g[0], g[1]
def _rewrite2(f, x):
"""
Try to rewrite ``f`` as a product of two G functions of arguments a*x**b.
Return fac, po, g1, g2 such that f = fac*po*g1*g2, where fac is
independent of x and po is x**s.
Here g1 and g2 are results of _rewrite_single.
Returns None on failure.
"""
fac, po, g = _split_mul(f, x)
if any(_rewrite_single(expr, x, False) is None for expr in _mul_args(g)):
return None
l = _mul_as_two_parts(g)
if not l:
return None
l = list(ordered(l, [
lambda p: max(len(_exponents(p[0], x)), len(_exponents(p[1], x))),
lambda p: max(len(_functions(p[0], x)), len(_functions(p[1], x))),
lambda p: max(len(_find_splitting_points(p[0], x)),
len(_find_splitting_points(p[1], x)))]))
for recursive, (fac1, fac2) in itertools.product((False, True), l):
g1 = _rewrite_single(fac1, x, recursive)
g2 = _rewrite_single(fac2, x, recursive)
if g1 and g2:
cond = And(g1[1], g2[1])
if cond != False:
return fac, po, g1[0], g2[0], cond
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
f = sympify(f)
results = []
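# Strategy: for each candidate splitting point a (plus 0), shift the integrand
# via x -> x + a, integrate the shifted function, and undo the shift with
# x -> x - a. Answers that still contain hyper/meijerg terms are kept only as
# fallbacks in case no cleaner result turns up.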
for a in sorted(_find_splitting_points(f, x) | {S.Zero}, key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not isinstance(rv, list):
from sympy.simplify.radsimp import collect
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
def _meijerint_indefinite_1(f, x):
""" Helper that does not attempt any substitution. """
_debug('Trying to compute the indefinite integral of', f, 'wrt', x)
from sympy.simplify import hyperexpand, powdenest
gs = _rewrite1(f, x)
if gs is None:
# Note: the code that calls us will do expand() and try again
return None
fac, po, gl, cond = gs
_debug(' could rewrite:', gs)
res = S.Zero
for C, s, g in gl:
a, b = _get_coeff_exp(g.argument, x)
_, c = _get_coeff_exp(po, x)
c += s
# we do a substitution t=a*x**b, get integrand fac*t**rho*g
fac_ = fac * C * x**(1 + c) / b
rho = (c + 1)/b
# we now use t**rho*G(params, t) = G(params + rho, t)
# [L, page 150, equation (4)]
# and integral G(params, t) dt = G(1, params+1, 0, t)
# (or a similar expression with 1 and 0 exchanged ... pick the one
# which yields a well-defined function)
# [R, section 5]
# (Note that this dummy will immediately go away again, so we
# can safely pass S.One for ``expr``.)
t = _dummy('t', 'meijerint-indefinite', S.One)
def tr(p):
return [a + rho for a in p]
if any(b.is_integer and (b <= 0) == True for b in tr(g.bm)):
r = -meijerg(
list(g.an), list(g.aother) + [1-rho], list(g.bm) + [-rho], list(g.bother), t)
else:
r = meijerg(
list(g.an) + [1-rho], list(g.aother), list(g.bm), list(g.bother) + [-rho], t)
# The antiderivative is most often expected to be defined
# in the neighborhood of x = 0.
if b.is_extended_nonnegative and not f.subs(x, 0).has(S.NaN, S.ComplexInfinity):
place = 0 # Assume we can expand at zero
else:
place = None
r = hyperexpand(r.subs(t, a*x**b), place=place)
# now substitute back
# Note: we really do want the powers of x to combine.
res += powdenest(fac_*r, polar=True)
def _clean(res):
"""This multiplies out superfluous powers of x we created, and chops off
constants:
>> _clean(x*(exp(x)/x - 1/x) + 3)
exp(x)
cancel is used before expand_mul since it is possible for an
expression to have an additive constant that does not become isolated
with simple expansion. Such a situation was identified in issue 6369:
Examples
========
>>> from sympy import sqrt, cancel
>>> from sympy.abc import x
>>> a = sqrt(2*x + 1)
>>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2
>>> bad.expand().as_independent(x)[0]
0
>>> cancel(bad).expand().as_independent(x)[0]
1
"""
res = expand_mul(cancel(res), deep=False)
return Add._from_args(res.as_coeff_add(x)[1])
res = piecewise_fold(res, evaluate=None)
if res.is_Piecewise:
newargs = []
for e, c in res.args:
e = _my_unpolarify(_clean(e))
newargs += [(e, c)]
res = Piecewise(*newargs, evaluate=False)
else:
res = _my_unpolarify(_clean(res))
return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True))
@timeit
def meijerint_definite(f, x, a, b):
"""
Integrate ``f`` over the interval [``a``, ``b``], by rewriting it as a product
of two G functions, or as a single G function.
Return res, cond, where cond are convergence conditions.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_definite
>>> from sympy import exp, oo
>>> from sympy.abc import x
>>> meijerint_definite(exp(-x**2), x, -oo, oo)
(sqrt(pi), True)
This function is implemented as a succession of functions
meijerint_definite, _meijerint_definite_2, _meijerint_definite_3,
_meijerint_definite_4. Each function in the list calls the next one
(presumably) several times. This means that calling meijerint_definite
can be very costly.
"""
# This consists of three steps:
# 1) Change the integration limits to 0, oo
# 2) Rewrite in terms of G functions
# 3) Evaluate the integral
#
# There are usually several ways of doing this, and we want to try all.
# This function does (1), calls _meijerint_definite_2 for step (2).
_debugf('Integrating %s wrt %s from %s to %s.', (f, x, a, b))
f = sympify(f)
if f.has(DiracDelta):
_debug('Integrand has DiracDelta terms - giving up.')
return None
if f.has(SingularityFunction):
_debug('Integrand has Singularity Function terms - giving up.')
return None
f_, x_, a_, b_ = f, x, a, b
# Let's use a dummy in case any of the boundaries has x.
d = Dummy('x')
f = f.subs(x, d)
x = d
if a == b:
return (S.Zero, True)
results = []
if a is S.NegativeInfinity and b is not S.Infinity:
return meijerint_definite(f.subs(x, -x), x, -b, -a)
elif a is S.NegativeInfinity:
# Integrating -oo to oo. We need to find a place to split the integral.
_debug(' Integrating -oo to +oo.')
innermost = _find_splitting_points(f, x)
_debug(' Sensible splitting points:', innermost)
for c in sorted(innermost, key=default_sort_key, reverse=True) + [S.Zero]:
_debug(' Trying to split at', c)
if not c.is_extended_real:
_debug(' Non-real splitting point.')
continue
res1 = _meijerint_definite_2(f.subs(x, x + c), x)
if res1 is None:
_debug(' But could not compute first integral.')
continue
res2 = _meijerint_definite_2(f.subs(x, c - x), x)
if res2 is None:
_debug(' But could not compute second integral.')
continue
res1, cond1 = res1
res2, cond2 = res2
cond = _condsimp(And(cond1, cond2))
if cond == False:
_debug(' But combined condition is always false.')
continue
res = res1 + res2
return res, cond
elif a is S.Infinity:
res = meijerint_definite(f, x, b, S.Infinity)
return -res[0], res[1]
elif (a, b) == (S.Zero, S.Infinity):
# This is a common case - try it directly first.
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
else:
if b is S.Infinity:
for split in _find_splitting_points(f, x):
if (a - split >= 0) == True:
_debugf('Trying x -> x + %s', split)
res = _meijerint_definite_2(f.subs(x, x + split)
*Heaviside(x + split - a), x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
f = f.subs(x, x + a)
b = b - a
a = 0
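# A finite upper limit b is handled by rotating onto the ray through b
# (x -> phi*x with phi = exp(I*arg(b))) and cutting the integrand off with
# Heaviside(b - x), so that the remaining integral runs from 0 to oo.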
if b is not S.Infinity:
phi = exp(S.ImaginaryUnit*arg(b))
b = Abs(b)
f = f.subs(x, phi*x)
f *= Heaviside(b - x)*phi
b = S.Infinity
_debug('Changed limits to', a, b)
_debug('Changed function to', f)
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
if f_.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_definite(
_rewrite_hyperbolics_as_exp(f_), x_, a_, b_)
if rv:
if not isinstance(rv, list):
from sympy.simplify.radsimp import collect
rv = (collect(factor_terms(rv[0]), rv[0].atoms(exp)),) + rv[1:]
return rv
results.extend(rv)
if results:
return next(ordered(results))
def _guess_expansion(f, x):
""" Try to guess sensible rewritings for integrand f(x). """
res = [(f, 'original integrand')]
orig = res[-1][0]
saw = {orig}
expanded = expand_mul(orig)
if expanded not in saw:
res += [(expanded, 'expand_mul')]
saw.add(expanded)
expanded = expand(orig)
if expanded not in saw:
res += [(expanded, 'expand')]
saw.add(expanded)
if orig.has(TrigonometricFunction, HyperbolicFunction):
expanded = expand_mul(expand_trig(orig))
if expanded not in saw:
res += [(expanded, 'expand_trig, expand_mul')]
saw.add(expanded)
if orig.has(cos, sin):
from sympy.simplify.fu import sincos_to_sum
reduced = sincos_to_sum(orig)
if reduced not in saw:
res += [(reduced, 'trig power reduction')]
saw.add(reduced)
return res
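# For instance (illustrative), an integrand like sin(x)**2*exp(-x) would be
# offered here both unchanged and with the sine power rewritten as a sum via
# sincos_to_sum, in addition to the expand_mul/expand variants above.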
def _meijerint_definite_2(f, x):
"""
Try to integrate f dx from zero to infinity.
The body of this function computes various 'simplifications'
f1, f2, ... of f (e.g. by calling expand_mul(), expand_trig()
- see _guess_expansion) and calls _meijerint_definite_3 with each of
these in succession.
If _meijerint_definite_3 succeeds with any of the simplified functions,
returns this result.
"""
# This function does preparation for (2), calls
# _meijerint_definite_3 for (2) and (3) combined.
# use a positive dummy - we integrate from 0 to oo
# XXX if a nonnegative symbol is used there will be test failures
dummy = _dummy('x', 'meijerint-definite2', f, positive=True)
f = f.subs(x, dummy)
x = dummy
if f == 0:
return S.Zero, True
for g, explanation in _guess_expansion(f, x):
_debug('Trying', explanation)
res = _meijerint_definite_3(g, x)
if res:
return res
def _meijerint_definite_3(f, x):
"""
Try to integrate f dx from zero to infinity.
This function calls _meijerint_definite_4 to try to compute the
integral. If this fails, it tries using linearity.
"""
res = _meijerint_definite_4(f, x)
if res and res[1] != False:
return res
if f.is_Add:
_debug('Expanding and evaluating all terms.')
ress = [_meijerint_definite_4(g, x) for g in f.args]
if all(r is not None for r in ress):
conds = []
res = S.Zero
for r, c in ress:
res += r
conds += [c]
c = And(*conds)
if c != False:
return res, c
def _my_unpolarify(f):
return _eval_cond(unpolarify(f))
@timeit
def _meijerint_definite_4(f, x, only_double=False):
"""
Try to integrate f dx from zero to infinity.
Explanation
===========
This function tries to apply the integration theorems found in literature,
i.e. it tries to rewrite f as either one or a product of two G-functions.
The parameter ``only_double`` is used internally in the recursive algorithm
to disable trying to rewrite f as a single G-function.
"""
from sympy.simplify import hyperexpand
# This function does (2) and (3)
_debug('Integrating', f)
# Try single G function.
if not only_double:
gs = _rewrite1(f, x, recursive=False)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S.Zero
for C, s, f in g:
if C == 0:
continue
C, f = _rewrite_saxena_1(fac*C, po*x**s, f, x)
res += C*_int0oo_1(f, x)
cond = And(cond, _check_antecedents_1(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitutions is:', res)
return _my_unpolarify(hyperexpand(res)), cond
# Try two G functions.
gs = _rewrite2(f, x)
if gs is not None:
for full_pb in [False, True]:
fac, po, g1, g2, cond = gs
_debug('Could rewrite as two G functions:', fac, po, g1, g2)
res = S.Zero
for C1, s1, f1 in g1:
for C2, s2, f2 in g2:
r = _rewrite_saxena(fac*C1*C2, po*x**(s1 + s2),
f1, f2, x, full_pb)
if r is None:
_debug('Non-rational exponents.')
return
C, f1_, f2_ = r
_debug('Saxena substitution yielded:', C, f1_, f2_)
cond = And(cond, _check_antecedents(f1_, f2_, x))
if cond == False:
break
res += C*_int0oo(f1_, f2_, x)
else:
continue
break
cond = _my_unpolarify(cond)
if cond == False:
_debugf('But cond is always False (full_pb=%s).', full_pb)
else:
_debugf('Result before branch substitutions is: %s', (res, ))
if only_double:
return res, cond
return _my_unpolarify(hyperexpand(res)), cond
def meijerint_inversion(f, x, t):
r"""
Compute the inverse Laplace transform
$\int_{c+i\infty}^{c-i\infty} f(x) e^{tx}\, dx$,
for real c larger than the real part of all singularities of ``f``.
Note that ``t`` is always assumed real and positive.
Return None if the integral does not exist or could not be evaluated.
Examples
========
>>> from sympy.abc import x, t
>>> from sympy.integrals.meijerint import meijerint_inversion
>>> meijerint_inversion(1/x, x, t)
Heaviside(t)
"""
f_ = f
t_ = t
t = Dummy('t', polar=True) # We don't want sqrt(t**2) = abs(t) etc
f = f.subs(t_, t)
_debug('Laplace-inverting', f)
if not _is_analytic(f, x):
_debug('But expression is not analytic.')
return None
# Exponentials correspond to shifts; we filter them out and then
# shift the result later. If we are given an Add this will not
# work, but the calling code will take care of that.
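# (Shift theorem: a factor exp(a*x) in the transform corresponds to replacing
# t by t + a in the inverse; the accumulated ``shift`` is applied near the end
# via res.subs(t, t + shift).)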
shift = S.Zero
if f.is_Mul:
args = list(f.args)
elif isinstance(f, exp):
args = [f]
else:
args = None
if args:
newargs = []
exponentials = []
while args:
arg = args.pop()
if isinstance(arg, exp):
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
try:
a, b = _get_coeff_exp(arg.args[0], x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a)
else:
newargs.append(arg)
elif arg.is_Pow:
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
if x not in arg.base.free_symbols:
try:
a, b = _get_coeff_exp(arg.exp, x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a*log(arg.base))
newargs.append(arg)
else:
newargs.append(arg)
shift = Add(*exponentials)
f = Mul(*newargs)
if x not in f.free_symbols:
_debug('Expression consists of constant and exp shift:', f, shift)
cond = Eq(im(shift), 0)
if cond == False:
_debug('but shift is nonreal, cannot be a Laplace transform')
return None
res = f*DiracDelta(t + shift)
_debug('Result is a delta function, possibly conditional:', res, cond)
# cond is True or Eq
return Piecewise((res.subs(t, t_), cond))
gs = _rewrite1(f, x)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S.Zero
for C, s, f in g:
C, f = _rewrite_inversion(fac*C, po*x**s, f, x)
res += C*_int_inversion(f, x, t)
cond = And(cond, _check_antecedents_inversion(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitution:', res)
from sympy.simplify import hyperexpand
res = _my_unpolarify(hyperexpand(res))
if not res.has(Heaviside):
res *= Heaviside(t)
res = res.subs(t, t + shift)
if not isinstance(cond, bool):
cond = cond.subs(t, t + shift)
from .transforms import InverseLaplaceTransform
return Piecewise((res.subs(t, t_), cond),
(InverseLaplaceTransform(f_.subs(t, t_), x, t_, None), True))
|
_CoeffExpValueError
|
python
|
openai__openai-python
|
tests/api_resources/vector_stores/test_file_batches.py
|
{
"start": 9434,
"end": 19028
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.create(
vector_store_id="vs_abc123",
)
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.create(
vector_store_id="vs_abc123",
attributes={"foo": "string"},
chunking_strategy={"type": "auto"},
file_ids=["string"],
files=[
{
"file_id": "file_id",
"attributes": {"foo": "string"},
"chunking_strategy": {"type": "auto"},
}
],
)
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.create(
vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.create(
vector_store_id="",
)
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.retrieve(
batch_id="vsfb_abc123",
vector_store_id="vs_abc123",
)
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="vsfb_abc123",
vector_store_id="vs_abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.retrieve(
batch_id="vsfb_abc123",
vector_store_id="vs_abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="vsfb_abc123",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.retrieve(
batch_id="",
vector_store_id="vs_abc123",
)
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.cancel(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
assert_matches_type(VectorStoreFileBatch, file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.cancel(
batch_id="",
vector_store_id="vector_store_id",
)
@parametrize
async def test_method_list_files(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
async def test_method_list_files_with_all_params(self, async_client: AsyncOpenAI) -> None:
file_batch = await async_client.vector_stores.file_batches.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
after="after",
before="before",
filter="in_progress",
limit=0,
order="asc",
)
assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
async def test_raw_response_list_files(self, async_client: AsyncOpenAI) -> None:
response = await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = response.parse()
assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
@parametrize
async def test_streaming_response_list_files(self, async_client: AsyncOpenAI) -> None:
async with async_client.vector_stores.file_batches.with_streaming_response.list_files(
batch_id="batch_id",
vector_store_id="vector_store_id",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
file_batch = await response.parse()
assert_matches_type(AsyncCursorPage[VectorStoreFile], file_batch, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list_files(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `vector_store_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="batch_id",
vector_store_id="",
)
with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
await async_client.vector_stores.file_batches.with_raw_response.list_files(
batch_id="",
vector_store_id="vector_store_id",
)
|
TestAsyncFileBatches
|
python
|
walkccc__LeetCode
|
solutions/394. Decode String/394-2.py
|
{
"start": 0,
"end": 492
}
|
class ____:
def decodeString(self, s: str) -> str:
ans = ''
while self.i < len(s) and s[self.i] != ']':
if s[self.i].isdigit():
k = 0
while self.i < len(s) and s[self.i].isdigit():
k = k * 10 + int(s[self.i])
self.i += 1
self.i += 1 # '['
decodedString = self.decodeString(s)
self.i += 1 # ']'
ans += k * decodedString
else:
ans += s[self.i]
self.i += 1
return ans
i = 0
|
Solution
|
python
|
tornadoweb__tornado
|
tornado/test/websocket_test.py
|
{
"start": 1331,
"end": 1938
}
|
class ____(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for tests to see the close code and reason on the
server side.
"""
def initialize(self, close_future=None, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
if self.close_future is not None:
self.close_future.set_result((self.close_code, self.close_reason))
|
TestWebSocketHandler
|
python
|
agronholm__apscheduler
|
src/apscheduler/datastores/sqlalchemy.py
|
{
"start": 3295,
"end": 3550
}
|
class ____:
job_id: UUID
outcome: JobOutcome
task_id: str
schedule_id: str | None
scheduled_fire_time: datetime | None
result_expires_at: datetime
exception: Exception | None = None
@attrs.define(eq=False, repr=False)
|
_JobDiscard
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI032.py
|
{
"start": 39,
"end": 177
}
|
class ____:
def __eq__(self, other: Any) -> bool: ... # PYI032
def __ne__(self, other: typing.Any) -> typing.Any: ... # PYI032
|
Bad
|
python
|
huggingface__transformers
|
src/transformers/models/idefics2/configuration_idefics2.py
|
{
"start": 829,
"end": 4850
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Idefics2VisionModel`]. It is used to instantiate a
Idefics2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) used in the Idefics2 model
[HuggingFaceM4/idefics2-8b](https://huggingface.co/HuggingFaceM4/idefics2-8b).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
Number of channels in the input images.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation for initializing all weight matrices in the model.
Example:
```python
>>> from transformers.models.idefics2.modeling_idefics2 import Idefics2VisionTransformer
>>> from transformers.models.idefics2.configuration_idefics2 import Idefics2VisionConfig
>>> # Initializing a Idefics2VisionConfig with google/siglip-base-patch16-224 style configuration
>>> configuration = Idefics2VisionConfig()
>>> # Initializing a Idefics2VisionTransformer (with random weights) from the google/siglip-base-patch16-224 style configuration
>>> model = Idefics2VisionTransformer(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "idefics2_vision"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
image_size=224,
patch_size=32,
hidden_act="gelu_pytorch_tanh",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
|
Idefics2VisionConfig
|
python
|
squidfunk__mkdocs-material
|
material/plugins/projects/builder/watcher/handler.py
|
{
"start": 2813,
"end": 3874
}
|
class ____(FileSystemEventHandler):
# Initialize event handler
def __init__(self, project: Project, handler: Callable):
self.project = project
self.handler = handler
# Handle file creation event
def on_created(self, event: FileSystemEvent):
self._handle(event)
# Handle file deletion event
def on_deleted(self, event: FileSystemEvent):
self._handle(event)
# ------------------------------------------------------------------------
# Invoke file event handler
def _handle(self, event: FileSystemEvent):
config = self.project.config
# Touch mkdocs.yml to trigger rebuild
if os.path.isfile(config.config_file_path):
os.utime(config.config_file_path, None)
# Invoke handler
self.handler(self.project)
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs")
|
ProjectAddedOrRemoved
|
python
|
huggingface__transformers
|
src/transformers/models/tapas/tokenization_tapas.py
|
{
"start": 1930,
"end": 2054
}
|
class ____:
rows: list[list[list[str]]]
selected_tokens: list[TokenCoordinates]
@dataclass(frozen=True)
|
TokenizedTable
|
python
|
pytorch__pytorch
|
test/mobile/model_test/nn_ops.py
|
{
"start": 9823,
"end": 11522
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
self.y = torch.LongTensor([[3, 0, -1, 1]])
def forward(self):
a = torch.randn(3, 2)
b = torch.rand(3, 2)
c = torch.rand(3)
log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()
targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
input_lengths = torch.full((16,), 50, dtype=torch.long)
target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)
return len(
F.binary_cross_entropy(torch.sigmoid(a), b),
F.binary_cross_entropy_with_logits(torch.sigmoid(a), b),
F.poisson_nll_loss(a, b),
F.cosine_embedding_loss(a, b, c),
F.cross_entropy(a, b),
F.ctc_loss(log_probs, targets, input_lengths, target_lengths),
# F.gaussian_nll_loss(a, b, torch.ones(5, 1)), # ENTER is not supported in mobile module
F.hinge_embedding_loss(a, b),
F.kl_div(a, b),
F.l1_loss(a, b),
F.mse_loss(a, b),
F.margin_ranking_loss(c, c, c),
F.multilabel_margin_loss(self.x, self.y),
F.multilabel_soft_margin_loss(self.x, self.y),
F.multi_margin_loss(self.x, torch.tensor([3])),
F.nll_loss(a, torch.tensor([1, 0, 1])),
F.huber_loss(a, b),
F.smooth_l1_loss(a, b),
F.soft_margin_loss(a, b),
F.triplet_margin_loss(a, b, -b),
# F.triplet_margin_with_distance_loss(a, b, -b), # can't take variable number of arguments
)
|
NNLossFunctionModule
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_fail.py
|
{
"start": 1877,
"end": 2191
}
|
class ____(BaseModel):
undefined: Undefined # noqa F821
UndefinedAnnotationModel()
Model.model_construct(x=1)
Model.model_construct(_fields_set={'x'}, x=1, y='2')
Model.model_construct(x='1', y='2')
# Strict mode fails
inheriting = InheritingModel(x='1', y='1')
Model(x='1', y='2')
|
UndefinedAnnotationModel
|
python
|
great-expectations__great_expectations
|
tests/integration/data_sources_and_expectations/expectations/test_expect_column_values_to_not_match_like_pattern.py
|
{
"start": 1041,
"end": 4394
}
|
class ____:
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(column=COL_A, like_pattern="z%"),
id="no_matches",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(column=COL_A, like_pattern="_______"),
id="too_many_underscores",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(
column=COL_B, like_pattern="a%", mostly=0.6
),
id="mostly",
),
],
)
@parameterize_batch_for_data_sources(data_source_configs=SUPPORTED_DATA_SOURCES, data=DATA)
def test_success(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePattern,
) -> None:
result = batch_for_datasource.validate(expectation)
assert result.success
@pytest.mark.parametrize(
"expectation",
[
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(column=COL_A, like_pattern="a%"),
id="all_matches",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(column=COL_A, like_pattern="__"),
id="underscores_match",
),
pytest.param(
gxe.ExpectColumnValuesToNotMatchLikePattern(
column=COL_B, like_pattern="a%", mostly=0.7
),
id="mostly_threshold_not_met",
),
],
)
@parameterize_batch_for_data_sources(data_source_configs=SUPPORTED_DATA_SOURCES, data=DATA)
def test_failure(
self,
batch_for_datasource: Batch,
expectation: gxe.ExpectColumnValuesToNotMatchLikePattern,
) -> None:
result = batch_for_datasource.validate(expectation)
assert not result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PostgreSQLDatasourceTestConfig()], data=DATA
)
def test_include_unexpected_rows_postgres(self, batch_for_datasource: Batch) -> None:
"""Test include_unexpected_rows for ExpectColumnValuesToNotMatchLikePattern."""
expectation = gxe.ExpectColumnValuesToNotMatchLikePattern(column=COL_A, like_pattern="a%")
result = batch_for_datasource.validate(
expectation, result_format={"result_format": "BASIC", "include_unexpected_rows": True}
)
assert not result.success
result_dict = result["result"]
# Verify that unexpected_rows is present and contains the expected data
assert "unexpected_rows" in result_dict
assert result_dict["unexpected_rows"] is not None
unexpected_rows_data = result_dict["unexpected_rows"]
assert isinstance(unexpected_rows_data, list)
# Should contain 3 rows where COL_A matches like_pattern "a%" ("aa", "ab", "ac" all match)
assert len(unexpected_rows_data) == 3
# Check that "aa", "ab", and "ac" appear in the unexpected rows data
unexpected_rows_str = str(unexpected_rows_data)
assert "aa" in unexpected_rows_str
assert "ab" in unexpected_rows_str
assert "ac" in unexpected_rows_str
|
TestNormalSql
|
python
|
ApeWorX__ape
|
src/ape/managers/config.py
|
{
"start": 767,
"end": 7412
}
|
class ____(ExtraAttributesMixin, BaseManager):
"""
An Ape configuration manager, controlled by ``ape-config.yaml``
files. **NOTE**: This is a singleton wrapper class that
points to the local project's config. For the config field
definitions, see :class:`~ape.api.config.ApeConfig`.
"""
def __init__(self, data_folder: Optional[Path] = None, request_header: Optional[dict] = None):
if not data_folder and "APE_DATA_FOLDER" in os.environ:
self.DATA_FOLDER = Path(os.environ["APE_DATA_FOLDER"])
else:
self.DATA_FOLDER = data_folder or Path.home() / ".ape"
request_header = request_header or {
"User-Agent": USER_AGENT,
"Content-Type": "application/json",
}
self.REQUEST_HEADER = request_header or {}
def __ape_extra_attributes__(self):
# The "extra" attributes are the local project's
# config attributes. To see the actual ``ape-config.yaml``
# definitions, see :class:`~ape.api.config.ApeConfig`.
yield ExtraModelAttributes(
name="config",
# Active project's config.
attributes=self.local_project.config,
include_getitem=True,
)
@log_instead_of_fail(default="<ConfigManager>")
def __repr__(self) -> str:
return f"<{CONFIG_FILE_NAME}>"
def __str__(self) -> str:
return str(self.local_project.config)
@only_raise_attribute_error
def __getattr__(self, name: str) -> Any:
"""
The root config manager (funneling to this method)
refers to the local project's config. Config is loaded
per project in Ape to support multi-project environments
and a smarter dependency system.
See :class:`~ape.api.config.ApeConfig` for field definitions
and model-related controls.
"""
return get_attribute_with_extras(self, name)
def __getitem__(self, name: str) -> Any:
return get_item_with_extras(self, name)
@cached_property
def global_config(self) -> ApeConfig:
"""
Root-level configurations, loaded from the
data folder. **NOTE**: This only needs to load
once and applies to all projects.
"""
return self.load_global_config()
def get_config(self, name: str) -> ApeConfig:
return self.local_project.config.get_config(name)
def load_global_config(self) -> ApeConfig:
path = self.DATA_FOLDER / CONFIG_FILE_NAME
return ApeConfig.validate_file(path) if path.is_file() else ApeConfig.model_validate({})
def merge_with_global(self, project_config: ApeConfig) -> ApeConfig:
global_data = self.global_config.model_dump(by_alias=True)
project_data = project_config.model_dump(by_alias=True)
merged_data = merge_configs(global_data, project_data)
return ApeConfig.model_validate(merged_data)
@classmethod
def extract_config(cls, manifest: "PackageManifest", **overrides) -> ApeConfig:
"""
Calculate the ape-config data from a package manifest.
Args:
manifest (PackageManifest): The manifest.
**overrides: Custom config settings.
Returns:
:class:`~ape.managers.config.ApeConfig`: Config data.
"""
return ApeConfig.from_manifest(manifest, **overrides)
@contextmanager
def isolate_data_folder(
self, keep: Optional[Union[Iterable[str], str]] = None
) -> Iterator[Path]:
"""
Change Ape's DATA_FOLDER to point a temporary path,
in a context, for testing purposes. Any data
cached to disk will not persist.
Args:
keep (Optional[Union[Iterable[str], str]]): Optionally, pass in
a key of subdirectory names to include in the new isolated
data folder. For example, pass ing ``"packages"`` to avoid
having to re-download dependencies in an isolated environment.
Returns:
Iterator[Path]: The temporary data folder.
"""
original_data_folder = self.DATA_FOLDER
if in_tempdir(original_data_folder):
# Already isolated.
yield original_data_folder
else:
keep = [keep] if isinstance(keep, str) else keep or []
try:
with create_tempdir() as temp_data_folder:
# Copy in items from "keep".
for item in keep:
path_to_keep = original_data_folder / item
if not path_to_keep.is_dir():
continue
dest_path = temp_data_folder / item
try:
shutil.copytree(path_to_keep, dest_path)
except Exception as err:
logger.warning(
f"Problem copying '{dest_path.name}' when making isolated project: {err}"
)
continue
self.DATA_FOLDER = temp_data_folder
yield temp_data_folder
finally:
self.DATA_FOLDER = original_data_folder
def _get_request_headers(self) -> RPCHeaders:
# Avoid multiple keys error by not initializing with both dicts.
headers = RPCHeaders(**self.REQUEST_HEADER)
for key, value in self.request_headers.items():
headers[key] = value
return headers
def merge_configs(*cfgs: dict) -> dict:
if len(cfgs) == 0:
return {}
elif len(cfgs) == 1:
return cfgs[0]
new_base = _merge_configs(cfgs[0], cfgs[1])
return merge_configs(new_base, *cfgs[2:])
def _merge_configs(base: dict, secondary: dict) -> dict:
result: dict = {}
# Short circuits
if not base and not secondary:
return result
elif not base:
return secondary
elif not secondary:
return base
for key, value in base.items():
if key not in secondary:
result[key] = value
elif not isinstance(value, dict) or not isinstance(secondary[key], dict):
# Is a primitive value found in both configs.
# Must use the second one.
result[key] = secondary[key]
else:
# Merge the dictionaries.
sub = _merge_configs(value, secondary[key])
result[key] = sub
# Add missed keys from secondary.
for key, value in secondary.items():
if key not in base:
result[key] = value
return result
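# Example (illustrative): merge_configs({"a": 1, "b": {"x": 1}}, {"b": {"y": 2}})
# returns {"a": 1, "b": {"x": 1, "y": 2}}; nested dicts are merged key by key,
# and on a conflict the later config wins.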
|
ConfigManager
|
python
|
django__django
|
tests/apps/tests.py
|
{
"start": 19535,
"end": 21561
}
|
class ____(SimpleTestCase):
# We need nsapp to be top-level so our multiple-paths tests can add another
# location for it (if it's inside a normal package with an __init__.py that
# isn't possible). In order to avoid cluttering the already-full tests/ dir
# (which is on sys.path), we add these new entries to sys.path temporarily.
base_location = os.path.join(HERE, "namespace_package_base")
other_location = os.path.join(HERE, "namespace_package_other_base")
app_path = os.path.join(base_location, "nsapp")
def test_single_path(self):
"""
A Py3.3+ namespace package can be an app if it has only one path.
"""
with extend_sys_path(self.base_location):
with self.settings(INSTALLED_APPS=["nsapp"]):
app_config = apps.get_app_config("nsapp")
self.assertEqual(app_config.path, self.app_path)
def test_multiple_paths(self):
"""
A Py3.3+ namespace package with multiple locations cannot be an app.
(Because then we wouldn't know where to load its templates, static
assets, etc. from.)
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=["nsapp"]):
pass
def test_multiple_paths_explicit_path(self):
"""
Multiple locations are ok only if app-config has explicit path.
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.settings(INSTALLED_APPS=["nsapp.apps.NSAppConfig"]):
app_config = apps.get_app_config("nsapp")
self.assertEqual(app_config.path, self.app_path)
|
NamespacePackageAppTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-maximum-length-of-valid-subsequence-i.py
|
{
"start": 447,
"end": 779
}
|
class ____(object):
def maximumLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return max(sum(x%2 == 0 for x in nums),
sum(x%2 == 1 for x in nums),
sum(nums[i]%2 != nums[i+1]%2 for i in xrange(len(nums)-1))+1)
|
Solution2
|
python
|
getsentry__sentry
|
tests/sentry/data_export/processors/test_discover.py
|
{
"start": 319,
"end": 5832
}
|
class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.org = self.create_organization(owner=self.user)
self.project1 = self.create_project(organization=self.org)
self.project2 = self.create_project(organization=self.org)
self.group = self.create_group(project=self.project1)
self.discover_query = {
"statsPeriod": "14d",
"project": [self.project1.id, self.project2.id],
"field": ["count(id)", "fake(field)", "issue"],
"query": "",
}
def test_get_projects(self) -> None:
project = DiscoverProcessor.get_projects(
organization_id=self.org.id, query={"project": [self.project1.id]}
)
assert isinstance(project, list)
assert project[0] == self.project1
projects = DiscoverProcessor.get_projects(
organization_id=self.org.id, query={"project": [self.project1.id, self.project2.id]}
)
assert sorted(p.id for p in projects) == sorted([self.project1.id, self.project2.id])
with pytest.raises(ExportError):
DiscoverProcessor.get_projects(organization_id=self.org.id, query={"project": [-1]})
def test_handle_issue_id_fields(self) -> None:
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
assert processor.header_fields == ["count_id", "fake_field", "issue"]
result_list = [{"issue": self.group.id, "issue.id": self.group.id}]
new_result_list = processor.handle_fields(result_list)
assert new_result_list[0] != result_list
assert new_result_list[0]["issue"] == self.group.qualified_short_id
def test_handle_transaction_status_fields(self) -> None:
self.discover_query = {
**self.discover_query,
"field": ["title", "event.type", "transaction.status"],
}
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
assert processor.header_fields == ["title", "event.type", "transaction.status"]
result_list = [
{"transaction.status": SPAN_STATUS_NAME_TO_CODE.get("ok")},
{"transaction.status": SPAN_STATUS_NAME_TO_CODE.get("not_found")},
]
new_result_list = processor.handle_fields(result_list)
assert new_result_list[0]["transaction.status"] == "ok"
assert new_result_list[1]["transaction.status"] == "not_found"
def test_handle__fields(self) -> None:
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
assert processor.header_fields == ["count_id", "fake_field", "issue"]
result_list = [{"issue": self.group.id, "issue.id": self.group.id}]
new_result_list = processor.handle_fields(result_list)
assert new_result_list[0] != result_list
assert new_result_list[0]["issue"] == self.group.qualified_short_id
def test_handle_equations(self) -> None:
self.discover_query["field"] = ["count(id)", "fake(field)"]
self.discover_query["equations"] = ["count(id) / fake(field)", "count(id) / 2"]
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
assert processor.header_fields == [
"count_id",
"fake_field",
"count(id) / fake(field)",
"count(id) / 2",
]
result_list = [{"equation[0]": 5, "equation[1]": 8}]
new_result_list = processor.handle_fields(result_list)
assert new_result_list[0] != result_list
assert new_result_list[0]["count(id) / fake(field)"] == 5
assert new_result_list[0]["count(id) / 2"] == 8
def test_handle_transactions_dataset(self) -> None:
# Store an error event to show we're querying transactions
self.store_event(load_data("python"), project_id=self.project1.id)
transaction_data = load_data("transaction")
transaction = self.store_event(
{**transaction_data, "transaction": "test transaction"}, project_id=self.project1.id
)
self.discover_query = {
**self.discover_query,
"field": ["title", "transaction.status"],
"dataset": "transactions",
}
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
data = processor.data_fn(offset=0, limit=2)["data"]
assert data[0] == {
"title": "test transaction",
"transaction.status": 0,
"id": transaction.event_id,
"project.name": self.project1.slug,
}
def test_handle_errors_dataset(self) -> None:
# Store a transaction event to show we're querying errors
self.store_event(load_data("transaction"), project_id=self.project1.id)
error_data = load_data("python")
error_event = self.store_event(error_data, project_id=self.project1.id)
self.discover_query = {
**self.discover_query,
"field": ["title"],
"dataset": "errors",
}
processor = DiscoverProcessor(organization=self.org, discover_query=self.discover_query)
data = processor.data_fn(offset=0, limit=2)["data"]
assert data[0] == {
"title": error_event.message,
"id": error_event.event_id,
"project.name": self.project1.slug,
}
|
DiscoverProcessorTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py
|
{
"start": 10123,
"end": 13292
}
|
class ____(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when operator is self-adjoint.
Real spectrum <==> Self adjoint operator.
Note that when the spectrum is real, the operator may still be complex.
"""
@staticmethod
def dtypes_to_test():
# This operator will always be complex because, although the spectrum is
# real, the matrix will not be real.
return [dtypes.complex64, dtypes.complex128]
@staticmethod
def optional_tests():
"""List of optional test names to run."""
return [
"operator_matmul_with_same_type",
"operator_solve_with_same_type",
]
def operator_and_matrix(self,
shape_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = shape_info.shape
# For this test class, we are creating real spectrums.
# We also want the spectrum to have eigenvalues bounded away from zero.
#
# spectrum is bounded away from zero.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape), minval=1., maxval=2.)
if ensure_self_adjoint_and_pd:
spectrum = math_ops.abs(spectrum)
# If dtype is complex, cast spectrum to complex. The imaginary part will be
# zero, so the operator will still be self-adjoint.
spectrum = math_ops.cast(spectrum, dtype)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum,
is_self_adjoint=True,
is_positive_definite=True if ensure_self_adjoint_and_pd else None,
input_output_dtype=dtype)
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.disable_xla("No registered Const")
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
def test_tape_safe(self):
spectrum = variables_module.Variable(
math_ops.cast([1. + 0j, 1. + 0j], dtypes.complex64))
operator = linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=True)
self.check_tape_safe(operator)
def test_convert_variables_to_tensors(self):
spectrum = variables_module.Variable(
math_ops.cast([1. + 0j, 1. + 0j], dtypes.complex64))
operator = linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=True)
with self.cached_session() as sess:
sess.run([spectrum.initializer])
self.check_convert_variables_to_tensors(operator)
|
LinearOperatorCirculantTestSelfAdjointOperator
|
python
|
getsentry__sentry
|
src/sentry/shared_integrations/exceptions/__init__.py
|
{
"start": 4558,
"end": 4675
}
|
class ____(ApiError):
@property
def content_type(self) -> str:
return self.text
|
UnsupportedResponseType
|
python
|
weaviate__weaviate-python-client
|
weaviate/gql/filter.py
|
{
"start": 5357,
"end": 7222
}
|
class ____(Filter):
"""NearVector class used to filter weaviate objects."""
def __init__(self, content: dict):
"""Initialize a NearVector class instance.
Args:
content: The content of the `nearVector` clause.
Raises:
TypeError: If 'content' is not of type dict.
KeyError: If 'content' does not contain "vector".
TypeError: If 'content["vector"]' is not of type list.
AttributeError: If invalid 'content' keys are provided.
ValueError: If 'content' has key "certainty"/"distance" but the value is not float.
"""
super().__init__(content)
if "vector" not in self._content:
raise KeyError("No 'vector' key in `content` argument.")
# Check optional fields
if "certainty" in self._content:
if "distance" in self._content:
raise ValueError(
"Cannot have both 'certainty' and 'distance' at the same time. "
"Only one is accepted."
)
_check_type(var_name="certainty", value=self._content["certainty"], dtype=float)
if "distance" in self._content:
_check_type(var_name="distance", value=self._content["distance"], dtype=float)
self._content["vector"] = get_vector(self._content["vector"])
def __str__(self) -> str:
near_vector = f"nearVector: {{vector: {dumps(self._content['vector'])}"
if "certainty" in self._content:
near_vector += f" certainty: {self._content['certainty']}"
if "distance" in self._content:
near_vector += f" distance: {self._content['distance']}"
if "targetVector" in self._content:
near_vector += f' targetVectors: "{self._content["targetVector"]}"'
return near_vector + "} "
|
NearVector
|
python
|
chroma-core__chroma
|
sample_apps/generative_benchmarking/functions/types.py
|
{
"start": 400,
"end": 470
}
|
class ____:
text: str
embedding: List[float]
@dataclass
|
QueryItem
|
python
|
paramiko__paramiko
|
paramiko/ssh_exception.py
|
{
"start": 1252,
"end": 1415
}
|
class ____(AuthenticationException):
"""
Exception raised when a password is needed to unlock a private key file.
"""
pass
|
PasswordRequiredException
|
python
|
gevent__gevent
|
src/greentest/3.14/test_context.py
|
{
"start": 558,
"end": 16711
}
|
class ____(unittest.TestCase):
def test_context_var_new_1(self):
with self.assertRaisesRegex(TypeError, 'takes exactly 1'):
contextvars.ContextVar()
with self.assertRaisesRegex(TypeError, 'must be a str'):
contextvars.ContextVar(1)
c = contextvars.ContextVar('aaa')
self.assertEqual(c.name, 'aaa')
with self.assertRaises(AttributeError):
c.name = 'bbb'
self.assertNotEqual(hash(c), hash('aaa'))
@isolated_context
def test_context_var_repr_1(self):
c = contextvars.ContextVar('a')
self.assertIn('a', repr(c))
c = contextvars.ContextVar('a', default=123)
self.assertIn('123', repr(c))
lst = []
c = contextvars.ContextVar('a', default=lst)
lst.append(c)
self.assertIn('...', repr(c))
self.assertIn('...', repr(lst))
t = c.set(1)
self.assertIn(repr(c), repr(t))
self.assertNotIn(' used ', repr(t))
c.reset(t)
self.assertIn(' used ', repr(t))
@isolated_context
def test_token_repr_1(self):
c = contextvars.ContextVar('a')
tok = c.set(1)
self.assertRegex(repr(tok),
r"^<Token var=<ContextVar name='a' "
r"at 0x[0-9a-fA-F]+> at 0x[0-9a-fA-F]+>$")
def test_context_subclassing_1(self):
with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
class MyContextVar(contextvars.ContextVar):
# Potentially we might want ContextVars to be subclassable.
pass
with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
class MyContext(contextvars.Context):
pass
with self.assertRaisesRegex(TypeError, 'not an acceptable base type'):
class MyToken(contextvars.Token):
pass
def test_context_new_1(self):
with self.assertRaisesRegex(TypeError, 'any arguments'):
contextvars.Context(1)
with self.assertRaisesRegex(TypeError, 'any arguments'):
contextvars.Context(1, a=1)
with self.assertRaisesRegex(TypeError, 'any arguments'):
contextvars.Context(a=1)
contextvars.Context(**{})
def test_context_new_unhashable_str_subclass(self):
# gh-132002: it used to crash on unhashable str subtypes.
class weird_str(str):
def __eq__(self, other):
pass
with self.assertRaisesRegex(TypeError, 'unhashable type'):
contextvars.ContextVar(weird_str())
def test_context_typerrors_1(self):
ctx = contextvars.Context()
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
ctx[1]
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
1 in ctx
with self.assertRaisesRegex(TypeError, 'ContextVar key was expected'):
ctx.get(1)
def test_context_get_context_1(self):
ctx = contextvars.copy_context()
self.assertIsInstance(ctx, contextvars.Context)
def test_context_run_1(self):
ctx = contextvars.Context()
with self.assertRaisesRegex(TypeError, 'missing 1 required'):
ctx.run()
def test_context_run_2(self):
ctx = contextvars.Context()
def func(*args, **kwargs):
kwargs['spam'] = 'foo'
args += ('bar',)
return args, kwargs
for f in (func, functools.partial(func)):
# partial doesn't support FASTCALL
self.assertEqual(ctx.run(f), (('bar',), {'spam': 'foo'}))
self.assertEqual(ctx.run(f, 1), ((1, 'bar'), {'spam': 'foo'}))
self.assertEqual(
ctx.run(f, a=2),
(('bar',), {'a': 2, 'spam': 'foo'}))
self.assertEqual(
ctx.run(f, 11, a=2),
((11, 'bar'), {'a': 2, 'spam': 'foo'}))
a = {}
self.assertEqual(
ctx.run(f, 11, **a),
((11, 'bar'), {'spam': 'foo'}))
self.assertEqual(a, {})
def test_context_run_3(self):
ctx = contextvars.Context()
def func(*args, **kwargs):
1 / 0
with self.assertRaises(ZeroDivisionError):
ctx.run(func)
with self.assertRaises(ZeroDivisionError):
ctx.run(func, 1, 2)
with self.assertRaises(ZeroDivisionError):
ctx.run(func, 1, 2, a=123)
@isolated_context
def test_context_run_4(self):
ctx1 = contextvars.Context()
ctx2 = contextvars.Context()
var = contextvars.ContextVar('var')
def func2():
self.assertIsNone(var.get(None))
def func1():
self.assertIsNone(var.get(None))
var.set('spam')
ctx2.run(func2)
self.assertEqual(var.get(None), 'spam')
cur = contextvars.copy_context()
self.assertEqual(len(cur), 1)
self.assertEqual(cur[var], 'spam')
return cur
returned_ctx = ctx1.run(func1)
self.assertEqual(ctx1, returned_ctx)
self.assertEqual(returned_ctx[var], 'spam')
self.assertIn(var, returned_ctx)
def test_context_run_5(self):
ctx = contextvars.Context()
var = contextvars.ContextVar('var')
def func():
self.assertIsNone(var.get(None))
var.set('spam')
1 / 0
with self.assertRaises(ZeroDivisionError):
ctx.run(func)
self.assertIsNone(var.get(None))
def test_context_run_6(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('a', default=0)
def fun():
self.assertEqual(c.get(), 0)
self.assertIsNone(ctx.get(c))
c.set(42)
self.assertEqual(c.get(), 42)
self.assertEqual(ctx.get(c), 42)
ctx.run(fun)
def test_context_run_7(self):
ctx = contextvars.Context()
def fun():
with self.assertRaisesRegex(RuntimeError, 'is already entered'):
ctx.run(fun)
ctx.run(fun)
@isolated_context
def test_context_getset_1(self):
c = contextvars.ContextVar('c')
with self.assertRaises(LookupError):
c.get()
self.assertIsNone(c.get(None))
t0 = c.set(42)
self.assertEqual(c.get(), 42)
self.assertEqual(c.get(None), 42)
self.assertIs(t0.old_value, t0.MISSING)
self.assertIs(t0.old_value, contextvars.Token.MISSING)
self.assertIs(t0.var, c)
t = c.set('spam')
self.assertEqual(c.get(), 'spam')
self.assertEqual(c.get(None), 'spam')
self.assertEqual(t.old_value, 42)
c.reset(t)
self.assertEqual(c.get(), 42)
self.assertEqual(c.get(None), 42)
c.set('spam2')
with self.assertRaisesRegex(RuntimeError, 'has already been used'):
c.reset(t)
self.assertEqual(c.get(), 'spam2')
ctx1 = contextvars.copy_context()
self.assertIn(c, ctx1)
c.reset(t0)
with self.assertRaisesRegex(RuntimeError, 'has already been used'):
c.reset(t0)
self.assertIsNone(c.get(None))
self.assertIn(c, ctx1)
self.assertEqual(ctx1[c], 'spam2')
self.assertEqual(ctx1.get(c, 'aa'), 'spam2')
self.assertEqual(len(ctx1), 1)
self.assertEqual(list(ctx1.items()), [(c, 'spam2')])
self.assertEqual(list(ctx1.values()), ['spam2'])
self.assertEqual(list(ctx1.keys()), [c])
self.assertEqual(list(ctx1), [c])
ctx2 = contextvars.copy_context()
self.assertNotIn(c, ctx2)
with self.assertRaises(KeyError):
ctx2[c]
self.assertEqual(ctx2.get(c, 'aa'), 'aa')
self.assertEqual(len(ctx2), 0)
self.assertEqual(list(ctx2), [])
@isolated_context
def test_context_getset_2(self):
v1 = contextvars.ContextVar('v1')
v2 = contextvars.ContextVar('v2')
t1 = v1.set(42)
with self.assertRaisesRegex(ValueError, 'by a different'):
v2.reset(t1)
@isolated_context
def test_context_getset_3(self):
c = contextvars.ContextVar('c', default=42)
ctx = contextvars.Context()
def fun():
self.assertEqual(c.get(), 42)
with self.assertRaises(KeyError):
ctx[c]
self.assertIsNone(ctx.get(c))
self.assertEqual(ctx.get(c, 'spam'), 'spam')
self.assertNotIn(c, ctx)
self.assertEqual(list(ctx.keys()), [])
t = c.set(1)
self.assertEqual(list(ctx.keys()), [c])
self.assertEqual(ctx[c], 1)
c.reset(t)
self.assertEqual(list(ctx.keys()), [])
with self.assertRaises(KeyError):
ctx[c]
ctx.run(fun)
@isolated_context
def test_context_getset_4(self):
c = contextvars.ContextVar('c', default=42)
ctx = contextvars.Context()
tok = ctx.run(c.set, 1)
with self.assertRaisesRegex(ValueError, 'different Context'):
c.reset(tok)
@isolated_context
def test_context_getset_5(self):
c = contextvars.ContextVar('c', default=42)
c.set([])
def fun():
c.set([])
c.get().append(42)
self.assertEqual(c.get(), [42])
contextvars.copy_context().run(fun)
self.assertEqual(c.get(), [])
def test_context_copy_1(self):
ctx1 = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def ctx1_fun():
c.set(10)
ctx2 = ctx1.copy()
self.assertEqual(ctx2[c], 10)
c.set(20)
self.assertEqual(ctx1[c], 20)
self.assertEqual(ctx2[c], 10)
ctx2.run(ctx2_fun)
self.assertEqual(ctx1[c], 20)
self.assertEqual(ctx2[c], 30)
def ctx2_fun():
self.assertEqual(c.get(), 10)
c.set(30)
self.assertEqual(c.get(), 30)
ctx1.run(ctx1_fun)
def test_context_isinstance(self):
ctx = contextvars.Context()
self.assertIsInstance(ctx, collections.abc.Mapping)
self.assertTrue(issubclass(contextvars.Context, collections.abc.Mapping))
mapping_methods = (
'__contains__', '__eq__', '__getitem__', '__iter__', '__len__',
'__ne__', 'get', 'items', 'keys', 'values',
)
for name in mapping_methods:
with self.subTest(name=name):
self.assertTrue(callable(getattr(ctx, name)))
@isolated_context
@threading_helper.requires_working_threading()
def test_context_threads_1(self):
cvar = contextvars.ContextVar('cvar')
def sub(num):
for i in range(10):
cvar.set(num + i)
time.sleep(random.uniform(0.001, 0.05))
self.assertEqual(cvar.get(), num + i)
return num
tp = concurrent.futures.ThreadPoolExecutor(max_workers=10)
try:
results = list(tp.map(sub, range(10)))
finally:
tp.shutdown()
self.assertEqual(results, list(range(10)))
@isolated_context
@threading_helper.requires_working_threading()
def test_context_thread_inherit(self):
import threading
cvar = contextvars.ContextVar('cvar')
def run_context_none():
if sys.flags.thread_inherit_context:
expected = 1
else:
expected = None
self.assertEqual(cvar.get(None), expected)
# By default, context is inherited based on the
# sys.flags.thread_inherit_context option.
cvar.set(1)
thread = threading.Thread(target=run_context_none)
thread.start()
thread.join()
# Passing 'None' explicitly should have same behaviour as not
# passing parameter.
thread = threading.Thread(target=run_context_none, context=None)
thread.start()
thread.join()
# An explicit Context value can also be passed
custom_ctx = contextvars.Context()
custom_var = None
def setup_context():
nonlocal custom_var
custom_var = contextvars.ContextVar('custom')
custom_var.set(2)
custom_ctx.run(setup_context)
def run_custom():
self.assertEqual(custom_var.get(), 2)
thread = threading.Thread(target=run_custom, context=custom_ctx)
thread.start()
thread.join()
# You can also pass a new Context() object to start with an empty context
def run_empty():
with self.assertRaises(LookupError):
cvar.get()
thread = threading.Thread(target=run_empty, context=contextvars.Context())
thread.start()
thread.join()
def test_token_contextmanager_with_default(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
with c.set(36):
self.assertEqual(c.get(), 36)
self.assertEqual(c.get(), 42)
ctx.run(fun)
def test_token_contextmanager_without_default(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c')
def fun():
with c.set(36):
self.assertEqual(c.get(), 36)
with self.assertRaisesRegex(LookupError, "<ContextVar name='c'"):
c.get()
ctx.run(fun)
def test_token_contextmanager_on_exception(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
with c.set(36):
self.assertEqual(c.get(), 36)
raise ValueError("custom exception")
self.assertEqual(c.get(), 42)
with self.assertRaisesRegex(ValueError, "custom exception"):
ctx.run(fun)
def test_token_contextmanager_reentrant(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
token = c.set(36)
with self.assertRaisesRegex(
RuntimeError,
"<Token .+ has already been used once"
):
with token:
with token:
self.assertEqual(c.get(), 36)
self.assertEqual(c.get(), 42)
ctx.run(fun)
def test_token_contextmanager_multiple_c_set(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
with c.set(36):
self.assertEqual(c.get(), 36)
c.set(24)
self.assertEqual(c.get(), 24)
c.set(12)
self.assertEqual(c.get(), 12)
self.assertEqual(c.get(), 42)
ctx.run(fun)
def test_token_contextmanager_with_explicit_reset_the_same_token(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
with self.assertRaisesRegex(
RuntimeError,
"<Token .+ has already been used once"
):
with c.set(36) as token:
self.assertEqual(c.get(), 36)
c.reset(token)
self.assertEqual(c.get(), 42)
self.assertEqual(c.get(), 42)
ctx.run(fun)
def test_token_contextmanager_with_explicit_reset_another_token(self):
ctx = contextvars.Context()
c = contextvars.ContextVar('c', default=42)
def fun():
with c.set(36):
self.assertEqual(c.get(), 36)
token = c.set(24)
self.assertEqual(c.get(), 24)
c.reset(token)
self.assertEqual(c.get(), 36)
self.assertEqual(c.get(), 42)
ctx.run(fun)
# HAMT Tests
|
ContextTest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/kitchen-sink/kitchen_sink/airflow_dags/dag_level_custom_callback.py
|
{
"start": 838,
"end": 1251
}
|
class ____(DefaultProxyDAGToDagsterOperator):
@classmethod
def build_from_dag(cls, dag):
return CustomProxyDagToDagsterOperator(dag=dag, task_id="OVERRIDDEN")
proxying_to_dagster(
proxied_state=load_proxied_state_from_yaml(Path(__file__).parent / "proxied_state"),
global_vars=globals(),
build_from_dag_fn=CustomProxyDagToDagsterOperator.build_from_dag,
)
|
CustomProxyDagToDagsterOperator
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 36390,
"end": 39910
}
|
class ____(Pointwise):
output_indexer: Callable[[Sequence[Expr]], Expr]
scatter_mode: StoreMode = None
def constant_to_device(self, device: torch.device) -> IRNode:
"""Move this to a given device. Requires that all reads are to constants."""
loader = self.make_loader()
loader = patch.object(ConstantBuffer, "override_device", device)(loader)
return Scatter(
device=device,
dtype=self.dtype,
inner_fn=loader,
ranges=self.ranges,
output_indexer=self.output_indexer,
scatter_mode=self.scatter_mode,
)
def store_output(
self,
output_name: Optional[str],
indexer: Callable[[Sequence[Expr]], Never],
vars: Sequence[Expr],
) -> Any:
loader = self.make_loader()
if output_name is None:
output_name = "unnamed"
return ops.store(
output_name,
indexer(self.output_indexer(vars)),
loader(vars),
mode=self.scatter_mode,
)
REDUCTION_COMBINE_FN: dict[str, Callable[..., OpsValue]] = {
"any": ops_wrapper("logical_or"),
"max": ops_wrapper("maximum"),
"min": ops_wrapper("minimum"),
"prod": ops_wrapper("mul"),
"sum": ops_wrapper("add"),
"dot": ops_wrapper("add"),
"xor_sum": ops_wrapper("bitwise_xor"),
}
def get_reduction_combine_fn(
reduction_type: str, dtype: torch.dtype, arg_break_ties_left: bool = True
) -> Callable[..., object]:
if reduction_type in REDUCTION_COMBINE_FN:
return REDUCTION_COMBINE_FN[reduction_type]
elif reduction_type in ("argmax", "argmin"):
def argmax_combine_fn(
a: tuple[object, object], b: tuple[object, object]
) -> tuple[OpsValue, OpsValue]:
a_value, a_index = a
b_value, b_index = b
if reduction_type == "argmin":
mask = ops.lt(a_value, b_value)
else:
mask = ops.gt(a_value, b_value)
equal = ops.eq(a_value, b_value)
if is_float_dtype(dtype):
a_isnan = ops.ne(a_value, a_value)
b_isnan = ops.ne(b_value, b_value)
mask = ops.logical_or(mask, ops.gt(a_isnan, b_isnan))
equal = ops.logical_or(equal, ops.logical_and(a_isnan, b_isnan))
tie = (
ops.lt(a_index, b_index)
if arg_break_ties_left
else ops.gt(a_index, b_index)
)
mask = ops.logical_or(mask, ops.logical_and(equal, tie))
return (
ops.where(mask, a_value, b_value),
ops.where(mask, a_index, b_index),
)
return argmax_combine_fn
elif reduction_type == "welford_combine":
def welford_combine_fn(
a: tuple[OpsValue, OpsValue, OpsValue],
b: tuple[OpsValue, OpsValue, OpsValue],
) -> tuple[OpsValue, OpsValue, OpsValue]:
a_mean, a_m2, a_weight = a
b_mean, b_m2, b_weight = b
delta = b_mean - a_mean
new_weight = a_weight + b_weight
w2_over_w = b_weight / new_weight
return (
a_mean + delta * w2_over_w,
a_m2 + b_m2 + delta * delta * a_weight * w2_over_w,
new_weight,
)
return welford_combine_fn
else:
raise NotImplementedError(f"unknown reduction_type={reduction_type}")
@ir_dataclass
|
Scatter
|
python
|
modin-project__modin
|
modin/core/dataframe/algebra/map.py
|
{
"start": 1072,
"end": 2603
}
|
class ____(Operator):
"""Builder class for Map operator."""
@classmethod
def register(
cls,
function: Callable[..., pandas.DataFrame],
*call_args: tuple,
**call_kwds: dict,
) -> Callable[..., PandasQueryCompiler]:
"""
Build Map operator that will be performed across each partition.
Parameters
----------
function : callable(pandas.DataFrame, *args, **kwargs) -> pandas.DataFrame
            Function that will be applied to each partition.
Function takes `pandas.DataFrame` and returns `pandas.DataFrame`
of the same shape.
*call_args : tuple
Args that will be passed to the returned function.
**call_kwds : dict
Kwargs that will be passed to the returned function.
Returns
-------
callable
Function that takes query compiler and executes map function.
"""
def caller(
query_compiler: PandasQueryCompiler, *args: tuple, **kwargs: dict
) -> PandasQueryCompiler:
"""Execute Map function against passed query compiler."""
shape_hint = call_kwds.pop("shape_hint", None) or query_compiler._shape_hint
return query_compiler.__constructor__(
query_compiler._modin_frame.map(
lambda x: function(x, *args, **kwargs), *call_args, **call_kwds
),
shape_hint=shape_hint,
)
return caller
|
Map
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1208909,
"end": 1209713
}
|
class ____(sgqlc.types.Type, Node):
"""Represents a 'labeled' event on a given issue or pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "label", "labelable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
label = sgqlc.types.Field(sgqlc.types.non_null(Label), graphql_name="label")
"""Identifies the label associated with the 'labeled' event."""
labelable = sgqlc.types.Field(sgqlc.types.non_null(Labelable), graphql_name="labelable")
"""Identifies the `Labelable` associated with the event."""
|
LabeledEvent
|
python
|
getsentry__sentry
|
src/sentry/deletions/defaults/grouphash.py
|
{
"start": 129,
"end": 441
}
|
class ____(ModelDeletionTask[GroupHash]):
def get_child_relations(self, instance: GroupHash) -> list[BaseRelation]:
from sentry.models.grouphashmetadata import GroupHashMetadata
return [
ModelRelation(GroupHashMetadata, {"grouphash_id": instance.id}),
]
|
GroupHashDeletionTask
|