| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
getsentry__sentry
|
tests/sentry/sentry_apps/api/endpoints/test_organization_sentry_apps.py
|
{
"start": 1213,
"end": 4275
}
|
class ____(OrganizationSentryAppsTest):
def test_gets_all_apps_in_own_org(self) -> None:
self.login_as(user=self.user)
response = self.client.get(self.url, format="json")
assert response.status_code == 200
assert_response_json(
response,
[
{
"name": self.unpublished_app.name,
"author": self.unpublished_app.author,
"slug": self.unpublished_app.slug,
"scopes": [],
"events": [],
"uuid": self.unpublished_app.uuid,
"status": self.unpublished_app.get_status_display(),
"webhookUrl": self.unpublished_app.webhook_url,
"redirectUrl": self.unpublished_app.redirect_url,
"isAlertable": self.unpublished_app.is_alertable,
"verifyInstall": self.unpublished_app.verify_install,
"clientId": self.unpublished_app.application.client_id,
"clientSecret": self.unpublished_app.application.client_secret,
"overview": self.unpublished_app.overview,
"allowedOrigins": [],
"schema": {},
"owner": {"id": self.org.id, "slug": self.org.slug},
"featureData": [
{
"featureId": 0,
"featureGate": "integrations-api",
"description": "Testin can **utilize the Sentry API** to pull data or update resources in Sentry (with permissions granted, of course).",
}
],
"popularity": SentryApp._meta.get_field("popularity").default,
"avatars": [],
"metadata": {},
}
],
)
def test_includes_internal_integrations(self) -> None:
self.create_project(organization=self.org)
internal_integration = self.create_internal_integration(organization=self.org)
self.login_as(self.user)
response = self.client.get(self.url, format="json")
assert response.status_code == 200
assert internal_integration.uuid in [a["uuid"] for a in response.data]
def test_cannot_see_apps_in_other_orgs(self) -> None:
self.login_as(user=self.user)
url = reverse("sentry-api-0-organization-sentry-apps", args=[self.super_org.slug])
response = self.client.get(url, format="json")
assert response.status_code == 403
def test_filter_for_internal(self) -> None:
self.login_as(user=self.user)
self.create_project(organization=self.org)
internal_integration = self.create_internal_integration(organization=self.org)
response = self.client.get(f"{self.url}?status=internal", format="json")
assert len(response.data) == 1
assert response.data[0]["uuid"] == internal_integration.uuid
|
GetOrganizationSentryAppsTest
|
python
|
walkccc__LeetCode
|
solutions/109. Convert Sorted List to Binary Search Tree/109-2.py
|
{
"start": 0,
"end": 476
}
|
class ____:
def sortedListToBST(self, head: ListNode | None) -> TreeNode | None:
arr = []
# Construct the array.
curr = head
while curr:
arr.append(curr.val)
curr = curr.next
def helper(l: int, r: int) -> TreeNode | None:
if l > r:
return None
m = (l + r) // 2
root = TreeNode(arr[m])
root.left = helper(l, m - 1)
root.right = helper(m + 1, r)
return root
return helper(0, len(arr) - 1)
|
Solution
|
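
The row above assumes LeetCode's `ListNode` and `TreeNode` helpers, which are not part of the snippet itself. A minimal, self-contained sketch of the same middle-of-range construction, with hypothetical helper definitions, for readers who want to run it:

```python
# Minimal sketch with stand-in ListNode/TreeNode classes (the dataset row assumes
# LeetCode's definitions): build a height-balanced BST from a sorted linked list.

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def sorted_list_to_bst(head):
    # Copy the list into an array, then recursively pick the middle element as root.
    arr = []
    while head:
        arr.append(head.val)
        head = head.next

    def build(lo, hi):
        if lo > hi:
            return None
        mid = (lo + hi) // 2
        node = TreeNode(arr[mid])
        node.left = build(lo, mid - 1)
        node.right = build(mid + 1, hi)
        return node

    return build(0, len(arr) - 1)

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

# Build -10 -> -3 -> 0 -> 5 -> 9 and confirm the inorder traversal is still sorted.
head = None
for v in reversed([-10, -3, 0, 5, 9]):
    head = ListNode(v, head)
assert inorder(sorted_list_to_bst(head)) == [-10, -3, 0, 5, 9]
```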
python
|
facebookresearch__faiss
|
benchs/bench_fw/benchmark.py
|
{
"start": 35153,
"end": 42711
}
|
class ____:
num_threads: int
training_vectors: Optional[DatasetDescriptor] = None
database_vectors: Optional[DatasetDescriptor] = None
query_vectors: Optional[DatasetDescriptor] = None
index_descs: Optional[List[IndexDescriptorClassic]] = None
range_ref_index_desc: Optional[str] = None
k: int = 1
distance_metric: str = "L2"
def set_io(self, benchmark_io):
self.io = benchmark_io
def get_embedding_dimension(self):
if self.training_vectors is not None:
xt = self.io.get_dataset(self.training_vectors)
return xt.shape[1]
if self.database_vectors is not None:
xb = self.io.get_dataset(self.database_vectors)
return xb.shape[1]
if self.query_vectors is not None:
xq = self.io.get_dataset(self.query_vectors)
return xq.shape[1]
raise ValueError("Failed to determine dimension of dataset")
def create_descriptors(
self, ci_desc: IndexDescriptorClassic, train, build, knn, reconstruct, range
):
codec_desc = None
index_desc = None
knn_desc = None
dim = self.get_embedding_dimension()
if train and ci_desc.factory is not None:
codec_desc = CodecDescriptor(
d=dim,
metric=self.distance_metric,
num_threads=self.num_threads,
factory=ci_desc.factory,
construction_params=ci_desc.construction_params,
training_vectors=self.training_vectors,
)
if build:
if codec_desc is None:
assert ci_desc.path is not None
codec_desc = CodecDescriptor(
d=dim,
metric=self.distance_metric,
num_threads=self.num_threads,
bucket=ci_desc.bucket,
path=ci_desc.path,
)
index_desc = IndexDescriptor(
d=codec_desc.d,
metric=self.distance_metric,
num_threads=self.num_threads,
codec_desc=codec_desc,
database_desc=self.database_vectors,
)
if knn or range:
if index_desc is None:
assert ci_desc.path is not None
index_desc = IndexDescriptor(
d=dim,
metric=self.distance_metric,
num_threads=self.num_threads,
bucket=ci_desc.bucket,
path=ci_desc.path,
)
knn_desc = KnnDescriptor(
d=dim,
metric=self.distance_metric,
num_threads=self.num_threads,
index_desc=index_desc,
query_dataset=self.query_vectors,
search_params=ci_desc.search_params,
range_metrics=ci_desc.range_metrics,
radius=ci_desc.radius,
k=self.k,
)
return codec_desc, index_desc, knn_desc
def create_execution_operator(
self,
train,
build,
knn,
reconstruct,
range,
) -> ExecutionOperator:
# all operators are created, as ground truth are always created in benchmarking
train_op = TrainOperator(
num_threads=self.num_threads, distance_metric=self.distance_metric
)
build_op = BuildOperator(
num_threads=self.num_threads, distance_metric=self.distance_metric
)
search_op = SearchOperator(
num_threads=self.num_threads, distance_metric=self.distance_metric
)
search_op.range = range
exec_op = ExecutionOperator(
train_op=train_op,
build_op=build_op,
search_op=search_op,
num_threads=self.num_threads,
)
assert hasattr(self, "io")
exec_op.set_io(self.io)
# iterate over classic descriptors
for ci_desc in self.index_descs:
codec_desc, index_desc, knn_desc = self.create_descriptors(
ci_desc, train, build, knn, reconstruct, range
)
exec_op.add_index_descs(codec_desc, index_desc, knn_desc)
return exec_op
def clone_one(self, index_desc):
benchmark = Benchmark(
num_threads=self.num_threads,
training_vectors=self.training_vectors,
database_vectors=self.database_vectors,
query_vectors=self.query_vectors,
# index_descs=[self.get_flat_desc("Flat"), index_desc],
index_descs=[index_desc], # Should automatically find flat descriptors
range_ref_index_desc=self.range_ref_index_desc,
k=self.k,
distance_metric=self.distance_metric,
)
benchmark.set_io(self.io.clone())
return benchmark
def benchmark(
self,
result_file=None,
local=False,
train=False,
reconstruct=False,
knn=False,
range=False,
):
logger.info("begin evaluate")
results = {"indices": {}, "experiments": {}}
faiss.omp_set_num_threads(self.num_threads)
exec_op = self.create_execution_operator(
train=train,
build=knn or range,
knn=knn,
reconstruct=reconstruct,
range=range,
)
exec_op.create_ground_truths(results)
todo = self.index_descs
for index_desc in self.index_descs:
index_desc.requires = None
queued = set()
while todo:
current_todo = []
next_todo = []
for index_desc in todo:
results, requires = exec_op.execute(results, dry_run=False)
if requires is None:
continue
if requires in queued:
if index_desc.requires != requires:
index_desc.requires = requires
next_todo.append(index_desc)
else:
queued.add(requires)
index_desc.requires = requires
current_todo.append(index_desc)
if current_todo:
results_one = {"indices": {}, "experiments": {}}
params = [
(
index_desc,
self.clone_one(index_desc),
results_one,
train,
reconstruct,
knn,
range,
)
for index_desc in current_todo
]
for result in self.io.launch_jobs(
run_benchmark_one, params, local=local
):
dict_merge(results, result)
todo = next_todo
if result_file is not None:
self.io.write_json(results, result_file, overwrite=True)
logger.info("end evaluate")
return results
def run_benchmark_one(params):
logger.info(params)
index_desc, benchmark, results, train, reconstruct, knn, range = params
exec_op = benchmark.create_execution_operator(
train=train,
build=knn,
knn=knn,
reconstruct=reconstruct,
range=range,
)
results, requires = exec_op.execute(results=results, dry_run=False)
assert requires is None
assert results is not None
return results
|
Benchmark
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_stateful.py
|
{
"start": 1496,
"end": 1704
}
|
class ____(RuleBasedStateMachine):
def myfunc(self, data):
print(data)
rule1 = rule(data=just("rule1data"))(myfunc)
rule2 = rule(data=just("rule2data"))(myfunc)
|
MultipleRulesSameFuncMachine
|
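
The row above only shows the rule definitions. An illustrative sketch (names are hypothetical, not from the dataset row) of how such a machine is usually exercised through Hypothesis's generated `TestCase`:

```python
# Illustrative sketch: two rules sharing one underlying function, wired into a
# unittest-compatible test via the machine's generated TestCase attribute.
from hypothesis.stateful import RuleBasedStateMachine, rule
from hypothesis.strategies import just

class EchoMachine(RuleBasedStateMachine):
    def record(self, data):
        # In a real machine this would assert an invariant; here we just touch the data.
        assert data in ("rule1data", "rule2data")

    # Two rules can reuse one function, as in the dataset row above.
    rule1 = rule(data=just("rule1data"))(record)
    rule2 = rule(data=just("rule2data"))(record)

# Hypothesis drives the machine when this TestCase is collected by a test runner.
TestEchoMachine = EchoMachine.TestCase
```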
python
|
graphql-python__graphene
|
graphene/types/inputobjecttype.py
|
{
"start": 1956,
"end": 2330
}
|
class ____(dict, BaseType): # type: ignore
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
for key in self._meta.fields:
setattr(self, key, self.get(key, _INPUT_OBJECT_TYPE_DEFAULT_VALUE))
def __init_subclass__(cls, *args, **kwargs):
pass
|
InputObjectTypeContainer
|
python
|
pytorch__pytorch
|
test/test_multiprocessing_spawn.py
|
{
"start": 7730,
"end": 7880
}
|
class ____(TestCase, _TestMultiProcessing):
start_method = 'fork'
@unittest.skipIf(
IS_WINDOWS,
"Fork is only available on Unix",
)
|
ForkTest
|
python
|
django__django
|
django/contrib/flatpages/middleware.py
|
{
"start": 172,
"end": 784
}
|
class ____(MiddlewareMixin):
def process_response(self, request, response):
if response.status_code != 404:
return response # No need to check for a flatpage for non-404 responses.
try:
return flatpage(request, request.path_info)
# Return the original response if any errors happened. Because this
# is a middleware, we can't assume the errors will be caught elsewhere.
except Http404:
return response
except Exception:
if settings.DEBUG:
raise
return response
|
FlatpageFallbackMiddleware
|
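
The middleware above only fires on 404 responses that nothing else handled. A sketch of the usual settings wiring (standard Django configuration, not part of the dataset row):

```python
# The fallback middleware goes at the end of the stack so it only sees 404 responses.
MIDDLEWARE = [
    "django.middleware.common.CommonMiddleware",
    # ... the rest of the stack ...
    "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
]
```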
python
|
numba__numba
|
numba/cuda/args.py
|
{
"start": 193,
"end": 771
}
|
class ____(metaclass=abc.ABCMeta):
def __init__(self, value):
self.value = value
@abc.abstractmethod
def to_device(self, retr, stream=0):
"""
:param stream: a stream to use when copying data
:param retr:
a list of clean-up work to do after the kernel's been run.
Append 0-arg lambdas to it!
:return: a value (usually an `DeviceNDArray`) to be passed to
the kernel
"""
pass
@property
def _numba_type_(self):
return typeof(self.value, Purpose.argument)
|
ArgHint
|
python
|
sphinx-doc__sphinx
|
sphinx/util/_io.py
|
{
"start": 275,
"end": 884
}
|
class ____:
"""File-like object writing to two streams."""
def __init__(
self,
stream_term: SupportsWrite,
stream_file: SupportsWrite,
) -> None:
self.stream_term = stream_term
self.stream_file = stream_file
def write(self, text: str, /) -> None:
self.stream_term.write(text)
self.stream_file.write(strip_escape_sequences(text))
def flush(self) -> None:
if hasattr(self.stream_term, 'flush'):
self.stream_term.flush()
if hasattr(self.stream_file, 'flush'):
self.stream_file.flush()
|
TeeStripANSI
|
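
A self-contained sketch of the same tee-and-strip pattern, with a simple ANSI-escape regex standing in for Sphinx's `strip_escape_sequences` helper (which the row assumes):

```python
# Write every message to two streams, stripping ANSI color codes from the second so the
# log file stays plain text. The regex is a stand-in for Sphinx's helper.
import io
import re
import sys

ANSI_RE = re.compile(r"\x1b\[[0-9;]*m")

class Tee:
    def __init__(self, stream_term, stream_file):
        self.stream_term = stream_term
        self.stream_file = stream_file

    def write(self, text):
        self.stream_term.write(text)
        self.stream_file.write(ANSI_RE.sub("", text))

    def flush(self):
        for stream in (self.stream_term, self.stream_file):
            if hasattr(stream, "flush"):
                stream.flush()

log = io.StringIO()
tee = Tee(sys.stdout, log)
tee.write("\x1b[32mbuild succeeded\x1b[0m\n")
assert log.getvalue() == "build succeeded\n"
```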
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/functionAnnotation1.py
|
{
"start": 747,
"end": 1061
}
|
class ____:
pass
def func1g(*args, **kwargs):
# type: (*int, **float) -> int
return sum(args) + sum(round(kwarg) for kwarg in kwargs.values())
def func1h(
a, # type: _Literal["{", "}"]
b, # type: Union[_Literal["%"], _Literal["{"], _Literal["$"]]
):
# type: (...) -> str
return ""
|
Foo
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-hardcoded-records/source_hardcoded_records/streams.py
|
{
"start": 557,
"end": 834
}
|
class ____(HardcodedStream):
sample_record = {
"id": 1,
"make": "Mazda",
"model": "MX-5",
"year": 2008,
"price": 2869,
"created_at": "2022-02-01T17:02:19+00:00",
"updated_at": "2022-11-01T17:02:19+00:00",
}
|
Products
|
python
|
walkccc__LeetCode
|
solutions/324. Wiggle Sort II/324.py
|
{
"start": 0,
"end": 1302
}
|
class ____:
def wiggleSort(self, nums: list[int]) -> None:
n = len(nums)
median = self._findKthLargest(nums, (n + 1) // 2)
def A(i: int):
return (1 + 2 * i) % (n | 1)
i = 0
j = 0
k = n - 1
while i <= k:
if nums[A(i)] > median:
nums[A(i)], nums[A(j)] = nums[A(j)], nums[A(i)]
i, j = i + 1, j + 1
elif nums[A(i)] < median:
nums[A(i)], nums[A(k)] = nums[A(k)], nums[A(i)]
k -= 1
else:
i += 1
# Same as 215. Kth Largest Element in an Array
def _findKthLargest(self, nums: list[int], k: int) -> int:
def quickSelect(l: int, r: int, k: int) -> int:
randIndex = random.randint(0, r - l) + l
nums[randIndex], nums[r] = nums[r], nums[randIndex]
pivot = nums[r]
nextSwapped = l
for i in range(l, r):
if nums[i] >= pivot:
nums[nextSwapped], nums[i] = nums[i], nums[nextSwapped]
nextSwapped += 1
nums[nextSwapped], nums[r] = nums[r], nums[nextSwapped]
count = nextSwapped - l + 1 # Number of nums >= pivot
if count == k:
return nums[nextSwapped]
if count > k:
return quickSelect(l, nextSwapped - 1, k)
return quickSelect(nextSwapped + 1, r, k - count)
return quickSelect(0, len(nums) - 1, k)
|
Solution
|
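
The subtle part of the row above is the virtual index mapping `A(i) = (1 + 2*i) % (n | 1)`. A quick standalone illustration of the order it produces:

```python
# The mapping visits the odd positions first, then the even ones, so partitioning
# around the median lands values greater than the median in odd slots.
def virtual_index(i, n):
    return (1 + 2 * i) % (n | 1)

n = 6
order = [virtual_index(i, n) for i in range(n)]
assert order == [1, 3, 5, 0, 2, 4]
```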
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_ansiballz/_extensions/_debugpy.py
|
{
"start": 1745,
"end": 3150
}
|
class ____:
"""Debugger options for debugpy."""
host: str = 'localhost'
"""The host to connect to for remote debugging."""
port: int = 5678
"""The port to connect to for remote debugging."""
connect: dict[str, object] = dataclasses.field(default_factory=dict)
"""The options to pass to the `debugpy.connect` method."""
source_mapping: dict[str, str] = dataclasses.field(default_factory=dict)
"""
A mapping of source paths to provide to debugpy.
This setting is used internally by AnsiballZ and is not required unless Ansible CLI commands are run from a different system than your IDE.
In that scenario, use this setting instead of configuring source mapping in your IDE.
The key is a path known to the IDE.
The value is the same path as known to the Ansible CLI.
Both file paths and directories are supported.
"""
def run(args: dict[str, t.Any]) -> None: # pragma: nocover
"""Enable remote debugging."""
import debugpy
options = Options(**args)
temp_dir = pathlib.Path(__file__).parent.parent.parent.parent.parent.parent
path_mapping = [[key, str(temp_dir / value)] for key, value in options.source_mapping.items()]
os.environ['PATHS_FROM_ECLIPSE_TO_PYTHON'] = json.dumps(path_mapping)
debugpy.connect((options.host, options.port), **options.connect)
pass # A convenient place to put a breakpoint
|
Options
|
python
|
networkx__networkx
|
networkx/classes/tests/test_special.py
|
{
"start": 2464,
"end": 3739
}
|
class ____(BaseDiGraphTester):
def setup_method(self):
all_edge_dict = {"weight": 1}
class MyGraph(nx.DiGraph):
def edge_attr_dict_factory(self):
return all_edge_dict
self.Graph = MyGraph
# build dict-of-dict-of-dict K3
ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict)
ed4, ed5, ed6 = (all_edge_dict, all_edge_dict, all_edge_dict)
self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}}
self.k3edges = [(0, 1), (0, 2), (1, 2)]
self.k3nodes = [0, 1, 2]
self.K3 = self.Graph()
self.K3._succ = self.k3adj
# K3._adj is synced with K3._succ
self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}}
self.K3._node = {}
self.K3._node[0] = {}
self.K3._node[1] = {}
self.K3._node[2] = {}
ed1, ed2 = (all_edge_dict, all_edge_dict)
self.P3 = self.Graph()
self.P3._succ = {0: {1: ed1}, 1: {2: ed2}, 2: {}}
# P3._adj is synced with P3._succ
self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}}
self.P3._node = {}
self.P3._node[0] = {}
self.P3._node[1] = {}
self.P3._node[2] = {}
|
TestThinDiGraph
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_ansible_collector.py
|
{
"start": 19266,
"end": 19996
}
|
class ____(TestPkgMgrFacts):
def test_is_openbsd_pkg(self):
self.assertIn('pkg_mgr', self.facts)
self.assertEqual(self.facts['pkg_mgr'], 'openbsd_pkg')
def setUp(self):
self.patcher = patch('platform.system')
mock_platform = self.patcher.start()
mock_platform.return_value = 'OpenBSD'
mock_module = self._mock_module()
collectors = self._collectors(mock_module)
fact_collector = \
ansible_collector.AnsibleFactCollector(collectors=collectors,
namespace=ns)
self.facts = fact_collector.collect(module=mock_module)
def tearDown(self):
self.patcher.stop()
|
TestOpenBSDPkgMgrFacts
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 1966,
"end": 2047
}
|
class ____:
@staticmethod
def func(data):
return data
|
Staticmethod
|
python
|
facebookresearch__faiss
|
benchs/bench_fw/utils.py
|
{
"start": 3741,
"end": 3874
}
|
class ____(Enum):
DISABLE = 1 # no Pareto filtering
INDEX = 2 # index-local optima
GLOBAL = 3 # global optima
|
ParetoMode
|
python
|
pytorch__pytorch
|
torch/_numpy/_dtypes.py
|
{
"start": 1672,
"end": 1792
}
|
class ____(signedinteger):
name = "int64"
typecode = "l"
torch_dtype = torch.int64
# unsigned integers
|
int64
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scattergl/legendgrouptitle/_font.py
|
{
"start": 233,
"end": 9937
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergl.legendgrouptitle"
_path_str = "scattergl.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergl.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergl.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/datetimes/test_ops.py
|
{
"start": 163,
"end": 530
}
|
class ____:
def test_infer_freq(self, freq_sample):
# GH 11018
idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10, unit="ns")
result = DatetimeIndex(idx.asi8, freq="infer")
tm.assert_index_equal(idx, result)
assert result.freq == freq_sample
@pytest.mark.parametrize("freq", ["B", "C"])
|
TestDatetimeIndexOps
|
python
|
pandas-dev__pandas
|
pandas/tests/series/methods/test_isna.py
|
{
"start": 147,
"end": 941
}
|
class ____:
def test_isna_period_dtype(self):
# GH#13737
ser = Series([Period("2011-01", freq="M"), Period("NaT", freq="M")])
expected = Series([False, True])
result = ser.isna()
tm.assert_series_equal(result, expected)
result = ser.notna()
tm.assert_series_equal(result, ~expected)
def test_isna(self):
ser = Series([0, 5.4, 3, np.nan, -0.001])
expected = Series([False, False, False, True, False])
tm.assert_series_equal(ser.isna(), expected)
tm.assert_series_equal(ser.notna(), ~expected)
ser = Series(["hi", "", np.nan])
expected = Series([False, False, True])
tm.assert_series_equal(ser.isna(), expected)
tm.assert_series_equal(ser.notna(), ~expected)
|
TestIsna
|
python
|
spack__spack
|
lib/spack/spack/fetch_strategy.py
|
{
"start": 44490,
"end": 47397
}
|
class ____(VCSFetchStrategy):
"""Fetch strategy that gets source code from a subversion repository.
Use like this in a package::
version("name", svn="http://www.example.com/svn/trunk")
Optionally, you can provide a revision for the URL::
version("name", svn="http://www.example.com/svn/trunk", revision="1641")
Repositories are checked out into the standard stage source path directory.
"""
url_attr = "svn"
optional_attrs = ["revision"]
def __init__(self, **kwargs):
# Discards the keywords in kwargs that may conflict with the next call
# to __init__
forwarded_args = copy.copy(kwargs)
forwarded_args.pop("name", None)
super().__init__(**forwarded_args)
self._svn = None
if self.revision is not None:
self.revision = str(self.revision)
@property
def svn(self):
if not self._svn:
self._svn = which("svn", required=True)
return self._svn
@property
def cachable(self):
return self.cache_enabled and bool(self.revision)
def source_id(self):
return self.revision
def mirror_id(self):
if self.revision:
repo_path = urllib.parse.urlparse(self.url).path
result = os.path.sep.join(["svn", repo_path, self.revision])
return result
@_needs_stage
def fetch(self):
if self.stage.expanded:
tty.debug("Already fetched {0}".format(self.stage.source_path))
return
tty.debug("Checking out subversion repository: {0}".format(self.url))
args = ["checkout", "--force", "--quiet"]
if self.revision:
args += ["-r", self.revision]
args.extend([self.url])
with temp_cwd():
self.svn(*args)
repo_name = get_single_file(".")
self.stage.srcdir = repo_name
shutil.move(repo_name, self.stage.source_path)
def _remove_untracked_files(self):
"""Removes untracked files in an svn repository."""
with working_dir(self.stage.source_path):
status = self.svn("status", "--no-ignore", output=str)
self.svn("status", "--no-ignore")
for line in status.split("\n"):
if not re.match("^[I?]", line):
continue
path = line[8:].strip()
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
def archive(self, destination):
super().archive(destination, exclude=".svn")
@_needs_stage
def reset(self):
self._remove_untracked_files()
with working_dir(self.stage.source_path):
self.svn("revert", ".", "-R")
def __str__(self):
return "[svn] %s" % self.url
@fetcher
|
SvnFetchStrategy
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/traversals.py
|
{
"start": 6358,
"end": 7577
}
|
class ____(HasTraverseInternals):
__slots__ = ()
def _clone(self, **kw):
raise NotImplementedError()
def _copy_internals(
self, *, omit_attrs: Iterable[str] = (), **kw: Any
) -> None:
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return
for attrname, obj, meth in _copy_internals.run_generated_dispatch(
self, traverse_internals, "_generated_copy_internals_traversal"
):
if attrname in omit_attrs:
continue
if obj is not None:
result = meth(attrname, self, obj, **kw)
if result is not None:
setattr(self, attrname, result)
|
HasCopyInternals
|
python
|
django-import-export__django-import-export
|
tests/core/migrations/0002_book_published_time.py
|
{
"start": 43,
"end": 395
}
|
class ____(migrations.Migration):
dependencies = [
("core", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="book",
name="published_time",
field=models.TimeField(
blank=True, null=True, verbose_name="Time published"
),
),
]
|
Migration
|
python
|
donnemartin__interactive-coding-challenges
|
linked_lists/partition/test_partition.py
|
{
"start": 18,
"end": 1463
}
|
class ____(unittest.TestCase):
def test_partition(self):
print('Test: Empty list')
linked_list = MyLinkedList(None)
linked_list.partition(10)
self.assertEqual(linked_list.get_all_data(), [])
print('Test: One element list, left list empty')
linked_list = MyLinkedList(Node(5))
linked_list.partition(0)
self.assertEqual(linked_list.get_all_data(), [5])
print('Test: Right list is empty')
linked_list = MyLinkedList(Node(5))
linked_list.partition(10)
self.assertEqual(linked_list.get_all_data(), [5])
print('Test: General case')
# Partition = 10
# Input: 4, 3, 13, 8, 10, 1, 14, 10, 12
# Output: 4, 3, 8, 1, 10, 10, 13, 14, 12
linked_list = MyLinkedList(Node(12))
linked_list.insert_to_front(10)
linked_list.insert_to_front(14)
linked_list.insert_to_front(1)
linked_list.insert_to_front(10)
linked_list.insert_to_front(8)
linked_list.insert_to_front(13)
linked_list.insert_to_front(3)
linked_list.insert_to_front(4)
partitioned_list = linked_list.partition(10)
self.assertEqual(partitioned_list.get_all_data(),
[4, 3, 8, 1, 10, 10, 13, 14, 12])
print('Success: test_partition')
def main():
test = TestPartition()
test.test_partition()
if __name__ == '__main__':
main()
|
TestPartition
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py
|
{
"start": 960,
"end": 1115
}
|
class ____:
def __init__(self, foo:int, bar:list):
self.foo = foo
self.bar = bar
def other_function(self): ...
|
NoWarningsMoreMethods
|
python
|
Lightning-AI__lightning
|
src/lightning/fabric/_graveyard/tpu.py
|
{
"start": 2413,
"end": 2823
}
|
class ____(XLAPrecision):
"""Legacy class.
Use :class:`~lightning.fabric.plugins.precision.xla.XLAPrecision` instead.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
rank_zero_deprecation(
"The `TPUPrecision` class is deprecated. Use `lightning.fabric.plugins.precision.XLAPrecision` instead."
)
super().__init__(precision="32-true")
|
TPUPrecision
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-palindrome-with-fixed-length.py
|
{
"start": 40,
"end": 659
}
|
class ____(object):
def kthPalindrome(self, queries, intLength):
"""
:type queries: List[int]
:type intLength: int
:rtype: List[int]
"""
def reverse(x):
result = 0
while x:
result = result*10+x%10
x //= 10
return result
def f(l, x):
x = 10**((l-1)//2)+(x-1)
if x > 10**((l+1)//2)-1:
return -1
return x*10**(l//2)+reverse(x//10 if l%2 else x)
return [f(intLength, x) for x in queries]
# Time: O(n * l)
# Space: O(l)
# math
|
Solution
|
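
A worked example of the closed-form construction in the row above, with the helpers re-stated so the sketch runs on its own:

```python
# The k-th palindrome of length l is built from 10**((l-1)//2) + (k-1) and its
# reversed prefix; -1 signals that fewer than k palindromes of that length exist.
def reverse(x):
    result = 0
    while x:
        result = result * 10 + x % 10
        x //= 10
    return result

def kth_palindrome(l, k):
    x = 10 ** ((l - 1) // 2) + (k - 1)
    if x > 10 ** ((l + 1) // 2) - 1:
        return -1
    return x * 10 ** (l // 2) + reverse(x // 10 if l % 2 else x)

assert kth_palindrome(3, 1) == 101
assert kth_palindrome(3, 2) == 111
assert kth_palindrome(4, 1) == 1001
```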
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 154076,
"end": 154937
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, auth_token: str, counter_id: str, start_date: str, end_date: str):
"""Airbyte Source for Yandex Metrica.
Args:
name (str): The name of the destination.
auth_token (str): Your Yandex Metrica API access token
counter_id (str): Counter ID
start_date (str): UTC date and time in the format YYYY-MM-DD.
end_date (str): UTC date and time in the format YYYY-MM-DD.
"""
self.auth_token = check.str_param(auth_token, "auth_token")
self.counter_id = check.str_param(counter_id, "counter_id")
self.start_date = check.str_param(start_date, "start_date")
self.end_date = check.str_param(end_date, "end_date")
super().__init__("Yandex Metrica", name)
|
YandexMetricaSource
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/group.py
|
{
"start": 35201,
"end": 43663
}
|
class ____(GroupSerializerBase):
skip_snuba_fields = {
*SKIP_SNUBA_FIELDS,
"last_seen",
"times_seen",
"date",
"timestamp", # We merge this with start/end, so don't want to include it as its own
# condition
# We don't need to filter by release stage again here since we're
# filtering to specific groups. Saves us making a second query to
# postgres for no reason
RELEASE_STAGE_ALIAS,
}
def __init__(
self,
environment_ids: list[int] | None = None,
start: datetime | None = None,
end: datetime | None = None,
search_filters=None,
collapse=None,
expand=None,
organization_id=None,
project_ids=None,
):
super().__init__(collapse=collapse, expand=expand)
from sentry.search.snuba.executors import get_search_filter
self.environment_ids = environment_ids
self.organization_id = organization_id
# XXX: We copy this logic from `PostgresSnubaQueryExecutor.query`. Ideally we
# should try and encapsulate this logic, but if you're changing this, change it
# there as well.
self.start = None
start_params = [
_f
for _f in [
start,
get_search_filter(search_filters, "date", ">"),
get_search_filter(search_filters, "timestamp", ">"),
]
if _f
]
if start_params:
self.start = max(_f for _f in start_params if _f)
self.end = None
end_params = [
_f
for _f in [
end,
get_search_filter(search_filters, "date", "<"),
get_search_filter(search_filters, "timestamp", "<"),
]
if _f
]
if end_params:
self.end = min(end_params)
conditions = []
if search_filters is not None:
for search_filter in search_filters:
if search_filter.key.name not in self.skip_snuba_fields:
formatted_conditions, projects_to_filter, group_ids = format_search_filter(
search_filter,
params={
"organization_id": organization_id,
"project_id": project_ids,
"environment_id": environment_ids,
},
)
# if no re-formatted conditions, use fallback method
new_condition = None
if formatted_conditions:
new_condition = formatted_conditions[0]
elif group_ids:
new_condition = convert_search_filter_to_snuba_query(
search_filter,
params={
"organization_id": organization_id,
"project_id": project_ids,
"environment_id": environment_ids,
},
)
if new_condition:
conditions.append(new_condition)
self.conditions = conditions
def _seen_stats_error(
self, error_issue_list: Sequence[Group], user
) -> Mapping[Group, SeenStats]:
return self._parse_seen_stats_results(
self._execute_error_seen_stats_query(
item_list=error_issue_list,
start=self.start,
end=self.end,
conditions=self.conditions,
environment_ids=self.environment_ids,
),
error_issue_list,
bool(self.start or self.end or self.conditions),
self.environment_ids,
)
def _seen_stats_generic(
self, generic_issue_list: Sequence[Group], user
) -> Mapping[Group, SeenStats]:
return self._parse_seen_stats_results(
self._execute_generic_seen_stats_query(
item_list=generic_issue_list,
start=self.start,
end=self.end,
conditions=self.conditions,
environment_ids=self.environment_ids,
),
generic_issue_list,
bool(self.start or self.end or self.conditions),
self.environment_ids,
)
@staticmethod
def _execute_error_seen_stats_query(
item_list, start=None, end=None, conditions=None, environment_ids=None
):
project_ids = list({item.project_id for item in item_list})
group_ids = [item.id for item in item_list]
aggregations = [
["count()", "", "times_seen"],
["min", "timestamp", "first_seen"],
["max", "timestamp", "last_seen"],
["uniq", "tags[sentry:user]", "count"],
]
filters = {"project_id": project_ids, "group_id": group_ids}
if environment_ids:
filters["environment"] = environment_ids
return aliased_query(
dataset=Dataset.Events,
start=start,
end=end,
groupby=["group_id"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="serializers.GroupSerializerSnuba._execute_error_seen_stats_query",
tenant_ids=(
{"organization_id": item_list[0].project.organization_id} if item_list else None
),
)
@staticmethod
def _execute_generic_seen_stats_query(
item_list, start=None, end=None, conditions=None, environment_ids=None
):
project_ids = list({item.project_id for item in item_list})
group_ids = [item.id for item in item_list]
aggregations = [
["count()", "", "times_seen"],
["min", "timestamp", "first_seen"],
["max", "timestamp", "last_seen"],
["uniq", "tags[sentry:user]", "count"],
]
filters = {"project_id": project_ids, "group_id": group_ids}
if environment_ids:
filters["environment"] = environment_ids
return aliased_query(
dataset=Dataset.IssuePlatform,
start=start,
end=end,
groupby=["group_id"],
conditions=conditions,
filter_keys=filters,
aggregations=aggregations,
referrer="serializers.GroupSerializerSnuba._execute_generic_seen_stats_query",
tenant_ids=(
{"organization_id": item_list[0].project.organization_id} if item_list else None
),
)
@staticmethod
def _parse_seen_stats_results(
result, item_list, use_result_first_seen_times_seen, environment_ids=None
):
seen_data = {
issue["group_id"]: fix_tag_value_data(
dict(filter(lambda key: key[0] != "group_id", issue.items()))
)
for issue in result["data"]
}
user_counts = {item_id: value["count"] for item_id, value in seen_data.items()}
last_seen = {item_id: value["last_seen"] for item_id, value in seen_data.items()}
if use_result_first_seen_times_seen:
first_seen = {item_id: value["first_seen"] for item_id, value in seen_data.items()}
times_seen = {item_id: value["times_seen"] for item_id, value in seen_data.items()}
else:
if environment_ids:
first_seen = {
ge["group_id"]: ge["first_seen__min"]
for ge in GroupEnvironment.objects.filter(
group_id__in=[item.id for item in item_list],
environment_id__in=environment_ids,
)
.values("group_id")
.annotate(Min("first_seen"))
}
else:
first_seen = {item.id: item.first_seen for item in item_list}
times_seen = {item.id: item.times_seen for item in item_list}
return {
item: {
"times_seen": times_seen.get(item.id, 0),
"first_seen": first_seen.get(item.id),
"last_seen": last_seen.get(item.id),
"user_count": user_counts.get(item.id, 0),
}
for item in item_list
}
|
GroupSerializerSnuba
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 56835,
"end": 57364
}
|
class ____(sgqlc.types.Enum):
"""Represents items that can be pinned to a profile page or
dashboard.
Enumeration Choices:
* `GIST`: A gist.
* `ISSUE`: An issue.
* `ORGANIZATION`: An organization.
* `PROJECT`: A project.
* `PULL_REQUEST`: A pull request.
* `REPOSITORY`: A repository.
* `TEAM`: A team.
* `USER`: A user.
"""
__schema__ = github_schema
__choices__ = ("GIST", "ISSUE", "ORGANIZATION", "PROJECT", "PULL_REQUEST", "REPOSITORY", "TEAM", "USER")
|
PinnableItemType
|
python
|
pandas-dev__pandas
|
pandas/tests/series/indexing/test_setitem.py
|
{
"start": 45579,
"end": 46193
}
|
class ____(CoercionTest):
@pytest.fixture
def obj(self):
return Series([1.1, 2.2, 3.3, 4.4], dtype=np.float32)
def test_slice_key(self, obj, key, expected, raises, val, indexer_sli, is_inplace):
super().test_slice_key(obj, key, expected, raises, val, indexer_sli, is_inplace)
if isinstance(val, float):
# the xfail would xpass bc test_slice_key short-circuits
raise AssertionError("xfail not relevant for this test.")
@pytest.mark.parametrize(
"exp_dtype",
[
"M8[ms]",
"M8[ms, UTC]",
"m8[ms]",
],
)
|
TestCoercionFloat32
|
python
|
google__jax
|
jax/_src/interpreters/batching.py
|
{
"start": 2050,
"end": 2378
}
|
class ____:
idx: core.Var
lengths: Array | core.Var | Tracer
def __repr__(self) -> str:
return f'{self.lengths}.Var{id(self.idx)}'
replace = dataclasses.replace
# Jumble(aval=a:3 => f32[[3 1 4].a],
# data=Array([0., 1., 2., 0., 0., 1., 2., 3.], dtype=float32))
@dataclasses.dataclass(frozen=True)
|
IndexedAxisSize
|
python
|
pytorch__pytorch
|
torch/_dynamo/guards.py
|
{
"start": 6942,
"end": 7290
}
|
class ____(IndentedBuffer):
def prefix(self) -> str:
return "| " * (self._indent * self.tabwidth)
def writeline(self, line: str, skip_prefix: bool = False) -> None: # type: ignore[override]
if skip_prefix:
super().writeline(line)
else:
super().writeline("+- " + line)
|
IndentedBufferWithPrefix
|
python
|
kamyu104__LeetCode-Solutions
|
Python/permutation-difference-between-two-strings.py
|
{
"start": 48,
"end": 379
}
|
class ____(object):
def findPermutationDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: int
"""
lookup = [-1]*26
for i, x in enumerate(s):
lookup[ord(x)-ord('a')] = i
return sum(abs(lookup[ord(x)-ord('a')]-i)for i, x in enumerate(t))
|
Solution
|
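
A short worked example of the index-difference sum computed in the row above:

```python
# Record each character's index in s, then sum the absolute index differences while
# scanning t.
def permutation_difference(s, t):
    lookup = {ch: i for i, ch in enumerate(s)}
    return sum(abs(lookup[ch] - i) for i, ch in enumerate(t))

# s = "abc", t = "bac": |1-0| + |0-1| + |2-2| = 1 + 1 + 0 = 2
assert permutation_difference("abc", "bac") == 2
```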
python
|
huggingface__transformers
|
src/transformers/models/wavlm/modeling_wavlm.py
|
{
"start": 39499,
"end": 45507
}
|
class ____(WavLMPreTrainedModel):
def __init__(self, config: WavLMConfig):
super().__init__(config)
self.config = config
self.feature_extractor = WavLMFeatureEncoder(config)
self.feature_projection = WavLMFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = WavLMEncoderStableLayerNorm(config)
else:
self.encoder = WavLMEncoder(config)
self.adapter = WavLMAdapter(config) if config.add_adapter else None
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.feature_extractor._freeze_parameters()
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, WavLMBaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(
extract_features.shape[1], attention_mask, add_adapter=False
)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if self.adapter is not None:
hidden_states = self.adapter(hidden_states)
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return WavLMBaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
_HIDDEN_STATES_START_POSITION = 2
@auto_docstring(
custom_intro="""
WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
"""
)
|
WavLMModel
|
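
A hedged usage sketch for the model class in the row above, assuming the public `transformers` API and the `microsoft/wavlm-base` checkpoint (neither is part of the dataset row): feed raw 16 kHz audio through the encoder and read its last hidden states.

```python
# Assumption-laden sketch: downloads the checkpoint and runs one second of silence
# through the model just to show the input/output shapes.
import torch
from transformers import AutoFeatureExtractor, WavLMModel

feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/wavlm-base")
model = WavLMModel.from_pretrained("microsoft/wavlm-base")

waveform = torch.zeros(16000)  # stand-in for real 16 kHz audio
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # (batch, frames, hidden_size)
```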
python
|
cython__cython
|
tests/run/test_templatelib.py
|
{
"start": 8641,
"end": 9728
}
|
class ____(unittest.TestCase):
def test_abc(self):
self.assertIsInstance(iter(t''), Iterable)
self.assertIsInstance(iter(t''), Iterator)
def test_final(self):
TemplateIter = type(iter(t''))
with self.assertRaisesRegex(TypeError, 'is not an acceptable base type'):
class Sub(TemplateIter): ...
def test_iter(self):
x = 1
res = list(iter(t'abc {x} yz'))
self.assertEqual(res[0], 'abc ')
self.assertIsInstance(res[1], Interpolation)
self.assertEqual(res[1].value, 1)
self.assertEqual(res[1].expression, 'x')
self.assertEqual(res[1].conversion, None)
self.assertEqual(res[1].format_spec, '')
self.assertEqual(res[2], ' yz')
def test_exhausted(self):
# See https://github.com/python/cpython/issues/134119.
template_iter = iter(t"{1}")
self.assertIsInstance(next(template_iter), Interpolation)
self.assertRaises(StopIteration, next, template_iter)
self.assertRaises(StopIteration, next, template_iter)
|
TemplateIterTests
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/triggers/vertex_ai.py
|
{
"start": 5256,
"end": 6064
}
|
class ____(BaseVertexAIJobTrigger):
"""CreateBatchPredictionJobTrigger run on the trigger worker to perform create operation."""
job_type_verbose_name = "Batch Prediction Job"
job_serializer_class = BatchPredictionJob
@cached_property
def async_hook(self) -> BatchPredictionJobAsyncHook:
return BatchPredictionJobAsyncHook(
gcp_conn_id=self.conn_id, impersonation_chain=self.impersonation_chain
)
async def _wait_job(self) -> types.BatchPredictionJob:
job: types.BatchPredictionJob = await self.async_hook.wait_batch_prediction_job(
project_id=self.project_id,
location=self.location,
job_id=self.job_id,
poll_interval=self.poll_interval,
)
return job
|
CreateBatchPredictionJobTrigger
|
python
|
psf__black
|
tests/test_black.py
|
{
"start": 4027,
"end": 85333
}
|
class ____(BlackBaseTestCase):
invokeBlack = staticmethod(invokeBlack)
def test_empty_ff(self) -> None:
expected = ""
tmp_file = Path(black.dump_to_file())
try:
self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
actual = tmp_file.read_text(encoding="utf-8")
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
@patch("black.dump_to_file", dump_to_stderr)
def test_one_empty_line(self) -> None:
for nl in ["\n", "\r\n"]:
source = expected = nl
assert_format(source, expected)
def test_one_empty_line_ff(self) -> None:
for nl in ["\n", "\r\n"]:
expected = nl
tmp_file = Path(black.dump_to_file(nl))
if system() == "Windows":
# Writing files in text mode automatically uses the system newline,
# but in this case we don't want this for testing reasons. See:
# https://github.com/psf/black/pull/3348
with open(tmp_file, "wb") as f:
f.write(nl.encode("utf-8"))
try:
self.assertFalse(ff(tmp_file, write_back=black.WriteBack.YES))
with open(tmp_file, "rb") as f:
actual = f.read().decode("utf-8")
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
def test_piping(self) -> None:
_, source, expected = read_data_from_file(
PROJECT_ROOT / "src/black/__init__.py"
)
result = BlackRunner().invoke(
black.main,
[
"-",
"--fast",
f"--line-length={black.DEFAULT_LINE_LENGTH}",
f"--config={EMPTY_CONFIG}",
],
input=BytesIO(source.encode("utf-8")),
)
self.assertEqual(result.exit_code, 0)
self.assertFormatEqual(expected, result.stdout)
if source != result.stdout:
black.assert_equivalent(source, result.stdout)
black.assert_stable(source, result.stdout, DEFAULT_MODE)
def test_piping_diff(self) -> None:
diff_header = re.compile(
r"(STDIN|STDOUT)\t\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d"
r"\+\d\d:\d\d"
)
source, _ = read_data("cases", "expression.py")
expected, _ = read_data("cases", "expression.diff")
args = [
"-",
"--fast",
f"--line-length={black.DEFAULT_LINE_LENGTH}",
"--diff",
f"--config={EMPTY_CONFIG}",
]
result = BlackRunner().invoke(
black.main, args, input=BytesIO(source.encode("utf-8"))
)
self.assertEqual(result.exit_code, 0)
actual = diff_header.sub(DETERMINISTIC_HEADER, result.stdout)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
self.assertEqual(expected, actual)
def test_piping_diff_with_color(self) -> None:
source, _ = read_data("cases", "expression.py")
args = [
"-",
"--fast",
f"--line-length={black.DEFAULT_LINE_LENGTH}",
"--diff",
"--color",
f"--config={EMPTY_CONFIG}",
]
result = BlackRunner().invoke(
black.main, args, input=BytesIO(source.encode("utf-8"))
)
actual = result.output
# Again, the contents are checked in a different test, so only look for colors.
self.assertIn("\033[1m", actual)
self.assertIn("\033[36m", actual)
self.assertIn("\033[32m", actual)
self.assertIn("\033[31m", actual)
self.assertIn("\033[0m", actual)
def test_pep_572_version_detection(self) -> None:
source, _ = read_data("cases", "pep_572")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.ASSIGNMENT_EXPRESSIONS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
def test_pep_695_version_detection(self) -> None:
for file in ("type_aliases", "type_params"):
source, _ = read_data("cases", file)
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.TYPE_PARAMS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY312, versions)
def test_pep_696_version_detection(self) -> None:
source, _ = read_data("cases", "type_param_defaults")
samples = [
source,
"type X[T=int] = float",
"type X[T:int=int]=int",
"type X[*Ts=int]=int",
"type X[*Ts=*int]=int",
"type X[**P=int]=int",
]
for sample in samples:
root = black.lib2to3_parse(sample)
features = black.get_features_used(root)
self.assertIn(black.Feature.TYPE_PARAM_DEFAULTS, features)
def test_expression_ff(self) -> None:
source, expected = read_data("cases", "expression.py")
tmp_file = Path(black.dump_to_file(source))
try:
self.assertTrue(ff(tmp_file, write_back=black.WriteBack.YES))
actual = tmp_file.read_text(encoding="utf-8")
finally:
os.unlink(tmp_file)
self.assertFormatEqual(expected, actual)
with patch("black.dump_to_file", dump_to_stderr):
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
def test_expression_diff(self) -> None:
source, _ = read_data("cases", "expression.py")
expected, _ = read_data("cases", "expression.diff")
tmp_file = Path(black.dump_to_file(source))
diff_header = re.compile(
rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d"
)
try:
result = BlackRunner().invoke(
black.main, ["--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"]
)
self.assertEqual(result.exit_code, 0)
finally:
os.unlink(tmp_file)
actual = result.stdout
actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
if expected != actual:
dump = black.dump_to_file(actual)
msg = (
"Expected diff isn't equal to the actual. If you made changes to"
" expression.py and this is an anticipated difference, overwrite"
f" tests/data/cases/expression.diff with {dump}"
)
self.assertEqual(expected, actual, msg)
def test_expression_diff_with_color(self) -> None:
source, _ = read_data("cases", "expression.py")
expected, _ = read_data("cases", "expression.diff")
tmp_file = Path(black.dump_to_file(source))
try:
result = BlackRunner().invoke(
black.main,
["--diff", "--color", str(tmp_file), f"--config={EMPTY_CONFIG}"],
)
finally:
os.unlink(tmp_file)
actual = result.output
# We check the contents of the diff in `test_expression_diff`. All
# we need to check here is that color codes exist in the result.
self.assertIn("\033[1m", actual)
self.assertIn("\033[36m", actual)
self.assertIn("\033[32m", actual)
self.assertIn("\033[31m", actual)
self.assertIn("\033[0m", actual)
def test_detect_pos_only_arguments(self) -> None:
source, _ = read_data("cases", "pep_570")
root = black.lib2to3_parse(source)
features = black.get_features_used(root)
self.assertIn(black.Feature.POS_ONLY_ARGUMENTS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
def test_detect_debug_f_strings(self) -> None:
root = black.lib2to3_parse("""f"{x=}" """)
features = black.get_features_used(root)
self.assertIn(black.Feature.DEBUG_F_STRINGS, features)
versions = black.detect_target_versions(root)
self.assertIn(black.TargetVersion.PY38, versions)
root = black.lib2to3_parse(
"""f"{x}"\nf'{"="}'\nf'{(x:=5)}'\nf'{f(a="3=")}'\nf'{x:=10}'\n"""
)
features = black.get_features_used(root)
self.assertNotIn(black.Feature.DEBUG_F_STRINGS, features)
root = black.lib2to3_parse(
"""f"heard a rumour that { f'{1+1=}' } ... seems like it could be true" """
)
features = black.get_features_used(root)
self.assertIn(black.Feature.DEBUG_F_STRINGS, features)
@patch("black.dump_to_file", dump_to_stderr)
def test_string_quotes(self) -> None:
source, expected = read_data("miscellaneous", "string_quotes")
mode = black.Mode(unstable=True)
assert_format(source, expected, mode)
mode = replace(mode, string_normalization=False)
not_normalized = fs(source, mode=mode)
self.assertFormatEqual(source.replace("\\\n", ""), not_normalized)
black.assert_equivalent(source, not_normalized)
black.assert_stable(source, not_normalized, mode=mode)
def test_skip_source_first_line(self) -> None:
source, _ = read_data("miscellaneous", "invalid_header")
tmp_file = Path(black.dump_to_file(source))
# Full source should fail (invalid syntax at header)
self.invokeBlack([str(tmp_file), "--diff", "--check"], exit_code=123)
# So, skipping the first line should work
result = BlackRunner().invoke(
black.main, [str(tmp_file), "-x", f"--config={EMPTY_CONFIG}"]
)
self.assertEqual(result.exit_code, 0)
actual = tmp_file.read_text(encoding="utf-8")
self.assertFormatEqual(source, actual)
def test_skip_source_first_line_when_mixing_newlines(self) -> None:
code_mixing_newlines = b"Header will be skipped\r\ni = [1,2,3]\nj = [1,2,3]\n"
expected = b"Header will be skipped\r\ni = [1, 2, 3]\nj = [1, 2, 3]\n"
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "skip_header.py"
test_file.write_bytes(code_mixing_newlines)
mode = replace(DEFAULT_MODE, skip_source_first_line=True)
ff(test_file, mode=mode, write_back=black.WriteBack.YES)
self.assertEqual(test_file.read_bytes(), expected)
def test_skip_magic_trailing_comma(self) -> None:
source, _ = read_data("cases", "expression")
expected, _ = read_data(
"miscellaneous", "expression_skip_magic_trailing_comma.diff"
)
tmp_file = Path(black.dump_to_file(source))
diff_header = re.compile(
rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d"
)
try:
result = BlackRunner().invoke(
black.main, ["-C", "--diff", str(tmp_file), f"--config={EMPTY_CONFIG}"]
)
self.assertEqual(result.exit_code, 0)
finally:
os.unlink(tmp_file)
actual = result.stdout
actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
actual = actual.rstrip() + "\n" # the diff output has a trailing space
if expected != actual:
dump = black.dump_to_file(actual)
msg = (
"Expected diff isn't equal to the actual. If you made changes to"
" expression.py and this is an anticipated difference, overwrite"
" tests/data/miscellaneous/expression_skip_magic_trailing_comma.diff"
f" with {dump}"
)
self.assertEqual(expected, actual, msg)
@patch("black.dump_to_file", dump_to_stderr)
def test_python37(self) -> None:
source_path = get_case_path("cases", "python37")
_, source, expected = read_data_from_file(source_path)
actual = fs(source)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(source, actual)
black.assert_stable(source, actual, DEFAULT_MODE)
# ensure black can parse this when the target is 3.7
self.invokeBlack([str(source_path), "--target-version", "py37"])
def test_tab_comment_indentation(self) -> None:
contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t# comment\n\tpass\n"
contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n"
self.assertFormatEqual(contents_spc, fs(contents_spc))
self.assertFormatEqual(contents_spc, fs(contents_tab))
contents_tab = "if 1:\n\tif 2:\n\t\tpass\n\t\t# comment\n\tpass\n"
contents_spc = "if 1:\n if 2:\n pass\n # comment\n pass\n"
self.assertFormatEqual(contents_spc, fs(contents_spc))
self.assertFormatEqual(contents_spc, fs(contents_tab))
def test_false_positive_symlink_output_issue_3384(self) -> None:
# Emulate the behavior when using the CLI (`black ./child --verbose`), which
# involves patching some `pathlib.Path` methods. In particular, `is_dir` is
# patched only on its first call: when checking if "./child" is a directory it
 # should return True. The "./child" folder exists relative to the cwd when
 # running from the CLI, but not when running the tests, because the cwd differs.
project_root = Path(THIS_DIR / "data" / "nested_gitignore_tests")
working_directory = project_root / "root"
with change_directory(working_directory):
# Note that the root folder (project_root) isn't the folder
# named "root" (aka working_directory)
report = MagicMock(verbose=True)
black.get_sources(
root=project_root,
src=("./child",),
quiet=False,
verbose=True,
include=DEFAULT_INCLUDE,
exclude=None,
report=report,
extend_exclude=None,
force_exclude=None,
stdin_filename=None,
)
assert not any(
mock_args[1].startswith("is a symbolic link that points outside")
for _, mock_args, _ in report.path_ignored.mock_calls
), "A symbolic link was reported."
report.path_ignored.assert_called_once_with(
Path(working_directory, "child", "b.py"),
"matches a .gitignore file content",
)
def test_report_verbose(self) -> None:
report = Report(verbose=True)
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.output._out", out), patch("black.output._err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "f1 already well formatted, good job.")
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 3)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
out_lines[-1], "f3 wasn't modified on disk since last run."
)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 3)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 4)
self.assertEqual(len(err_lines), 1)
self.assertEqual(out_lines[-1], "reformatted f3")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 4)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 5)
self.assertEqual(len(err_lines), 2)
self.assertEqual(out_lines[-1], "wat ignored: no match")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 6)
self.assertEqual(len(err_lines), 2)
self.assertEqual(out_lines[-1], "f4 already well formatted, good job.")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
report.check = False
report.diff = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
def test_report_quiet(self) -> None:
report = Report(quiet=True)
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.output._out", out), patch("black.output._err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 1)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
report.check = False
report.diff = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
def test_report_normal(self) -> None:
report = black.Report()
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.output._out", out), patch("black.output._err", err):
report.done(Path("f1"), black.Changed.NO)
self.assertEqual(len(out_lines), 0)
self.assertEqual(len(err_lines), 0)
self.assertEqual(unstyle(str(report)), "1 file left unchanged.")
self.assertEqual(report.return_code, 0)
report.done(Path("f2"), black.Changed.YES)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 1 file left unchanged."
)
report.done(Path("f3"), black.Changed.CACHED)
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 0)
self.assertEqual(out_lines[-1], "reformatted f2")
self.assertEqual(
unstyle(str(report)), "1 file reformatted, 2 files left unchanged."
)
self.assertEqual(report.return_code, 0)
report.check = True
self.assertEqual(report.return_code, 1)
report.check = False
report.failed(Path("e1"), "boom")
self.assertEqual(len(out_lines), 1)
self.assertEqual(len(err_lines), 1)
self.assertEqual(err_lines[-1], "error: cannot format e1: boom")
self.assertEqual(
unstyle(str(report)),
"1 file reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f3"), black.Changed.YES)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 1)
self.assertEqual(out_lines[-1], "reformatted f3")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 1 file failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.failed(Path("e2"), "boom")
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(err_lines[-1], "error: cannot format e2: boom")
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.path_ignored(Path("wat"), "no match")
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 2 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.done(Path("f4"), black.Changed.NO)
self.assertEqual(len(out_lines), 2)
self.assertEqual(len(err_lines), 2)
self.assertEqual(
unstyle(str(report)),
"2 files reformatted, 3 files left unchanged, 2 files failed to"
" reformat.",
)
self.assertEqual(report.return_code, 123)
report.check = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
report.check = False
report.diff = True
self.assertEqual(
unstyle(str(report)),
"2 files would be reformatted, 3 files would be left unchanged, 2"
" files would fail to reformat.",
)
def test_lib2to3_parse(self) -> None:
with self.assertRaises(black.InvalidInput):
black.lib2to3_parse("invalid syntax")
straddling = "x + y"
black.lib2to3_parse(straddling)
black.lib2to3_parse(straddling, {TargetVersion.PY36})
py2_only = "print x"
with self.assertRaises(black.InvalidInput):
black.lib2to3_parse(py2_only, {TargetVersion.PY36})
py3_only = "exec(x, end=y)"
black.lib2to3_parse(py3_only)
black.lib2to3_parse(py3_only, {TargetVersion.PY36})
def test_get_features_used_decorator(self) -> None:
# Test the feature detection of new decorator syntax
 # Since a failure here would also make some test cases of
 # test_get_features_used() fail, this is tested first so that a
 # useful case is identified.
simples, relaxed = read_data("miscellaneous", "decorators")
# skip explanation comments at the top of the file
for simple_test in simples.split("##")[1:]:
node = black.lib2to3_parse(simple_test)
decorator = str(node.children[0].children[0]).strip()
self.assertNotIn(
Feature.RELAXED_DECORATORS,
black.get_features_used(node),
msg=(
f"decorator '{decorator}' follows python<=3.8 syntax"
"but is detected as 3.9+"
# f"The full node is\n{node!r}"
),
)
# skip the '# output' comment at the top of the output part
for relaxed_test in relaxed.split("##")[1:]:
node = black.lib2to3_parse(relaxed_test)
decorator = str(node.children[0].children[0]).strip()
self.assertIn(
Feature.RELAXED_DECORATORS,
black.get_features_used(node),
msg=(
f"decorator '{decorator}' uses python3.9+ syntax"
"but is detected as python<=3.8"
# f"The full node is\n{node!r}"
),
)
def test_get_features_used(self) -> None:
self.check_features_used("def f(*, arg): ...\n", set())
self.check_features_used(
"def f(*, arg,): ...\n", {Feature.TRAILING_COMMA_IN_DEF}
)
self.check_features_used("f(*arg,)\n", {Feature.TRAILING_COMMA_IN_CALL})
self.check_features_used("def f(*, arg): f'string'\n", {Feature.F_STRINGS})
self.check_features_used("123_456\n", {Feature.NUMERIC_UNDERSCORES})
self.check_features_used("123456\n", set())
source, expected = read_data("cases", "function")
expected_features = {
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.F_STRINGS,
}
self.check_features_used(source, expected_features)
self.check_features_used(expected, expected_features)
source, expected = read_data("cases", "expression")
self.check_features_used(source, set())
self.check_features_used(expected, set())
self.check_features_used("lambda a, /, b: ...\n", {Feature.POS_ONLY_ARGUMENTS})
self.check_features_used("def fn(a, /, b): ...", {Feature.POS_ONLY_ARGUMENTS})
self.check_features_used("def fn(): yield a, b", set())
self.check_features_used("def fn(): return a, b", set())
self.check_features_used("def fn(): yield *b, c", {Feature.UNPACKING_ON_FLOW})
self.check_features_used(
"def fn(): return a, *b, c", {Feature.UNPACKING_ON_FLOW}
)
self.check_features_used("x = a, *b, c", set())
self.check_features_used("x: Any = regular", set())
self.check_features_used("x: Any = (regular, regular)", set())
self.check_features_used("x: Any = Complex(Type(1))[something]", set())
self.check_features_used(
"x: Tuple[int, ...] = a, b, c", {Feature.ANN_ASSIGN_EXTENDED_RHS}
)
self.check_features_used("try: pass\nexcept Something: pass", set())
self.check_features_used("try: pass\nexcept (*Something,): pass", set())
self.check_features_used(
"try: pass\nexcept *Group: pass", {Feature.EXCEPT_STAR}
)
self.check_features_used("a[*b]", {Feature.VARIADIC_GENERICS})
self.check_features_used("a[x, *y(), z] = t", {Feature.VARIADIC_GENERICS})
self.check_features_used("def fn(*args: *T): pass", {Feature.VARIADIC_GENERICS})
self.check_features_used(
"def fn(*args: *tuple[*T]): pass", {Feature.VARIADIC_GENERICS}
)
self.check_features_used("with a: pass", set())
self.check_features_used("with a, b: pass", set())
self.check_features_used("with a as b: pass", set())
self.check_features_used("with a as b, c as d: pass", set())
self.check_features_used("with (a): pass", set())
self.check_features_used("with (a, b): pass", set())
self.check_features_used("with (a, b) as (c, d): pass", set())
self.check_features_used(
"with (a as b): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"with ((a as b)): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"with (a, b as c): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"with (a, (b as c)): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"with ((a, ((b as c)))): pass", {Feature.PARENTHESIZED_CONTEXT_MANAGERS}
)
self.check_features_used(
"x = t'foo {f'bar'}'", {Feature.T_STRINGS, Feature.F_STRINGS}
)
def check_features_used(self, source: str, expected: set[Feature]) -> None:
node = black.lib2to3_parse(source)
actual = black.get_features_used(node)
msg = f"Expected {expected} but got {actual} for {source!r}"
try:
self.assertEqual(actual, expected, msg=msg)
except AssertionError:
DebugVisitor.show(node)
raise
def test_get_features_used_for_future_flags(self) -> None:
for src, features in [
("from __future__ import annotations", {Feature.FUTURE_ANNOTATIONS}),
(
"from __future__ import (other, annotations)",
{Feature.FUTURE_ANNOTATIONS},
),
("a = 1 + 2\nfrom something import annotations", set()),
("from __future__ import x, y", set()),
]:
with self.subTest(src=src, features=sorted(f.value for f in features)):
node = black.lib2to3_parse(src)
future_imports = black.get_future_imports(node)
self.assertEqual(
black.get_features_used(node, future_imports=future_imports),
features,
)
def test_get_future_imports(self) -> None:
node = black.lib2to3_parse("\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import multiple, imports\n")
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("from __future__ import (parenthesized, imports)\n")
self.assertEqual({"parenthesized", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import multiple\nfrom __future__ import imports\n"
)
self.assertEqual({"multiple", "imports"}, black.get_future_imports(node))
node = black.lib2to3_parse("# comment\nfrom __future__ import black\n")
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse('"""docstring"""\nfrom __future__ import black\n')
self.assertEqual({"black"}, black.get_future_imports(node))
node = black.lib2to3_parse("some(other, code)\nfrom __future__ import black\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse("from some.module import black\n")
self.assertEqual(set(), black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import unicode_literals as _unicode_literals"
)
self.assertEqual({"unicode_literals"}, black.get_future_imports(node))
node = black.lib2to3_parse(
"from __future__ import unicode_literals as _lol, print"
)
self.assertEqual({"unicode_literals", "print"}, black.get_future_imports(node))
@pytest.mark.incompatible_with_mypyc
def test_debug_visitor(self) -> None:
source, _ = read_data("miscellaneous", "debug_visitor")
expected, _ = read_data("miscellaneous", "debug_visitor.out")
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.debug.out", out):
DebugVisitor.show(source)
actual = "\n".join(out_lines) + "\n"
log_name = ""
if expected != actual:
log_name = black.dump_to_file(*out_lines)
self.assertEqual(
expected,
actual,
f"AST print out is different. Actual version dumped to {log_name}",
)
def test_format_file_contents(self) -> None:
mode = DEFAULT_MODE
empty = ""
with self.assertRaises(black.NothingChanged):
black.format_file_contents(empty, mode=mode, fast=False)
just_nl = "\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(just_nl, mode=mode, fast=False)
same = "j = [1, 2, 3]\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(same, mode=mode, fast=False)
different = "j = [1,2,3]"
expected = same
actual = black.format_file_contents(different, mode=mode, fast=False)
self.assertEqual(expected, actual)
invalid = "return if you can"
with self.assertRaises(black.InvalidInput) as e:
black.format_file_contents(invalid, mode=mode, fast=False)
self.assertEqual(str(e.exception), "Cannot parse: 1:7: return if you can")
just_crlf = "\r\n"
with self.assertRaises(black.NothingChanged):
black.format_file_contents(just_crlf, mode=mode, fast=False)
just_whitespace_nl = "\n\t\n \n\t \n \t\n\n"
actual = black.format_file_contents(just_whitespace_nl, mode=mode, fast=False)
self.assertEqual("\n", actual)
just_whitespace_crlf = "\r\n\t\r\n \r\n\t \r\n \t\r\n\r\n"
actual = black.format_file_contents(just_whitespace_crlf, mode=mode, fast=False)
self.assertEqual("\r\n", actual)
def test_endmarker(self) -> None:
n = black.lib2to3_parse("\n")
self.assertEqual(n.type, black.syms.file_input)
self.assertEqual(len(n.children), 1)
self.assertEqual(n.children[0].type, black.token.ENDMARKER)
@patch("tests.conftest.PRINT_FULL_TREE", True)
@patch("tests.conftest.PRINT_TREE_DIFF", False)
@pytest.mark.incompatible_with_mypyc
def test_assertFormatEqual_print_full_tree(self) -> None:
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.output._out", out), patch("black.output._err", err):
with self.assertRaises(AssertionError):
self.assertFormatEqual("j = [1, 2, 3]", "j = [1, 2, 3,]")
out_str = "".join(out_lines)
self.assertIn("Expected tree:", out_str)
self.assertIn("Actual tree:", out_str)
self.assertEqual("".join(err_lines), "")
@patch("tests.conftest.PRINT_FULL_TREE", False)
@patch("tests.conftest.PRINT_TREE_DIFF", True)
@pytest.mark.incompatible_with_mypyc
def test_assertFormatEqual_print_tree_diff(self) -> None:
out_lines = []
err_lines = []
def out(msg: str, **kwargs: Any) -> None:
out_lines.append(msg)
def err(msg: str, **kwargs: Any) -> None:
err_lines.append(msg)
with patch("black.output._out", out), patch("black.output._err", err):
with self.assertRaises(AssertionError):
self.assertFormatEqual("j = [1, 2, 3]\n", "j = [1, 2, 3,]\n")
out_str = "".join(out_lines)
self.assertIn("Tree Diff:", out_str)
self.assertIn("+ COMMA", out_str)
self.assertIn("+ ','", out_str)
self.assertEqual("".join(err_lines), "")
@event_loop()
@patch("concurrent.futures.ProcessPoolExecutor", MagicMock(side_effect=OSError))
def test_works_in_mono_process_only_environment(self) -> None:
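 # ProcessPoolExecutor is patched to raise OSError, emulating a platform without
 # multiprocessing; Black should still format both files successfully.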
with cache_dir() as workspace:
for f in [
(workspace / "one.py").resolve(),
(workspace / "two.py").resolve(),
]:
f.write_text('print("hello")\n', encoding="utf-8")
self.invokeBlack([str(workspace)])
@event_loop()
def test_check_diff_use_together(self) -> None:
with cache_dir():
# Files which will be reformatted.
src1 = get_case_path("miscellaneous", "string_quotes")
self.invokeBlack([str(src1), "--diff", "--check"], exit_code=1)
# Files which will not be reformatted.
src2 = get_case_path("cases", "composition")
self.invokeBlack([str(src2), "--diff", "--check"])
# Multi file command.
self.invokeBlack([str(src1), str(src2), "--diff", "--check"], exit_code=1)
def test_no_src_fails(self) -> None:
with cache_dir():
self.invokeBlack([], exit_code=1)
def test_src_and_code_fails(self) -> None:
with cache_dir():
self.invokeBlack([".", "-c", "0"], exit_code=1)
def test_broken_symlink(self) -> None:
with cache_dir() as workspace:
symlink = workspace / "broken_link.py"
try:
symlink.symlink_to("nonexistent.py")
except (OSError, NotImplementedError) as e:
self.skipTest(f"Can't create symlinks: {e}")
self.invokeBlack([str(workspace.resolve())])
def test_single_file_force_pyi(self) -> None:
pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
contents, expected = read_data("miscellaneous", "force_pyi")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
path.write_text(contents, encoding="utf-8")
self.invokeBlack([str(path), "--pyi"])
actual = path.read_text(encoding="utf-8")
# verify cache with --pyi is separate
pyi_cache = black.Cache.read(pyi_mode)
assert not pyi_cache.is_changed(path)
normal_cache = black.Cache.read(DEFAULT_MODE)
assert normal_cache.is_changed(path)
self.assertFormatEqual(expected, actual)
black.assert_equivalent(contents, actual)
black.assert_stable(contents, actual, pyi_mode)
@event_loop()
def test_multi_file_force_pyi(self) -> None:
reg_mode = DEFAULT_MODE
pyi_mode = replace(DEFAULT_MODE, is_pyi=True)
contents, expected = read_data("miscellaneous", "force_pyi")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
path.write_text(contents, encoding="utf-8")
self.invokeBlack([str(p) for p in paths] + ["--pyi"])
for path in paths:
actual = path.read_text(encoding="utf-8")
self.assertEqual(actual, expected)
# verify cache with --pyi is separate
pyi_cache = black.Cache.read(pyi_mode)
normal_cache = black.Cache.read(reg_mode)
for path in paths:
assert not pyi_cache.is_changed(path)
assert normal_cache.is_changed(path)
def test_pipe_force_pyi(self) -> None:
source, expected = read_data("miscellaneous", "force_pyi")
result = CliRunner().invoke(
black.main, ["-", "-q", "--pyi"], input=BytesIO(source.encode("utf-8"))
)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
def test_single_file_force_py36(self) -> None:
reg_mode = DEFAULT_MODE
py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
source, expected = read_data("miscellaneous", "force_py36")
with cache_dir() as workspace:
path = (workspace / "file.py").resolve()
path.write_text(source, encoding="utf-8")
self.invokeBlack([str(path), *PY36_ARGS])
actual = path.read_text(encoding="utf-8")
# verify cache with --target-version is separate
py36_cache = black.Cache.read(py36_mode)
assert not py36_cache.is_changed(path)
normal_cache = black.Cache.read(reg_mode)
assert normal_cache.is_changed(path)
self.assertEqual(actual, expected)
@event_loop()
def test_multi_file_force_py36(self) -> None:
reg_mode = DEFAULT_MODE
py36_mode = replace(DEFAULT_MODE, target_versions=PY36_VERSIONS)
source, expected = read_data("miscellaneous", "force_py36")
with cache_dir() as workspace:
paths = [
(workspace / "file1.py").resolve(),
(workspace / "file2.py").resolve(),
]
for path in paths:
path.write_text(source, encoding="utf-8")
self.invokeBlack([str(p) for p in paths] + PY36_ARGS)
for path in paths:
actual = path.read_text(encoding="utf-8")
self.assertEqual(actual, expected)
# verify cache with --target-version is separate
pyi_cache = black.Cache.read(py36_mode)
normal_cache = black.Cache.read(reg_mode)
for path in paths:
assert not pyi_cache.is_changed(path)
assert normal_cache.is_changed(path)
def test_pipe_force_py36(self) -> None:
source, expected = read_data("miscellaneous", "force_py36")
result = CliRunner().invoke(
black.main,
["-", "-q", "--target-version=py36"],
input=BytesIO(source.encode("utf-8")),
)
self.assertEqual(result.exit_code, 0)
actual = result.output
self.assertFormatEqual(actual, expected)
@pytest.mark.incompatible_with_mypyc
def test_reformat_one_with_stdin(self) -> None:
with patch(
"black.format_stdin_to_stdout",
return_value=lambda *args, **kwargs: black.Changed.YES,
) as fsts:
report = MagicMock()
path = Path("-")
black.reformat_one(
path,
fast=True,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
report=report,
)
fsts.assert_called_once()
report.done.assert_called_with(path, black.Changed.YES)
@pytest.mark.incompatible_with_mypyc
def test_reformat_one_with_stdin_filename(self) -> None:
with patch(
"black.format_stdin_to_stdout",
return_value=lambda *args, **kwargs: black.Changed.YES,
) as fsts:
report = MagicMock()
p = "foo.py"
path = Path(f"__BLACK_STDIN_FILENAME__{p}")
expected = Path(p)
black.reformat_one(
path,
fast=True,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
report=report,
)
fsts.assert_called_once_with(
fast=True, write_back=black.WriteBack.YES, mode=DEFAULT_MODE, lines=()
)
# __BLACK_STDIN_FILENAME__ should have been stripped
report.done.assert_called_with(expected, black.Changed.YES)
@pytest.mark.incompatible_with_mypyc
def test_reformat_one_with_stdin_filename_pyi(self) -> None:
with patch(
"black.format_stdin_to_stdout",
return_value=lambda *args, **kwargs: black.Changed.YES,
) as fsts:
report = MagicMock()
p = "foo.pyi"
path = Path(f"__BLACK_STDIN_FILENAME__{p}")
expected = Path(p)
black.reformat_one(
path,
fast=True,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
report=report,
)
fsts.assert_called_once_with(
fast=True,
write_back=black.WriteBack.YES,
mode=replace(DEFAULT_MODE, is_pyi=True),
lines=(),
)
# __BLACK_STDIN_FILENAME__ should have been stripped
report.done.assert_called_with(expected, black.Changed.YES)
@pytest.mark.incompatible_with_mypyc
def test_reformat_one_with_stdin_filename_ipynb(self) -> None:
with patch(
"black.format_stdin_to_stdout",
return_value=lambda *args, **kwargs: black.Changed.YES,
) as fsts:
report = MagicMock()
p = "foo.ipynb"
path = Path(f"__BLACK_STDIN_FILENAME__{p}")
expected = Path(p)
black.reformat_one(
path,
fast=True,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
report=report,
)
fsts.assert_called_once_with(
fast=True,
write_back=black.WriteBack.YES,
mode=replace(DEFAULT_MODE, is_ipynb=True),
lines=(),
)
# __BLACK_STDIN_FILENAME__ should have been stripped
report.done.assert_called_with(expected, black.Changed.YES)
@pytest.mark.incompatible_with_mypyc
def test_reformat_one_with_stdin_and_existing_path(self) -> None:
with patch(
"black.format_stdin_to_stdout",
return_value=lambda *args, **kwargs: black.Changed.YES,
) as fsts:
report = MagicMock()
# Even with an existing file, since we are forcing stdin, black
 # should output to stdout and not modify the file in place
p = THIS_DIR / "data" / "cases" / "collections.py"
# Make sure is_file actually returns True
self.assertTrue(p.is_file())
path = Path(f"__BLACK_STDIN_FILENAME__{p}")
expected = Path(p)
black.reformat_one(
path,
fast=True,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
report=report,
)
fsts.assert_called_once()
# __BLACK_STDIN_FILENAME__ should have been stripped
report.done.assert_called_with(expected, black.Changed.YES)
def test_reformat_one_with_stdin_empty(self) -> None:
cases = [
("", ""),
("\n", "\n"),
("\r\n", "\r\n"),
(" \t", ""),
(" \t\n\t ", "\n"),
(" \t\r\n\t ", "\r\n"),
]
def _new_wrapper(
output: io.StringIO, io_TextIOWrapper: type[io.TextIOWrapper]
) -> Callable[[Any, Any], io.StringIO | io.TextIOWrapper]:
def get_output(*args: Any, **kwargs: Any) -> io.StringIO | io.TextIOWrapper:
if args == (sys.stdout.buffer,):
# It's `format_stdin_to_stdout()` calling `io.TextIOWrapper()`,
# return our mock object.
return output
# It's something else (i.e. `decode_bytes()`) calling
# `io.TextIOWrapper()`, pass through to the original implementation.
# See discussion in https://github.com/psf/black/pull/2489
return io_TextIOWrapper(*args, **kwargs)
return get_output
for content, expected in cases:
output = io.StringIO()
io_TextIOWrapper = io.TextIOWrapper
with patch("io.TextIOWrapper", _new_wrapper(output, io_TextIOWrapper)):
try:
black.format_stdin_to_stdout(
fast=True,
content=content,
write_back=black.WriteBack.YES,
mode=DEFAULT_MODE,
)
except io.UnsupportedOperation:
pass # StringIO does not support detach
assert output.getvalue() == expected
def test_cli_unstable(self) -> None:
self.invokeBlack(["--unstable", "-c", "0"], exit_code=0)
self.invokeBlack(["--preview", "-c", "0"], exit_code=0)
# Must also pass --preview
self.invokeBlack(
["--enable-unstable-feature", "string_processing", "-c", "0"], exit_code=1
)
self.invokeBlack(
["--preview", "--enable-unstable-feature", "string_processing", "-c", "0"],
exit_code=0,
)
self.invokeBlack(
["--unstable", "--enable-unstable-feature", "string_processing", "-c", "0"],
exit_code=0,
)
def test_invalid_cli_regex(self) -> None:
for option in ["--include", "--exclude", "--extend-exclude", "--force-exclude"]:
self.invokeBlack(["-", option, "**()(!!*)"], exit_code=2)
def test_required_version_matches_version(self) -> None:
self.invokeBlack(
["--required-version", black.__version__, "-c", "0"],
exit_code=0,
ignore_config=True,
)
def test_required_version_matches_partial_version(self) -> None:
self.invokeBlack(
["--required-version", black.__version__.split(".")[0], "-c", "0"],
exit_code=0,
ignore_config=True,
)
def test_required_version_does_not_match_on_minor_version(self) -> None:
self.invokeBlack(
["--required-version", black.__version__.split(".")[0] + ".999", "-c", "0"],
exit_code=1,
ignore_config=True,
)
def test_required_version_does_not_match_version(self) -> None:
result = BlackRunner().invoke(
black.main,
["--required-version", "20.99b", "-c", "0"],
)
self.assertEqual(result.exit_code, 1)
self.assertIn("required version", result.stderr)
def test_preserves_line_endings(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.py"
for nl in ["\n", "\r\n"]:
contents = nl.join(["def f( ):", " pass"])
test_file.write_bytes(contents.encode())
ff(test_file, write_back=black.WriteBack.YES)
updated_contents: bytes = test_file.read_bytes()
self.assertIn(nl.encode(), updated_contents)
if nl == "\n":
self.assertNotIn(b"\r\n", updated_contents)
def test_preserves_line_endings_via_stdin(self) -> None:
for nl in ["\n", "\r\n"]:
contents = nl.join(["def f( ):", " pass"])
runner = BlackRunner()
result = runner.invoke(
black.main, ["-", "--fast"], input=BytesIO(contents.encode("utf-8"))
)
self.assertEqual(result.exit_code, 0)
output = result.stdout_bytes
self.assertIn(nl.encode("utf-8"), output)
if nl == "\n":
self.assertNotIn(b"\r\n", output)
def test_normalize_line_endings(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.py"
for data, expected in (
(b"c\r\nc\n ", b"c\r\nc\r\n"),
(b"l\nl\r\n ", b"l\nl\n"),
):
test_file.write_bytes(data)
ff(test_file, write_back=black.WriteBack.YES)
self.assertEqual(test_file.read_bytes(), expected)
def test_root_logger_not_used_directly(self) -> None:
def fail(*args: Any, **kwargs: Any) -> None:
self.fail("Record created with root logger")
with patch.multiple(
logging.root,
debug=fail,
info=fail,
warning=fail,
error=fail,
critical=fail,
log=fail,
):
ff(THIS_DIR / "util.py")
def test_invalid_config_return_code(self) -> None:
tmp_file = Path(black.dump_to_file())
try:
tmp_config = Path(black.dump_to_file())
tmp_config.unlink()
args = ["--config", str(tmp_config), str(tmp_file)]
self.invokeBlack(args, exit_code=2, ignore_config=False)
finally:
tmp_file.unlink()
def test_parse_pyproject_toml(self) -> None:
test_toml_file = THIS_DIR / "test.toml"
config = black.parse_pyproject_toml(str(test_toml_file))
self.assertEqual(config["verbose"], 1)
self.assertEqual(config["check"], "no")
self.assertEqual(config["diff"], "y")
self.assertEqual(config["color"], True)
self.assertEqual(config["line_length"], 79)
self.assertEqual(config["target_version"], ["py36", "py37", "py38"])
self.assertEqual(config["python_cell_magics"], ["custom1", "custom2"])
self.assertEqual(config["exclude"], r"\.pyi?$")
self.assertEqual(config["include"], r"\.py?$")
def test_spellcheck_pyproject_toml(self) -> None:
test_toml_file = THIS_DIR / "data" / "incorrect_spelling.toml"
result = BlackRunner().invoke(
black.main,
[
"--code=print('hello world')",
"--verbose",
f"--config={str(test_toml_file)}",
],
)
assert (
r"Invalid config keys detected: 'ine_length', 'target_ersion' (in"
rf" {test_toml_file})" in result.stderr
)
def test_parse_pyproject_toml_project_metadata(self) -> None:
for test_toml, expected in [
("only_black_pyproject.toml", ["py310"]),
("only_metadata_pyproject.toml", ["py37", "py38", "py39", "py310"]),
("neither_pyproject.toml", None),
("both_pyproject.toml", ["py310"]),
]:
test_toml_file = THIS_DIR / "data" / "project_metadata" / test_toml
config = black.parse_pyproject_toml(str(test_toml_file))
self.assertEqual(config.get("target_version"), expected)
def test_infer_target_version(self) -> None:
for version, expected in [
("3.6", [TargetVersion.PY36]),
("3.11.0rc1", [TargetVersion.PY311]),
(
">=3.10",
[
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
(
">=3.10.6",
[
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
("<3.6", [TargetVersion.PY33, TargetVersion.PY34, TargetVersion.PY35]),
(">3.7,<3.10", [TargetVersion.PY38, TargetVersion.PY39]),
(
">3.7,!=3.8,!=3.9",
[
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
(
"> 3.9.4, != 3.10.3",
[
TargetVersion.PY39,
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
(
"!=3.3,!=3.4",
[
TargetVersion.PY35,
TargetVersion.PY36,
TargetVersion.PY37,
TargetVersion.PY38,
TargetVersion.PY39,
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
(
"==3.*",
[
TargetVersion.PY33,
TargetVersion.PY34,
TargetVersion.PY35,
TargetVersion.PY36,
TargetVersion.PY37,
TargetVersion.PY38,
TargetVersion.PY39,
TargetVersion.PY310,
TargetVersion.PY311,
TargetVersion.PY312,
TargetVersion.PY313,
TargetVersion.PY314,
],
),
("==3.8.*", [TargetVersion.PY38]),
(None, None),
("", None),
("invalid", None),
("==invalid", None),
(">3.9,!=invalid", None),
("3", None),
("3.2", None),
("2.7.18", None),
("==2.7", None),
(">3.10,<3.11", None),
]:
test_toml = {"project": {"requires-python": version}}
result = black.files.infer_target_version(test_toml)
self.assertEqual(result, expected)
def test_read_pyproject_toml(self) -> None:
test_toml_file = THIS_DIR / "test.toml"
fake_ctx = FakeContext()
black.read_pyproject_toml(fake_ctx, FakeParameter(), str(test_toml_file))
config = fake_ctx.default_map
self.assertEqual(config["verbose"], "1")
self.assertEqual(config["check"], "no")
self.assertEqual(config["diff"], "y")
self.assertEqual(config["color"], "True")
self.assertEqual(config["line_length"], "79")
self.assertEqual(config["target_version"], ["py36", "py37", "py38"])
self.assertEqual(config["exclude"], r"\.pyi?$")
self.assertEqual(config["include"], r"\.py?$")
def test_read_pyproject_toml_from_stdin(self) -> None:
with TemporaryDirectory() as workspace:
root = Path(workspace)
src_dir = root / "src"
src_dir.mkdir()
src_pyproject = src_dir / "pyproject.toml"
src_pyproject.touch()
test_toml_content = (THIS_DIR / "test.toml").read_text(encoding="utf-8")
src_pyproject.write_text(test_toml_content, encoding="utf-8")
src_python = src_dir / "foo.py"
src_python.touch()
fake_ctx = FakeContext()
fake_ctx.params["src"] = ("-",)
fake_ctx.params["stdin_filename"] = str(src_python)
with change_directory(root):
black.read_pyproject_toml(fake_ctx, FakeParameter(), None)
config = fake_ctx.default_map
self.assertEqual(config["verbose"], "1")
self.assertEqual(config["check"], "no")
self.assertEqual(config["diff"], "y")
self.assertEqual(config["color"], "True")
self.assertEqual(config["line_length"], "79")
self.assertEqual(config["target_version"], ["py36", "py37", "py38"])
self.assertEqual(config["exclude"], r"\.pyi?$")
self.assertEqual(config["include"], r"\.py?$")
@pytest.mark.incompatible_with_mypyc
def test_find_project_root(self) -> None:
with TemporaryDirectory() as workspace:
root = Path(workspace)
test_dir = root / "test"
test_dir.mkdir()
src_dir = root / "src"
src_dir.mkdir()
root_pyproject = root / "pyproject.toml"
root_pyproject.write_text("[tool.black]", encoding="utf-8")
src_pyproject = src_dir / "pyproject.toml"
src_pyproject.write_text("[tool.black]", encoding="utf-8")
src_python = src_dir / "foo.py"
src_python.touch()
self.assertEqual(
black.find_project_root((src_dir, test_dir)),
(root.resolve(), "pyproject.toml"),
)
self.assertEqual(
black.find_project_root((src_dir,)),
(src_dir.resolve(), "pyproject.toml"),
)
self.assertEqual(
black.find_project_root((src_python,)),
(src_dir.resolve(), "pyproject.toml"),
)
with change_directory(test_dir):
self.assertEqual(
black.find_project_root(("-",), stdin_filename="../src/a.py"),
(src_dir.resolve(), "pyproject.toml"),
)
src_sub = src_dir / "sub"
src_sub.mkdir()
src_sub_pyproject = src_sub / "pyproject.toml"
src_sub_pyproject.touch() # empty
src_sub_python = src_sub / "bar.py"
# we skip src_sub_pyproject since it is missing the [tool.black] section
self.assertEqual(
black.find_project_root((src_sub_python,)),
(src_dir.resolve(), "pyproject.toml"),
)
@patch(
"black.files.find_user_pyproject_toml",
)
def test_find_pyproject_toml(self, find_user_pyproject_toml: MagicMock) -> None:
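 # Locating the user-level pyproject.toml fails; Black should warn and
 # continue as if no user configuration exists.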
find_user_pyproject_toml.side_effect = RuntimeError()
with redirect_stderr(io.StringIO()) as stderr:
result = black.files.find_pyproject_toml(
path_search_start=(str(Path.cwd().root),)
)
assert result is None
err = stderr.getvalue()
assert "Ignoring user configuration" in err
@patch(
"black.files.find_user_pyproject_toml",
black.files.find_user_pyproject_toml.__wrapped__,
)
def test_find_user_pyproject_toml_linux(self) -> None:
if system() == "Windows":
return
# Test if XDG_CONFIG_HOME is checked
with TemporaryDirectory() as workspace:
tmp_user_config = Path(workspace) / "black"
with patch.dict("os.environ", {"XDG_CONFIG_HOME": workspace}):
self.assertEqual(
black.files.find_user_pyproject_toml(), tmp_user_config.resolve()
)
# Test fallback for XDG_CONFIG_HOME
with patch.dict("os.environ"):
os.environ.pop("XDG_CONFIG_HOME", None)
fallback_user_config = Path("~/.config").expanduser() / "black"
self.assertEqual(
black.files.find_user_pyproject_toml(), fallback_user_config.resolve()
)
def test_find_user_pyproject_toml_windows(self) -> None:
if system() != "Windows":
return
user_config_path = Path.home() / ".black"
self.assertEqual(
black.files.find_user_pyproject_toml(), user_config_path.resolve()
)
def test_bpo_33660_workaround(self) -> None:
if system() == "Windows":
return
root = Path("/")
path = Path("workspace") / "project"
report = black.Report(verbose=True)
resolves_outside = black.resolves_outside_root_or_cannot_stat(
path, root, report
)
self.assertIs(resolves_outside, False)
def test_normalize_path_ignore_windows_junctions_outside_of_root(self) -> None:
if system() != "Windows":
return
with TemporaryDirectory() as workspace:
root = Path(workspace)
junction_dir = root / "junction"
junction_target_outside_of_root = root / ".."
os.system(f"mklink /J {junction_dir} {junction_target_outside_of_root}")
report = black.Report(verbose=True)
resolves_outside = black.resolves_outside_root_or_cannot_stat(
junction_dir, root, report
)
# Manually delete for Python < 3.8
os.system(f"rmdir {junction_dir}")
self.assertIs(resolves_outside, True)
def test_newline_comment_interaction(self) -> None:
source = "class A:\\\r\n# type: ignore\n pass\n"
output = black.format_str(source, mode=DEFAULT_MODE)
black.assert_stable(source, output, mode=DEFAULT_MODE)
def test_bpo_2142_workaround(self) -> None:
# https://bugs.python.org/issue2142
source, _ = read_data("miscellaneous", "missing_final_newline")
# read_data adds a trailing newline
source = source.rstrip()
expected, _ = read_data("miscellaneous", "missing_final_newline.diff")
tmp_file = Path(black.dump_to_file(source, ensure_final_newline=False))
diff_header = re.compile(
rf"{re.escape(str(tmp_file))}\t\d\d\d\d-\d\d-\d\d "
r"\d\d:\d\d:\d\d\.\d\d\d\d\d\d\+\d\d:\d\d"
)
try:
result = BlackRunner().invoke(black.main, ["--diff", str(tmp_file)])
self.assertEqual(result.exit_code, 0)
finally:
os.unlink(tmp_file)
actual = result.stdout
actual = diff_header.sub(DETERMINISTIC_HEADER, actual)
self.assertEqual(actual, expected)
@staticmethod
def compare_results(
result: click.testing.Result, expected_value: str, expected_exit_code: int
) -> None:
"""Helper method to test the value and exit code of a click Result."""
assert (
result.stdout == expected_value
), "The output did not match the expected value."
assert result.exit_code == expected_exit_code, "The exit code is incorrect."
def test_code_option(self) -> None:
"""Test the code option with no changes."""
code = 'print("Hello world")\n'
args = ["--code", code]
result = CliRunner().invoke(black.main, args)
self.compare_results(result, code, 0)
def test_code_option_changed(self) -> None:
"""Test the code option when changes are required."""
code = "print('hello world')"
formatted = black.format_str(code, mode=DEFAULT_MODE)
args = ["--code", code]
result = CliRunner().invoke(black.main, args)
self.compare_results(result, formatted, 0)
def test_code_option_check(self) -> None:
"""Test the code option when check is passed."""
args = ["--check", "--code", 'print("Hello world")\n']
result = CliRunner().invoke(black.main, args)
self.compare_results(result, "", 0)
def test_code_option_check_changed(self) -> None:
"""Test the code option when changes are required, and check is passed."""
args = ["--check", "--code", "print('hello world')"]
result = CliRunner().invoke(black.main, args)
self.compare_results(result, "", 1)
def test_code_option_diff(self) -> None:
"""Test the code option when diff is passed."""
code = "print('hello world')"
formatted = black.format_str(code, mode=DEFAULT_MODE)
result_diff = diff(code, formatted, "STDIN", "STDOUT")
args = ["--diff", "--code", code]
result = CliRunner().invoke(black.main, args)
# Remove time from diff
output = DIFF_TIME.sub("", result.output)
assert output == result_diff, "The output did not match the expected value."
assert result.exit_code == 0, "The exit code is incorrect."
def test_code_option_color_diff(self) -> None:
"""Test the code option when color and diff are passed."""
code = "print('hello world')"
formatted = black.format_str(code, mode=DEFAULT_MODE)
result_diff = diff(code, formatted, "STDIN", "STDOUT")
result_diff = color_diff(result_diff)
args = ["--diff", "--color", "--code", code]
result = CliRunner().invoke(black.main, args)
# Remove time from diff
output = DIFF_TIME.sub("", result.output)
assert output == result_diff, "The output did not match the expected value."
assert result.exit_code == 0, "The exit code is incorrect."
@pytest.mark.incompatible_with_mypyc
def test_code_option_safe(self) -> None:
"""Test that the code option throws an error when the sanity checks fail."""
# Patch black.assert_equivalent to ensure the sanity checks fail
with patch.object(black, "assert_equivalent", side_effect=AssertionError):
code = 'print("Hello world")'
error_msg = f"{code}\nerror: cannot format <string>: \n"
args = ["--safe", "--code", code]
result = CliRunner().invoke(black.main, args)
assert error_msg == result.output
assert result.exit_code == 123
def test_code_option_fast(self) -> None:
"""Test that the code option ignores errors when the sanity checks fail."""
# Patch black.assert_equivalent to ensure the sanity checks fail
with patch.object(black, "assert_equivalent", side_effect=AssertionError):
code = 'print("Hello world")'
formatted = black.format_str(code, mode=DEFAULT_MODE)
args = ["--fast", "--code", code]
result = CliRunner().invoke(black.main, args)
self.compare_results(result, formatted, 0)
@pytest.mark.incompatible_with_mypyc
def test_code_option_config(self) -> None:
"""
Test that the code option finds the pyproject.toml in the current directory.
"""
with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
args = ["--code", "print"]
# This is the only directory known to contain a pyproject.toml
with change_directory(PROJECT_ROOT):
CliRunner().invoke(black.main, args)
pyproject_path = Path(Path.cwd(), "pyproject.toml").resolve()
assert (
len(parse.mock_calls) >= 1
), "Expected config parse to be called with the current directory."
_, call_args, _ = parse.mock_calls[0]
assert (
call_args[0].lower() == str(pyproject_path).lower()
), "Incorrect config loaded."
@pytest.mark.incompatible_with_mypyc
def test_code_option_parent_config(self) -> None:
"""
Test that the code option finds the pyproject.toml in the parent directory.
"""
with patch.object(black, "parse_pyproject_toml", return_value={}) as parse:
with change_directory(THIS_DIR):
args = ["--code", "print"]
CliRunner().invoke(black.main, args)
pyproject_path = Path(Path().cwd().parent, "pyproject.toml").resolve()
assert (
len(parse.mock_calls) >= 1
), "Expected config parse to be called with the current directory."
_, call_args, _ = parse.mock_calls[0]
assert (
call_args[0].lower() == str(pyproject_path).lower()
), "Incorrect config loaded."
def test_for_handled_unexpected_eof_error(self) -> None:
"""
Test that an unexpected EOF SyntaxError is nicely presented.
"""
with pytest.raises(black.parsing.InvalidInput) as exc_info:
black.lib2to3_parse("print(", {})
exc_info.match("Cannot parse: 1:6: Unexpected EOF in multi-line statement")
def test_line_ranges_with_code_option(self) -> None:
code = textwrap.dedent("""\
if a == b:
print ( "OK" )
""")
args = ["--line-ranges=1-1", "--code", code]
result = CliRunner().invoke(black.main, args)
expected = textwrap.dedent("""\
if a == b:
print ( "OK" )
""")
self.compare_results(result, expected, expected_exit_code=0)
def test_line_ranges_with_stdin(self) -> None:
code = textwrap.dedent("""\
if a == b:
print ( "OK" )
""")
runner = BlackRunner()
result = runner.invoke(
black.main, ["--line-ranges=1-1", "-"], input=BytesIO(code.encode("utf-8"))
)
expected = textwrap.dedent("""\
if a == b:
print ( "OK" )
""")
self.compare_results(result, expected, expected_exit_code=0)
def test_line_ranges_with_source(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.py"
test_file.write_text(
textwrap.dedent("""\
if a == b:
print ( "OK" )
"""),
encoding="utf-8",
)
args = ["--line-ranges=1-1", str(test_file)]
result = CliRunner().invoke(black.main, args)
assert not result.exit_code
formatted = test_file.read_text(encoding="utf-8")
expected = textwrap.dedent("""\
if a == b:
print ( "OK" )
""")
assert expected == formatted
def test_line_ranges_with_multiple_sources(self) -> None:
with TemporaryDirectory() as workspace:
test1_file = Path(workspace) / "test1.py"
test1_file.write_text("", encoding="utf-8")
test2_file = Path(workspace) / "test2.py"
test2_file.write_text("", encoding="utf-8")
args = ["--line-ranges=1-1", str(test1_file), str(test2_file)]
result = CliRunner().invoke(black.main, args)
assert result.exit_code == 1
assert "Cannot use --line-ranges to format multiple files" in result.output
def test_line_ranges_with_ipynb(self) -> None:
with TemporaryDirectory() as workspace:
test_file = Path(workspace) / "test.ipynb"
test_file.write_text("{}", encoding="utf-8")
args = ["--line-ranges=1-1", "--ipynb", str(test_file)]
result = CliRunner().invoke(black.main, args)
assert "Cannot use --line-ranges with ipynb files" in result.output
assert result.exit_code == 1
def test_line_ranges_in_pyproject_toml(self) -> None:
config = THIS_DIR / "data" / "invalid_line_ranges.toml"
result = BlackRunner().invoke(
black.main, ["--code", "print()", "--config", str(config)]
)
assert result.exit_code == 2
assert result.stderr_bytes is not None
assert (
b"Cannot use line-ranges in the pyproject.toml file." in result.stderr_bytes
)
def test_lines_with_leading_tabs_expanded(self) -> None:
# See CVE-2024-21503. Mostly test that this completes in a reasonable
# time.
payload = "\t" * 10_000
assert lines_with_leading_tabs_expanded(payload) == [payload]
tab = " " * 8
assert lines_with_leading_tabs_expanded("\tx") == [f"{tab}x"]
assert lines_with_leading_tabs_expanded("\t\tx") == [f"{tab}{tab}x"]
assert lines_with_leading_tabs_expanded("\tx\n y") == [f"{tab}x", " y"]
def test_carriage_return_edge_cases(self) -> None:
# These tests are here instead of in the normal cases because
# of git's newline normalization and because it's hard to
# get `\r` vs `\r\n` vs `\n` to display properly
assert (
black.format_str(
"try:\\\r# type: ignore\n pass\nfinally:\n pass\n",
mode=black.FileMode(),
)
== "try: # type: ignore\n pass\nfinally:\n pass\n"
)
assert black.format_str("{\r}", mode=black.FileMode()) == "{}\n"
assert black.format_str("pass #\r#\n", mode=black.FileMode()) == "pass #\n#\n"
assert black.format_str("x=\\\r\n1", mode=black.FileMode()) == "x = 1\n"
assert black.format_str("x=\\\n1", mode=black.FileMode()) == "x = 1\n"
assert black.format_str("x=\\\r1", mode=black.FileMode()) == "x = 1\n"
assert (
black.format_str("class A\\\r\n:...", mode=black.FileMode())
== "class A: ...\n"
)
assert (
black.format_str("class A\\\n:...", mode=black.FileMode())
== "class A: ...\n"
)
assert (
black.format_str("class A\\\r:...", mode=black.FileMode())
== "class A: ...\n"
)
def test_preview_newline_type_detection(self) -> None:
mode = Mode(enabled_features={Preview.normalize_cr_newlines})
newline_types = ["A\n", "A\r\n", "A\r"]
for test_case in itertools.permutations(newline_types):
assert black.format_str("".join(test_case), mode=mode) == test_case[0] * 3
|
BlackTestCase
|
python
|
walkccc__LeetCode
|
solutions/3085. Minimum Deletions to Make String K-Special/3085.py
|
{
"start": 0,
"end": 388
}
|
class ____:
def minimumDeletions(self, word: str, k: int) -> int:
ans = math.inf
count = collections.Counter(word)
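 # Try each existing frequency as the smallest frequency kept in the answer.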
for minFreq in count.values():
deletions = 0
for freq in count.values():
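 # Characters rarer than minFreq are deleted entirely; more frequent
 # ones are trimmed down to at most minFreq + k.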
if freq < minFreq:
deletions += freq
else:
deletions += max(0, freq - (minFreq + k))
ans = min(ans, deletions)
return ans
|
Solution
|
python
|
pypa__warehouse
|
tests/unit/test_views.py
|
{
"start": 17028,
"end": 28411
}
|
class ____:
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_a_query(
self, monkeypatch, pyramid_services, db_request, metrics, page
):
params = MultiDict({"q": "foo bar"})
if page is not None:
params["page"] = page
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
db_request.opensearch = pretend.stub()
opensearch_query = pretend.stub()
get_opensearch_query = pretend.call_recorder(lambda *a, **kw: opensearch_query)
monkeypatch.setattr(views, "get_opensearch_query", get_opensearch_query)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "OpenSearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
assert search(db_request) == {
"page": page_obj,
"term": params.get("q", ""),
"order": "",
"applied_filters": [],
"available_filters": [],
}
assert get_opensearch_query.calls == [
pretend.call(db_request.opensearch, params.get("q"), "", [])
]
assert page_cls.calls == [
pretend.call(opensearch_query, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert metrics.histogram.calls == [
pretend.call("warehouse.views.search.results", 1000)
]
@pytest.mark.parametrize("page", [None, 1, 5])
def test_with_classifiers(
self, monkeypatch, pyramid_services, db_request, metrics, page
):
params = MultiDict([("q", "foo bar"), ("c", "foo :: bar"), ("c", "fiz :: buz")])
if page is not None:
params["page"] = page
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
opensearch_query = pretend.stub()
db_request.opensearch = pretend.stub()
get_opensearch_query = pretend.call_recorder(lambda *a, **kw: opensearch_query)
monkeypatch.setattr(views, "get_opensearch_query", get_opensearch_query)
classifier1 = ClassifierFactory.create(classifier="foo :: bar")
classifier2 = ClassifierFactory.create(classifier="foo :: baz")
classifier3 = ClassifierFactory.create(classifier="fiz :: buz")
project = ProjectFactory.create()
release1 = ReleaseFactory.create(project=project)
release1.created = datetime.date(2011, 1, 1)
release1._classifiers.append(classifier1)
release1._classifiers.append(classifier2)
page_obj = pretend.stub(page_count=(page or 1) + 10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "OpenSearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
search_view = search(db_request)
assert search_view == {
"page": page_obj,
"term": params.get("q", ""),
"order": "",
"applied_filters": params.getall("c"),
"available_filters": [
{
"foo": {
classifier1.classifier.split(" :: ")[1]: {},
classifier2.classifier.split(" :: ")[1]: {},
}
}
],
}
assert ("fiz", [classifier3.classifier]) not in search_view["available_filters"]
assert page_cls.calls == [
pretend.call(opensearch_query, url_maker=url_maker, page=page or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert get_opensearch_query.calls == [
pretend.call(db_request.opensearch, params.get("q"), "", params.getall("c"))
]
assert metrics.histogram.calls == [
pretend.call("warehouse.views.search.results", 1000)
]
def test_returns_404_with_pagenum_too_high(
self, monkeypatch, pyramid_services, db_request, metrics
):
params = MultiDict({"page": 15})
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
opensearch_query = pretend.stub()
db_request.opensearch = pretend.stub(query=lambda *a, **kw: opensearch_query)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "OpenSearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
with pytest.raises(HTTPNotFound):
search(db_request)
assert page_cls.calls == [
pretend.call(opensearch_query, url_maker=url_maker, page=15 or 1)
]
assert url_maker_factory.calls == [pretend.call(db_request)]
assert metrics.histogram.calls == []
def test_raises_400_with_pagenum_type_str(
self, monkeypatch, pyramid_services, db_request, metrics
):
params = MultiDict({"page": "abc"})
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
opensearch_query = pretend.stub()
db_request.opensearch = pretend.stub(query=lambda *a, **kw: opensearch_query)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "OpenSearchPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
with pytest.raises(HTTPBadRequest):
search(db_request)
assert page_cls.calls == []
assert metrics.histogram.calls == []
def test_return_413_when_query_too_long(
self, pyramid_services, db_request, metrics
):
params = MultiDict({"q": "a" * 1001})
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
with pytest.raises(HTTPRequestEntityTooLarge):
search(db_request)
assert metrics.increment.calls == [
pretend.call("warehouse.search.ratelimiter.hit"),
pretend.call("warehouse.views.search.error", tags=["error:query_too_long"]),
]
def test_returns_503_when_opensearch_unavailable(
self, monkeypatch, pyramid_services, db_request, metrics
):
params = MultiDict({"page": 15})
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: True, hit=lambda *a: True, resets_in=lambda *a: None
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
opensearch_query = pretend.stub()
db_request.opensearch = pretend.stub(query=lambda *a, **kw: opensearch_query)
def raiser(*args, **kwargs):
raise opensearchpy.ConnectionError()
monkeypatch.setattr(views, "OpenSearchPage", raiser)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
with pytest.raises(HTTPServiceUnavailable):
search(db_request)
assert url_maker_factory.calls == [pretend.call(db_request)]
assert metrics.increment.calls == [
pretend.call("warehouse.search.ratelimiter.hit"),
pretend.call("warehouse.views.search.error"),
]
assert metrics.histogram.calls == []
@pytest.mark.parametrize("resets_in", [None, 1, 5])
def test_returns_429_when_ratelimited(
self, monkeypatch, pyramid_services, db_request, metrics, resets_in
):
params = MultiDict({"q": "foo bar"})
db_request.params = params
fake_rate_limiter = pretend.stub(
test=lambda *a: False,
hit=lambda *a: True,
resets_in=lambda *a: (
None
if resets_in is None
else pretend.stub(total_seconds=lambda *a: resets_in)
),
)
pyramid_services.register_service(
fake_rate_limiter, IRateLimiter, None, name="search"
)
with pytest.raises(HTTPTooManyRequests) as exc_info:
search(db_request)
message = (
"Your search query could not be performed because there were too "
"many requests by the client."
)
if resets_in is not None:
message += f" Limit may reset in {resets_in} seconds."
assert exc_info.value.args[0] == message
assert metrics.increment.calls == [
pretend.call("warehouse.search.ratelimiter.exceeded")
]
def test_classifiers(db_request):
assert list_classifiers(db_request) == {"classifiers": sorted_classifiers}
def test_stats(db_request):
project = ProjectFactory.create()
release1 = ReleaseFactory.create(project=project)
release1.created = datetime.date(2011, 1, 1)
FileFactory.create(
release=release1,
filename=f"{project.name}-{release1.version}.tar.gz",
python_version="source",
size=69,
)
assert stats(db_request) == {
"total_packages_size": 69,
"top_packages": {project.name: {"size": 69}},
}
def test_health():
request = pretend.stub(
db=pretend.stub(execute=pretend.call_recorder(lambda q: None))
)
assert health(request) == "OK"
assert len(request.db.execute.calls) == 1
assert len(request.db.execute.calls[0].args) == 1
assert len(request.db.execute.calls[0].kwargs) == 0
assert isinstance(
request.db.execute.calls[0].args[0], sqlalchemy.sql.expression.TextClause
)
assert request.db.execute.calls[0].args[0].text == "SELECT 1"
|
TestSearch
|
python
|
apache__airflow
|
providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_container_instances.py
|
{
"start": 26434,
"end": 26711
}
|
class ____:
def __init__(self) -> None:
self.values: MutableMapping[str, Any | None] = {}
def xcom_push(self, key: str, value: Any | None) -> None:
self.values[key] = value
def xcom_pull(self, key: str) -> Any:
return self.values[key]
|
XcomMock
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-sarvam/llama_index/llms/sarvam/base.py
|
{
"start": 472,
"end": 2772
}
|
class ____(OpenAILike):
"""
Sarvam LLM.
To instantiate the `Sarvam` class, you will need to provide an API key. You can set the API key either as an environment variable `SARVAM_API_KEY` or directly in the class
    constructor. If setting it in the class constructor, pass it as the ``api_key`` argument (see the example below).
    If you haven't signed up for an API key yet, you can do so on the Sarvam website (https://sarvam.ai). Once you have your API key, you can use the `Sarvam` class to interact
with the LLM for tasks like chatting, streaming, and completing prompts.
Examples:
`pip install llama-index-llms-sarvam`
```python
from llama_index.llms.sarvam import Sarvam
llm = Sarvam(
api_key="<your-api-key>",
max_tokens=256,
context_window=4096,
model="sarvam-m",
)
response = llm.complete("Hello World!")
print(response)
```
"""
model: str = Field(description="The Sarvam model to use.")
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "SARVAM_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "SARVAM_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Sarvam_LLM"
|
Sarvam
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/chunk_key_encodings.py
|
{
"start": 2145,
"end": 2796
}
|
class ____(ChunkKeyEncoding):
name: ClassVar[Literal["default"]] = "default"
separator: SeparatorLiteral = "/"
def __post_init__(self) -> None:
separator_parsed = parse_separator(self.separator)
object.__setattr__(self, "separator", separator_parsed)
def decode_chunk_key(self, chunk_key: str) -> tuple[int, ...]:
if chunk_key == "c":
return ()
return tuple(map(int, chunk_key[1:].split(self.separator)))
def encode_chunk_key(self, chunk_coords: tuple[int, ...]) -> str:
return self.separator.join(map(str, ("c",) + chunk_coords))
@dataclass(frozen=True)
|
DefaultChunkKeyEncoding
|
python
|
pola-rs__polars
|
py-polars/src/polars/exceptions.py
|
{
"start": 5039,
"end": 5500
}
|
class ____(PolarsWarning):
"""
Warning issued when a chrono format string contains dubious patterns.
Polars uses Rust's chrono crate to convert between string data and temporal data.
The patterns used by chrono differ slightly from Python's built-in datetime module.
Refer to the `chrono strftime documentation
<https://docs.rs/chrono/latest/chrono/format/strftime/index.html>`_ for the full
specification.
"""
|
ChronoFormatWarning
|
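The warning class above only documents a category; a hedged usage sketch for silencing it around a parse call (the series contents and format string are illustrative, and assume a recent polars install):

import warnings

import polars as pl
from polars.exceptions import ChronoFormatWarning

with warnings.catch_warnings():
    warnings.simplefilter("ignore", ChronoFormatWarning)
    # Parse timestamps while suppressing any dubious-format warnings from chrono.
    parsed = pl.Series(["2024-01-02 03:04:05"]).str.to_datetime("%Y-%m-%d %H:%M:%S")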
python
|
gevent__gevent
|
src/gevent/queue.py
|
{
"start": 23379,
"end": 23899
}
|
class ____(Queue):
"""
A subclass of :class:`JoinableQueue` that retrieves most recently added entries first.
.. versionchanged:: 24.10.1
Now extends :class:`JoinableQueue` instead of just :class:`Queue`.
"""
__slots__ = ()
def _create_queue(self, items=()):
return list(items)
def _put(self, item):
self.queue.append(item)
self._did_put_task()
def _get(self):
return self.queue.pop()
def _peek(self):
return self.queue[-1]
|
LifoQueue
|
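The gevent record above describes a last-in-first-out variant of JoinableQueue; a minimal usage sketch (assuming gevent is installed, with the task_done() calls relying on the JoinableQueue behaviour noted in the 24.10.1 changelog entry above):

from gevent.queue import LifoQueue

q = LifoQueue()
for item in ("first", "second", "third"):
    q.put(item)

# Most recently added entry comes out first.
assert q.get() == "third"
assert q.get() == "second"

# JoinableQueue bookkeeping is available on gevent >= 24.10.1.
q.task_done()
q.task_done()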
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/branching/branching_io_manager.py
|
{
"start": 1224,
"end": 5400
}
|
class ____(ConfigurableIOManager):
"""A branching I/O manager composes two I/O managers.
1) The parent I/O manager, typically your production environment.
2) The branch I/O manager, typically a development or branched environment.
    The objective of this is to allow a developer to safely read from a production
environment and then write to a separate development environment. Once data
has been written to the branch environment subsequent reads of that asset
are sourced from the branch environment. This bookkeeping is done in Dagster's
asset catalog by emitting AssetMaterializations with metadata.
This is designed for iterative development on asset graphs, especially
where assets early in the graph are large and expensive to compute. One can
iteratively develop on downstream assets in that graph safely.
    Some storage systems support branching functionality natively. Examples include Snowflake's
    CLONE feature. Branching I/O managers allow users to implement that functionality
    in a more flexible software layer over arbitrary storage systems.
"""
parent_io_manager: ResourceDependency[IOManager]
branch_io_manager: ResourceDependency[IOManager]
branch_name: str = "dev"
branch_metadata_key: str = "io_manager_branch"
def load_input(self, context: InputContext) -> Any:
if not context.has_asset_key:
# we are dealing with an op input
# just load it with the branch manager
return self.branch_io_manager.load_input(context)
else:
# we are dealing with an asset input
# figure out which partition keys are loaded, if any
partition_keys = []
if context.has_asset_partitions:
partition_keys = context.asset_partition_keys
# we'll fetch materializations with key=None if we aren't loading
# a partitioned asset, this will return us the latest materialization
# of an unpartitioned asset
if len(partition_keys) == 0:
partition_keys = [None]
# grab the latest materialization for each partition that we
# need to load, OR just the latest materialization if not partitioned
event_log_entries = [
latest_materialization_log_entry(
instance=context.instance,
asset_key=context.asset_key,
partition_key=partition_key,
)
for partition_key in partition_keys
]
# if all partitions are available in the branch, we can load from the branch
# otherwise we need to load from the parent
if all(
event_log_entry is not None
and event_log_entry.asset_materialization
and get_text_metadata_value(
event_log_entry.asset_materialization, self.branch_metadata_key
)
== self.branch_name
for event_log_entry in event_log_entries
):
context.log.info(
f'Branching Manager: Loading "{context.asset_key.to_user_string()}" from'
f' "{self.branch_name}"'
)
return self.branch_io_manager.load_input(context)
context.log.info(
f'Branching Manager Loading "{context.asset_key.to_user_string()}" from parent'
)
return self.parent_io_manager.load_input(context)
def handle_output(self, context: OutputContext, obj: Any) -> None:
# always write to the branch manager
self.branch_io_manager.handle_output(context, obj)
if context.has_asset_key:
# we are dealing with an asset output (not an op output)
# mark the asset materialization with the branch name
context.add_output_metadata({self.branch_metadata_key: self.branch_name})
context.log.info(
f'Branching Manager: Writing "{context.asset_key.to_user_string()}" to branch'
f' "{self.branch_name}"'
)
|
BranchingIOManager
|
python
|
mahmoud__boltons
|
boltons/dictutils.py
|
{
"start": 35525,
"end": 37642
}
|
class ____(dict):
"""An immutable dict subtype that is hashable and can itself be used
as a :class:`dict` key or :class:`set` entry. What
:class:`frozenset` is to :class:`set`, FrozenDict is to
:class:`dict`.
There was once an attempt to introduce such a type to the standard
library, but it was rejected: `PEP 416 <https://www.python.org/dev/peps/pep-0416/>`_.
Because FrozenDict is a :class:`dict` subtype, it automatically
works everywhere a dict would, including JSON serialization.
"""
__slots__ = ('_hash',)
def updated(self, *a, **kw):
"""Make a copy and add items from a dictionary or iterable (and/or
keyword arguments), overwriting values under an existing
key. See :meth:`dict.update` for more details.
"""
data = dict(self)
data.update(*a, **kw)
return type(self)(data)
@classmethod
def fromkeys(cls, keys, value=None):
# one of the lesser known and used/useful dict methods
return cls(dict.fromkeys(keys, value))
def __repr__(self):
cn = self.__class__.__name__
return f'{cn}({dict.__repr__(self)})'
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def __hash__(self):
try:
ret = self._hash
except AttributeError:
try:
ret = self._hash = hash(frozenset(self.items()))
except Exception as e:
ret = self._hash = FrozenHashError(e)
if ret.__class__ is FrozenHashError:
raise ret
return ret
def __copy__(self):
return self # immutable types don't copy, see tuple's behavior
# block everything else
def _raise_frozen_typeerror(self, *a, **kw):
"raises a TypeError, because FrozenDicts are immutable"
raise TypeError('%s object is immutable' % self.__class__.__name__)
__ior__ = __setitem__ = __delitem__ = update = _raise_frozen_typeerror
setdefault = pop = popitem = clear = _raise_frozen_typeerror
del _raise_frozen_typeerror
# end dictutils.py
|
FrozenDict
|
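The boltons record above explains that FrozenDict is a hashable, immutable dict subtype; a short usage sketch (assuming boltons is installed) could be:

from boltons.dictutils import FrozenDict

fd = FrozenDict({"host": "localhost", "port": 8080})

# Hashable, so it can key another dict or be placed in a set.
cache = {fd: "connection-A"}
assert cache[FrozenDict({"host": "localhost", "port": 8080})] == "connection-A"

# Mutation is blocked; updated() returns a modified copy instead.
try:
    fd["port"] = 9090
except TypeError:
    pass
assert fd.updated(port=9090)["port"] == 9090
assert fd["port"] == 8080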
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 25033,
"end": 25387
}
|
class ____(RequestHandler):
def get(self):
try:
self.set_header("X-Foo", "foo\r\nX-Bar: baz")
raise Exception("Didn't get expected exception")
except ValueError as e:
if "Unsafe header value" in str(e):
self.finish(b"ok")
else:
raise
|
HeaderInjectionHandler
|
python
|
pydantic__pydantic
|
pydantic-core/python/pydantic_core/core_schema.py
|
{
"start": 8396,
"end": 8643
}
|
class ____(SerializationInfo[ContextT], Protocol):
"""Extra data used during field serialization."""
@property
def field_name(self) -> str:
"""The name of the current field being serialized."""
...
|
FieldSerializationInfo
|
python
|
getsentry__sentry
|
src/sentry/preprod/api/endpoints/size_analysis/project_preprod_size_analysis_compare_download.py
|
{
"start": 746,
"end": 4586
}
|
class ____(ProjectEndpoint):
owner = ApiOwner.EMERGE_TOOLS
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
def get(
self, request: Request, project: Project, head_size_metric_id: int, base_size_metric_id: int
) -> HttpResponseBase:
"""
Download size analysis comparison results for specific size metrics
````````````````````````````````````````````````````
Download the size analysis comparison results for specific size metrics.
:pparam string organization_id_or_slug: the id or slug of the organization the
artifact belongs to.
:pparam string project_id_or_slug: the id or slug of the project to retrieve the
artifact from.
:pparam string head_size_metric_id: the ID of the head size metric to download size analysis comparison for.
:pparam string base_size_metric_id: the ID of the base size metric to download size analysis comparison for.
:auth: required
"""
analytics.record(
PreprodArtifactApiSizeAnalysisCompareDownloadEvent(
organization_id=project.organization_id,
project_id=project.id,
user_id=request.user.id,
head_size_metric_id=str(head_size_metric_id),
base_size_metric_id=str(base_size_metric_id),
)
)
if not features.has(
"organizations:preprod-frontend-routes", project.organization, actor=request.user
):
return Response({"error": "Feature not enabled"}, status=403)
logger.info(
"preprod.size_analysis.compare.api.download",
extra={
"head_size_metric_id": head_size_metric_id,
"base_size_metric_id": base_size_metric_id,
},
)
try:
comparison_obj = PreprodArtifactSizeComparison.objects.get(
head_size_analysis_id=head_size_metric_id,
base_size_analysis_id=base_size_metric_id,
organization_id=project.organization_id,
)
except PreprodArtifactSizeComparison.DoesNotExist:
logger.info(
"preprod.size_analysis.compare.api.download.no_comparison_obj",
extra={
"head_size_metric_id": head_size_metric_id,
"base_size_metric_id": base_size_metric_id,
},
)
return Response({"error": "Comparison not found."}, status=404)
if comparison_obj.file_id is None:
logger.info(
"preprod.size_analysis.compare.api.download.no_file_id",
extra={"comparison_id": comparison_obj.id},
)
return Response({"error": "Comparison not found."}, status=404)
try:
file_obj = File.objects.get(id=comparison_obj.file_id)
except File.DoesNotExist:
logger.info(
"preprod.size_analysis.compare.api.download.no_file",
extra={"comparison_id": comparison_obj.id},
)
return Response({"error": "Comparison not found."}, status=404)
try:
fp = file_obj.getfile()
except Exception:
logger.info(
"preprod.size_analysis.compare.api.download.no_file_getfile",
extra={"comparison_id": comparison_obj.id},
)
return Response({"error": "Failed to retrieve size analysis comparison."}, status=500)
response = FileResponse(
fp,
content_type="application/json",
)
response["Content-Length"] = file_obj.size
return response
|
ProjectPreprodArtifactSizeAnalysisCompareDownloadEndpoint
|
python
|
plotly__plotly.py
|
plotly/figure_factory/_streamline.py
|
{
"start": 4332,
"end": 14499
}
|
class ____(object):
"""
Refer to FigureFactory.create_streamline() for docstring
"""
def __init__(self, x, y, u, v, density, angle, arrow_scale, **kwargs):
self.x = np.array(x)
self.y = np.array(y)
self.u = np.array(u)
self.v = np.array(v)
self.angle = angle
self.arrow_scale = arrow_scale
self.density = int(30 * density) # Scale similarly to other functions
self.delta_x = self.x[1] - self.x[0]
self.delta_y = self.y[1] - self.y[0]
self.val_x = self.x
self.val_y = self.y
# Set up spacing
self.blank = np.zeros((self.density, self.density))
self.spacing_x = len(self.x) / float(self.density - 1)
self.spacing_y = len(self.y) / float(self.density - 1)
self.trajectories = []
# Rescale speed onto axes-coordinates
self.u = self.u / (self.x[-1] - self.x[0])
self.v = self.v / (self.y[-1] - self.y[0])
self.speed = np.sqrt(self.u**2 + self.v**2)
# Rescale u and v for integrations.
self.u *= len(self.x)
self.v *= len(self.y)
self.st_x = []
self.st_y = []
self.get_streamlines()
streamline_x, streamline_y = self.sum_streamlines()
arrows_x, arrows_y = self.get_streamline_arrows()
def blank_pos(self, xi, yi):
"""
Set up positions for trajectories to be used with rk4 function.
"""
return (int((xi / self.spacing_x) + 0.5), int((yi / self.spacing_y) + 0.5))
def value_at(self, a, xi, yi):
"""
Set up for RK4 function, based on Bokeh's streamline code
"""
if isinstance(xi, np.ndarray):
self.x = xi.astype(int)
self.y = yi.astype(int)
else:
self.val_x = int(xi)
self.val_y = int(yi)
a00 = a[self.val_y, self.val_x]
a01 = a[self.val_y, self.val_x + 1]
a10 = a[self.val_y + 1, self.val_x]
a11 = a[self.val_y + 1, self.val_x + 1]
xt = xi - self.val_x
yt = yi - self.val_y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
return a0 * (1 - yt) + a1 * yt
def rk4_integrate(self, x0, y0):
"""
RK4 forward and back trajectories from the initial conditions.
        Adapted from Bokeh's streamline code; uses the Runge-Kutta method to fill
        the x and y trajectories, then checks the trajectory length (s, in units of axes).
"""
def f(xi, yi):
dt_ds = 1.0 / self.value_at(self.speed, xi, yi)
ui = self.value_at(self.u, xi, yi)
vi = self.value_at(self.v, xi, yi)
return ui * dt_ds, vi * dt_ds
def g(xi, yi):
dt_ds = 1.0 / self.value_at(self.speed, xi, yi)
ui = self.value_at(self.u, xi, yi)
vi = self.value_at(self.v, xi, yi)
return -ui * dt_ds, -vi * dt_ds
def check(xi, yi):
return (0 <= xi < len(self.x) - 1) and (0 <= yi < len(self.y) - 1)
xb_changes = []
yb_changes = []
def rk4(x0, y0, f):
ds = 0.01
stotal = 0
xi = x0
yi = y0
xb, yb = self.blank_pos(xi, yi)
xf_traj = []
yf_traj = []
while check(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + 0.5 * ds * k1x, yi + 0.5 * ds * k1y)
k3x, k3y = f(xi + 0.5 * ds * k2x, yi + 0.5 * ds * k2y)
k4x, k4y = f(xi + ds * k3x, yi + ds * k3y)
except IndexError:
break
xi += ds * (k1x + 2 * k2x + 2 * k3x + k4x) / 6.0
yi += ds * (k1y + 2 * k2y + 2 * k3y + k4y) / 6.0
if not check(xi, yi):
break
stotal += ds
new_xb, new_yb = self.blank_pos(xi, yi)
if new_xb != xb or new_yb != yb:
if self.blank[new_yb, new_xb] == 0:
self.blank[new_yb, new_xb] = 1
xb_changes.append(new_xb)
yb_changes.append(new_yb)
xb = new_xb
yb = new_yb
else:
break
if stotal > 2:
break
return stotal, xf_traj, yf_traj
sf, xf_traj, yf_traj = rk4(x0, y0, f)
sb, xb_traj, yb_traj = rk4(x0, y0, g)
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if len(x_traj) < 1:
return None
if stotal > 0.2:
initxb, inityb = self.blank_pos(x0, y0)
self.blank[inityb, initxb] = 1
return x_traj, y_traj
else:
for xb, yb in zip(xb_changes, yb_changes):
self.blank[yb, xb] = 0
return None
def traj(self, xb, yb):
"""
Integrate trajectories
:param (int) xb: results of passing xi through self.blank_pos
        :param (int) yb: results of passing yi through self.blank_pos
Calculate each trajectory based on rk4 integrate method.
"""
if xb < 0 or xb >= self.density or yb < 0 or yb >= self.density:
return
if self.blank[yb, xb] == 0:
t = self.rk4_integrate(xb * self.spacing_x, yb * self.spacing_y)
if t is not None:
self.trajectories.append(t)
def get_streamlines(self):
"""
Get streamlines by building trajectory set.
"""
for indent in range(self.density // 2):
for xi in range(self.density - 2 * indent):
self.traj(xi + indent, indent)
self.traj(xi + indent, self.density - 1 - indent)
self.traj(indent, xi + indent)
self.traj(self.density - 1 - indent, xi + indent)
self.st_x = [
np.array(t[0]) * self.delta_x + self.x[0] for t in self.trajectories
]
self.st_y = [
np.array(t[1]) * self.delta_y + self.y[0] for t in self.trajectories
]
for index in range(len(self.st_x)):
self.st_x[index] = self.st_x[index].tolist()
self.st_x[index].append(np.nan)
for index in range(len(self.st_y)):
self.st_y[index] = self.st_y[index].tolist()
self.st_y[index].append(np.nan)
def get_streamline_arrows(self):
"""
Makes an arrow for each streamline.
Gets angle of streamline at 1/3 mark and creates arrow coordinates
based off of user defined angle and arrow_scale.
:param (array) st_x: x-values for all streamlines
:param (array) st_y: y-values for all streamlines
:param (angle in radians) angle: angle of arrowhead. Default = pi/9
:param (float in [0,1]) arrow_scale: value to scale length of arrowhead
Default = .09
:rtype (list, list) arrows_x: x-values to create arrowhead and
arrows_y: y-values to create arrowhead
"""
arrow_end_x = np.empty((len(self.st_x)))
arrow_end_y = np.empty((len(self.st_y)))
arrow_start_x = np.empty((len(self.st_x)))
arrow_start_y = np.empty((len(self.st_y)))
for index in range(len(self.st_x)):
arrow_end_x[index] = self.st_x[index][int(len(self.st_x[index]) / 3)]
arrow_start_x[index] = self.st_x[index][
(int(len(self.st_x[index]) / 3)) - 1
]
arrow_end_y[index] = self.st_y[index][int(len(self.st_y[index]) / 3)]
arrow_start_y[index] = self.st_y[index][
(int(len(self.st_y[index]) / 3)) - 1
]
dif_x = arrow_end_x - arrow_start_x
dif_y = arrow_end_y - arrow_start_y
orig_err = np.geterr()
np.seterr(divide="ignore", invalid="ignore")
streamline_ang = np.arctan(dif_y / dif_x)
np.seterr(**orig_err)
ang1 = streamline_ang + (self.angle)
ang2 = streamline_ang - (self.angle)
seg1_x = np.cos(ang1) * self.arrow_scale
seg1_y = np.sin(ang1) * self.arrow_scale
seg2_x = np.cos(ang2) * self.arrow_scale
seg2_y = np.sin(ang2) * self.arrow_scale
point1_x = np.empty((len(dif_x)))
point1_y = np.empty((len(dif_y)))
point2_x = np.empty((len(dif_x)))
point2_y = np.empty((len(dif_y)))
for index in range(len(dif_x)):
if dif_x[index] >= 0:
point1_x[index] = arrow_end_x[index] - seg1_x[index]
point1_y[index] = arrow_end_y[index] - seg1_y[index]
point2_x[index] = arrow_end_x[index] - seg2_x[index]
point2_y[index] = arrow_end_y[index] - seg2_y[index]
else:
point1_x[index] = arrow_end_x[index] + seg1_x[index]
point1_y[index] = arrow_end_y[index] + seg1_y[index]
point2_x[index] = arrow_end_x[index] + seg2_x[index]
point2_y[index] = arrow_end_y[index] + seg2_y[index]
space = np.empty((len(point1_x)))
space[:] = np.nan
# Combine arrays into array
arrows_x = np.array([point1_x, arrow_end_x, point2_x, space])
arrows_x = arrows_x.flatten("F")
arrows_x = arrows_x.tolist()
# Combine arrays into array
arrows_y = np.array([point1_y, arrow_end_y, point2_y, space])
arrows_y = arrows_y.flatten("F")
arrows_y = arrows_y.tolist()
return arrows_x, arrows_y
def sum_streamlines(self):
"""
Makes all streamlines readable as a single trace.
:rtype (list, list): streamline_x: all x values for each streamline
combined into single list and streamline_y: all y values for each
streamline combined into single list
"""
streamline_x = sum(self.st_x, [])
streamline_y = sum(self.st_y, [])
return streamline_x, streamline_y
|
_Streamline
|
python
|
django__django
|
django/contrib/admin/tests.py
|
{
"start": 711,
"end": 9398
}
|
class ____(SeleniumTestCase, StaticLiveServerTestCase):
available_apps = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
]
def tearDown(self):
# Ensure that no CSP violations were logged in the browser.
self.assertEqual(self.get_browser_logs(source="security"), [])
super().tearDown()
def wait_until(self, callback, timeout=10):
"""
Block the execution of the tests until the specified callback returns a
value that is not falsy. This method can be called, for example, after
clicking a link or submitting a form. See the other public methods that
call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_for_and_switch_to_popup(self, num_windows=2, timeout=10):
"""
Block until `num_windows` are present and are ready (usually 2, but can
be overridden in the case of pop-ups opening other pop-ups). Switch the
current window to the new pop-up.
"""
self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout)
self.selenium.switch_to.window(self.selenium.window_handles[-1])
self.wait_page_ready()
def wait_for(self, css_selector, timeout=10):
"""
Block until a CSS selector is found on the page.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)), timeout
)
def wait_for_text(self, css_selector, text, timeout=10):
"""
Block until the text is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element((By.CSS_SELECTOR, css_selector), text),
timeout,
)
def wait_for_value(self, css_selector, text, timeout=10):
"""
Block until the value is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element_value(
(By.CSS_SELECTOR, css_selector), text
),
timeout,
)
def wait_until_visible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is visible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.visibility_of_element_located((By.CSS_SELECTOR, css_selector)), timeout
)
def wait_until_invisible(self, css_selector, timeout=10):
"""
Block until the element described by the CSS selector is invisible.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.invisibility_of_element_located((By.CSS_SELECTOR, css_selector)), timeout
)
def wait_page_ready(self, timeout=10):
"""
Block until the page is ready.
"""
self.wait_until(
lambda driver: driver.execute_script("return document.readyState;")
== "complete",
timeout,
)
@contextmanager
def wait_page_loaded(self, timeout=10):
"""
Block until a new page has loaded and is ready.
"""
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
old_page = self.selenium.find_element(By.TAG_NAME, "html")
yield
# Wait for the next page to be loaded
try:
self.wait_until(ec.staleness_of(old_page), timeout=timeout)
except WebDriverException:
# Issue in version 113+ of Chrome driver where a WebDriverException
# error is raised rather than a StaleElementReferenceException.
# See: https://issues.chromium.org/issues/42323468
pass
self.wait_page_ready(timeout=timeout)
def trigger_resize(self):
width = self.selenium.get_window_size()["width"]
height = self.selenium.get_window_size()["height"]
self.selenium.set_window_size(width + 1, height)
self.wait_page_ready()
self.selenium.set_window_size(width, height)
self.wait_page_ready()
def admin_login(self, username, password, login_url="/admin/"):
"""
Log in to the admin.
"""
from selenium.webdriver.common.by import By
self.selenium.get("%s%s" % (self.live_server_url, login_url))
username_input = self.selenium.find_element(By.NAME, "username")
username_input.send_keys(username)
password_input = self.selenium.find_element(By.NAME, "password")
password_input.send_keys(password)
login_text = _("Log in")
with self.wait_page_loaded():
self.selenium.find_element(
By.XPATH, '//input[@value="%s"]' % login_text
).click()
def select_option(self, selector, value):
"""
Select the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))
select.select_by_value(value)
def deselect_option(self, selector, value):
"""
Deselect the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
select = Select(self.selenium.find_element(By.CSS_SELECTOR, selector))
select.deselect_by_value(value)
def assertCountSeleniumElements(self, selector, count, root_element=None):
"""
Assert number of matches for a CSS selector.
`root_element` allow restriction to a pre-selected node.
"""
from selenium.webdriver.common.by import By
root_element = root_element or self.selenium
self.assertEqual(
len(root_element.find_elements(By.CSS_SELECTOR, selector)), count
)
def _assertOptionsValues(self, options_selector, values):
from selenium.webdriver.common.by import By
if values:
options = self.selenium.find_elements(By.CSS_SELECTOR, options_selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute("value"))
self.assertEqual(values, actual_values)
else:
# Prevent the `find_elements(By.CSS_SELECTOR, …)` call from
# blocking if the selector doesn't match any options as we expect
# it to be the case.
with self.disable_implicit_wait():
self.wait_until(
lambda driver: not driver.find_elements(
By.CSS_SELECTOR, options_selector
)
)
def assertSelectOptions(self, selector, values):
"""
Assert that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
self._assertOptionsValues("%s > option" % selector, values)
def assertSelectedOptions(self, selector, values):
"""
Assert that the <SELECT> widget identified by `selector` has the
selected options with the given `values`.
"""
self._assertOptionsValues("%s > option:checked" % selector, values)
def is_disabled(self, selector):
"""
Return True if the element identified by `selector` has the `disabled`
attribute.
"""
from selenium.webdriver.common.by import By
return (
self.selenium.find_element(By.CSS_SELECTOR, selector).get_attribute(
"disabled"
)
== "true"
)
|
AdminSeleniumTestCase
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_base.py
|
{
"start": 1056,
"end": 1352
}
|
class ____(GoogleCloudBaseOperator):
def __init__(
self,
retry: Retry | _MethodDefault = DEFAULT,
config: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.retry = retry
self.config = config
|
GoogleSampleOperator
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 218912,
"end": 219224
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("column", "line")
column = sgqlc.types.Field(Int, graphql_name="column")
line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="line")
|
CheckAnnotationPosition
|
python
|
astropy__astropy
|
astropy/io/votable/tree.py
|
{
"start": 15309,
"end": 16481
}
|
class ____(SimpleElement):
"""
A base class for simple elements, such as FIELD, PARAM and INFO
that don't require any special parsing or outputting machinery.
"""
def __init__(self):
SimpleElement.__init__(self)
self._content = None
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start and tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
elif tag == self._element_name:
if data:
self.content = data
break
return self
def to_xml(self, w, **kwargs):
w.element(
self._element_name,
self._content,
attrib=w.object_attrs(self, self._attr_list),
)
@property
def content(self):
"""The content of the element."""
return self._content
@content.setter
def content(self, content):
check_string(content, "content", self._config, self._pos)
self._content = content
@content.deleter
def content(self):
self._content = None
|
SimpleElementWithContent
|
python
|
apache__airflow
|
providers/apache/beam/src/airflow/providers/apache/beam/operators/beam.py
|
{
"start": 2781,
"end": 6948
}
|
class ____(metaclass=ABCMeta):
"""
    Helper class to store common, Dataflow-specific logic shared by
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`,
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator` and
:class:`~airflow.providers.apache.beam.operators.beam.BeamRunGoPipelineOperator`.
"""
dataflow_hook: DataflowHook | None
dataflow_config: DataflowConfiguration
gcp_conn_id: str
dataflow_support_impersonation: bool = True
def __init__(self):
if not GOOGLE_PROVIDER:
raise AirflowOptionalProviderFeatureException(
"Failed to import apache-airflow-google-provider. To use the dataflow service please install "
"the appropriate version of the google provider."
)
def _set_dataflow(
self,
pipeline_options: dict,
job_name_variable_key: str | None = None,
) -> tuple[str, dict, Callable[[str], None], Callable[[], bool]]:
self.dataflow_hook = self.__set_dataflow_hook()
self.dataflow_config.project_id = self.dataflow_config.project_id or self.dataflow_hook.project_id
dataflow_job_name = self.__get_dataflow_job_name()
pipeline_options = self.__get_dataflow_pipeline_options(
pipeline_options, dataflow_job_name, job_name_variable_key
)
process_line_callback = self.__get_dataflow_process_callback()
is_dataflow_job_id_exist_callback = self.__is_dataflow_job_id_exist_callback()
return dataflow_job_name, pipeline_options, process_line_callback, is_dataflow_job_id_exist_callback
def __set_dataflow_hook(self) -> DataflowHook:
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.dataflow_config.gcp_conn_id or self.gcp_conn_id,
poll_sleep=self.dataflow_config.poll_sleep,
impersonation_chain=self.dataflow_config.impersonation_chain,
drain_pipeline=self.dataflow_config.drain_pipeline,
cancel_timeout=self.dataflow_config.cancel_timeout,
wait_until_finished=self.dataflow_config.wait_until_finished,
)
return self.dataflow_hook
def __get_dataflow_job_name(self) -> str:
return DataflowHook.build_dataflow_job_name(
self.dataflow_config.job_name, # type: ignore
self.dataflow_config.append_job_name,
)
def __get_dataflow_pipeline_options(
self, pipeline_options: dict, job_name: str, job_name_key: str | None = None
) -> dict:
pipeline_options = copy.deepcopy(pipeline_options)
if job_name_key is not None:
pipeline_options[job_name_key] = job_name
if self.dataflow_config.service_account:
pipeline_options["serviceAccount"] = self.dataflow_config.service_account
if self.dataflow_support_impersonation and self.dataflow_config.impersonation_chain:
if isinstance(self.dataflow_config.impersonation_chain, list):
pipeline_options["impersonateServiceAccount"] = ",".join(
self.dataflow_config.impersonation_chain
)
else:
pipeline_options["impersonateServiceAccount"] = self.dataflow_config.impersonation_chain
pipeline_options["project"] = self.dataflow_config.project_id
pipeline_options["region"] = self.dataflow_config.location
pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
return pipeline_options
def __get_dataflow_process_callback(self) -> Callable[[str], None]:
def set_current_dataflow_job_id(job_id):
self.dataflow_job_id = job_id
return process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_dataflow_job_id
)
def __is_dataflow_job_id_exist_callback(self) -> Callable[[], bool]:
def is_dataflow_job_id_exist() -> bool:
return True if self.dataflow_job_id else False
return is_dataflow_job_id_exist
|
BeamDataflowMixin
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_nvvm_driver.py
|
{
"start": 5857,
"end": 7252
}
|
class ____(unittest.TestCase):
def test_libdevice_load(self):
# Test that constructing LibDevice gives a bitcode file
libdevice = LibDevice()
self.assertEqual(libdevice.bc[:4], b'BC\xc0\xde')
nvvmir_generic = '''\
target triple="nvptx64-nvidia-cuda"
target datalayout = "{data_layout}"
define i32 @ave(i32 %a, i32 %b) {{
entry:
%add = add nsw i32 %a, %b
%div = sdiv i32 %add, 2
ret i32 %div
}}
define void @simple(i32* %data) {{
entry:
%0 = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
%1 = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
%mul = mul i32 %0, %1
%2 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
%add = add i32 %mul, %2
%call = call i32 @ave(i32 %add, i32 %add)
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32, i32* %data, i64 %idxprom
store i32 %call, i32* %arrayidx, align 4
ret void
}}
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x() nounwind readnone
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x() nounwind readnone
!nvvmir.version = !{{!1}}
!1 = !{{i32 {v[0]}, i32 {v[1]}, i32 {v[2]}, i32 {v[3]}}}
!nvvm.annotations = !{{!2}}
!2 = !{{void (i32*)* @simple, !"kernel", i32 1}}
@"llvm.used" = appending global [1 x i8*] [i8* bitcast (void (i32*)* @simple to i8*)], section "llvm.metadata"
''' # noqa: E501
if __name__ == '__main__':
unittest.main()
|
TestLibDevice
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/state/schema.py
|
{
"start": 2521,
"end": 3691
}
|
class ____(BaseModel):
"""Metadata about a Ray Train worker."""
world_rank: int = Field(
description="The global rank of the worker in the training cluster."
)
local_rank: int = Field(description="The local rank of the worker on its node.")
node_rank: int = Field(description="The rank of the worker's node in the cluster.")
actor_id: str = Field(description="The unique ID of the worker's actor.")
node_id: str = Field(
description="The unique ID of the node where the worker is running."
)
node_ip: str = Field(
description="The IP address of the node where the worker is running."
)
pid: int = Field(description="The process ID of the worker.")
gpu_ids: List[int] = Field(description="A list of GPU IDs allocated to the worker.")
status: Optional[ActorStatus] = Field(
description="The current status of the worker actor."
)
resources: TrainResources = Field(
description="The resources allocated to this Train worker."
)
log_file_path: Optional[str] = Field(
description="The path to the log file for the Train worker."
)
@DeveloperAPI
|
TrainWorker
|
python
|
aio-libs__aiohttp
|
aiohttp/web_urldispatcher.py
|
{
"start": 7187,
"end": 8282
}
|
class ____(UrlMappingMatchInfo):
__slots__ = ("_exception",)
def __init__(self, http_exception: HTTPException) -> None:
self._exception = http_exception
super().__init__({}, SystemRoute(self._exception))
@property
def http_exception(self) -> HTTPException:
return self._exception
def __repr__(self) -> str:
return f"<MatchInfoError {self._exception.status}: {self._exception.reason}>"
async def _default_expect_handler(request: Request) -> None:
"""Default handler for Expect header.
Just send "100 Continue" to client.
    Raise HTTPExpectationFailed if the value of the header is not "100-continue".
"""
expect = request.headers.get(hdrs.EXPECT, "")
if request.version == HttpVersion11:
if expect.lower() == "100-continue":
await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
# Reset output_size as we haven't started the main body yet.
request.writer.output_size = 0
else:
raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
|
MatchInfoError
|
python
|
matplotlib__matplotlib
|
galleries/examples/event_handling/poly_editor.py
|
{
"start": 1032,
"end": 6597
}
|
class ____:
"""
A polygon editor.
Key-bindings
't' toggle vertex markers on and off. When vertex markers are on,
you can move them, delete them
'd' delete the vertex under point
'i' insert a vertex at point. You must be within epsilon of the
line connecting two existing vertices
"""
showverts = True
epsilon = 5 # max pixel distance to count as a vertex hit
def __init__(self, ax, poly):
if poly.figure is None:
raise RuntimeError('You must first add the polygon to a figure '
'or canvas before defining the interactor')
self.ax = ax
canvas = poly.figure.canvas
self.poly = poly
x, y = zip(*self.poly.xy)
self.line = Line2D(x, y,
marker='o', markerfacecolor='r',
animated=True)
self.ax.add_line(self.line)
self.cid = self.poly.add_callback(self.poly_changed)
self._ind = None # the active vert
canvas.mpl_connect('draw_event', self.on_draw)
canvas.mpl_connect('button_press_event', self.on_button_press)
canvas.mpl_connect('key_press_event', self.on_key_press)
canvas.mpl_connect('button_release_event', self.on_button_release)
canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
self.canvas = canvas
def on_draw(self, event):
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
# do not need to blit here, this will fire before the screen is
# updated
def poly_changed(self, poly):
"""This method is called whenever the pathpatch object is called."""
# only copy the artist props to the line (except visibility)
vis = self.line.get_visible()
Artist.update_from(self.line, poly)
self.line.set_visible(vis) # don't use the poly visibility state
def get_ind_under_point(self, event):
"""
Return the index of the point closest to the event position or *None*
if no point is within ``self.epsilon`` to the event position.
"""
# display coords
xy = np.asarray(self.poly.xy)
xyt = self.poly.get_transform().transform(xy)
xt, yt = xyt[:, 0], xyt[:, 1]
d = np.hypot(xt - event.x, yt - event.y)
indseq, = np.nonzero(d == d.min())
ind = indseq[0]
if d[ind] >= self.epsilon:
ind = None
return ind
def on_button_press(self, event):
"""Callback for mouse button presses."""
if not self.showverts:
return
if event.inaxes is None:
return
if event.button != 1:
return
self._ind = self.get_ind_under_point(event)
def on_button_release(self, event):
"""Callback for mouse button releases."""
if not self.showverts:
return
if event.button != 1:
return
self._ind = None
def on_key_press(self, event):
"""Callback for key presses."""
if not event.inaxes:
return
if event.key == 't':
self.showverts = not self.showverts
self.line.set_visible(self.showverts)
if not self.showverts:
self._ind = None
elif event.key == 'd':
ind = self.get_ind_under_point(event)
if ind is not None:
self.poly.xy = np.delete(self.poly.xy,
ind, axis=0)
self.line.set_data(zip(*self.poly.xy))
elif event.key == 'i':
xys = self.poly.get_transform().transform(self.poly.xy)
p = event.x, event.y # display coords
for i in range(len(xys) - 1):
s0 = xys[i]
s1 = xys[i + 1]
d = dist_point_to_segment(p, s0, s1)
if d <= self.epsilon:
self.poly.xy = np.insert(
self.poly.xy, i+1,
[event.xdata, event.ydata],
axis=0)
self.line.set_data(zip(*self.poly.xy))
break
if self.line.stale:
self.canvas.draw_idle()
def on_mouse_move(self, event):
"""Callback for mouse movements."""
if not self.showverts:
return
if self._ind is None:
return
if event.inaxes is None:
return
if event.button != 1:
return
x, y = event.xdata, event.ydata
self.poly.xy[self._ind] = x, y
if self._ind == 0:
self.poly.xy[-1] = x, y
elif self._ind == len(self.poly.xy) - 1:
self.poly.xy[0] = x, y
self.line.set_data(zip(*self.poly.xy))
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.poly)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
theta = np.arange(0, 2*np.pi, 0.1)
r = 1.5
xs = r * np.cos(theta)
ys = r * np.sin(theta)
poly = Polygon(np.column_stack([xs, ys]), animated=True)
fig, ax = plt.subplots()
ax.add_patch(poly)
p = PolygonInteractor(ax, poly)
ax.set_title('Click and drag a point to move it')
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
plt.show()
|
PolygonInteractor
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/mro4.py
|
{
"start": 424,
"end": 479
}
|
class ____(Generic[T1, T2], Foo1[T1], Foo2[T2]): ...
|
Bar1
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments_differ.py
|
{
"start": 667,
"end": 835
}
|
class ____(Classmethod):
@staticmethod
def func(): # [arguments-differ]
pass
@classmethod
def func1(cls):
return cls()
|
ClassmethodChild
|
python
|
django__django
|
django/db/models/fields/__init__.py
|
{
"start": 69874,
"end": 72278
}
|
class ____(Field):
description = _("File path")
def __init__(
self,
verbose_name=None,
name=None,
path="",
match=None,
recursive=False,
allow_files=True,
allow_folders=False,
**kwargs,
):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
kwargs.setdefault("max_length", 100)
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_allowing_files_or_folders(**kwargs),
]
def _check_allowing_files_or_folders(self, **kwargs):
if not self.allow_files and not self.allow_folders:
return [
checks.Error(
"FilePathFields must have either 'allow_files' or 'allow_folders' "
"set to True.",
obj=self,
id="fields.E140",
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.path != "":
kwargs["path"] = self.path
if self.match is not None:
kwargs["match"] = self.match
if self.recursive is not False:
kwargs["recursive"] = self.recursive
if self.allow_files is not True:
kwargs["allow_files"] = self.allow_files
if self.allow_folders is not False:
kwargs["allow_folders"] = self.allow_folders
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
return name, path, args, kwargs
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is None:
return None
return str(value)
def formfield(self, **kwargs):
return super().formfield(
**{
"path": self.path() if callable(self.path) else self.path,
"match": self.match,
"recursive": self.recursive,
"form_class": forms.FilePathField,
"allow_files": self.allow_files,
"allow_folders": self.allow_folders,
**kwargs,
}
)
def get_internal_type(self):
return "FilePathField"
|
FilePathField
|
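The Django record above lists FilePathField's constructor options and system checks; a minimal model sketch (the model name and directory are hypothetical) might look like:

from django.db import models


class DeployScript(models.Model):
    # Offer only .sh files found recursively under a fixed directory;
    # allow_files defaults to True and allow_folders to False, so only files are listed.
    source = models.FilePathField(
        path="/opt/deploy/scripts",
        match=r".*\.sh$",
        recursive=True,
    )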
python
|
doocs__leetcode
|
solution/2400-2499/2496.Maximum Value of a String in an Array/Solution2.py
|
{
"start": 0,
"end": 314
}
|
class ____:
def maximumValue(self, strs: List[str]) -> int:
def f(s: str) -> int:
x = 0
for c in s:
if c.isalpha():
return len(s)
x = x * 10 + ord(c) - ord("0")
return x
return max(f(s) for s in strs)
|
Solution
|
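The masked class above maps each string to either its length (if it contains a letter) or its numeric value; a standalone sketch of the same helper (hypothetical function name, checked against the published LeetCode 2496 examples):

from typing import List

def maximum_value(strs: List[str]) -> int:
    def f(s: str) -> int:
        x = 0
        for c in s:
            if c.isalpha():
                return len(s)  # any letter means the "value" is the string length
            x = x * 10 + ord(c) - ord("0")
        return x  # purely numeric: its base-10 integer value (leading zeros ignored)
    return max(f(s) for s in strs)

assert maximum_value(["alic3", "bob", "3", "4", "00000"]) == 5
assert maximum_value(["1", "01", "001", "0001"]) == 1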
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/graph_png.py
|
{
"start": 321,
"end": 6473
}
|
class ____:
"""Helper class to draw a state graph into a PNG file.
It requires `graphviz` and `pygraphviz` to be installed.
Example:
```python
drawer = PngDrawer()
drawer.draw(state_graph, "graph.png")
```
"""
def __init__(
self, fontname: str | None = None, labels: LabelsDict | None = None
) -> None:
"""Initializes the PNG drawer.
Args:
fontname: The font to use for the labels. Defaults to "arial".
labels: A dictionary of label overrides. The dictionary
should have the following format:
{
"nodes": {
"node1": "CustomLabel1",
"node2": "CustomLabel2",
"__end__": "End Node"
},
"edges": {
"continue": "ContinueLabel",
"end": "EndLabel"
}
}
The keys are the original labels, and the values are the new labels.
"""
self.fontname = fontname or "arial"
self.labels = labels or LabelsDict(nodes={}, edges={})
def get_node_label(self, label: str) -> str:
"""Returns the label to use for a node.
Args:
label: The original label.
Returns:
The new label.
"""
label = self.labels.get("nodes", {}).get(label, label)
return f"<<B>{label}</B>>"
def get_edge_label(self, label: str) -> str:
"""Returns the label to use for an edge.
Args:
label: The original label.
Returns:
The new label.
"""
label = self.labels.get("edges", {}).get(label, label)
return f"<<U>{label}</U>>"
def add_node(self, viz: Any, node: str) -> None:
"""Adds a node to the graph.
Args:
viz: The graphviz object.
node: The node to add.
"""
viz.add_node(
node,
label=self.get_node_label(node),
style="filled",
fillcolor="yellow",
fontsize=15,
fontname=self.fontname,
)
def add_edge(
self,
viz: Any,
source: str,
target: str,
label: str | None = None,
conditional: bool = False, # noqa: FBT001,FBT002
) -> None:
"""Adds an edge to the graph.
Args:
viz: The graphviz object.
source: The source node.
target: The target node.
label: The label for the edge.
conditional: Whether the edge is conditional.
"""
viz.add_edge(
source,
target,
label=self.get_edge_label(label) if label else "",
fontsize=12,
fontname=self.fontname,
style="dotted" if conditional else "solid",
)
def draw(self, graph: Graph, output_path: str | None = None) -> bytes | None:
"""Draw the given state graph into a PNG file.
Requires `graphviz` and `pygraphviz` to be installed.
Args:
graph: The graph to draw
output_path: The path to save the PNG. If `None`, PNG bytes are returned.
Raises:
ImportError: If `pygraphviz` is not installed.
Returns:
The PNG bytes if `output_path` is None, else None.
"""
if not _HAS_PYGRAPHVIZ:
msg = "Install pygraphviz to draw graphs: `pip install pygraphviz`."
raise ImportError(msg)
# Create a directed graph
viz = pgv.AGraph(directed=True, nodesep=0.9, ranksep=1.0)
# Add nodes, conditional edges, and edges to the graph
self.add_nodes(viz, graph)
self.add_edges(viz, graph)
self.add_subgraph(viz, [node.split(":") for node in graph.nodes])
# Update entrypoint and END styles
self.update_styles(viz, graph)
# Save the graph as PNG
try:
return viz.draw(output_path, format="png", prog="dot")
finally:
viz.close()
def add_nodes(self, viz: Any, graph: Graph) -> None:
"""Add nodes to the graph.
Args:
viz: The graphviz object.
graph: The graph to draw.
"""
for node in graph.nodes:
self.add_node(viz, node)
def add_subgraph(
self,
viz: Any,
nodes: list[list[str]],
parent_prefix: list[str] | None = None,
) -> None:
"""Add subgraphs to the graph.
Args:
viz: The graphviz object.
nodes: The nodes to add.
parent_prefix: The prefix of the parent subgraph.
"""
for prefix, grouped in groupby(
[node[:] for node in sorted(nodes)],
key=lambda x: x.pop(0),
):
current_prefix = (parent_prefix or []) + [prefix]
grouped_nodes = list(grouped)
if len(grouped_nodes) > 1:
subgraph = viz.add_subgraph(
[":".join(current_prefix + node) for node in grouped_nodes],
name="cluster_" + ":".join(current_prefix),
)
self.add_subgraph(subgraph, grouped_nodes, current_prefix)
def add_edges(self, viz: Any, graph: Graph) -> None:
"""Add edges to the graph.
Args:
viz: The graphviz object.
graph: The graph to draw.
"""
for start, end, data, cond in graph.edges:
self.add_edge(
viz, start, end, str(data) if data is not None else None, cond
)
def update_styles(self, viz: Any, graph: Graph) -> None:
"""Update the styles of the entrypoint and END nodes.
Args:
viz: The graphviz object.
graph: The graph to draw.
"""
if first := graph.first_node():
viz.get_node(first.id).attr.update(fillcolor="lightblue")
if last := graph.last_node():
viz.get_node(last.id).attr.update(fillcolor="orange")
|
PngDrawer
|
python
|
viewflow__viewflow
|
tests/test_this_object.py
|
{
"start": 710,
"end": 1464
}
|
class ____(TestCase):
def test_this_refs_data(self):
self.assertEqual(this.some_name.name, 'some_name')
self.assertEqual(this.another_some_name.name, 'another_some_name')
def test_this_ref_resolve(self):
review = Review()
approve = this.approve.resolve(review)
self.assertEqual(approve, review.approve)
self.assertEqual(Review.approver, this.approve.owner.resolve(review))
self.assertEqual(this.approve.call.resolve(review)(), 'approve')
publish = this.publish.resolve(review)
self.assertEqual(publish, review.publish)
self.assertEqual(Review.publisher, this.publish.owner.resolve(review))
self.assertEqual(this.publish.call.resolve(review)(), 'publish')
|
Test
|
python
|
hynek__structlog
|
src/structlog/twisted.py
|
{
"start": 8500,
"end": 10118
}
|
class ____:
"""
Adapt an ``event_dict`` to Twisted logging system.
Particularly, make a wrapped `twisted.python.log.err
<https://docs.twisted.org/en/stable/api/twisted.python.log.html#err>`_
behave as expected.
Args:
dictRenderer:
Renderer that is used for the actual log message. Please note that
structlog comes with a dedicated `JSONRenderer`.
**Must** be the last processor in the chain and requires a *dictRenderer*
    for the actual formatting as a constructor argument in order to be able to
fully support the original behaviors of ``log.msg()`` and ``log.err()``.
"""
def __init__(
self,
dictRenderer: (
Callable[[WrappedLogger, str, EventDict], str] | None
) = None,
) -> None:
self._dictRenderer = dictRenderer or _BUILTIN_DEFAULT_PROCESSORS[-1]
def __call__(
self, logger: WrappedLogger, name: str, eventDict: EventDict
) -> Any:
if name == "err":
# This aspires to handle the following cases correctly:
# 1. log.err(failure, _why='event', **kw)
# 2. log.err('event', **kw)
# 3. log.err(_stuff=failure, _why='event', **kw)
_stuff, _why, eventDict = _extractStuffAndWhy(eventDict)
eventDict["event"] = _why
return (
(),
{
"_stuff": _stuff,
"_why": self._dictRenderer(logger, name, eventDict),
},
)
return self._dictRenderer(logger, name, eventDict)
|
EventAdapter
|
python
|
numpy__numpy
|
numpy/_core/tests/test_multiarray.py
|
{
"start": 224447,
"end": 243403
}
|
class ____:
"""Test tofile, fromfile, tobytes, and fromstring"""
def _create_data(self):
shape = (2, 4, 3)
rand = np.random.random
x = rand(shape) + rand(shape).astype(complex) * 1j
x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
return x
@pytest.fixture(params=["string", "path_obj"])
def param_filename(self, request):
        # This fixture returns string or path_obj
        # so that every test doesn't need to have the
        # parametrize marker.
return request.param
def test_nofile(self):
# this should probably be supported as a file
# but for now test for proper errors
b = io.BytesIO()
assert_raises(OSError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(OSError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_fromstring_count0(self):
d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
assert d.shape == (0,)
def test_empty_files_text(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
with open(tmp_filename, 'w') as f:
pass
y = np.fromfile(tmp_filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_binary(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
with open(tmp_filename, 'wb') as f:
pass
y = np.fromfile(tmp_filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
with open(tmp_filename, 'wb') as f:
x.tofile(f)
# NB. doesn't work with flush+seek, due to use of C stdio
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
x.tofile(tmp_filename)
y = np.fromfile(tmp_filename, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_roundtrip_dump_pathlib(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
p = pathlib.Path(tmp_filename)
x.dump(p)
y = np.load(p, allow_pickle=True)
assert_array_equal(y, x)
def test_roundtrip_binary_str(self):
x = self._create_data()
s = x.tobytes()
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flat)
s = x.tobytes('F')
y = np.frombuffer(s, dtype=x.dtype)
assert_array_equal(y, x.flatten('F'))
def test_roundtrip_str(self):
x = self._create_data()
x = x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_equal(x[~nan_mask], y[~nan_mask])
def test_roundtrip_repr(self):
x = self._create_data()
x = x.real.ravel()
s = "@".join(repr(x)[11:-1] for x in x)
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unseekable_fromfile(self, tmp_path, param_filename):
# gh-6246
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
x.tofile(tmp_filename)
def fail(*args, **kwargs):
raise OSError('Can not tell or seek')
with open(tmp_filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
assert_raises(OSError, np.fromfile, f, dtype=x.dtype)
def test_io_open_unbuffered_fromfile(self, tmp_path, param_filename):
# gh-6632
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
x.tofile(tmp_filename)
with open(tmp_filename, 'rb', buffering=0) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_largish_file(self, tmp_path, param_filename):
# check the fallocate path on files > 16MB
tmp_filename = normalize_filename(tmp_path, param_filename)
d = np.zeros(4 * 1024 ** 2)
d.tofile(tmp_filename)
assert_equal(os.path.getsize(tmp_filename), d.nbytes)
assert_array_equal(d, np.fromfile(tmp_filename))
# check offset
with open(tmp_filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
# check append mode (gh-8329)
open(tmp_filename, "w").close() # delete file contents
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(tmp_filename))
with open(tmp_filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
def test_io_open_buffered_fromfile(self, tmp_path, param_filename):
# gh-6632
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
x.tofile(tmp_filename)
with open(tmp_filename, 'rb', buffering=-1) as f:
y = np.fromfile(f, dtype=x.dtype)
assert_array_equal(y, x.flat)
def test_file_position_after_fromfile(self, tmp_path, param_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE // 8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE * 8]
tmp_filename = normalize_filename(tmp_path, param_filename)
for size in sizes:
with open(tmp_filename, 'wb') as f:
f.seek(size - 1)
f.write(b'\0')
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
with open(tmp_filename, mode) as f:
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self, tmp_path, param_filename):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE // 8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE * 8]
tmp_filename = normalize_filename(tmp_path, param_filename)
for size in sizes:
err_msg = "%d" % (size,)
with open(tmp_filename, 'wb') as f:
f.seek(size - 1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
with open(tmp_filename, 'r+b') as f:
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
assert_equal(pos, 10, err_msg=err_msg)
def test_load_object_array_fromfile(self, tmp_path, param_filename):
# gh-12300
tmp_filename = normalize_filename(tmp_path, param_filename)
with open(tmp_filename, 'w') as f:
# Ensure we have a file with consistent contents
pass
with open(tmp_filename, 'rb') as f:
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, f, dtype=object)
assert_raises_regex(ValueError, "Cannot read into object array",
np.fromfile, tmp_filename, dtype=object)
def test_fromfile_offset(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
with open(tmp_filename, 'wb') as f:
x.tofile(f)
with open(tmp_filename, 'rb') as f:
y = np.fromfile(f, dtype=x.dtype, offset=0)
assert_array_equal(y, x.flat)
with open(tmp_filename, 'rb') as f:
count_items = len(x.flat) // 8
offset_items = len(x.flat) // 4
offset_bytes = x.dtype.itemsize * offset_items
y = np.fromfile(
f, dtype=x.dtype, count=count_items, offset=offset_bytes
)
assert_array_equal(
y, x.flat[offset_items:offset_items + count_items]
)
# subsequent seeks should stack
offset_bytes = x.dtype.itemsize
z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
assert_array_equal(z, x.flat[offset_items + count_items + 1:])
with open(tmp_filename, 'wb') as f:
x.tofile(f, sep=",")
with open(tmp_filename, 'rb') as f:
assert_raises_regex(
TypeError,
"'offset' argument only permitted for binary files",
np.fromfile, tmp_filename, dtype=x.dtype,
sep=",", offset=1)
@pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
def test_fromfile_bad_dup(self, tmp_path, param_filename, monkeypatch):
def dup_str(fd):
return 'abc'
def dup_bigint(fd):
return 2**68
tmp_filename = normalize_filename(tmp_path, param_filename)
x = self._create_data()
with open(tmp_filename, 'wb') as f:
x.tofile(f)
for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
monkeypatch.setattr(os, "dup", dup)
assert_raises(exc, np.fromfile, f)
def _check_from(self, s, value, filename, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
else:
y = np.fromstring(s, **kw)
assert_array_equal(y, value)
with open(filename, 'wb') as f:
f.write(s)
y = np.fromfile(filename, **kw)
assert_array_equal(y, value)
@pytest.fixture(params=["period", "comma"])
def decimal_sep_localization(self, request):
"""
Including this fixture in a test will automatically
execute it with both types of decimal separator.
So::
def test_decimal(decimal_sep_localization):
pass
is equivalent to the following two tests::
def test_decimal_period_separator():
pass
def test_decimal_comma_separator():
with CommaDecimalPointLocale():
pass
"""
if request.param == "period":
yield
elif request.param == "comma":
with CommaDecimalPointLocale():
yield
else:
assert False, request.param
def test_nan(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
tmp_filename,
sep=' ')
def test_inf(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
tmp_filename,
sep=' ')
def test_numbers(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133],
tmp_filename,
sep=' ')
def test_binary(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
tmp_filename,
dtype='<f4')
def test_string(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, sep=',')
def test_counted_string(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=4, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3.], tmp_filename, count=3, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, count=-1, sep=',')
def test_string_with_ws(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'1 2 3 4 ', [1, 2, 3, 4], tmp_filename, dtype=int, sep=' ')
def test_counted_string_with_ws(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'1 2 3 4 ', [1, 2, 3], tmp_filename, count=3, dtype=int,
sep=' ')
def test_ascii(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'1 , 2 , 3 , 4', [1., 2., 3., 4.], tmp_filename, sep=',')
self._check_from(
b'1,2,3,4', [1., 2., 3., 4.], tmp_filename, dtype=float, sep=',')
def test_malformed(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
with assert_raises(ValueError):
self._check_from(
b'1.234 1,234', [1.234, 1.], tmp_filename, sep=' ')
def test_long_sep(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
self._check_from(
b'1_x_3_x_4_x_5', [1, 3, 4, 5], tmp_filename, sep='_x_')
def test_dtype(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from(b'1,2,3,4', v, tmp_filename, sep=',', dtype=np.int_)
def test_dtype_bool(self, tmp_path, param_filename):
# can't use _check_from because fromstring can't handle True/False
tmp_filename = normalize_filename(tmp_path, param_filename)
v = np.array([True, False, True, False], dtype=np.bool)
s = b'1,0,-2.3,0'
with open(tmp_filename, 'wb') as f:
f.write(s)
y = np.fromfile(tmp_filename, sep=',', dtype=np.bool)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',')
with open(tmp_filename, 'r') as f:
s = f.read()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x, y)
def test_tofile_format(self, tmp_path, param_filename, decimal_sep_localization):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = np.array([1.51, 2, 3.51, 4], dtype=float)
with open(tmp_filename, 'w') as f:
x.tofile(f, sep=',', format='%.2f')
with open(tmp_filename, 'r') as f:
s = f.read()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_tofile_cleanup(self, tmp_path, param_filename):
tmp_filename = normalize_filename(tmp_path, param_filename)
x = np.zeros((10), dtype=object)
with open(tmp_filename, 'wb') as f:
assert_raises(OSError, lambda: x.tofile(f, sep=''))
# Dup-ed file handle should be closed or remove will fail on Windows OS
os.remove(tmp_filename)
# Also make sure that we close the Python handle
assert_raises(OSError, lambda: x.tofile(tmp_filename))
os.remove(tmp_filename)
def test_fromfile_subarray_binary(self, tmp_path, param_filename):
# Test subarray dtypes which are absorbed into the shape
tmp_filename = normalize_filename(tmp_path, param_filename)
x = np.arange(24, dtype="i4").reshape(2, 3, 4)
x.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(3,4)i4")
assert_array_equal(x, res)
x_str = x.tobytes()
with pytest.raises(ValueError):
# binary fromstring raises
np.fromstring(x_str, dtype="(3,4)i4")
def test_parsing_subarray_unsupported(self, tmp_path, param_filename):
# We currently do not support parsing subarray dtypes
tmp_filename = normalize_filename(tmp_path, param_filename)
data = "12,42,13," * 50
with pytest.raises(ValueError):
expected = np.fromstring(data, dtype="(3,)i", sep=",")
with open(tmp_filename, "w") as f:
f.write(data)
with pytest.raises(ValueError):
np.fromfile(tmp_filename, dtype="(3,)i", sep=",")
def test_read_shorter_than_count_subarray(self, tmp_path, param_filename):
# Test that requesting more values does not cause any problems
# in conjunction with subarray dimensions being absorbed into the
# array dimension.
tmp_filename = normalize_filename(tmp_path, param_filename)
expected = np.arange(511 * 10, dtype="i").reshape(-1, 10)
binary = expected.tobytes()
with pytest.raises(ValueError):
np.fromstring(binary, dtype="(10,)i", count=10000)
expected.tofile(tmp_filename)
res = np.fromfile(tmp_filename, dtype="(10,)i", count=10000)
assert_array_equal(res, expected)
|
TestIO
|
python
|
PrefectHQ__prefect
|
src/prefect/server/utilities/messaging/__init__.py
|
{
"start": 2020,
"end": 3071
}
|
class ____(Publisher):
messages: list[CapturedMessage] = []
deduplicate_by: Optional[str]
def __init__(
self,
topic: str,
cache: Optional[Cache] = None,
deduplicate_by: Optional[str] = None,
) -> None:
self.topic = topic
self.cache: Cache = cache or create_cache()
self.deduplicate_by = deduplicate_by
async def __aenter__(self) -> Self:
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
pass
async def publish_data(self, data: bytes, attributes: Mapping[str, str]) -> None:
to_publish = [CapturedMessage(data, attributes)]
if self.deduplicate_by:
to_publish = await self.cache.without_duplicates(
self.deduplicate_by, to_publish
)
self.messages.extend(to_publish)
MessageHandler = Callable[[Message], Awaitable[None]]
|
CapturingPublisher
|
python
|
catalyst-team__catalyst
|
catalyst/contrib/datasets/imagewoof.py
|
{
"start": 928,
"end": 1446
}
|
class ____(ImageClassificationDataset):
"""
`Imagewoof <https://github.com/fastai/imagenette#imagewoof>`_ Dataset
    with images resized so that the shortest side is 320 px.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewoof2-320"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewoof2-320.tgz",
"0f46d997ec2264e97609196c95897a44",
)
]
__all__ = ["Imagewoof", "Imagewoof160", "Imagewoof320"]
|
Imagewoof320
|
python
|
pytorch__pytorch
|
test/distributed/test_c10d_spawn.py
|
{
"start": 806,
"end": 3033
}
|
class ____:
world_size = 2
def _test_multiprocess(self, f, shared_tensors, init_pg, n_output):
ws = self.world_size
# file store will delete the test file on destruction
file = tempfile.NamedTemporaryFile(delete=False)
ctx = mp.get_context("spawn")
c2p = ctx.Queue(2)
p2c = ctx.Queue(2)
ps = []
for i in range(ws):
p = ctx.Process(
target=f, args=(i, file.name, shared_tensors, ws, init_pg, c2p, p2c)
)
p.start()
ps.append(p)
for _ in range(ws * n_output):
pid, expected, result = c2p.get()
self.assertEqual(
expected,
result,
msg=f"Expect rank {pid} to receive tensor {expected} but got {result}.",
)
for _ in range(ws):
p2c.put(0)
for p in ps:
p.join(2)
# Why classmethod? multiprocessing cannot pickle TestCase subclass when in
# spawn mode. See https://bugs.python.org/issue33884.
@classmethod
def _test_broadcast_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.broadcast(xs).wait()
c2p.put((rank, torch.zeros(2, 2), xs[0].to("cpu")))
p2c.get()
@classmethod
def _test_allreduce_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
pg.allreduce(xs, op=c10d.ReduceOp.SUM).wait()
c2p.put((rank, torch.ones(2, 2) * 2, xs[0].to("cpu")))
p2c.get()
@classmethod
def _test_allgather_process(
cls, rank, filename, shared_tensors, world_size, init_pg, c2p, p2c
):
pg = init_pg(rank, filename, world_size)
xs = [shared_tensors[rank]]
ys = [[torch.zeros_like(xs[0]) for i in range(world_size)]]
pg.allgather(ys, xs).wait()
for i in range(world_size):
c2p.put((rank, torch.ones(2, 2) * i, ys[0][i].to("cpu")))
p2c.get()
|
AbstractProcessGroupShareTensorTest
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta/modeling_xlm_roberta.py
|
{
"start": 38032,
"end": 39017
}
|
class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
XLM-RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
|
XLMRobertaClassificationHead
|
python
|
pytorch__pytorch
|
test/profiler/test_profiler_tree.py
|
{
"start": 8565,
"end": 48935
}
|
class ____(TestCase):
def assertTreesMatch(self, actual: str, expected: str, allow_failure: bool = False):
# Warning: Here be dragons
# Different platforms will have subtly different behavior for Python
# tracing. Observed differences include:
# 1) Windows symbolicates names differently from posix
# 2) The profile callback for c_call does not fire for Tensor.__pow__
# on certain platforms. This is not caused by the function tracer,
# but by cPython itself.
#
# The purpose of these unit tests is to ensure that the profiler is
# doing reasonable things. When these platform dependent variations occur
# simply coerce them into a platform independent form. If you made a
# change in the codebase which changes the trace produced, simply use
# EXPECTTEST_ACCEPT=1 to update the tests to reflect the new structure.
# expecttest will not show the diff view if `len(actual) < len(expected)`
if not expecttest.ACCEPT:
actual = actual.ljust(len(expected))
self.maxDiff = None
replicate = getattr(self, "tree_replicate", None)
self.assertIsNotNone(
replicate, "Please annotate test with `@ProfilerTree.test`"
)
# The profiler should produce deterministic results and should return
# to a clean state after each run. As a result, only the first
# replicate is allowed to update `expected`. If subsequent runs do not
# match it is a bug in the profiler.
if replicate:
self.assertEqual(actual, expected)
else:
try:
self.assertExpectedInline(actual, expected, skip=1)
except AssertionError as e:
if allow_failure:
self.tree_replicate = None
msg = traceback.format_exception_only(type(e), e)[0]
print(msg.split("AssertionError:")[-1])
else:
raise
# TODO: Add logic for CUDA version of test
@ProfilerTree.test
@unittest.skipIf(
torch.cuda.is_available() or torch.xpu.is_available(),
"Test not working for CUDA and XPU",
)
def test_profiler_experimental_tree(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile() as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = (y - z) ** 2
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::add
aten::ones
aten::empty
aten::fill_
aten::sub
aten::pow
aten::result_type
aten::to
aten::ones_like
aten::empty_like
aten::empty_strided
aten::fill_
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
aten::copy_
aten::mul
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
aten::copy_
aten::mul
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach""",
)
# TODO: Add logic for CUDA version of test
@ProfilerTree.test
@unittest.skipIf(
torch.cuda.is_available() or torch.xpu.is_available(),
"Test not working for CUDA and XPU",
)
def test_profiler_experimental_tree_with_record_function(self):
with torch.profiler.profile() as p:
with torch.autograd.profiler.record_function("Top level Annotation"):
with torch.autograd.profiler.record_function("First Annotation"):
x = torch.ones((1,), requires_grad=True)
# Check that we correctly handle the case when a user
# annotation does not call `__exit__`.
_ = torch.autograd.profiler.record_function(
"Second Annotation"
).__enter__()
y = x + 1
with torch.autograd.profiler.record_function("Third Annotation"):
y.backward()
# NB: The `aten::zeros` before the record function annotations are due to
# `at::cpp_custom_type_hack`. When we switch to `torch::CustomClassHolder`
# they will disappear.
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
Top level Annotation
First Annotation
aten::ones
aten::empty
aten::fill_
Second Annotation
aten::add
aten::to
aten::_to_copy
aten::empty_strided
aten::copy_
Third Annotation
aten::ones_like
aten::empty_like
aten::empty_strided
aten::fill_
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
aten::copy_""",
)
# TODO: Add logic for CUDA version of test
@ProfilerTree.test
@unittest.skipIf(
torch.cuda.is_available() or torch.xpu.is_available(),
"Test not working for CUDA and XPU",
)
def test_profiler_experimental_tree_with_memory(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile(profile_memory=True) as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = (y - z) ** 2
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::add
[memory]
aten::ones
aten::empty
[memory]
aten::fill_
aten::sub
[memory]
aten::pow
aten::result_type
aten::to
[memory]
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
aten::mul
[memory]
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
[memory]
aten::copy_
[memory]
[memory]
[memory]
aten::mul
[memory]
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
[memory]
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]""",
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_memory_and_stack(self):
t1, t2 = torch.ones(1, requires_grad=True), torch.ones(1, requires_grad=True)
with torch.profiler.profile(with_stack=True, profile_memory=True) as p:
z = torch.add(t1, t2)
y = torch.ones(1)
loss = torch.pow(y - z, 2)
loss.backward()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_memory_and_stack
torch/profiler/profiler.py(...): __enter__
...
<built-in method add of type object at 0xXXXXXXXXXXXX>
aten::add
[memory]
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
[memory]
aten::fill_
aten::sub
[memory]
<built-in method pow of type object at 0xXXXXXXXXXXXX>
aten::pow
aten::result_type
aten::to
[memory]
torch/_tensor.py(...): backward
<built-in function _has_torch_function_unary>
torch/autograd/__init__.py(...): backward
<built-in method _are_functorch_transforms_active of PyCapsule object at 0xXXXXXXXXXXXX>
<built-in function isinstance>
<built-in function isinstance>
<built-in function len>
torch/autograd/__init__.py(...): _tensor_or_tensors_to_tuple
torch/autograd/__init__.py(...): _make_grads
typing.py(...): inner
typing.py(...): __hash__
<built-in function hash>
typing.py(...): cast
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in method numel of Tensor object at 0xXXXXXXXXXXXX>
<built-in function isinstance>
<built-in function isinstance>
<built-in method ones_like of type object at 0xXXXXXXXXXXXX>
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/autograd/graph.py(...): _engine_run_backward
logging/__init__.py(...): getEffectiveLevel
<built-in method run_backward of torch._C._EngineBase object at 0xXXXXXXXXXXXX>
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
aten::mul
[memory]
aten::mul
aten::to
aten::_to_copy
aten::empty_strided
[memory]
aten::copy_
[memory]
[memory]
[memory]
aten::mul
[memory]
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: SubBackward0
SubBackward0
aten::neg
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::new_empty_strided
aten::empty_strided
[memory]
aten::copy_
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
...""",
)
@skipIfTorchDynamo("too slow")
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_modules(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = [
torch.nn.ReLU(),
torch.nn.Linear(1, 1),
torch.nn.ReLU(),
]
def forward(self, x: torch.Tensor) -> torch.Tensor:
for l in self.layers:
x = l(x)
return x
model = MyModule()
with torch.profiler.profile(with_stack=True) as p:
for _ in range(2):
model(torch.ones((1,)))
self.maxDiff = None
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_stack_and_modules
torch/profiler/profiler.py(...): __enter__
...
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
aten::fill_
nn.Module: MyModule_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): forward
nn.Module: ReLU_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
nn.Module: Linear_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::view
aten::t
aten::transpose
aten::as_strided
aten::addmm
aten::expand
aten::as_strided
aten::copy_
aten::resolve_conj
aten::resolve_conj
aten::resolve_conj
aten::view
nn.Module: ReLU_1
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
aten::fill_
nn.Module: MyModule_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): forward
nn.Module: ReLU_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
nn.Module: Linear_0
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::view
aten::t
aten::transpose
aten::as_strided
aten::addmm
aten::expand
aten::as_strided
aten::copy_
aten::resolve_conj
aten::resolve_conj
aten::resolve_conj
aten::view
nn.Module: ReLU_1
torch/nn/modules/module.py(...): _call_impl
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/activation.py(...): forward
torch/nn/functional.py(...): relu
<built-in function _has_torch_function_unary>
<built-in method relu of type object at 0xXXXXXXXXXXXX>
aten::relu
aten::clamp_min
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
...""",
)
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_torch_function(self):
x = TorchFunctionTensor(torch.ones((1,)))
y = torch.ones((1,))
# There's some lazy initialization in __torch_function__. If we don't
        # run this, the first run won't match the replicates.
torch.add(x, y)
with torch.profiler.profile(with_stack=True) as p:
torch.add(x, y)
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_stack_and_torch_function
torch/profiler/profiler.py(...): __enter__
...
<built-in method add of type object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): __torch_function__
torch/_tensor.py(...): __torch_function__
torch/_tensor.py(...): <genexpr>
<built-in function issubclass>
torch/_tensor.py(...): <genexpr>
<built-in method add of type object at 0xXXXXXXXXXXXX>
aten::add
torch/_tensor.py(...): _convert
<built-in function isinstance>
<built-in function isinstance>
<built-in method as_subclass of Tensor object at 0xXXXXXXXXXXXX>
aten::alias
<built-in function isinstance>
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
...""",
)
@skipIfTorchDynamo("segfaults in 3.13+")
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@ProfilerTree.test
def test_profiler_experimental_tree_with_stack_and_torch_dispatch(self):
x = TorchDispatchTensor(torch.ones((1,)))
y = torch.ones((1,))
# warmup round
with torch.profiler.profile(with_stack=True):
x + y
with torch.profiler.profile(with_stack=True) as p:
x + y
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_with_stack_and_torch_dispatch
torch/profiler/profiler.py(...): __enter__
...
aten::add
PythonSubclass
torch/_library/simple_registry.py(...): find_torch_dispatch_rule
torch/_library/simple_registry.py(...): find
<built-in method get of dict object at 0xXXXXXXXXXXXX>
torch/_library/simple_registry.py(...): find
<built-in method get of dict object at 0xXXXXXXXXXXXX>
test_profiler_tree.py(...): __torch_dispatch__
torch/utils/_pytree.py(...): tree_map
...
torch/utils/_pytree.py(...): tree_map
...
torch/_ops.py(...): __call__
<built-in method of PyCapsule object at 0xXXXXXXXXXXXX>
aten::add
torch/utils/_pytree.py(...): tree_map
...
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
...""",
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda(self):
with torch.profiler.profile(profile_memory=True) as p:
weight = torch.ones(1, device="cuda", requires_grad=True)
x = torch.ones(1, device="cuda")
y = torch.add(weight, x)
loss = torch.pow(y, 2)
loss.backward()
torch.optim.SGD([weight], lr=0.01, momentum=0.9).step()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::add
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::pow
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::result_type
aten::to
[memory]
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
autograd::engine::evaluate_function: PowBackward0
PowBackward0
aten::pow
aten::result_type
aten::to
[memory]
aten::copy_
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
aten::mul
[memory]
aten::mul
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::mul
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
[memory]
autograd::engine::evaluate_function: AddBackward0
AddBackward0
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::detach
detach
[memory]
aten::zeros
aten::zeros
aten::empty
[memory]
aten::zero_
Optimizer.step#SGD.step
aten::empty
[memory]
[memory]
[memory]
aten::clone
aten::empty_strided
[memory]
aten::copy_
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
aten::detach
detach
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]""", # noqa: B950
allow_failure=ALLOW_CUDA_FAILURE,
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda_with_stream(self):
streams = [torch.cuda.Stream() for _ in range(3)]
results = []
with torch.profiler.profile(profile_memory=True) as p:
x = torch.ones((4, 4), device="cuda")
for stream in streams:
with torch.cuda.stream(stream):
results.append(torch.tanh(x) - x)
del results
for s in streams:
torch.cuda.current_stream().wait_stream(s)
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
aten::tanh
cudaMalloc
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
aten::sub
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]""",
allow_failure=ALLOW_CUDA_FAILURE,
)
@unittest.skip("https://github.com/pytorch/pytorch/issues/83606")
@unittest.skipIf(
TEST_WITH_CROSSREF, "crossref intercepts calls and changes the callsite."
)
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
@ProfilerTree.test
def test_profiler_experimental_tree_cuda_detailed(self):
# Do lazy imports ahead of time to avoid it showing up in the tree
import torch.nested._internal.nested_tensor
model = torch.nn.modules.Linear(1, 1, device="cuda")
model.train()
opt = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
def step():
x = torch.ones((1, 1), device="cuda")
loss = model(x)
loss.backward()
opt.step()
# Warmup
for _ in range(3):
step()
with torch.profiler.profile(profile_memory=True, with_stack=True) as p:
step()
self.assertTreesMatch(
ProfilerTree.format(p.profiler, 12),
"""\
test_profiler_tree.py(...): test_profiler_experimental_tree_cuda_detailed
torch/profiler/profiler.py(...): __enter__
...
test_profiler_tree.py(...): step
<built-in method ones of type object at 0xXXXXXXXXXXXX>
aten::ones
aten::empty
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
nn.Module: Linear_0
<built-in method _get_tracing_state of PyCapsule object at 0xXXXXXXXXXXXX>
torch/nn/modules/linear.py(...): forward
torch/nn/modules/module.py(...): __getattr__
torch/nn/modules/module.py(...): __getattr__
<built-in function linear>
aten::linear
aten::t
aten::transpose
aten::as_strided
aten::addmm
cudaMemcpyAsync
Memcpy DtoD (Device -> Device)
cudaLaunchKernel
void ..._kernel<...>(...)
[memory]
aten::expand
aten::as_strided
torch/_tensor.py(...): backward
<built-in function _has_torch_function_unary>
torch/autograd/__init__.py(...): backward
<built-in function isinstance>
<built-in function isinstance>
<built-in function len>
torch/autograd/__init__.py(...): _tensor_or_tensors_to_tuple
torch/autograd/__init__.py(...): _make_grads
typing.py(...): inner
typing.py(...): __hash__
<built-in function hash>
typing.py(...): cast
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in function isinstance>
<built-in method numel of Tensor object at 0xXXXXXXXXXXXX>
<built-in function isinstance>
<built-in function isinstance>
<built-in method ones_like of type object at 0xXXXXXXXXXXXX>
aten::ones_like
aten::empty_like
aten::empty_strided
[memory]
aten::fill_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method run_backward of torch._C._EngineBase object at 0xXXXXXXXXXXXX>
autograd::engine::evaluate_function: AddmmBackward0
AddmmBackward0
aten::t
aten::transpose
aten::as_strided
aten::mm
cudaLaunchKernel
void ..._kernel<...>(...)
[memory]
aten::t
aten::transpose
aten::as_strided
aten::sum
aten::sum
cudaLaunchKernel
void at::native::reduce_kernel<...>(...)
[memory]
aten::view
aten::view
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
autograd::engine::evaluate_function: TBackward0
TBackward0
aten::t
aten::transpose
aten::as_strided
autograd::engine::evaluate_function: torch::autograd::AccumulateGrad
torch::autograd::AccumulateGrad
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
[memory]
torch/optim/optimizer.py(...): wrapper
<built-in method format of str object at 0xXXXXXXXXXXXX>
torch/autograd/profiler.py(...): __init__
<built-in method zeros of type object at 0xXXXXXXXXXXXX>
aten::zeros
aten::zeros
aten::empty
[memory]
aten::zero_
torch/autograd/profiler.py(...): __enter__
torch/_ops.py(...): __call__
<built-in method _record_function_enter of PyCapsule object at 0xXXXXXXXXXXXX>
Optimizer.step#SGD.step
aten::empty
[memory]
[memory]
[memory]
torch/optim/optimizer.py(...): _use_grad
<built-in function is_grad_enabled>
torch/autograd/grad_mode.py(...): __init__
<built-in function is_grad_enabled>
<built-in function _set_grad_enabled>
torch/optim/sgd.py(...): step
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/_tensor.py(...): __hash__
<built-in function id>
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/_tensor.py(...): __hash__
<built-in function id>
<built-in method append of list object at 0xXXXXXXXXXXXX>
torch/optim/sgd.py(...): sgd
torch/optim/sgd.py(...): _single_tensor_sgd
<built-in method mul_ of Tensor object at 0xXXXXXXXXXXXX>
[memory]
aten::mul_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method mul_ of Tensor object at 0xXXXXXXXXXXXX>
[memory]
aten::mul_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
[memory]
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
<built-in method add_ of Tensor object at 0xXXXXXXXXXXXX>
aten::add_
cudaLaunchKernel
void at::native::vectorized_elementwise_kernel<...>(...)
torch/_tensor.py(...): __hash__
<built-in function id>
torch/_tensor.py(...): __hash__
<built-in function id>
torch/autograd/grad_mode.py(...): __init__
<built-in function is_grad_enabled>
<built-in function _set_grad_enabled>
torch/autograd/profiler.py(...): __exit__
torch/_ops.py(...): __call__
<built-in method _record_function_exit of PyCapsule object at 0xXXXXXXXXXXXX>
[memory]
[memory]
torch/profiler/profiler.py(...): __exit__
torch/profiler/profiler.py(...): stop
torch/profiler/profiler.py(...): _transit_action
<built-in method get of dict object at 0xXXXXXXXXXXXX>
enum.py(...): __hash__
<built-in function hash>
...""", # noqa: B950
allow_failure=ALLOW_CUDA_FAILURE,
)
if __name__ == "__main__":
run_tests()
|
TestProfilerTree
|
python
|
huggingface__transformers
|
src/transformers/models/pegasus/modeling_pegasus.py
|
{
"start": 55347,
"end": 55800
}
|
class ____(PegasusPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the [`EncoderDecoderModel`] framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = PegasusDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
|
PegasusDecoderWrapper
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/controller/state.py
|
{
"start": 4673,
"end": 4920
}
|
class ____(TrainControllerState):
def __init__(
self,
scaling_decision: ScalingDecision,
):
super().__init__(state_type=TrainControllerStateType.RESIZING)
self.scaling_decision = scaling_decision
|
ResizingState
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-aws/dagster_aws/ecs/launcher.py
|
{
"start": 2338,
"end": 39234
}
|
class ____(RunLauncher[T_DagsterInstance], ConfigurableClass):
"""RunLauncher that starts a task in ECS for each Dagster job run.
Args:
inst_data (Optional[ConfigurableClassData]): If not provided, defaults to None.
task_definition: If not provided, defaults to None.
container_name (str): If not provided, defaults to "run".
secrets (Optional[list[str]]): If not provided, defaults to None.
secrets_tag (str): If not provided, defaults to "dagster".
env_vars (Optional[Sequence[str]]): If not provided, defaults to None.
include_sidecars (bool): If not provided, defaults to False.
use_current_ecs_task_config (bool): If not provided, defaults to True.
run_task_kwargs (Optional[Mapping[str, Any]]): If not provided, defaults to None.
run_resources (Optional[dict[str, Any]]): If not provided, defaults to None.
run_ecs_tags (Optional[list[dict[str, Optional[str]]]]): If not provided, defaults to None.
propagate_tags (Optional[dict[str, Any]]): If not provided, defaults to None.
task_definition_prefix (str): If not provided, defaults to "run".
"""
def __init__(
self,
inst_data: Optional[ConfigurableClassData] = None,
task_definition=None,
container_name: str = "run",
secrets: Optional[list[str]] = None,
secrets_tag: str = "dagster",
env_vars: Optional[Sequence[str]] = None,
include_sidecars: bool = False,
use_current_ecs_task_config: bool = True,
run_task_kwargs: Optional[Mapping[str, Any]] = None,
run_resources: Optional[dict[str, Any]] = None,
run_ecs_tags: Optional[list[dict[str, Optional[str]]]] = None,
propagate_tags: Optional[dict[str, Any]] = None,
task_definition_prefix: str = "run",
):
self._inst_data = inst_data
boto_client_config = self.get_boto_client_config()
self.ecs = boto3.client("ecs", config=boto_client_config)
self.ec2 = boto3.resource("ec2")
self.secrets_manager = boto3.client("secretsmanager", config=boto_client_config)
self.logs = boto3.client("logs", config=boto_client_config)
self._task_definition_prefix = task_definition_prefix
check.invariant(
len(self._task_definition_prefix) <= 16,
"Task definition prefix must be no more than 16 characters",
)
self.task_definition = None
self.task_definition_dict = {}
if isinstance(task_definition, str):
self.task_definition = task_definition
elif task_definition and "env" in task_definition:
check.invariant(
len(task_definition) == 1,
"If `task_definition` is set to a dictionary with `env`, `env` must be the only"
" key.",
)
env_var = task_definition["env"]
self.task_definition = os.getenv(env_var)
if not self.task_definition:
raise Exception(
f"You have attempted to fetch the environment variable {env_var} which is not"
" set."
)
else:
self.task_definition_dict = task_definition or {}
self.container_name = container_name
self.secrets = check.opt_list_param(secrets, "secrets")
self.env_vars = check.opt_list_param(env_vars, "env_vars")
if self.secrets and all(isinstance(secret, str) for secret in self.secrets):
warnings.warn(
"Setting secrets as a list of ARNs is deprecated. "
"Secrets should instead follow the same structure as the ECS API: "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html",
DeprecationWarning,
)
self.secrets = [
{"name": name, "valueFrom": value_from}
for name, value_from in get_secrets_from_arns(
self.secrets_manager, self.secrets
).items()
]
self.secrets_tags = [secrets_tag] if secrets_tag else []
self.include_sidecars = include_sidecars
if self.task_definition:
task_definition = self.ecs.describe_task_definition(taskDefinition=self.task_definition)
container_names = [
container.get("name")
for container in task_definition["taskDefinition"]["containerDefinitions"]
]
check.invariant(
container_name in container_names,
f"Cannot override container '{container_name}' in task definition "
f"'{self.task_definition}' because the container is not defined.",
)
self.task_definition = task_definition["taskDefinition"]["taskDefinitionArn"]
self.use_current_ecs_task_config = check.opt_bool_param(
use_current_ecs_task_config, "use_current_ecs_task_config"
)
self.run_task_kwargs = check.opt_mapping_param(run_task_kwargs, "run_task_kwargs")
if run_task_kwargs:
check.invariant(
"taskDefinition" not in run_task_kwargs,
"Use the `taskDefinition` config field to pass in a task definition to run.",
)
check.invariant(
"overrides" not in run_task_kwargs,
"Task overrides are set by the run launcher and cannot be set in run_task_kwargs.",
)
expected_keys = [
key for key in self.ecs.meta.service_model.shape_for("RunTaskRequest").members
]
for key in run_task_kwargs:
check.invariant(
key in expected_keys, f"Found an unexpected key {key} in run_task_kwargs"
)
self.run_resources = check.opt_mapping_param(run_resources, "run_resources")
self.run_ecs_tags = check.opt_sequence_param(run_ecs_tags, "run_ecs_tags")
self.propagate_tags = check.opt_dict_param(
propagate_tags,
"propagate_tags",
key_type=str,
value_type=list,
)
if self.propagate_tags:
check.invariant(
list(self.propagate_tags.keys()) == ["allow_list"],
"Only allow_list can be set for the propagate_tags config property",
)
if self.propagate_tags.get("allow_list"):
# These tags are potentially very large and can cause ECS to fail to start a task. They also don't seem particularly useful in a task-tagging context
check.invariant(
TAGS_TO_EXCLUDE_FROM_PROPAGATION - set(self.propagate_tags.get("allow_list", []))
== TAGS_TO_EXCLUDE_FROM_PROPAGATION,
f"Cannot include {TAGS_TO_EXCLUDE_FROM_PROPAGATION} in allow_list",
)
self._current_task_metadata = None
self._current_task = None
def get_boto_client_config(self) -> Optional["Config"]:
return None
@property
def inst_data(self):
return self._inst_data
@property
def task_role_arn(self) -> Optional[str]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("task_role_arn")
@property
def execution_role_arn(self) -> Optional[str]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("execution_role_arn")
@property
def runtime_platform(self) -> Optional[Mapping[str, Any]]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("runtime_platform")
@property
def mount_points(self) -> Optional[Sequence[Mapping[str, Any]]]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("mount_points")
@property
def volumes(self) -> Optional[Sequence[Mapping[str, Any]]]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("volumes")
@property
def repository_credentials(self) -> Optional[str]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("repository_credentials")
@property
def run_sidecar_containers(self) -> Optional[Sequence[Mapping[str, Any]]]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("sidecar_containers")
@property
def linux_parameters(self) -> Optional[Mapping[str, Any]]:
if not self.task_definition_dict:
return None
return self.task_definition_dict.get("linux_parameters")
@classmethod
def config_type(cls):
return {
"task_definition": Field(
ScalarUnion(
scalar_type=str,
non_scalar_schema={
"log_group": Field(StringSource, is_required=False),
"sidecar_containers": Field(Array(Permissive({})), is_required=False),
"requires_compatibilities": Field(Array(str), is_required=False),
"env": Field(
str,
is_required=False,
description=(
"Backwards-compatibility for when task_definition was a"
" StringSource.Can be used to source the task_definition scalar"
" from an environment variable."
),
),
"linux_parameters": Field(Permissive(), is_required=False),
**SHARED_TASK_DEFINITION_FIELDS,
},
),
is_required=False,
description=(
"Either the short name of an existing task definition to use when launching new"
" tasks, or a dictionary configuration to use when creating a task definition"
" for the run.If neither is provided, the task definition will be created based"
" on the current task's task definition."
),
),
"container_name": Field(
StringSource,
is_required=False,
default_value="run",
description=(
"The container name to use when launching new tasks. Defaults to 'run'."
),
),
"secrets": Field(
Array(
ScalarUnion(
scalar_type=str,
non_scalar_schema={"name": StringSource, "valueFrom": StringSource},
)
),
is_required=False,
description=(
"An array of AWS Secrets Manager secrets. These secrets will "
"be mounted as environment variables in the container. See "
"https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Secret.html."
),
),
"secrets_tag": Field(
Noneable(StringSource),
is_required=False,
default_value="dagster",
description=(
"AWS Secrets Manager secrets with this tag will be mounted as "
"environment variables in the container. Defaults to 'dagster'."
),
),
"include_sidecars": Field(
bool,
is_required=False,
default_value=False,
description=(
"Whether each run should use the same sidecars as the task that launches it. "
"Defaults to False."
),
),
"use_current_ecs_task_config": Field(
bool,
is_required=False,
default_value=True,
description=(
"Whether to use the run launcher's current ECS task in order to determine "
"the cluster and networking configuration for the launched task. Defaults to "
"True. Should only be called if the run launcher is running within an ECS "
"task."
),
),
"run_task_kwargs": Field(
Permissive(
{
"cluster": Field(
StringSource,
is_required=False,
description="Name of the ECS cluster to launch ECS tasks in.",
),
}
),
is_required=False,
description=(
"Additional arguments to include while running the task. See"
" https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task"
" for the available parameters. The overrides and taskDefinition arguments will"
" always be set by the run launcher."
),
),
"propagate_tags": Field(
Shape(
{
"allow_list": Field(
Array(str),
is_required=True,
description="List of specific tag keys from the Dagster run which should be propagated to the ECS task.",
),
}
),
is_required=False,
description="Configuration for propagating tags from Dagster runs to ECS tasks. Currently only exposes an allow list.",
),
"task_definition_prefix": Field(
StringSource,
is_required=False,
default_value="run",
description=(
"A prefix that is applied to all task definitions created by the EcsRunLauncher. Defaults to 'run'."
),
),
**SHARED_ECS_SCHEMA,
}
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data, **config_value)
def _set_run_tags(self, run_id: str, cluster: str, task_arn: str):
tags = {
"ecs/task_arn": task_arn,
"ecs/cluster": cluster,
RUN_WORKER_ID_TAG: str(uuid.uuid4().hex)[0:6],
}
self._instance.add_run_tags(run_id, tags)
def build_ecs_tags_for_run_task(self, run: DagsterRun, container_context: EcsContainerContext):
run_id_tag = "dagster/run_id"
if any(tag["key"] == run_id_tag for tag in container_context.run_ecs_tags):
raise Exception(f"Cannot override system ECS tag: {run_id_tag}")
tags_to_propagate = self._get_tags_to_propagate_to_ecs_task(run)
return [
{"key": run_id_tag, "value": run.run_id},
{"key": "dagster/job_name", "value": run.job_name},
*container_context.run_ecs_tags,
*tags_to_propagate,
]
def _get_tags_to_propagate_to_ecs_task(self, run: DagsterRun) -> list[dict[str, str]]:
# These tags often contain * or + characters which are not allowed in ECS tags.
# They don't seem super useful from an observability perspective, so are excluded from the ECS tags
tags_to_propagate = []
if allow_list := (self.propagate_tags or {}).get("allow_list", []):
# Add contextual Dagster run tags to ECS tags
tags_to_propagate = [
{"key": k, "value": v}
for k, v in run.tags.items()
if k in allow_list and k not in TAGS_TO_EXCLUDE_FROM_PROPAGATION
]
return tags_to_propagate
def _get_run_tags(self, run_id: str) -> Tags:
run = self._instance.get_run_by_id(run_id)
tags = run.tags if run else {}
arn = tags.get("ecs/task_arn")
cluster = tags.get("ecs/cluster")
cpu = tags.get("ecs/cpu")
memory = tags.get("ecs/memory")
return Tags(arn, cluster, cpu, memory)
def _get_command_args(self, run_args: ExecuteRunArgs, context: LaunchRunContext):
return run_args.get_command_args()
def get_image_for_run(self, context: LaunchRunContext) -> Optional[str]:
"""Child classes can override this method to determine the image to use for a run. This is considered a public API."""
run = context.dagster_run
return (
run.job_code_origin.repository_origin.container_image
if run.job_code_origin is not None
else None
)
def _run_task(self, **run_task_kwargs):
return run_ecs_task(self.ecs, run_task_kwargs)
def launch_run(self, context: LaunchRunContext) -> None:
"""Launch a run in an ECS task."""
run = context.dagster_run
container_context = EcsContainerContext.create_for_run(run, self)
job_origin = check.not_none(context.job_code_origin)
# ECS limits overrides to 8192 characters including json formatting
# https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html
# When container_context is serialized as part of the ExecuteRunArgs, we risk
# going over this limit (for example, if many secrets have been set). This strips
# the container context off of our job origin because we don't actually need
# it to launch the run; we only needed it to create the task definition.
repository_origin = job_origin.repository_origin
stripped_repository_origin = repository_origin._replace(container_context={})
stripped_job_origin = job_origin._replace(repository_origin=stripped_repository_origin)
args = ExecuteRunArgs(
job_origin=stripped_job_origin,
run_id=run.run_id,
instance_ref=self._instance.get_ref(),
)
command = self._get_command_args(args, context)
image = self.get_image_for_run(context)
run_task_kwargs = self._run_task_kwargs(run, image, container_context)
# Set cpu or memory overrides
# https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html
cpu_and_memory_overrides = self.get_cpu_and_memory_overrides(container_context, run)
task_overrides = self._get_task_overrides(container_context, run)
container_overrides: list[dict[str, Any]] = [
{
"name": self.get_container_name(container_context),
"command": command,
# containerOverrides expects cpu/memory as integers
**{k: int(v) for k, v in cpu_and_memory_overrides.items()},
}
]
run_task_kwargs["overrides"] = {
"containerOverrides": container_overrides,
# taskOverrides expects cpu/memory as strings
**cpu_and_memory_overrides,
**task_overrides,
}
run_task_kwargs_from_run = self._get_run_task_kwargs_from_run(run)
run_task_kwargs["tags"] = [
*run_task_kwargs.get("tags", []),
*self.build_ecs_tags_for_run_task(run, container_context),
*run_task_kwargs_from_run.get("tags", []),
]
run_task_kwargs.update(run_task_kwargs_from_run)
# launchType and capacityProviderStrategy are incompatible - prefer the latter if it is set
if "launchType" in run_task_kwargs and run_task_kwargs.get("capacityProviderStrategy"):
del run_task_kwargs["launchType"]
# Remove networkConfiguration if it is set to None
if (
"networkConfiguration" in run_task_kwargs
and run_task_kwargs.get("networkConfiguration") is None
):
del run_task_kwargs["networkConfiguration"]
        # Run a task using the same network configuration as this process's task.
task = backoff(
self._run_task,
retry_on=(RetryableEcsException,),
kwargs=run_task_kwargs,
max_retries=int(
os.getenv("RUN_TASK_RETRIES", DEFAULT_RUN_TASK_RETRIES),
),
)
arn = task["taskArn"]
cluster_arn = task["clusterArn"]
self._set_run_tags(run.run_id, cluster=cluster_arn, task_arn=arn)
self.report_launch_events(run, arn, cluster_arn)
def report_launch_events(
self, run: DagsterRun, arn: Optional[str] = None, cluster: Optional[str] = None
):
# Extracted method to allow for subclasses to customize the launch reporting behavior
metadata = {}
if arn:
metadata["ECS Task ARN"] = arn
if cluster:
metadata["ECS Cluster"] = cluster
metadata["Run ID"] = run.run_id
self._instance.report_engine_event(
message="Launching run in ECS task",
dagster_run=run,
engine_event_data=EngineEventData(metadata),
cls=self.__class__,
)
def get_cpu_and_memory_overrides(
self, container_context: EcsContainerContext, run: DagsterRun
) -> Mapping[str, str]:
overrides = {}
cpu = run.tags.get("ecs/cpu", container_context.run_resources.get("cpu"))
memory = run.tags.get("ecs/memory", container_context.run_resources.get("memory"))
if cpu:
overrides["cpu"] = cpu
if memory:
overrides["memory"] = memory
return overrides
def _get_task_overrides(
self, container_context: EcsContainerContext, run: DagsterRun
) -> Mapping[str, Any]:
tag_overrides = run.tags.get("ecs/task_overrides")
overrides = {}
if tag_overrides:
overrides = json.loads(tag_overrides)
ephemeral_storage = run.tags.get(
"ecs/ephemeral_storage", container_context.run_resources.get("ephemeral_storage")
)
if ephemeral_storage:
overrides["ephemeralStorage"] = {"sizeInGiB": int(ephemeral_storage)}
return overrides
def _get_run_task_kwargs_from_run(self, run: DagsterRun) -> Mapping[str, Any]:
run_task_kwargs = run.tags.get("ecs/run_task_kwargs")
if run_task_kwargs:
result = json.loads(run_task_kwargs)
check.invariant(
not isinstance(result, list),
f"Unexpected type for `ecs/run_task_kwargs` tag: {type(result)}",
)
return result
return {}
def terminate(self, run_id: str):
tags = self._get_run_tags(run_id)
run = self._instance.get_run_by_id(run_id)
if not run or run.is_finished:
return False
self._instance.report_run_canceling(run)
if not (tags.arn and tags.cluster):
return False
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return False
status = tasks[0].get("lastStatus")
if status == "STOPPED":
return False
self.ecs.stop_task(task=tags.arn, cluster=tags.cluster)
return True
def _get_current_task_metadata(self):
if self._current_task_metadata is None:
self._current_task_metadata = get_current_ecs_task_metadata()
return self._current_task_metadata
def _get_current_task(self):
if self._current_task is None:
current_task_metadata = self._get_current_task_metadata()
self._current_task = get_current_ecs_task(
self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster
)
return self._current_task
def _get_run_task_definition_family(self, run: DagsterRun) -> str:
return get_task_definition_family(
self._task_definition_prefix, check.not_none(run.remote_job_origin)
)
def get_container_name(self, container_context: EcsContainerContext) -> str:
return container_context.container_name or self.container_name
def _run_task_kwargs(
self, run: DagsterRun, image: Optional[str], container_context: EcsContainerContext
) -> dict[str, Any]:
"""Return a dictionary of args to launch the ECS task, registering a new task
definition if needed.
"""
environment = self._environment(container_context)
environment.append({"name": "DAGSTER_RUN_JOB_NAME", "value": run.job_name})
secrets = self._secrets(container_context)
if container_context.task_definition_arn:
task_definition = container_context.task_definition_arn
elif image is not None:
family = self._get_run_task_definition_family(run)
if self.task_definition_dict or not self.use_current_ecs_task_config:
runtime_platform = container_context.runtime_platform
is_windows = container_context.runtime_platform.get(
"operatingSystemFamily"
) not in {None, "LINUX"}
default_resources = (
DEFAULT_WINDOWS_RESOURCES if is_windows else DEFAULT_LINUX_RESOURCES
)
task_definition_config = DagsterEcsTaskDefinitionConfig(
family,
image,
self.get_container_name(container_context),
command=None,
log_configuration=(
{
"logDriver": "awslogs",
"options": {
"awslogs-group": self.task_definition_dict["log_group"],
"awslogs-region": self.ecs.meta.region_name,
"awslogs-stream-prefix": family,
},
}
if self.task_definition_dict.get("log_group")
else None
),
secrets=secrets if secrets else [],
environment=environment,
execution_role_arn=container_context.execution_role_arn,
task_role_arn=container_context.task_role_arn,
sidecars=container_context.run_sidecar_containers,
requires_compatibilities=self.task_definition_dict.get(
"requires_compatibilities", []
),
cpu=container_context.run_resources.get("cpu", default_resources["cpu"]),
memory=container_context.run_resources.get(
"memory", default_resources["memory"]
),
ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),
runtime_platform=runtime_platform,
volumes=container_context.volumes,
mount_points=container_context.mount_points,
repository_credentials=container_context.repository_credentials,
linux_parameters=self.linux_parameters,
)
task_definition_dict = task_definition_config.task_definition_dict()
else:
task_definition_dict = get_task_definition_dict_from_current_task(
self.ecs,
family,
self._get_current_task(),
image,
self.get_container_name(container_context),
environment=environment,
secrets=secrets if secrets else {},
include_sidecars=self.include_sidecars,
task_role_arn=container_context.task_role_arn,
execution_role_arn=container_context.execution_role_arn,
cpu=container_context.run_resources.get("cpu"),
memory=container_context.run_resources.get("memory"),
runtime_platform=container_context.runtime_platform,
ephemeral_storage=container_context.run_resources.get("ephemeral_storage"),
volumes=container_context.volumes,
mount_points=container_context.mount_points,
additional_sidecars=container_context.run_sidecar_containers,
repository_credentials=container_context.repository_credentials,
)
task_definition_config = DagsterEcsTaskDefinitionConfig.from_task_definition_dict(
task_definition_dict,
self.get_container_name(container_context),
)
container_name = self.get_container_name(container_context)
backoff(
self._reuse_or_register_task_definition,
retry_on=(Exception,),
kwargs={
"desired_task_definition_config": task_definition_config,
"container_name": container_name,
"task_definition_dict": task_definition_dict,
},
max_retries=int(
os.getenv(
"REGISTER_TASK_DEFINITION_RETRIES", DEFAULT_REGISTER_TASK_DEFINITION_RETRIES
),
),
)
task_definition = family
else:
# since image was not set, we cannot construct a task definition automatically
raise DagsterInvariantViolationError(
"Could not determine image to use for the run. It has to be provided in the code location: https://docs.dagster.io/concepts/code-locations/workspace-files#specifying-a-docker-image"
)
if self.use_current_ecs_task_config:
current_task_metadata = get_current_ecs_task_metadata()
current_task = get_current_ecs_task(
self.ecs, current_task_metadata.task_arn, current_task_metadata.cluster
)
task_kwargs = get_task_kwargs_from_current_task(
self.ec2,
current_task_metadata.cluster,
current_task,
)
else:
task_kwargs = {}
return {**task_kwargs, **self.run_task_kwargs, "taskDefinition": task_definition}
def _reuse_task_definition(
self, desired_task_definition_config: DagsterEcsTaskDefinitionConfig, container_name: str
):
family = desired_task_definition_config.family
try:
existing_task_definition = self.ecs.describe_task_definition(taskDefinition=family)[
"taskDefinition"
]
except ClientError:
# task definition does not exist, do not reuse
return False
return task_definitions_match(
desired_task_definition_config,
existing_task_definition,
container_name=container_name,
)
def _reuse_or_register_task_definition(
self,
desired_task_definition_config: DagsterEcsTaskDefinitionConfig,
container_name: str,
task_definition_dict: dict,
):
if not self._reuse_task_definition(desired_task_definition_config, container_name):
self.ecs.register_task_definition(**task_definition_dict)
def _environment(self, container_context):
return [
{"name": key, "value": value}
for key, value in container_context.get_environment_dict().items()
]
def _secrets(self, container_context):
secrets = container_context.get_secrets_dict(self.secrets_manager)
return (
[{"name": key, "valueFrom": value} for key, value in secrets.items()] if secrets else []
)
@property
def supports_check_run_worker_health(self):
return True
@property
def include_cluster_info_in_failure_messages(self):
return True
def _is_transient_startup_failure(self, run: DagsterRun, task: dict[str, Any]):
if task.get("stoppedReason") is None:
return False
return run.status == DagsterRunStatus.STARTING and is_transient_task_stopped_reason(
task.get("stoppedReason", "")
)
def _add_eni_id_tags(self, run: DagsterRun, task: dict[str, Any]):
attachments = task.get("attachments", [])
eni_ids = {}
eni_count = 0
for attachment in attachments:
if attachment.get("type") == "ElasticNetworkInterface":
details = {d["name"]: d["value"] for d in attachment.get("details", [])}
if "networkInterfaceId" in details:
if eni_count == 0:
eni_ids[f"{HIDDEN_TAG_PREFIX}eni_id"] = details["networkInterfaceId"]
else:
eni_ids[f"{HIDDEN_TAG_PREFIX}eni_id_{eni_count}"] = details[
"networkInterfaceId"
]
eni_count += 1
self._instance.add_run_tags(run.run_id, eni_ids)
if eni_count > 0:
logging.info(f"Added {eni_count} ENI ID tags for run {run.run_id}: {eni_ids}")
else:
logging.warning(f"No ENI IDs found for run {run.run_id}")
def check_run_worker_health(self, run: DagsterRun):
run_worker_id = run.tags.get(RUN_WORKER_ID_TAG)
tags = self._get_run_tags(run.run_id)
container_context = EcsContainerContext.create_for_run(run, self)
if not (tags.arn and tags.cluster):
return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)
tasks = self.ecs.describe_tasks(tasks=[tags.arn], cluster=tags.cluster).get("tasks")
if not tasks:
return CheckRunHealthResult(WorkerStatus.UNKNOWN, "", run_worker_id=run_worker_id)
t = tasks[0]
if get_boolean_tag_value(os.getenv("DAGSTER_AWS_ENI_TAGGING_ENABLED")) and not run.tags.get(
f"{HIDDEN_TAG_PREFIX}eni_id"
):
try:
self._add_eni_id_tags(run, t)
except Exception:
logging.exception(f"Error adding ENI ID tags for run {run.run_id}")
if t.get("lastStatus") in RUNNING_STATUSES:
return CheckRunHealthResult(WorkerStatus.RUNNING, run_worker_id=run_worker_id)
elif t.get("lastStatus") in STOPPED_STATUSES:
failed_containers = []
for c in t.get("containers"):
if c.get("exitCode") != 0:
failed_containers.append(c)
if len(failed_containers) > 0:
failure_text = ""
cluster_failure_info = (
f"Task {t.get('taskArn')} failed.\n"
f"Stop code: {t.get('stopCode')}.\n"
f"Stop reason: {t.get('stoppedReason')}.\n"
)
for c in failed_containers:
exit_code = c.get("exitCode")
exit_code_msg = f" - exit code {exit_code}" if exit_code is not None else ""
cluster_failure_info += f"Container '{c.get('name')}' failed{exit_code_msg}.\n"
logging.warning(
"Run monitoring detected run worker failure: " + cluster_failure_info
)
if self.include_cluster_info_in_failure_messages:
failure_text += cluster_failure_info
logs = []
try:
logs = get_task_logs(
self.ecs,
logs_client=self.logs,
cluster=tags.cluster,
task_arn=tags.arn,
container_name=self.get_container_name(container_context),
)
except:
logging.exception(f"Error trying to get logs for failed task {tags.arn}")
if logs:
failure_text += "Run worker logs:\n" + "\n".join(logs)
return CheckRunHealthResult(
WorkerStatus.FAILED,
failure_text,
transient=self._is_transient_startup_failure(run, t),
run_worker_id=run_worker_id,
)
return CheckRunHealthResult(WorkerStatus.SUCCESS, run_worker_id=run_worker_id)
return CheckRunHealthResult(
WorkerStatus.UNKNOWN, "ECS task health status is unknown.", run_worker_id=run_worker_id
)
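# A minimal sketch (plain dicts rather than Dagster objects; `_resolve_cpu_memory` is a
# hypothetical helper, not part of the launcher) of the precedence implemented by
# get_cpu_and_memory_overrides above: an "ecs/cpu" / "ecs/memory" run tag wins over the
# container context's run_resources.
def _resolve_cpu_memory(run_tags, run_resources):
    overrides = {}
    cpu = run_tags.get("ecs/cpu", run_resources.get("cpu"))
    memory = run_tags.get("ecs/memory", run_resources.get("memory"))
    if cpu:
        overrides["cpu"] = cpu
    if memory:
        overrides["memory"] = memory
    return overrides
assert _resolve_cpu_memory({"ecs/cpu": "512"}, {"cpu": "256", "memory": "1024"}) == {"cpu": "512", "memory": "1024"}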
|
EcsRunLauncher
|
python
|
instagram__MonkeyType
|
monkeytype/tracing.py
|
{
"start": 2510,
"end": 6380
}
|
class ____(metaclass=ABCMeta):
"""Log and store/print records collected by a CallTracer."""
@abstractmethod
def log(self, trace: CallTrace) -> None:
"""Log a single call trace."""
pass
def flush(self) -> None:
"""Flush all logged traces to output / database.
Not an abstractmethod because it's OK to leave it as a no-op; for very
simple loggers it may not be necessary to batch-flush traces, and `log`
can handle everything.
"""
pass
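# A minimal concrete logger sketch (assuming the abstract class above is importable under
# its MonkeyType name, CallTraceLogger; `InMemoryTraceLogger` is hypothetical): collect
# traces in memory and rely on the inherited no-op flush().
class InMemoryTraceLogger(CallTraceLogger):
    def __init__(self) -> None:
        self.traces: list = []
    def log(self, trace) -> None:
        # Append each trace as it is observed; flush() stays a no-op.
        self.traces.append(trace)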
def get_func_in_mro(obj: Any, code: CodeType) -> Optional[Callable[..., Any]]:
"""Attempt to find a function in a side-effect free way.
This looks in obj's mro manually and does not invoke any descriptors.
"""
# FunctionType is incompatible with Callable
# https://github.com/python/typeshed/issues/1378
val = inspect.getattr_static(obj, code.co_name, None)
if val is None:
return None
if isinstance(val, (classmethod, staticmethod)):
cand = val.__func__
elif isinstance(val, property) and (val.fset is None) and (val.fdel is None):
cand = cast(Callable[..., Any], val.fget)
elif cached_property and isinstance(val, cached_property):
cand = val.func
else:
cand = cast(Callable[..., Any], val)
return _has_code(cand, code)
def _has_code(
func: Optional[Callable[..., Any]], code: CodeType
) -> Optional[Callable[..., Any]]:
while func is not None:
func_code = getattr(func, "__code__", None)
if func_code is code:
return func
# Attempt to find the decorated function
func = getattr(func, "__wrapped__", None)
return None
def get_previous_frames(frame: Optional[FrameType]) -> Iterator[FrameType]:
while frame is not None:
yield frame
frame = frame.f_back
def get_locals_from_previous_frames(frame: FrameType) -> Iterator[Any]:
for previous_frame in get_previous_frames(frame):
yield from previous_frame.f_locals.values()
def get_func(frame: FrameType) -> Optional[Callable[..., Any]]:
"""Return the function whose code object corresponds to the supplied stack frame."""
code = frame.f_code
if code.co_name is None:
return None
# First, try to find the function in globals
cand = frame.f_globals.get(code.co_name, None)
func = _has_code(cand, code)
# If that failed, as will be the case with class and instance methods, try
# to look up the function from the first argument. In the case of class/instance
# methods, this should be the class (or an instance of the class) on which our
# method is defined.
if func is None and code.co_argcount >= 1:
first_arg = frame.f_locals.get(code.co_varnames[0])
func = get_func_in_mro(first_arg, code)
# If we still can't find the function, as will be the case with static methods,
# try looking at classes in global scope.
if func is None:
for v in frame.f_globals.values():
if not isinstance(v, type):
continue
func = get_func_in_mro(v, code)
if func is not None:
break
# If we still can't find the function, try looking at the locals of all previous frames.
if func is None:
for v in get_locals_from_previous_frames(frame):
if not callable(v):
continue
func = _has_code(v, code)
if func is not None:
break
return func
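# Illustrative only (`_demo_traced` is a hypothetical helper): get_func resolves a
# module-level function from its own stack frame via the globals lookup above.
import sys
def _demo_traced():
    frame = sys._getframe()
    return get_func(frame)
assert _demo_traced() is _demo_traced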
RETURN_VALUE_OPCODE = opcode.opmap["RETURN_VALUE"]
YIELD_VALUE_OPCODE = opcode.opmap["YIELD_VALUE"]
# A CodeFilter is a predicate that decides whether or not the call for the
# supplied code object should be traced.
CodeFilter = Callable[[CodeType], bool]
EVENT_CALL = "call"
EVENT_RETURN = "return"
SUPPORTED_EVENTS = {EVENT_CALL, EVENT_RETURN}
|
CallTraceLogger
|
python
|
pyparsing__pyparsing
|
pyparsing/diagram/__init__.py
|
{
"start": 3224,
"end": 9077
}
|
class ____(Generic[T]):
"""
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
constructed.
"""
# We need this here because the railroad constructors actually transform the data, so can't be called until the
# entire tree is assembled
def __init__(self, func: Callable[..., T], args: list, kwargs: dict) -> None:
self.func = func
self.args = args
self.kwargs = kwargs
@classmethod
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> EditablePartial[T]:
"""
If you call this function in the same way that you would call the constructor,
it will store the arguments as you expect. For example
``EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)``
"""
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
@property
def name(self):
return self.kwargs["name"]
def __call__(self) -> T:
"""
Evaluate the partial and return the result
"""
args = self.args.copy()
kwargs = self.kwargs.copy()
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
# args=['list', 'of', 'things'])
arg_spec = inspect.getfullargspec(self.func)
if arg_spec.varargs in self.kwargs:
args += kwargs.pop(arg_spec.varargs)
return self.func(*args, **kwargs)
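# Usage sketch (assuming the class above is importable as EditablePartial, the name
# from_call itself uses; `_join` is a hypothetical helper). Note the varargs-as-keyword
# hack handled in __call__.
def _join(*parts, sep="-"):
    return sep.join(parts)
ep = EditablePartial.from_call(_join, sep="/")
ep.kwargs["parts"] = ["a", "b"]  # varargs supplied as a keyword argument
assert ep() == "a/b"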
def railroad_to_html(diagrams: list[NamedDiagram], embed=False, **kwargs) -> str:
"""
Given a list of :class:`NamedDiagram`, produce a single HTML string
that visualises those diagrams.
    :param kwargs: kwargs to be passed to the template
"""
data = []
for diagram in diagrams:
if diagram.diagram is None:
continue
io = StringIO()
try:
css = kwargs.get("css")
diagram.diagram.writeStandalone(io.write, css=css)
except AttributeError:
diagram.diagram.writeSvg(io.write)
title = diagram.name
if diagram.index == 0:
title += " (root)"
data.append(
                {
                    "title": title,
                    "text": "",
                    "svg": io.getvalue(),
                    "bookmark": diagram.bookmark,
                }
)
return template.render(diagrams=data, embed=embed, **kwargs)
def resolve_partial(partial: EditablePartial[T]) -> T:
"""
Recursively resolves a collection of Partials into whatever type they are
"""
if isinstance(partial, EditablePartial):
partial.args = resolve_partial(partial.args)
partial.kwargs = resolve_partial(partial.kwargs)
return partial()
elif isinstance(partial, list):
return [resolve_partial(x) for x in partial]
elif isinstance(partial, dict):
return {key: resolve_partial(x) for key, x in partial.items()}
else:
return partial
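# Illustrative only (`_pair` is a hypothetical helper): nested EditablePartial objects
# resolve depth-first into concrete values.
def _pair(a, b):
    return (a, b)
assert resolve_partial([EditablePartial.from_call(_pair, 1, b=2)]) == [(1, 2)]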
def to_railroad(
element: pyparsing.ParserElement,
diagram_kwargs: typing.Optional[dict] = None,
vertical: int = 3,
show_results_names: bool = False,
show_groups: bool = False,
show_hidden: bool = False,
) -> list[NamedDiagram]:
"""
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
creation if you want to access the Railroad tree before it is converted to HTML
:param element: base element of the parser being diagrammed
:param diagram_kwargs: kwargs to pass to the :meth:`Diagram` constructor
:param vertical: (optional) int - limit at which number of alternatives
should be shown vertically instead of horizontally
:param show_results_names: bool to indicate whether results name
annotations should be included in the diagram
:param show_groups: bool to indicate whether groups should be highlighted
with an unlabeled surrounding box
:param show_hidden: bool to indicate whether internal elements that are
typically hidden should be shown
"""
# Convert the whole tree underneath the root
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
_to_diagram_element(
element,
lookup=lookup,
parent=None,
vertical=vertical,
show_results_names=show_results_names,
show_groups=show_groups,
show_hidden=show_hidden,
)
root_id = id(element)
# Convert the root if it hasn't been already
if root_id in lookup:
if not element.customName:
lookup[root_id].name = ""
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
# Now that we're finished, we can convert from intermediate structures into Railroad elements
diags = list(lookup.diagrams.values())
if len(diags) > 1:
# collapse out duplicate diags with the same name
seen = set()
deduped_diags = []
for d in diags:
# don't extract SkipTo elements, they are uninformative as subdiagrams
if d.name == "...":
continue
if d.name is not None and d.name not in seen:
seen.add(d.name)
deduped_diags.append(d)
resolved = [resolve_partial(partial) for partial in deduped_diags]
else:
# special case - if just one diagram, always display it, even if
# it has no name
resolved = [resolve_partial(partial) for partial in diags]
return sorted(resolved, key=lambda diag: diag.index)
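# Usage sketch (assumes pyparsing and the railroad-diagrams backend are installed and
# that this module's helpers are importable; `greeting` is a throwaway example grammar).
import pyparsing as pp
greeting = pp.Word(pp.alphas)("salutation") + pp.Suppress(",") + pp.Word(pp.alphas)("name")
diagrams = to_railroad(greeting, vertical=3, show_results_names=True)
html = railroad_to_html(diagrams)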
def _should_vertical(
specification: int, exprs: Iterable[pyparsing.ParserElement]
) -> bool:
"""
Returns true if we should return a vertical list of elements
"""
if specification is None:
return False
else:
return len(_visible_exprs(exprs)) >= specification
@dataclasses.dataclass
|
EditablePartial
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/events.py
|
{
"start": 136292,
"end": 139227
}
|
class ____(Request):
"""
Get the image for the next variant for the same iteration or for the next iteration
:param task: Task ID
:type task: str
:param scroll_id: Scroll ID from the previous call to get_debug_image_sample
:type scroll_id: str
    :param navigate_earlier: If set, get either the previous variant event from the
        current iteration or (if it does not exist) the last variant event from the
        previous iteration. Otherwise, get the next variant event from the current
        iteration or the first variant event from the next iteration
:type navigate_earlier: bool
"""
_service = "events"
_action = "next_debug_image_sample"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"navigate_earlier": {
"description": "If set then get the either previous variant event from the current iteration or (if does not exist) the last variant event from the previous iteration. Otherwise next variant event from the current iteration or first variant event from the next iteration",
"type": "boolean",
},
"scroll_id": {
"description": "Scroll ID from the previous call to get_debug_image_sample",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "scroll_id"],
"type": "object",
}
def __init__(self, task: str, scroll_id: str, navigate_earlier: Optional[bool] = None, **kwargs: Any) -> None:
super(NextDebugImageSampleRequest, self).__init__(**kwargs)
self.task = task
self.scroll_id = scroll_id
self.navigate_earlier = navigate_earlier
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("scroll_id")
def scroll_id(self) -> str:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: str) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("navigate_earlier")
def navigate_earlier(self) -> Optional[bool]:
return self._property_navigate_earlier
@navigate_earlier.setter
def navigate_earlier(self, value: Optional[bool]) -> None:
if value is None:
self._property_navigate_earlier = None
return
self.assert_isinstance(value, "navigate_earlier", (bool,))
self._property_navigate_earlier = value
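# Usage sketch (assumes the class above is importable as NextDebugImageSampleRequest,
# the name used in __init__; the IDs below are placeholders).
req = NextDebugImageSampleRequest(task="<task-id>", scroll_id="<scroll-id>", navigate_earlier=True)
assert req.navigate_earlier is True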
|
NextDebugImageSampleRequest
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_pkcs7.py
|
{
"start": 51646,
"end": 54433
}
|
class ____:
@pytest.mark.parametrize(
("encoding", "loader"),
[
(serialization.Encoding.PEM, pkcs7.load_pem_pkcs7_certificates),
(serialization.Encoding.DER, pkcs7.load_der_pkcs7_certificates),
],
)
def test_roundtrip(self, encoding, loader, backend):
certs = load_vectors_from_file(
os.path.join("pkcs7", "amazon-roots.der"),
lambda derfile: pkcs7.load_der_pkcs7_certificates(derfile.read()),
mode="rb",
)
p7 = pkcs7.serialize_certificates(certs, encoding)
certs2 = loader(p7)
assert certs == certs2
def test_ordering(self, backend):
certs = load_vectors_from_file(
os.path.join("pkcs7", "amazon-roots.der"),
lambda derfile: pkcs7.load_der_pkcs7_certificates(derfile.read()),
mode="rb",
)
p7 = pkcs7.serialize_certificates(
list(reversed(certs)), serialization.Encoding.DER
)
certs2 = pkcs7.load_der_pkcs7_certificates(p7)
assert certs == certs2
def test_pem_matches_vector(self, backend):
p7_pem = load_vectors_from_file(
os.path.join("pkcs7", "isrg.pem"),
lambda p: p.read(),
mode="rb",
)
certs = pkcs7.load_pem_pkcs7_certificates(p7_pem)
p7 = pkcs7.serialize_certificates(certs, serialization.Encoding.PEM)
assert p7 == p7_pem
def test_der_matches_vector(self, backend):
p7_der = load_vectors_from_file(
os.path.join("pkcs7", "amazon-roots.der"),
lambda p: p.read(),
mode="rb",
)
certs = pkcs7.load_der_pkcs7_certificates(p7_der)
p7 = pkcs7.serialize_certificates(certs, serialization.Encoding.DER)
assert p7 == p7_der
def test_invalid_types(self):
certs = load_vectors_from_file(
os.path.join("pkcs7", "amazon-roots.der"),
lambda derfile: pkcs7.load_der_pkcs7_certificates(derfile.read()),
mode="rb",
)
with pytest.raises(TypeError):
pkcs7.serialize_certificates(
object(), # type: ignore[arg-type]
serialization.Encoding.PEM,
)
with pytest.raises(TypeError):
pkcs7.serialize_certificates([], serialization.Encoding.PEM)
with pytest.raises(TypeError):
pkcs7.serialize_certificates(
certs,
"not an encoding", # type: ignore[arg-type]
)
@pytest.mark.supported(
only_if=lambda backend: backend.pkcs7_supported()
and not backend.rsa_encryption_supported(padding.PKCS1v15()),
skip_message="Requires OpenSSL with no PKCS1 v1.5 padding support",
)
|
TestPKCS7SerializeCerts
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 921373,
"end": 939538
}
|
class ____(PolarDef):
r"""
PositionFieldDefBase schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
stack : bool, :class:`StackOffset`, Literal['zero', 'center', 'normalize'], None
Type of stacking offset if the field should be stacked. ``stack`` is only applicable
for ``x``, ``y``, ``theta``, and ``radius`` channels with continuous domains. For
example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` or ``true``: stacking with baseline offset at zero value of the scale
(for creating typical stacked `bar
<https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and `area
<https://vega.github.io/vega-lite/docs/stack.html#area>`__ chart).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ and pie charts
`with percentage tooltip
<https://vega.github.io/vega-lite/docs/arc.html#tooltip>`__).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__).
* ``null`` or ``false`` - No-stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
chart.
        **Default value:** ``zero`` for plots where all of the following conditions are true:
(1) the mark is ``bar``, ``area``, or ``arc``; (2) the stacked measure channel (x or
y) has a linear scale; (3) At least one of non-position channels mapped to an
unaggregated field that is different from x and y. Otherwise, ``null`` by default.
**See also:** `stack <https://vega.github.io/vega-lite/docs/stack.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/PositionFieldDefBase"}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
stack: Optional[bool | SchemaBase | StackOffset_T | None] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
scale=scale,
sort=sort,
stack=stack,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
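# Illustrative only (assumes the wrapper above is importable as PositionFieldDefBase; in
# practice such field definitions are usually built indirectly through Altair's channel
# classes rather than instantiated directly).
pos = PositionFieldDefBase(field="Horsepower", type="quantitative", bin=True)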
|
PositionFieldDefBase
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0061_add_imported_file_ignore.py
|
{
"start": 149,
"end": 570
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0060_make_rank_not_null"),
]
operations = [
migrations.AddField(
model_name="importedfile",
name="ignore",
field=models.BooleanField(
null=True, verbose_name="Ignore this file from operations like indexing"
),
),
]
|
Migration
|
python
|
eventlet__eventlet
|
tests/zmq_test.py
|
{
"start": 15400,
"end": 17224
}
|
class ____(tests.LimitedTestCase):
@tests.skip_unless(zmq_supported)
def test_queue_lock_order(self):
q = zmq._QueueLock()
s = eventlet.Semaphore(0)
results = []
def lock(x):
with q:
results.append(x)
s.release()
q.acquire()
eventlet.spawn(lock, 1)
eventlet.sleep()
eventlet.spawn(lock, 2)
eventlet.sleep()
eventlet.spawn(lock, 3)
eventlet.sleep()
self.assertEqual(results, [])
q.release()
s.acquire()
s.acquire()
s.acquire()
self.assertEqual(results, [1, 2, 3])
@tests.skip_unless(zmq_supported)
def test_count(self):
q = zmq._QueueLock()
self.assertFalse(q)
q.acquire()
self.assertTrue(q)
q.release()
self.assertFalse(q)
with q:
self.assertTrue(q)
self.assertFalse(q)
@tests.skip_unless(zmq_supported)
def test_errors(self):
q = zmq._QueueLock()
self.assertRaises(zmq.LockReleaseError, q.release)
q.acquire()
q.release()
self.assertRaises(zmq.LockReleaseError, q.release)
@tests.skip_unless(zmq_supported)
def test_nested_acquire(self):
q = zmq._QueueLock()
self.assertFalse(q)
q.acquire()
q.acquire()
s = eventlet.Semaphore(0)
results = []
def lock(x):
with q:
results.append(x)
s.release()
eventlet.spawn(lock, 1)
eventlet.sleep()
self.assertEqual(results, [])
q.release()
eventlet.sleep()
self.assertEqual(results, [])
self.assertTrue(q)
q.release()
s.acquire()
self.assertEqual(results, [1])
|
TestQueueLock
|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esmfold.py
|
{
"start": 46324,
"end": 47775
}
|
class ____:
def __init__(self, param, bins=50, start=0, end=1):
# All tensors are of shape ..., bins.
self.logits = param
bins = torch.linspace(start, end, bins + 1, device=self.logits.device, dtype=self.logits.dtype)
self.v_bins = (bins[:-1] + bins[1:]) / 2
def log_prob(self, true):
# Shapes are:
# self.probs: ... x bins
# true : ...
true_index = (true.unsqueeze(-1) - self.v_bins[[None] * true.ndim]).abs().argmin(-1)
nll = self.logits.log_softmax(-1)
return torch.take_along_dim(nll, true_index.unsqueeze(-1), dim=-1).squeeze(-1)
def mean(self):
return (self.logits.softmax(-1) @ self.v_bins.unsqueeze(1)).squeeze(-1)
def categorical_lddt(logits, bins=50):
# Logits are ..., 37, bins.
return EsmCategoricalMixture(logits, bins=bins).mean()
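# Numeric sketch (illustrative only; requires torch): per-position logits over 50 bins
# yield an expected lDDT strictly inside (0, 1) because the bin centers do.
import torch
logits = torch.randn(2, 10, 37, 50)  # (batch, seq_len, atoms, bins)
plddt = categorical_lddt(logits, bins=50)
assert plddt.shape == (2, 10, 37)
assert bool(((plddt > 0) & (plddt < 1)).all())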
def get_axial_mask(mask):
"""
    Helper to convert a B x L mask of valid positions to the axial mask used in row/column attentions.
    Input:
        mask: B x L tensor of booleans
    Output:
        mask: (B*L) x L tensor of booleans (the B x L x L axial mask flattened over its leading two dims)
"""
if mask is None:
return None
if len(mask.shape) != 2:
raise ValueError(f"`mask` should be a 2d-tensor, got {len(mask.shape)} dims.")
batch_dim, seq_dim = mask.shape
m = mask.unsqueeze(1).expand(batch_dim, seq_dim, seq_dim)
m = m.reshape(batch_dim * seq_dim, seq_dim)
return m
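# Shape sketch (illustrative only; requires torch): a (B, L) mask becomes a (B*L, L)
# mask with one row of column validity per query position.
import torch
mask = torch.ones(2, 5, dtype=torch.bool)
axial = get_axial_mask(mask)
assert axial.shape == (2 * 5, 5)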
|
EsmCategoricalMixture
|
python
|
dagster-io__dagster
|
examples/assets_pandas_type_metadata/assets_pandas_type_metadata/resources/csv_io_manager.py
|
{
"start": 298,
"end": 3123
}
|
class ____(ConfigurableIOManager):
"""Translates between Pandas DataFrames and CSVs on the local filesystem."""
base_dir: Optional[str] = Field(default=None)
@property
@cached_method
def resolved_base_dir(self) -> str:
if self.base_dir:
return self.base_dir
resource_context = self.get_resource_context()
if resource_context.instance is not None:
return resource_context.instance.storage_directory()
else:
return os.getenv("DAGSTER_HOME", ".")
def _get_fs_path(self, asset_key: AssetKey) -> str:
rpath = os.path.join(self.resolved_base_dir, *asset_key.path) + ".csv"
return os.path.abspath(rpath)
def handle_output(self, context, obj: pd.DataFrame):
"""This saves the dataframe as a CSV."""
fpath = self._get_fs_path(asset_key=context.asset_key)
os.makedirs(os.path.dirname(fpath), exist_ok=True)
obj.to_csv(fpath)
with open(fpath + ".version", "w", encoding="utf8") as f:
f.write(context.version if context.version else "None")
context.add_output_metadata(
{
"Rows": MetadataValue.int(obj.shape[0]),
"Path": MetadataValue.path(fpath),
"Sample": MetadataValue.md(obj.head(5).to_markdown()),
"Resolved version": MetadataValue.text(context.version), # type: ignore
"Schema": MetadataValue.table_schema(self.get_schema(context.dagster_type)),
}
)
def get_schema(self, dagster_type):
schema_value = next(
(x for x in dagster_type.metadata.values() if isinstance(x, TableSchemaMetadataValue)),
None,
)
assert schema_value
return schema_value.schema
def load_input(self, context):
"""This reads a dataframe from a CSV."""
fpath = self._get_fs_path(asset_key=context.asset_key)
date_col_names = [
table_col.name
for table_col in self.get_schema(context.upstream_output.dagster_type).columns
if table_col.type == "datetime64[ns]"
]
return pd.read_csv(fpath, parse_dates=date_col_names)
def has_output(self, context) -> bool:
fpath = self._get_fs_path(asset_key=context.asset_key)
version_fpath = fpath + ".version"
if not os.path.exists(version_fpath):
return False
with open(version_fpath, encoding="utf8") as f:
version = f.read()
return version == context.version
def pandas_columns_to_markdown(dataframe: pd.DataFrame) -> str:
return textwrap.dedent(
"""
| Name | Type |
| ---- | ---- |
"""
) + "\n".join([f"| {name} | {dtype} |" for name, dtype in dataframe.dtypes.items()])
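# Illustrative only: render a tiny DataFrame's column dtypes with the helper above
# (the commented output is approximate and depends on pandas' inferred dtypes).
import pandas as pd
df = pd.DataFrame({"name": ["a", "b"], "value": [1, 2]})
print(pandas_columns_to_markdown(df))
# | Name | Type |
# | ---- | ---- |
# | name | object |
# | value | int64 |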
|
LocalCsvIOManager
|
python
|
dask__distributed
|
distributed/deploy/cluster.py
|
{
"start": 862,
"end": 21279
}
|
class ____(SyncMethodMixin):
"""Superclass for cluster objects
This class contains common functionality for Dask Cluster manager classes.
To implement this class, you must provide
1. A ``scheduler_comm`` attribute, which is a connection to the scheduler
following the ``distributed.core.rpc`` API.
2. Implement ``scale``, which takes an integer and scales the cluster to
that many workers, or else set ``_supports_scaling`` to False
    In return, you get the following:
1. A standard ``__repr__``
2. A live IPython widget
3. Adaptive scaling
4. Integration with dask-labextension
5. A ``scheduler_info`` attribute which contains an up-to-date copy of
``Scheduler.identity()``, which is used for much of the above
6. Methods to gather logs
"""
_supports_scaling = True
__loop: IOLoop | None = None
def __init__(
self,
asynchronous=False,
loop=None,
quiet=False,
name=None,
scheduler_sync_interval=1,
):
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.__asynchronous = asynchronous
self.scheduler_info = {"workers": {}}
self.periodic_callbacks = {}
self._watch_worker_status_comm = None
self._watch_worker_status_task = None
self._cluster_manager_logs = []
self.quiet = quiet
self.scheduler_comm = None
self._adaptive = None
self._sync_interval = parse_timedelta(
scheduler_sync_interval, default="seconds"
)
self._sync_cluster_info_task = None
if name is None:
name = str(uuid.uuid4())[:8]
self._cluster_info = {
"name": name,
"type": typename(type(self)),
}
self.status = Status.created
@property
def loop(self) -> IOLoop | None:
loop = self.__loop
if loop is None:
# If the loop is not running when this is called, the LoopRunner.loop
# property will raise a DeprecationWarning
# However subsequent calls might occur - eg atexit, where a stopped
# loop is still acceptable - so we cache access to the loop.
self.__loop = loop = self._loop_runner.loop
return loop
@loop.setter
def loop(self, value: IOLoop) -> None:
warnings.warn(
"setting the loop property is deprecated", DeprecationWarning, stacklevel=2
)
if value is None:
raise ValueError("expected an IOLoop, got None")
self.__loop = value
@property
def called_from_running_loop(self):
try:
return (
getattr(self.loop, "asyncio_loop", None) is asyncio.get_running_loop()
)
except RuntimeError:
return self.__asynchronous
@property
def name(self):
return self._cluster_info["name"]
@name.setter
def name(self, name):
self._cluster_info["name"] = name
async def _start(self):
comm = await self.scheduler_comm.live_comm()
comm.name = "Cluster worker status"
await comm.write({"op": "subscribe_worker_status"})
self.scheduler_info = SchedulerInfo(await comm.read())
self._watch_worker_status_comm = comm
self._watch_worker_status_task = asyncio.ensure_future(
self._watch_worker_status(comm)
)
info = await self.scheduler_comm.get_metadata(
keys=["cluster-manager-info"], default={}
)
self._cluster_info.update(info)
# Start a background task for syncing cluster info with the scheduler
self._sync_cluster_info_task = asyncio.ensure_future(self._sync_cluster_info())
for pc in self.periodic_callbacks.values():
pc.start()
self.status = Status.running
async def _sync_cluster_info(self):
err_count = 0
warn_at = 5
max_interval = 10 * self._sync_interval
# Loop until the cluster is shutting down. We shouldn't really need
# this check (the `CancelledError` should be enough), but something
# deep in the comms code is silencing `CancelledError`s _some_ of the
# time, resulting in a cancellation not always bubbling back up to
# here. Relying on the status is fine though, not worth changing.
while self.status == Status.running:
try:
await self.scheduler_comm.set_metadata(
keys=["cluster-manager-info"],
value=self._cluster_info.copy(),
)
err_count = 0
except Exception:
err_count += 1
# Only warn if multiple subsequent attempts fail, and only once
# per set of subsequent failed attempts. This way we're not
# excessively noisy during a connection blip, but we also don't
# silently fail.
if err_count == warn_at:
logger.warning(
"Failed to sync cluster info multiple times - perhaps "
"there's a connection issue? Error:",
exc_info=True,
)
# Sleep, with error backoff
interval = _exponential_backoff(
err_count, self._sync_interval, 1.5, max_interval
)
await asyncio.sleep(interval)
async def _close(self):
if self.status == Status.closed:
return
self.status = Status.closing
with suppress(AttributeError):
self._adaptive.stop()
if self._watch_worker_status_comm:
await self._watch_worker_status_comm.close()
if self._watch_worker_status_task:
await self._watch_worker_status_task
if self._sync_cluster_info_task:
self._sync_cluster_info_task.cancel()
with suppress(asyncio.CancelledError):
await self._sync_cluster_info_task
if self.scheduler_comm:
await self.scheduler_comm.close_rpc()
for pc in self.periodic_callbacks.values():
pc.stop()
self.status = Status.closed
def close(self, timeout: float | None = None) -> Any:
# If the cluster is already closed, we're already done
if self.status == Status.closed:
if self.asynchronous:
return NoOpAwaitable()
return None
try:
return self.sync(self._close, callback_timeout=timeout)
except RuntimeError: # loop closed during process shutdown
return None
def __del__(self, _warn=warnings.warn):
if getattr(self, "status", Status.closed) != Status.closed:
try:
self_r = repr(self)
except Exception:
self_r = f"with a broken __repr__ {object.__repr__(self)}"
_warn(f"unclosed cluster {self_r}", ResourceWarning, source=self)
async def _watch_worker_status(self, comm):
"""Listen to scheduler for updates on adding and removing workers"""
while True:
try:
msgs = await comm.read()
except OSError:
break
with log_errors():
for op, msg in msgs:
self._update_worker_status(op, msg)
await comm.close()
def _update_worker_status(self, op, msg):
if op == "add":
workers = msg.pop("workers")
self.scheduler_info["workers"].update(workers)
self.scheduler_info.update(msg)
elif op == "remove":
del self.scheduler_info["workers"][msg]
else: # pragma: no cover
raise ValueError("Invalid op", op, msg)
def adapt(self, Adaptive: type[Adaptive] = Adaptive, **kwargs: Any) -> Adaptive:
"""Turn on adaptivity
For keyword arguments see dask.distributed.Adaptive
Examples
--------
>>> cluster.adapt(minimum=0, maximum=10, interval='500ms')
"""
with suppress(AttributeError):
self._adaptive.stop()
if not hasattr(self, "_adaptive_options"):
self._adaptive_options = {}
self._adaptive_options.update(kwargs)
self._adaptive = Adaptive(self, **self._adaptive_options)
return self._adaptive
def scale(self, n: int) -> None:
"""Scale cluster to n workers
Parameters
----------
n : int
Target number of workers
Examples
--------
>>> cluster.scale(10) # scale cluster to ten workers
"""
raise NotImplementedError()
def _log(self, log):
"""Log a message.
Output a message to the user and also store for future retrieval.
For use in subclasses where initialisation may take a while and it would
be beneficial to feed back to the user.
Examples
--------
>>> self._log("Submitted job X to batch scheduler")
"""
self._cluster_manager_logs.append((datetime.datetime.now(), log))
if not self.quiet:
print(log)
async def _get_logs(self, cluster=True, scheduler=True, workers=True):
logs = Logs()
if cluster:
logs["Cluster"] = Log(
"\n".join(line[1] for line in self._cluster_manager_logs)
)
if scheduler:
L = await self.scheduler_comm.get_logs()
logs["Scheduler"] = Log("\n".join(line for level, line in L))
if workers:
if workers is True:
workers = None
d = await self.scheduler_comm.worker_logs(workers=workers)
for k, v in d.items():
logs[k] = Log("\n".join(line for level, line in v))
return logs
def get_logs(self, cluster=True, scheduler=True, workers=True):
"""Return logs for the cluster, scheduler and workers
Parameters
----------
cluster : boolean
Whether or not to collect logs for the cluster manager
scheduler : boolean
Whether or not to collect logs for the scheduler
workers : boolean or Iterable[str], optional
A list of worker addresses to select.
Defaults to all workers if `True` or no workers if `False`
Returns
-------
logs: Dict[str]
A dictionary of logs, with one item for the scheduler and one for
each worker
"""
return self.sync(
self._get_logs, cluster=cluster, scheduler=scheduler, workers=workers
)
@_deprecated(use_instead="get_logs")
def logs(self, *args, **kwargs):
return self.get_logs(*args, **kwargs)
def get_client(self):
"""Return client for the cluster
If a client has already been initialized for the cluster, return that
otherwise initialize a new client object.
"""
from distributed.client import Client
try:
current_client = Client.current()
if current_client and current_client.cluster == self:
return current_client
except ValueError:
pass
return Client(self)
@property
def dashboard_link(self):
try:
port = self.scheduler_info["services"]["dashboard"]
except KeyError:
return ""
else:
host = self.scheduler_address.split("://")[1].split("/")[0].split(":")[0]
return format_dashboard_link(host, port)
def _scaling_status(self):
if self._adaptive and self._adaptive.periodic_callback:
mode = "Adaptive"
else:
mode = "Manual"
workers = len(self.scheduler_info["workers"])
if hasattr(self, "worker_spec"):
requested = sum(
1 if "group" not in each else len(each["group"])
for each in self.worker_spec.values()
)
elif hasattr(self, "workers"):
requested = len(self.workers)
else:
requested = workers
worker_count = workers if workers == requested else f"{workers} / {requested}"
return f"""
<table>
<tr><td style="text-align: left;">Scaling mode: {mode}</td></tr>
<tr><td style="text-align: left;">Workers: {worker_count}</td></tr>
</table>
"""
def _widget(self):
"""Create IPython widget for display within a notebook"""
try:
return self._cached_widget
except AttributeError:
pass
try:
from ipywidgets import (
HTML,
Accordion,
Button,
HBox,
IntText,
Layout,
Tab,
VBox,
)
except ImportError:
self._cached_widget = None
return None
layout = Layout(width="150px")
status = HTML(self._repr_html_())
if self._supports_scaling:
request = IntText(0, description="Workers", layout=layout)
scale = Button(description="Scale", layout=layout)
minimum = IntText(0, description="Minimum", layout=layout)
maximum = IntText(0, description="Maximum", layout=layout)
adapt = Button(description="Adapt", layout=layout)
accordion = Accordion(
[HBox([request, scale]), HBox([minimum, maximum, adapt])],
layout=Layout(min_width="500px"),
)
accordion.selected_index = None
accordion.set_title(0, "Manual Scaling")
accordion.set_title(1, "Adaptive Scaling")
def adapt_cb(b):
self.adapt(minimum=minimum.value, maximum=maximum.value)
update()
adapt.on_click(adapt_cb)
@log_errors
def scale_cb(b):
n = request.value
with suppress(AttributeError):
self._adaptive.stop()
self.scale(n)
update()
scale.on_click(scale_cb)
else: # pragma: no cover
accordion = HTML("")
scale_status = HTML(self._scaling_status())
tab = Tab()
tab.children = [status, VBox([scale_status, accordion])]
tab.set_title(0, "Status")
tab.set_title(1, "Scaling")
self._cached_widget = tab
def update():
status.value = self._repr_html_()
scale_status.value = self._scaling_status()
cluster_repr_interval = parse_timedelta(
dask.config.get("distributed.deploy.cluster-repr-interval", default="ms")
)
def install():
pc = PeriodicCallback(update, cluster_repr_interval * 1000)
self.periodic_callbacks["cluster-repr"] = pc
pc.start()
self.loop.add_callback(install)
return tab
def _repr_html_(self, cluster_status=None):
try:
scheduler_info_repr = self.scheduler_info._repr_html_()
except AttributeError:
scheduler_info_repr = "Scheduler not started yet."
return get_template("cluster.html.j2").render(
type=type(self).__name__,
name=self.name,
workers=self.scheduler_info["workers"],
dashboard_link=self.dashboard_link,
scheduler_info_repr=scheduler_info_repr,
cluster_status=cluster_status,
)
def _ipython_display_(self, **kwargs):
"""Display the cluster rich IPython repr"""
# Note: it would be simpler to just implement _repr_mimebundle_,
# but we cannot do that until we drop ipywidgets 7 support, as
# it does not provide a public way to get the mimebundle for a
# widget. So instead we fall back on the more customizable _ipython_display_
# and display as a side-effect.
from IPython.display import display
widget = self._widget()
if widget:
import ipywidgets
if parse_version(ipywidgets.__version__) >= parse_version("8.0.0"):
mimebundle = widget._repr_mimebundle_(**kwargs) or {}
mimebundle["text/plain"] = repr(self)
mimebundle["text/html"] = self._repr_html_()
display(mimebundle, raw=True)
else:
display(widget, **kwargs)
else:
mimebundle = {"text/plain": repr(self), "text/html": self._repr_html_()}
display(mimebundle, raw=True)
def __enter__(self):
if self.asynchronous:
raise TypeError(
"Used 'with' with asynchronous class; please use 'async with'"
)
return self.sync(self.__aenter__)
def __exit__(self, exc_type, exc_value, traceback):
aw = self.close()
assert aw is None, aw
def __await__(self):
return self
yield
async def __aenter__(self):
await self
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self._close()
@property
def scheduler_address(self) -> str:
if not self.scheduler_comm:
return "<Not Connected>"
return self.scheduler_comm.address
@property
def _cluster_class_name(self):
return getattr(self, "_name", type(self).__name__)
def __repr__(self):
text = "%s(%s, %r, workers=%d, threads=%d" % (
self._cluster_class_name,
self.name,
self.scheduler_address,
len(self.scheduler_info["workers"]),
sum(w["nthreads"] for w in self.scheduler_info["workers"].values()),
)
memory = [w["memory_limit"] for w in self.scheduler_info["workers"].values()]
if all(memory):
text += ", memory=" + format_bytes(sum(memory))
text += ")"
return text
@property
def plan(self):
return set(self.workers)
@property
def requested(self):
return set(self.workers)
@property
def observed(self):
return {d["name"] for d in self.scheduler_info["workers"].values()}
def __eq__(self, other):
return type(other) == type(self) and self.name == other.name
def __hash__(self):
return id(self)
async def _wait_for_workers(self, n_workers=0, timeout=None):
self.scheduler_info = SchedulerInfo(await self.scheduler_comm.identity())
if timeout:
deadline = time() + parse_timedelta(timeout)
else:
deadline = None
def running_workers(info):
return len(
[
ws
for ws in info["workers"].values()
if ws["status"] == Status.running.name
]
)
while n_workers and running_workers(self.scheduler_info) < n_workers:
if deadline and time() > deadline:
raise WorkerStartTimeoutError(
running_workers(self.scheduler_info), n_workers, timeout
)
await asyncio.sleep(0.1)
self.scheduler_info = SchedulerInfo(await self.scheduler_comm.identity())
def wait_for_workers(self, n_workers: int, timeout: float | None = None) -> None:
"""Blocking call to wait for n workers before continuing
Parameters
----------
n_workers : int
The number of workers
timeout : number, optional
Time in seconds after which to raise a
``dask.distributed.TimeoutError``
"""
if not isinstance(n_workers, int) or n_workers < 1:
raise ValueError(
f"`n_workers` must be a positive integer. Instead got {n_workers}."
)
return self.sync(self._wait_for_workers, n_workers, timeout=timeout)
def _exponential_backoff(
attempt: int, multiplier: float, exponential_base: float, max_interval: float
) -> float:
"""Calculate the duration of an exponential backoff"""
try:
interval = multiplier * exponential_base**attempt
except OverflowError:
return max_interval
return min(max_interval, interval)
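# Editor's sketch (not part of the original module): _sync_cluster_info above spaces
# out its retries with this helper. Assuming the definition above, the schedule it
# produces looks like this (multiplier=1.0s, exponential_base=1.5, capped at 10s;
# values chosen only for illustration):
if __name__ == "__main__":
    for attempt in range(6):
        # prints 1.0, 1.5, 2.25, 3.375, 5.0625, 7.59375; later attempts cap at 10.0
        print(attempt, _exponential_backoff(attempt, 1.0, 1.5, 10.0))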
|
Cluster
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_auth.py
|
{
"start": 3557,
"end": 5906
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="bar@example.com")
self.organization = self.create_organization(name="Test")
with assume_test_silo_mode(SiloMode.CONTROL):
self.provider = AuthProvider.objects.create(
organization_id=self.organization.id, provider="dummy"
)
self.om = self.create_member(
user_id=self.user.id,
organization=self.organization,
flags=OrganizationMember.flags["sso:linked"],
)
assert self.om.flags["sso:linked"]
self.user2 = self.create_user(email="baz@example.com")
om2 = self.create_member(user_id=self.user2.id, organization=self.organization, flags=0)
assert not om2.flags["sso:linked"]
# Invited members don't get emails
self.create_member(organization=self.organization, email="invited@example.com")
def test_email_unlink_notifications_with_password(self) -> None:
with self.tasks():
email_unlink_notifications(
self.organization.id, self.user.email, self.provider.provider
)
emails = sorted(message.body for message in mail.outbox)
assert len(emails) == 2
assert f"can now login using your email {self.user.email}, and password" in emails[0]
assert "you'll first have to set a password" not in emails[0]
self.om.refresh_from_db()
assert not self.om.flags["sso:linked"]
def test_email_unlink_notifications_without_password(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.user.password = ""
self.user.save()
with self.tasks():
email_unlink_notifications(
self.organization.id, self.user.email, self.provider.provider
)
emails = sorted(message.body for message in mail.outbox)
assert len(emails) == 2
assert "you'll first have to set a password" in emails[0]
assert f"can now login using your email {self.user.email}, and password" not in emails[0]
assert f"can now login using your email {self.user2.email}, and password" in emails[1]
self.om.refresh_from_db()
assert not self.om.flags["sso:linked"]
|
EmailUnlinkNotificationsTest
|
python
|
doocs__leetcode
|
solution/1500-1599/1595.Minimum Cost to Connect Two Groups of Points/Solution2.py
|
{
"start": 0,
"end": 580
}
|
class ____:
def connectTwoGroups(self, cost: List[List[int]]) -> int:
m, n = len(cost), len(cost[0])
f = [inf] * (1 << n)
f[0] = 0
g = f[:]
for i in range(1, m + 1):
for j in range(1 << n):
g[j] = inf
for k in range(n):
if (j >> k & 1) == 0:
continue
c = cost[i - 1][k]
x = min(g[j ^ (1 << k)], f[j], f[j ^ (1 << k)]) + c
g[j] = min(g[j], x)
f = g[:]
return f[-1]
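# Editor's note (added for clarity, not in the original solution): f[j] holds the
# minimum cost after assigning some rows of group 1, where bit k of j means column
# k of group 2 is already connected. A quick sanity check against LeetCode 1595
# example 1:
#
#     Solution().connectTwoGroups([[15, 96], [36, 2]])  # expected 17 (15 + 2)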
|
Solution
|
python
|
joke2k__faker
|
tests/providers/test_automotive.py
|
{
"start": 4485,
"end": 5924
}
|
class ____:
"""Test es_ES automotive provider methods"""
new_format_pattern: Pattern = re.compile(r"\d{4}\s[A-Z]{3}")
old_format_pattern: Pattern = re.compile(r"(?P<province_prefix>[A-Z]{1,2})\s\d{4}\s[A-Z]{2}")
def test_plate_new_format(self, faker, num_samples):
for _ in range(num_samples):
plate = faker.license_plate_unified()
assert isinstance(plate, str)
assert self.new_format_pattern.match(plate)
def test_plate_old_format(self, faker, num_samples):
for _ in range(num_samples):
plate = faker.license_plate_by_province()
assert isinstance(plate, str)
match = self.old_format_pattern.match(plate)
assert match
assert match.group("province_prefix") in EsEsAutomotiveProvider.province_prefix
def test_plate_old_format_explicit_province_prefix(self, faker, num_samples):
for _ in range(num_samples):
plate = faker.license_plate_by_province(province_prefix="CA")
assert isinstance(plate, str)
assert self.old_format_pattern.match(plate)
assert plate[:2] == "CA"
def test_plate_format(self, faker, num_samples):
for _ in range(num_samples):
plate = faker.license_plate()
assert isinstance(plate, str)
assert self.new_format_pattern.match(plate) or self.old_format_pattern.match(plate)
|
TestEsEs
|
python
|
matplotlib__matplotlib
|
galleries/examples/misc/multiprocess_sgskip.py
|
{
"start": 1710,
"end": 2416
}
|
class ____:
def __init__(self):
self.plot_pipe, plotter_pipe = mp.Pipe()
self.plotter = ProcessPlotter()
self.plot_process = mp.Process(
target=self.plotter, args=(plotter_pipe,), daemon=True)
self.plot_process.start()
def plot(self, finished=False):
send = self.plot_pipe.send
if finished:
send(None)
else:
data = np.random.random(2)
send(data)
def main():
pl = NBPlot()
for _ in range(10):
pl.plot()
time.sleep(0.5)
pl.plot(finished=True)
if __name__ == '__main__':
if plt.get_backend() == "MacOSX":
mp.set_start_method("forkserver")
main()
|
NBPlot
|
python
|
yaml__pyyaml
|
packaging/_pyyaml_pep517.py
|
{
"start": 388,
"end": 1149
}
|
class ____:
_current = {}
def __init__(self, config_settings):
self._config = config_settings
def __enter__(self):
type(self)._current = self._config
def __exit__(self, exc_type, exc_val, exc_tb):
type(self)._current = {}
@classmethod
def current(cls):
return cls._current
def _expose_config_settings(real_method, *args, **kwargs):
from contextlib import nullcontext
import inspect
sig = inspect.signature(real_method)
boundargs = sig.bind(*args, **kwargs)
config = boundargs.arguments.get('config_settings')
ctx = ActiveConfigSettings(config) if config else nullcontext()
with ctx:
return real_method(*args, **kwargs)
_bridge_build_meta()
|
ActiveConfigSettings
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/sparse_bincount_ops_test.py
|
{
"start": 1626,
"end": 24240
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "_no_maxlength",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [2, 6]
}, {
"testcase_name": "_maxlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 2],
"expected_shape": [2, 7]
}, {
"testcase_name": "_maxlength_zero",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 0,
"expected_indices": np.empty([0, 2], dtype=np.int64),
"expected_values": [],
"expected_shape": [2, 0]
}, {
"testcase_name": "_minlength",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 9]
}, {
"testcase_name": "_minlength_larger_values",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 2, 1],
"expected_shape": [2, 8]
}, {
"testcase_name": "_no_maxlength_binary",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 6],
"binary_output": True,
}, {
"testcase_name": "_maxlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [2, 7],
"binary_output": True,
}, {
"testcase_name": "_minlength_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 9],
"binary_output": True,
}, {
"testcase_name": "_minlength_larger_values_binary",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [1, 1, 1, 1, 1, 1, 1],
"expected_shape": [2, 8],
"binary_output": True,
}, {
"testcase_name": "_no_maxlength_weights",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 4], [1, 5]],
"expected_values": [2, 1, 0.5, 9, 3],
"expected_shape": [2, 6],
"weights": [[0.5, 1, 2], [3, 4, 5]]
}, {
"testcase_name": "_maxlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"maxlength": 7,
"expected_indices": [[0, 1], [0, 2], [0, 3], [1, 0], [1, 4]],
"expected_values": [2, 1, 0.5, 3, 9],
"expected_shape": [2, 7],
"weights": [[0.5, 1, 2, 11], [7, 3, 4, 5]]
}, {
"testcase_name": "_minlength_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 9,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 9],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_minlength_larger_values_weights",
"x": np.array([[3, 2, 1, 7], [7, 0, 4, 4]], dtype=np.int32),
"minlength": 3,
"expected_indices": [[0, 1], [0, 2], [0, 3], [0, 7], [1, 0], [1, 4],
[1, 7]],
"expected_values": [2, 1, 0.5, 3, 5, 13, 4],
"expected_shape": [2, 8],
"weights": [[0.5, 1, 2, 3], [4, 5, 6, 7]]
}, {
"testcase_name": "_1d",
"x": np.array([3, 2, 1, 1], dtype=np.int32),
"expected_indices": [[1], [2], [3]],
"expected_values": [2, 1, 1],
"expected_shape": [4]
}, {
"testcase_name": "_all_axes",
"x": np.array([[3, 2, 1], [5, 4, 4]], dtype=np.int32),
"expected_indices": [[1], [2], [3], [4], [5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [6],
"axis": None
}, {
"testcase_name":
"_large_inputs",
"x":
np.array([[
1941591354222760687, 1748591354222760687, 1241591354229760689
], [
1941591354222760687, 1241591354229760689, 1241591354229760687
]],
dtype=np.int64),
"expected_indices": [[1241591354229760687], [1241591354229760689],
[1748591354222760687], [1941591354222760687]],
"expected_values": [1, 2, 1, 2],
"expected_shape": [1941591354222760687 + 1],
"axis":
None
})
def test_dense_input(self,
x,
expected_indices,
expected_values,
expected_shape,
minlength=None,
maxlength=None,
binary_output=False,
weights=None,
axis=-1):
y = sparse_ops.sparse_bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.named_parameters(
{
"testcase_name":
"_no_maxlength",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 6],
}, {
"testcase_name":
"_maxlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [3, 7],
"maxlength":
7,
}, {
"testcase_name":
"_maxlength_zero",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices":
np.empty([0, 2], dtype=np.int64),
"expected_values": [],
"expected_shape": [3, 0],
"maxlength":
0,
}, {
"testcase_name":
"_minlength",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 9],
"minlength":
9,
}, {
"testcase_name":
"_minlength_larger_values",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 2, 1],
"expected_shape": [3, 8],
"minlength":
3,
}, {
"testcase_name":
"_no_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 6],
"binary_output":
True,
}, {
"testcase_name":
"_maxlength_binary",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1],
"expected_shape": [3, 7],
"maxlength":
7,
"binary_output":
True,
}, {
"testcase_name":
"_minlength_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 9],
"minlength":
9,
"binary_output":
True,
}, {
"testcase_name":
"_minlength_larger_values_binary",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [1, 1, 1, 1, 1],
"expected_shape": [3, 8],
"minlength":
3,
"binary_output":
True,
}, {
"testcase_name":
"_no_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 6],
"weights":
np.array([[6, 0, 2, 0], [0, 0, 0, 0], [10, 0, 3.5, 3.5]]),
}, {
"testcase_name":
"_maxlength_weights",
"x":
np.array([[3, 0, 1, 0], [0, 0, 7, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [2, 4], [2, 5]],
"expected_values": [2, 6, 7, 10],
"expected_shape": [3, 7],
"maxlength":
7,
"weights":
np.array([[6, 0, 2, 0], [0, 0, 14, 0], [10, 0, 3.5, 3.5]]),
}, {
"testcase_name":
"_minlength_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 9],
"minlength":
9,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
}, {
"testcase_name":
"_minlength_larger_values_weights",
"x":
np.array([[3, 0, 1, 0], [7, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[0, 1], [0, 3], [1, 7], [2, 4], [2, 5]],
"expected_values": [2, 6, 14, 6.5, 10],
"expected_shape": [3, 8],
"minlength":
3,
"weights":
np.array([[6, 0, 2, 0], [14, 0, 0, 0], [10, 0, 3, 3.5]]),
}, {
"testcase_name": "_1d",
"x": np.array([3, 0, 1, 1], dtype=np.int32),
"expected_indices": [[1], [3]],
"expected_values": [2, 1],
"expected_shape": [4],
}, {
"testcase_name":
"_all_axes",
"x":
np.array([[3, 0, 1, 0], [0, 0, 0, 0], [5, 0, 4, 4]],
dtype=np.int32),
"expected_indices": [[1], [3], [4], [5]],
"expected_values": [1, 1, 2, 1],
"expected_shape": [6],
"axis":
None,
}, {
"testcase_name":
"_large_inputs",
"x":
np.array([[1941591354222760687, 0, 1241591354229760689],
[0, 1241591354229760689, 1241591354229760687]],
dtype=np.int64),
"expected_indices": [[1241591354229760687], [1241591354229760689],
[1941591354222760687]],
"expected_values": [1, 2, 1],
"expected_shape": [1941591354222760687 + 1],
"axis":
None
})
def test_sparse_input(self,
x,
expected_indices,
expected_values,
expected_shape,
maxlength=None,
minlength=None,
binary_output=False,
weights=None,
axis=-1):
x_sparse = sparse_ops.from_dense(x)
w_sparse = sparse_ops.from_dense(weights) if weights is not None else None
y = sparse_ops.sparse_bincount(
x_sparse,
weights=w_sparse,
minlength=minlength,
maxlength=maxlength,
binary_output=binary_output,
axis=axis)
self.assertAllEqual(expected_indices, y.indices)
self.assertAllEqual(expected_values, y.values)
self.assertAllEqual(expected_shape, y.dense_shape)
@parameterized.product(
(
dict(
tid="_s1",
x_factory=_sparse_factory([1, 2, 2, 3, 3, 3]),
expected=[0, 1, 2, 3],
),
dict(
tid="_s1_some_zeros",
x_factory=_sparse_factory([1, 0, 0, 3, 3, 3]),
expected=[0, 1, 0, 3],
),
dict(
tid="_s1_all_zeros",
x_factory=_sparse_factory([0, 0, 0, 0, 0, 0]),
expected=[],
),
dict(
tid="_s2",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
expected=[0, 1, 2, 3],
),
dict(
tid="_s3",
x_factory=_sparse_factory(
[[[0, 0, 0], [0, 1, 0]], [[2, 0, 2], [3, 3, 3]]]
),
expected=[0, 1, 2, 3],
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_default(
self,
x_factory,
minlength,
maxlength,
expected,
tid=None,
):
x = x_factory()
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
sparse_ops.bincount(x, minlength=minlength, maxlength=maxlength)
),
)
self.assertAllEqual(
expected,
self.evaluate(
sparse_ops.bincount(
x, minlength=minlength, maxlength=maxlength, axis=0
)
),
)
@parameterized.product(
(
dict(
tid="_s2",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
expected=[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 3]],
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_axis_neg_one(
self, tid, x_factory, minlength, maxlength, expected
):
x = x_factory()
expected = _adjust_expected_rank2(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
sparse_ops.bincount(
x, minlength=minlength, maxlength=maxlength, axis=-1
)
),
)
@parameterized.product(
(
dict(
tid="_s1",
x_factory=_sparse_factory([1, 2, 2, 3, 3, 3]),
weights_factory=_sparse_factory([1, 2, 3, 4, 5, 6]),
expected=[0, 1, 5, 15],
axis=None,
),
dict(
tid="_s2",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
# weights have the same shape as x, so when x has an implicit
            # zero, the corresponding weight is treated as an implicit zero
weights_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 3], [4, 5, 6]]
),
axis=None,
expected=[0, 1, 5, 15],
),
dict(
tid="_s3",
x_factory=_sparse_factory(
[[[0, 0, 0], [0, 1, 0]], [[2, 0, 2], [3, 3, 3]]]
),
# weights have the same shape as x, so when x has an implicit
            # zero, the corresponding weight is treated as an implicit zero
weights_factory=_sparse_factory(
[[[0, 0, 0], [0, 1, 0]], [[2, 0, 3], [4, 5, 6]]]
),
axis=None,
expected=[0, 1, 5, 15],
),
dict(
tid="_s2_axis_neg_1",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
# weights have the same shape as x, so when x has an implicit
            # zero, the corresponding weight is treated as an implicit zero
weights_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 3], [4, 5, 6]]
),
expected=[
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 15],
],
axis=-1,
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_weights(
self,
tid,
x_factory,
weights_factory,
minlength,
maxlength,
expected,
axis,
):
device_set = set([d.device_type for d in tf_config.list_physical_devices()])
if "GPU" in device_set and not test_util.is_xla_enabled():
self.skipTest(
"b/263004039 The DenseBincount GPU kernel does not support weights."
" unsorted_segment_sum should be used instead on GPU."
)
x = x_factory()
weights = weights_factory()
if axis == -1:
expected = _adjust_expected_rank2(expected, minlength, maxlength)
else:
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
sparse_ops.bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
axis=axis,
)
),
)
@parameterized.product(
(
dict(
tid="_s1",
x_factory=_sparse_factory([1, 2, 2, 3, 3, 3]),
expected=[0, 1, 1, 1],
axis=None,
),
dict(
tid="_s1_zeros",
x_factory=_sparse_factory([1, 0, 0, 3, 3, 3]),
expected=[0, 1, 0, 1],
axis=None,
),
dict(
tid="_s2",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
expected=[0, 1, 1, 1],
axis=None,
),
dict(
tid="_s3",
x_factory=_sparse_factory(
[[[0, 0, 0], [0, 1, 0]], [[2, 0, 2], [3, 3, 3]]]
),
expected=[0, 1, 1, 1],
axis=None,
),
dict(
tid="_s2_axis_neg_1",
x_factory=_sparse_factory(
[[0, 0, 0], [0, 1, 0], [2, 0, 2], [3, 3, 3]]
),
expected=[[0, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
axis=-1,
),
),
(
dict(minlength=None, maxlength=None),
dict(minlength=3, maxlength=None),
dict(minlength=5, maxlength=None),
dict(minlength=None, maxlength=3),
dict(minlength=None, maxlength=5),
dict(minlength=2, maxlength=3),
dict(minlength=3, maxlength=5),
dict(minlength=5, maxlength=10),
dict(minlength=None, maxlength=0),
),
)
def test_binary_output(
self,
tid,
x_factory,
minlength,
maxlength,
expected,
axis=None,
):
x = x_factory()
if axis == -1:
expected = _adjust_expected_rank2(expected, minlength, maxlength)
else:
expected = _adjust_expected_rank1(expected, minlength, maxlength)
self.assertAllEqual(
expected,
self.evaluate(
sparse_ops.bincount(
x,
minlength=minlength,
maxlength=maxlength,
binary_output=True,
axis=axis,
)
),
)
|
TestSparseCount
|
python
|
tiangolo__fastapi
|
scripts/notify_translations.py
|
{
"start": 2327,
"end": 2401
}
|
class ____(BaseModel):
discussion: CommentsDiscussion
|
CommentsRepository
|
python
|
ray-project__ray
|
python/ray/data/_internal/block_batching/interfaces.py
|
{
"start": 399,
"end": 600
}
|
class ____:
"""A batch of data.
Attributes:
metadata: Metadata associated with this batch.
data: The batch of data.
"""
metadata: BatchMetadata
data: DataBatch
|
Batch
|
python
|
huggingface__transformers
|
src/transformers/models/arcee/modular_arcee.py
|
{
"start": 8539,
"end": 8658
}
|
class ____(LlamaForQuestionAnswering):
pass
@auto_docstring(checkpoint="arcee-ai/AFM-4.5B")
|
ArceeForQuestionAnswering
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_python_ast_rule.py
|
{
"start": 9733,
"end": 13828
}
|
class ____:
"""Test the validate_python_code_blocks function."""
def test_valid_code_blocks(self):
"""Test validation with valid Python code blocks."""
docstring = """
Function with valid examples.
.. code-block:: python
def example():
return "hello"
.. code-block:: python
x = [1, 2, 3]
print(x)
"""
context = ValidationContext(docstring=docstring, symbol_path="test.symbol")
result = ValidationResult.create("test.symbol")
result = validate_python_code_blocks(context, result)
assert not result.has_errors()
assert not result.has_warnings()
assert result.is_valid()
def test_invalid_code_blocks(self):
"""Test validation with invalid Python code blocks."""
docstring = """
Function with syntax errors.
.. code-block:: python
def broken_function(
return "missing paren"
.. code-block:: python
valid_code = "this is fine"
.. code-block:: python
another broken function(
print("another error")
"""
context = ValidationContext(docstring=docstring, symbol_path="test.symbol")
result = ValidationResult.create("test.symbol")
result = validate_python_code_blocks(context, result)
assert result.has_errors()
assert len(result.errors) == 2 # Two syntax errors
# Check that both errors are reported
error_text = " ".join(result.errors)
assert "Python code block syntax error" in error_text
assert "'(' was never closed" in error_text
def test_mixed_valid_invalid_blocks(self):
"""Test validation with mix of valid and invalid blocks."""
docstring = """
Mixed examples.
.. code-block:: python
# This is valid
print("hello")
.. code-block:: python
# This is broken
def bad_function(
return "error"
.. code-block:: python
# This is also valid
x = 42
y = x * 2
"""
context = ValidationContext(docstring=docstring, symbol_path="test.symbol")
result = ValidationResult.create("test.symbol")
result = validate_python_code_blocks(context, result)
assert result.has_errors()
assert len(result.errors) == 1 # Only one error
assert "Python code block syntax error" in result.errors[0]
def test_no_code_blocks(self):
"""Test validation with no Python code blocks."""
docstring = """
Function with no code examples.
Args:
param: A parameter
Returns:
A value
"""
context = ValidationContext(docstring=docstring, symbol_path="test.symbol")
result = ValidationResult.create("test.symbol")
result = validate_python_code_blocks(context, result)
assert not result.has_errors()
assert not result.has_warnings()
assert result.is_valid()
def test_line_number_reporting(self):
"""Test that error line numbers are correctly reported."""
docstring = """
Function with error on specific line.
.. code-block:: python
# This is line 6 of the docstring
def broken(
return "error on line 8"
"""
context = ValidationContext(docstring=docstring, symbol_path="test.symbol")
result = ValidationResult.create("test.symbol")
result = validate_python_code_blocks(context, result)
assert result.has_errors()
assert len(result.errors) == 1
# Should include line number in error message
error = result.errors[0]
assert "line 6" in error # Line where the code block starts
|
TestValidatePythonCodeBlocks
|
python
|
pandas-dev__pandas
|
pandas/tests/api/test_api.py
|
{
"start": 11539,
"end": 14655
}
|
class ____(Base):
funcs = [
"assert_frame_equal",
"assert_series_equal",
"assert_index_equal",
"assert_extension_array_equal",
]
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
def test_util_in_top_level(self):
with pytest.raises(AttributeError, match="foo"):
pd.util.foo
def get_pandas_objects(
module_name: str, recurse: bool
) -> list[tuple[str, str, object]]:
"""
Get all pandas objects within a module.
An object is determined to be part of pandas if it has a string
__module__ attribute that starts with ``"pandas"``.
Parameters
----------
module_name : str
Name of the module to search.
recurse : bool
Whether to search submodules.
Returns
-------
List of all objects that are determined to be a part of pandas.
"""
module = importlib.import_module(module_name)
objs = []
for name, obj in inspect.getmembers(module):
module_dunder = getattr(obj, "__module__", None)
if isinstance(module_dunder, str) and module_dunder.startswith("pandas"):
objs.append((module_name, name, obj))
if not recurse:
return objs
# __file__ can, but shouldn't, be None
assert isinstance(module.__file__, str)
paths = [pathlib.Path(module.__file__).parent]
for module_info in pkgutil.walk_packages(paths):
name = module_info.name
if name.startswith("_") or name == "internals":
continue
objs.extend(
get_pandas_objects(f"{module.__name__}.{name}", recurse=module_info.ispkg)
)
return objs
@pytest.mark.slow
@pytest.mark.parametrize(
"module_name",
[
"pandas",
"pandas.api",
"pandas.arrays",
"pandas.errors",
pytest.param("pandas.io", marks=pytest.mark.xfail(reason="Private imports")),
"pandas.plotting",
"pandas.testing",
],
)
def test_attributes_module(module_name):
"""
Ensures that all public objects have their __module__ set to the public import path.
"""
recurse = module_name not in ["pandas", "pandas.testing"]
objs = get_pandas_objects(module_name, recurse=recurse)
failures = [
(module_name, name, type(obj), obj.__module__)
for module_name, name, obj in objs
if not (
obj.__module__ == module_name
# Explicit exceptions
or ("Dtype" in name and obj.__module__ == "pandas")
or (name == "Categorical" and obj.__module__ == "pandas")
)
]
assert len(failures) == 0, "\n".join(str(e) for e in failures)
# Check that all objects can indeed be imported from their __module__
failures = []
for module_name, name, obj in objs:
module = importlib.import_module(obj.__module__)
try:
getattr(module, name)
except Exception:
failures.append((module_name, name, type(obj), obj.__module__))
assert len(failures) == 0, "\n".join(str(e) for e in failures)
|
TestTesting
|
python
|
donnemartin__interactive-coding-challenges
|
math_probability/generate_primes/test_generate_primes.py
|
{
"start": 18,
"end": 991
}
|
class ____(unittest.TestCase):
def test_generate_primes(self):
prime_generator = PrimeGenerator()
self.assertRaises(TypeError, prime_generator.generate_primes, None)
self.assertRaises(TypeError, prime_generator.generate_primes, 98.6)
self.assertEqual(prime_generator.generate_primes(20), [False, False, True,
True, False, True,
False, True, False,
False, False, True,
False, True, False,
False, False, True,
False, True])
print('Success: generate_primes')
def main():
test = TestMath()
test.test_generate_primes()
if __name__ == '__main__':
main()
|
TestMath
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_vitality.py
|
{
"start": 24,
"end": 1380
}
|
class ____:
def test_unweighted(self):
G = nx.cycle_graph(3)
vitality = nx.closeness_vitality(G)
assert vitality == {0: 2, 1: 2, 2: 2}
def test_weighted(self):
G = nx.Graph()
nx.add_cycle(G, [0, 1, 2], weight=2)
vitality = nx.closeness_vitality(G, weight="weight")
assert vitality == {0: 4, 1: 4, 2: 4}
def test_unweighted_digraph(self):
G = nx.DiGraph(nx.cycle_graph(3))
vitality = nx.closeness_vitality(G)
assert vitality == {0: 4, 1: 4, 2: 4}
def test_weighted_digraph(self):
G = nx.DiGraph()
nx.add_cycle(G, [0, 1, 2], weight=2)
nx.add_cycle(G, [2, 1, 0], weight=2)
vitality = nx.closeness_vitality(G, weight="weight")
assert vitality == {0: 8, 1: 8, 2: 8}
def test_weighted_multidigraph(self):
G = nx.MultiDiGraph()
nx.add_cycle(G, [0, 1, 2], weight=2)
nx.add_cycle(G, [2, 1, 0], weight=2)
vitality = nx.closeness_vitality(G, weight="weight")
assert vitality == {0: 8, 1: 8, 2: 8}
def test_disconnecting_graph(self):
"""Tests that the closeness vitality of a node whose removal
disconnects the graph is negative infinity.
"""
G = nx.path_graph(3)
assert nx.closeness_vitality(G, node=1) == -float("inf")
|
TestClosenessVitality
|