| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
python
|
aimacode__aima-python
|
utils.py
|
{
"start": 19555,
"end": 21732
}
|
class ____:
"""A Queue in which the minimum (or maximum) element (as determined by f and
order) is returned first.
If order is 'min', the item with minimum f(x) is
returned first; if order is 'max', then it is the item with maximum f(x).
Also supports dict-like lookup."""
def __init__(self, order='min', f=lambda x: x):
self.heap = []
if order == 'min':
self.f = f
elif order == 'max': # now item with max f(x)
self.f = lambda x: -f(x) # will be popped first
else:
raise ValueError("Order must be either 'min' or 'max'.")
def append(self, item):
"""Insert item at its correct position."""
heapq.heappush(self.heap, (self.f(item), item))
def extend(self, items):
"""Insert each item in items at its correct position."""
for item in items:
self.append(item)
def pop(self):
"""Pop and return the item (with min or max f(x) value)
depending on the order."""
if self.heap:
return heapq.heappop(self.heap)[1]
else:
raise Exception('Trying to pop from empty PriorityQueue.')
def __len__(self):
"""Return current capacity of PriorityQueue."""
return len(self.heap)
def __contains__(self, key):
"""Return True if the key is in PriorityQueue."""
return any([item == key for _, item in self.heap])
def __getitem__(self, key):
"""Returns the first value associated with key in PriorityQueue.
Raises KeyError if key is not present."""
for value, item in self.heap:
if item == key:
return value
raise KeyError(str(key) + " is not in the priority queue")
def __delitem__(self, key):
"""Delete the first occurrence of key."""
try:
del self.heap[[item == key for _, item in self.heap].index(True)]
except ValueError:
raise KeyError(str(key) + " is not in the priority queue")
heapq.heapify(self.heap)
# ______________________________________________________________________________
# Useful Shorthands
|
PriorityQueue
|
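The row above masks a min/max heap keyed by `f` with dict-style lookup. A minimal usage sketch, assuming the masked class is the `PriorityQueue` named in the target column and that `heapq` is imported by the surrounding module:

```python
# A minimal sketch, assuming the masked class above is PriorityQueue (per the
# target column) and heapq is already imported by the original module.
pq = PriorityQueue(order='min', f=len)
pq.extend(["banana", "fig", "cherry"])
assert "fig" in pq        # __contains__ compares stored items, not priorities
assert pq["fig"] == 3     # __getitem__ returns the stored f(item) value
assert pq.pop() == "fig"  # the item with the smallest f(x) comes out first
assert len(pq) == 2       # __len__ counts the remaining items
```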
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-upgradable-servers.py
|
{
"start": 55,
"end": 564
}
|
class ____(object):
def maxUpgrades(self, count, upgrade, sell, money):
"""
:type count: List[int]
:type upgrade: List[int]
:type sell: List[int]
:type money: List[int]
:rtype: List[int]
"""
# let x be the number of sold servers
# (c-x)*u <= m+(x*s)
# -x <= (m-c*u)//(u+s) <= 0
# c-x <= c+(m-c*u)//(u+s) <= c
return [min(c+(m-c*u)//(u+s), c) for c, u, s, m in itertools.izip(count, upgrade, sell, money)]
|
Solution
|
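The row above derives a closed-form answer from the inequality `(c - x) * u <= m + x * s`. A Python 3 restatement of that formula (the original cell is Python 2 and uses `itertools.izip`); the numbers in the check are illustrative, not taken from the dataset:

```python
# A Python 3 sketch of the same formula; the example values are illustrative.
def max_upgrades(count, upgrade, sell, money):
    # Selling x servers leaves c - x to upgrade and must satisfy
    # (c - x) * u <= m + x * s, so the answer is min(c, c + (m - c*u) // (u + s)).
    return [min(c + (m - c * u) // (u + s), c)
            for c, u, s, m in zip(count, upgrade, sell, money)]

# One data center with 3 servers, upgrade cost 5, sell price 2, money 8:
# selling one server gives 8 + 2 = 10, enough to upgrade the remaining two.
assert max_upgrades([3], [5], [2], [8]) == [2]
```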
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/_cipheralgorithm.py
|
{
"start": 356,
"end": 882
}
|
class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def name(self) -> str:
"""
A string naming this algorithm (e.g. "AES", "Camellia").
"""
@property
@abc.abstractmethod
def key_sizes(self) -> frozenset[int]:
"""
Valid key sizes for this algorithm in bits
"""
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
The size of the key being used as an integer in bits (e.g. 128, 256).
"""
|
CipherAlgorithm
|
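The row above defines only an abstract interface. A toy sketch of a concrete subclass, assuming the masked base class is the `CipherAlgorithm` named in the target column; "ToyCipher" is an illustrative name, not an algorithm shipped by pyca/cryptography:

```python
# A toy concrete implementation of the abstract interface above; ToyCipher is
# illustrative and the base class name is assumed from the target column.
class ToyCipher(CipherAlgorithm):
    name = "ToyCipher"
    key_sizes = frozenset([128, 256])

    def __init__(self, key: bytes) -> None:
        if len(key) * 8 not in self.key_sizes:
            raise ValueError("Invalid key size for ToyCipher.")
        self._key = key

    @property
    def key_size(self) -> int:
        return len(self._key) * 8
```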
python
|
PyCQA__pylint
|
doc/data/messages/t/too-many-public-methods/good.py
|
{
"start": 229,
"end": 281
}
|
class ____:
def launch(self):
pass
|
Missile
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/handlers/condition/event_frequency_query_handlers.py
|
{
"start": 1960,
"end": 11293
}
|
class ____(ABC):
intervals: ClassVar[dict[str, tuple[str, timedelta]]] = STANDARD_INTERVALS
def get_query_window(self, end: datetime, duration: timedelta) -> tuple[datetime, datetime]:
"""
Calculate the start and end times for the query.
"duration" is the length of the window we're querying over.
"""
start = end - duration
return (start, end)
def disable_consistent_snuba_mode(
self, duration: timedelta
) -> contextlib.AbstractContextManager[object]:
"""For conditions with interval >= 1 hour we don't need to worry about read or writes
consistency. Disable it so that we can scale to more nodes.
"""
option_override_cm: contextlib.AbstractContextManager[object] = contextlib.nullcontext()
if duration >= timedelta(hours=1):
option_override_cm = options_override({"consistent": False})
return option_override_cm
def get_snuba_query_result(
self,
tsdb_function: TSDBFunction,
keys: list[int],
group_id: int,
organization_id: int,
model: TSDBModel,
start: datetime,
end: datetime,
environment_id: int | None,
referrer_suffix: str,
conditions: list[SnubaCondition] | None = None,
group_on_time: bool = False,
project_ids: list[int] | None = None,
) -> Mapping[int, int]:
result: Mapping[int, int] = tsdb_function(
model=model,
keys=keys,
start=start,
end=end,
environment_id=environment_id,
use_cache=True,
jitter_value=group_id,
tenant_ids={"organization_id": organization_id},
referrer_suffix=referrer_suffix,
conditions=conditions,
group_on_time=group_on_time,
project_ids=project_ids,
)
return result
def get_chunked_result(
self,
tsdb_function: TSDBFunction,
model: TSDBModel,
group_ids: list[int],
organization_id: int,
start: datetime,
end: datetime,
environment_id: int | None,
referrer_suffix: str,
filters: list[QueryFilter] | None = None,
group_on_time: bool = False,
project_ids: list[int] | None = None,
) -> dict[int, int]:
batch_totals: dict[int, int] = defaultdict(int)
group_id = group_ids[0]
conditions = self.get_extra_snuba_conditions(model, filters) if filters else []
for group_chunk in chunked(group_ids, SNUBA_LIMIT):
result = self.get_snuba_query_result(
tsdb_function=tsdb_function,
model=model,
keys=[group_id for group_id in group_chunk],
group_id=group_id,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix=referrer_suffix,
conditions=conditions,
group_on_time=group_on_time,
project_ids=project_ids,
)
batch_totals.update(result)
return batch_totals
def get_group_ids_by_category(
self,
groups: list[GroupValues],
) -> dict[GroupCategory, list[int]]:
"""
Separate group ids into error group ids and generic group ids
"""
category_group_ids: dict[GroupCategory, list[int]] = defaultdict(list)
for group in groups:
issue_type = get_group_type_by_type_id(group["type"])
category = GroupCategory(issue_type.category)
category_group_ids[category].append(group["id"])
return category_group_ids
def get_value_from_groups(
self,
groups: list[GroupValues],
value: Literal["id", "project_id", "project__organization_id"],
) -> int | None:
result = None
if groups:
group = groups[0]
result = group.get(value)
return result
def get_extra_snuba_conditions(
self, category: TSDBModel, filters: list[QueryFilter]
) -> list[SnubaCondition]:
conditions = []
for filter in filters:
snuba_condition = self.convert_filter_to_snuba_condition(filter, category)
if snuba_condition:
conditions.append(snuba_condition)
return conditions
@staticmethod
def convert_filter_to_snuba_condition(
condition: dict[str, Any], tsdb_model: TSDBModel
) -> SnubaCondition | None:
# condition can be TaggedEventFilter (key) or EventAttributeFilter (attribute)
key = condition.get("key")
attribute = condition.get("attribute")
if not key and not attribute:
return None
lhs: str | None = None
if key:
lhs = f"tags[{condition['key']}]"
elif attribute:
column = ATTR_CHOICES.get(attribute)
if column is None:
return None
lhs = get_dataset_column_name(tsdb_model, column.value.alias)
if lhs is None:
# Some attribute columns are only available for errors.
# Raise and catch to return 0 events that meet the filters for other issue types
raise InvalidFilter
rhs = (
condition["value"]
if condition["match"] not in (MatchType.IS_SET, MatchType.NOT_SET)
else None
)
if attribute == "error.unhandled":
# flip values, since the queried column is "error.handled"
rhs = not condition["value"]
match condition["match"]:
case MatchType.EQUAL:
operator = Op.EQ
case MatchType.NOT_EQUAL:
operator = Op.NEQ
case MatchType.STARTS_WITH:
operator = Op.LIKE
rhs = f"{rhs}%"
case MatchType.NOT_STARTS_WITH:
operator = Op.NOT_LIKE
rhs = f"{rhs}%"
case MatchType.ENDS_WITH:
operator = Op.LIKE
rhs = f"%{rhs}"
case MatchType.NOT_ENDS_WITH:
operator = Op.NOT_LIKE
rhs = f"%{rhs}"
case MatchType.CONTAINS:
operator = Op.LIKE
rhs = f"%{rhs}%"
case MatchType.NOT_CONTAINS:
operator = Op.NOT_LIKE
rhs = f"%{rhs}%"
case MatchType.IS_SET:
operator = Op.IS_NOT_NULL
rhs = None
case MatchType.NOT_SET:
operator = Op.IS_NULL
rhs = None
case MatchType.IS_IN:
operator = Op.IN
if not isinstance(rhs, str):
raise ValueError(f"Unsupported value type for {condition['match']}")
rhs = rhs.split(",")
case MatchType.NOT_IN:
operator = Op.NOT_IN
if not isinstance(rhs, str):
raise ValueError(f"Unsupported value type for {condition['match']}")
rhs = rhs.split(",")
case _:
raise ValueError(f"Unsupported match type: {condition['match']}")
return (lhs, operator.value, rhs)
@abstractmethod
def batch_query(
self,
groups: list[GroupValues],
start: datetime,
end: datetime,
environment_id: int | None,
filters: list[QueryFilter] | None = None,
) -> QueryResult:
"""
Abstract method that specifies how to query Snuba for multiple groups
depending on the condition. Must be implemented by subclasses.
"""
raise NotImplementedError
def get_rate_bulk(
self,
duration: timedelta,
groups: list[GroupValues],
environment_id: int | None,
current_time: datetime,
comparison_interval: timedelta | None,
filters: list[QueryFilter] | None,
) -> QueryResult:
"""
Make a batch query for multiple groups. The return value is a dictionary
of group_id to the result for that group.
If comparison_interval is not None, we're making the second query in a
percent comparison condition. For example, if the condition is:
- num of issues is {}% higher in 1 hr compared to 5 min ago
The second query would be querying for num of events from:
- 5 min ago to 1 hr 5 min ago
"""
if comparison_interval:
current_time -= comparison_interval
start, end = self.get_query_window(end=current_time, duration=duration)
with self.disable_consistent_snuba_mode(duration):
result = self.batch_query(
groups=groups,
start=start,
end=end,
environment_id=environment_id,
filters=filters,
)
return result
slow_condition_query_handler_registry = Registry[type[BaseEventFrequencyQueryHandler]](
enable_reverse_lookup=False
)
@slow_condition_query_handler_registry.register(Condition.EVENT_FREQUENCY_COUNT)
@slow_condition_query_handler_registry.register(Condition.EVENT_FREQUENCY_PERCENT)
|
BaseEventFrequencyQueryHandler
|
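The handler above maps rule match types onto Snuba operators and SQL LIKE patterns. A standalone reduction of just that pattern-building step; the helper name and the plain-string match keys are illustrative, not Sentry APIs:

```python
# An illustrative reduction of the match-type handling above: "starts with",
# "ends with" and "contains" conditions become SQL LIKE patterns.
def like_pattern(match: str, value: str) -> str:
    patterns = {
        "starts_with": f"{value}%",
        "ends_with": f"%{value}",
        "contains": f"%{value}%",
    }
    return patterns[match]

assert like_pattern("starts_with", "prod-") == "prod-%"
assert like_pattern("contains", "timeout") == "%timeout%"
```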
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/declarative_automation/serialized_objects.py
|
{
"start": 8335,
"end": 12131
}
|
class ____(Generic[T_EntityKey]):
"""Incremental state calculated during the evaluation of a AutomationCondition. This may be used
on the subsequent evaluation to make the computation more efficient.
Args:
previous_requested_subset: The subset that was requested for this asset on the previous tick.
effective_timestamp: The timestamp at which the evaluation was performed.
last_event_id: The maximum storage ID over all events used in this evaluation.
node_cursors_by_unique_id: A mapping from the unique ID of each condition in the evaluation
tree to any incremental state calculated for it.
result_hash: A unique hash of the result for this tick. Used to determine if anything
has changed since the last time this was evaluated.
"""
previous_requested_subset: SerializableEntitySubset
effective_timestamp: float
last_event_id: Optional[int]
node_cursors_by_unique_id: Mapping[str, AutomationConditionNodeCursor]
result_value_hash: str
@staticmethod
def backcompat_from_evaluation_state(
evaluation_state: "AutomationConditionEvaluationState",
) -> "AutomationConditionCursor":
"""Serves as a temporary method to convert from old representation to the new representation."""
def _get_node_cursors(
evaluation: AutomationConditionEvaluation,
) -> Mapping[str, AutomationConditionNodeCursor]:
node_cursors = {
evaluation.condition_snapshot.unique_id: AutomationConditionNodeCursor(
true_subset=evaluation.true_subset,
candidate_subset=evaluation.candidate_subset,
subsets_with_metadata=evaluation.subsets_with_metadata,
extra_state=evaluation_state.extra_state_by_unique_id.get(
evaluation.condition_snapshot.unique_id
),
)
}
for child in evaluation.child_evaluations:
node_cursors.update(_get_node_cursors(child))
return node_cursors
return AutomationConditionCursor(
previous_requested_subset=evaluation_state.previous_evaluation.true_subset,
effective_timestamp=evaluation_state.previous_tick_evaluation_timestamp or 0,
last_event_id=evaluation_state.max_storage_id,
node_cursors_by_unique_id=_get_node_cursors(evaluation_state.previous_evaluation),
result_value_hash="",
)
@staticmethod
def from_result(
context: "AutomationContext", result: "AutomationResult", result_hash: str
) -> "AutomationConditionCursor":
def _gather_node_cursors(r: "AutomationResult"):
node_cursors = {r.condition_unique_id: r.node_cursor} if r.node_cursor else {}
for rr in r.child_results:
node_cursors.update(_gather_node_cursors(rr))
return node_cursors
return AutomationConditionCursor(
previous_requested_subset=result.true_subset.convert_to_serializable_subset(),
effective_timestamp=context.evaluation_time.timestamp(),
last_event_id=context.max_storage_id,
node_cursors_by_unique_id=_gather_node_cursors(result),
result_value_hash=result_hash,
)
@property
def key(self) -> T_EntityKey:
return self.previous_requested_subset.key
@property
def temporal_context(self) -> TemporalContext:
return TemporalContext(
effective_dt=datetime_from_timestamp(self.effective_timestamp),
last_event_id=self.last_event_id,
)
@whitelist_for_serdes(storage_name="AssetConditionEvaluationState")
@dataclass(frozen=True)
|
AutomationConditionCursor
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-google-search-console/components.py
|
{
"start": 2994,
"end": 4624
}
|
class ____(RecordTransformation):
"""
A record transformation that remaps each value in the keys array back to its associated
dimension. The reason this is a custom component is that we're unable to use list
comprehension, and enumerate() is not a valid function in our Jinja context, so we can't
iterate over the dimensions defined in the config to create each field transformation on the
stream_template for each custom report.
If we were able to, the actual ComponentMappingDefinition would look like this:
type: ComponentMappingDefinition
field_path:
- transformations
- "1"
- fields
value: "{{ [{'path': [dimension], 'value': '{{ record['keys'][index]} for index, dimension in enumerate(components_values['dimensions'])] }}"
or
type: ComponentMappingDefinition
field_path:
- transformations
- "1"
- fields
value: >
{% for index, dimension in enumerate(components_values["dimensions"]) %}
- type: AddFields
fields:
- path: [ {{ dimension }} ]
value: "{{ record['keys'][index] }}"
{% endfor %}
"""
dimensions: List[str] = field(default_factory=lambda: [])
def transform(
self,
record: Dict[str, Any],
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
for dimension in self.dimensions:
record[dimension] = record["keys"].pop(0)
record.pop("keys")
@dataclass
|
CustomReportExtractDimensionsFromKeys
|
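The transform in the row above remaps each entry of the `keys` array back onto its dimension. A standalone sketch of what it does to one record; the sample dimensions and record are illustrative, not taken from the connector config:

```python
# An illustrative walk-through of the transform above with made-up data.
record = {"clicks": 10, "keys": ["2024-01-01", "US", "MOBILE"]}
dimensions = ["date", "country", "device"]

for dimension in dimensions:
    record[dimension] = record["keys"].pop(0)  # remap each key to its dimension
record.pop("keys")                             # drop the now-empty keys array

assert record == {
    "clicks": 10,
    "date": "2024-01-01",
    "country": "US",
    "device": "MOBILE",
}
```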
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-inheritance_diagram/test.py
|
{
"start": 91,
"end": 132
}
|
class ____(DocSubDir1):
pass
|
DocSubDir2
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/zero_scrollbar_size.py
|
{
"start": 80,
"end": 314
}
|
class ____(App):
DEFAULT_CSS = """
Screen {
scrollbar-size: 0 0;
}
"""
def compose(self) -> ComposeResult:
yield Static("Hello, world!\n" * 100)
if __name__ == "__main__":
TestApp().run()
|
TestApp
|
python
|
doocs__leetcode
|
solution/1500-1599/1568.Minimum Number of Days to Disconnect Island/Solution.py
|
{
"start": 0,
"end": 1050
}
|
class ____:
def minDays(self, grid: List[List[int]]) -> int:
if self.count(grid) != 1:
return 0
m, n = len(grid), len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
grid[i][j] = 0
if self.count(grid) != 1:
return 1
grid[i][j] = 1
return 2
def count(self, grid):
def dfs(i, j):
grid[i][j] = 2
for a, b in [[0, -1], [0, 1], [1, 0], [-1, 0]]:
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and grid[x][y] == 1:
dfs(x, y)
m, n = len(grid), len(grid[0])
cnt = 0
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
dfs(i, j)
cnt += 1
for i in range(m):
for j in range(n):
if grid[i][j] == 2:
grid[i][j] = 1
return cnt
|
Solution
|
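The solution above relies on the observation that the answer is always 0, 1 or 2, because a corner cell of any island can be cut off by removing at most its two neighbours. A quick sanity check, assuming the masked class is the `Solution` named in the target column and the usual `from typing import List` import; the example grids are illustrative:

```python
# Illustrative checks of the brute force above (class name assumed from the
# target column; the grids are made up, not dataset content).
s = Solution()
assert s.minDays([[1, 0], [0, 1]]) == 0                   # already not exactly one island
assert s.minDays([[1, 1, 0], [0, 1, 0], [0, 1, 1]]) == 1  # the middle cell is a cut point
assert s.minDays([[0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]) == 2
```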
python
|
bottlepy__bottle
|
test/test_multipart.py
|
{
"start": 2410,
"end": 7812
}
|
class ____(BaseMultipartTest):
def assertIterline(self, data, *expected, **options):
self.assertEqual(
list(bottle._MultipartParser(BytesIO(bottle.tob(data)), 'foo', **options)._lineiter()),
[(bottle.tob(l), bottle.tob(nl)) for l,nl in expected])
def test_iterlines(self):
self.assertIterline('abc\ndef\r\nghi', ('abc\ndef','\r\n'), ('ghi', ''))
def test_iterlines_limit(self):
self.assertIterline('abc\ndef\r\nghi', ('abc\ndef','\r\n'), ('g', ''), content_length=10)
self.assertIterline('abc\ndef\r\nghi', ('abc\ndef\r',''), content_length=8)
def test_fuzzy_lineiter(self):
""" Test all possible buffer sizes """
minbuflen = 9 # boundary size of '--foo--\r\n'
data = b'data\rdata\ndata\r\ndata\n\rdata\r\n'.replace(b'data', b'X'*minbuflen*2)
lines = data.split(b"\r\n")[:-1]
for tail in (b"", b"tail"):
for buffer_size in range(minbuflen, len(data+tail)+1):
splits = list(bottle._MultipartParser(
BytesIO(data+tail), 'foo',
buffer_size=buffer_size)._lineiter())
partial = b""
merged = []
for part, nl in splits:
self.assertTrue(nl in (b"", b"\r\n"))
self.assertTrue(len(part) >= buffer_size or nl or part == tail)
partial += part
if nl:
merged.append(partial)
partial = b""
self.assertEqual(merged, lines)
self.assertEqual(tail, partial)
def test_big_file(self):
''' If the size of an uploaded part exceeds memfile_limit,
it is written to disk. '''
test_file = 'abc'*1024
boundary = '---------------------------186454651713519341951581030105'
request = BytesIO(bottle.tob('\r\n').join(map(bottle.tob,[
'--' + boundary,
'Content-Disposition: form-data; name="file1"; filename="random.png"',
'Content-Type: image/png', '', test_file, '--' + boundary,
'Content-Disposition: form-data; name="file2"; filename="random.png"',
'Content-Type: image/png', '', test_file + 'a', '--' + boundary,
'Content-Disposition: form-data; name="file3"; filename="random.png"',
'Content-Type: image/png', '', test_file*2, '--'+boundary+'--',''])))
parts = list(bottle._MultipartParser(request, boundary, memfile_limit=len(test_file)).parse())
p = {p.name: p for p in parts}
try:
self.assertEqual(p.get('file1').file.read(), bottle.tob(test_file))
self.assertTrue(p.get('file1').is_buffered())
self.assertEqual(p.get('file2').file.read(), bottle.tob(test_file + 'a'))
self.assertFalse(p.get('file2').is_buffered())
self.assertEqual(p.get('file3').file.read(), bottle.tob(test_file*2))
self.assertFalse(p.get('file3').is_buffered())
finally:
for part in parts:
part.close()
def test_file_seek(self):
''' The file object should be readable without a seek(0). '''
test_file = 'abc'*1024
boundary = '---------------------------186454651713519341951581030105'
request = BytesIO(bottle.tob('\r\n').join(map(bottle.tob,[
'--' + boundary,
'Content-Disposition: form-data; name="file1"; filename="random.png"',
'Content-Type: image/png', '', test_file, '--' + boundary + '--',''])))
p = list(bottle._MultipartParser(request, boundary).parse())
self.assertEqual(p[0].file.read(), bottle.tob(test_file))
self.assertEqual(p[0].value, test_file)
def test_unicode_value(self):
''' The .value property always returns unicode '''
test_file = 'abc'*1024
boundary = '---------------------------186454651713519341951581030105'
request = BytesIO(bottle.tob('\r\n').join(map(bottle.tob,[
'--' + boundary,
'Content-Disposition: form-data; name="file1"; filename="random.png"',
'Content-Type: image/png', '', test_file, '--' + boundary + '--',''])))
p = list(bottle._MultipartParser(request, boundary).parse())
self.assertEqual(p[0].file.read(), bottle.tob(test_file))
self.assertEqual(p[0].value, test_file)
self.assertTrue(hasattr(p[0].value, 'encode'))
def test_multiline_header(self):
''' HTTP allows headers to be multiline. '''
test_file = bottle.tob('abc'*1024)
test_text = u'Test text\n with\r\n ümläuts!'
boundary = '---------------------------186454651713519341951581030105'
request = BytesIO(bottle.tob('\r\n').join(map(bottle.tob,[
'--' + boundary,
'Content-Disposition: form-data;',
'\tname="file1"; filename="random.png"',
'Content-Type: image/png', '', test_file, '--' + boundary,
'Content-Disposition: form-data;',
' name="text"', '', test_text,
'--' + boundary + '--',''])))
p = list(bottle._MultipartParser(request, boundary, charset='utf8').parse())
self.assertEqual(p[0].name, "file1")
self.assertEqual(p[0].file.read(), test_file)
self.assertEqual(p[0].filename, 'random.png')
self.assertEqual(p[1].name, "text")
self.assertEqual(p[1].value, test_text)
|
TestMultipartParser
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/network/netbsd.py
|
{
"start": 1643,
"end": 1748
}
|
class ____(NetworkCollector):
_fact_class = NetBSDNetwork
_platform = 'NetBSD'
|
NetBSDNetworkCollector
|
python
|
MongoEngine__mongoengine
|
tests/fields/test_datetime_field.py
|
{
"start": 6498,
"end": 7102
}
|
class ____(MongoDBTestCase):
def test_datetime_tz_aware_mark_as_changed(self):
# Reset the connections
connection._connection_settings = {}
connection._connections = {}
connection._dbs = {}
connect(db="mongoenginetest", tz_aware=True)
class LogEntry(Document):
time = DateTimeField()
LogEntry.drop_collection()
LogEntry(time=dt.datetime(2013, 1, 1, 0, 0, 0)).save()
log = LogEntry.objects.first()
log.time = dt.datetime(2013, 1, 1, 0, 0, 0)
assert ["time"] == log._changed_fields
|
TestDateTimeTzAware
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_greater_than_or_equal_to_threshold.py
|
{
"start": 7758,
"end": 15049
}
|
class ____(
ProfileNumericColumnsDiffExpectation
):
"""Expect a statistic's percent delta for a given column of a DataProfiler percent difference report to be greater than or equal to the specified threshold.
This expectation takes the percent difference report between the data it is called on and a DataProfiler profile of the same schema loaded from a provided path.
Each numerical column percent delta will be checked against a user provided dictionary of columns paired with dictionaries of statistics containing a threshold.
This function builds upon the custom ProfileNumericColumnsDiff Expectation of Capital One's DataProfiler Expectations.
It is expected that a statistic's percent delta for a given column is greater than or equal to the specified threshold.
Args:
profile_path (str): A path to a saved DataProfiler profile object on the local filesystem.
limit_check_report_keys (dict[str, dict[str, float]]): A dict, containing column names as keys and dicts as values that contain statistics as keys and thresholds as values
mostly (float - optional): a value indicating the lower bound percentage of successful values that must be present to evaluate to success=True.
validator.expect_profile_numeric_columns_percent_diff_greater_than_or_equal_to_threshold(
profile_path = "C:/path_to/my_profile.pkl",
limit_check_report_keys = {
"column_one": {
"min": 0.5, #Indicating the threshold for the 'min' statistic in 'column_one' is 50%
},
"*": {
"*": .25, #Indicating the threshold for every statistic in every column is 25%
},
}
)
Note: In limit_check_report_keys, "*" in place of a column denotes a general operator in which the value it stores will be applied to every column in the data that has no explicit key.
"*" in place of a statistic denotes a general operator in which the bounds it stores will be applied to every statistic for the given column that has no explicit key.
"""
example_profile_data = [
[2, 5, "10", "ten", 25],
[4, 10, "20", "twenty", 50],
[6, 15, "30", "thirty", 75],
[8, 20, "40", "forty", 100],
[10, 25, "50", "fifty", 125],
]
example_profile_columns = [
"by_2",
"by_5",
"str_by_10",
"words_by_10",
"by_25",
]
df = pd.DataFrame(example_profile_data, columns=example_profile_columns)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
example_profile = dp.Profiler(df, options=profiler_opts)
profile_path = "/example_profiles/expect_profile_diff_less_than_threshold_profile.pkl"
dir_path = os.path.dirname(os.path.abspath(__file__)) # noqa: PTH120, PTH100
profile_path = dir_path + profile_path
example_profile.save(filepath=profile_path)
examples = [
{
"data": {
"by_2": [4, 6, 8, 10, 12],
"by_5": [10, 15, 20, 25, 30],
"str_by_10": ["20", "30", "40", "50", "60"],
"words_by_10": ["twenty", "thirty", "forty", "fifty", "sixty"],
"by_25": [50, 75, 100, 125, 150],
},
"tests": [
{
"title": "profile_min_delta_exceeds_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {
"min": 0.5,
},
},
},
"out": {"success": True},
},
{
"title": "single_column_min_delta_equals_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"by_2": {
"min": 1.0,
},
},
},
"out": {"success": True},
},
{
"title": "single_column_min_delta_below_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"by_2": {
"min": 1.01,
},
},
},
"out": {"success": False},
},
{
"title": "profile_all_stats_below_delta_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {"*": 100.0},
},
},
"out": {"success": False},
},
],
},
]
profile_metric = (
"data_profiler.profile_numeric_columns_percent_diff_greater_than_or_equal_to_threshold"
)
success_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
"mostly",
)
default_limit_check_report_keys = {
"*": {
"min": 0.0,
"max": 0.0,
"sum": 0.0,
"mean": 0.0,
"median": 0.0,
"median_absolute_deviation": 0.0,
"variance": 0.0,
"stddev": 0.0,
"unique_count": 0.0,
"unique_ratio": 0.0,
"gini_impurity": 0.0,
"unalikeability": 0.0,
"sample_size": 0.0,
"null_count": 0.0,
}
}
numerical_diff_statistics = list(default_limit_check_report_keys["*"].keys())
default_kwarg_values = {
"limit_check_report_keys": default_limit_check_report_keys,
"numerical_diff_statistics": numerical_diff_statistics,
"mostly": 1.0,
}
library_metadata = {
"requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
"maturity": "experimental", # "concept_only", "experimental", "beta", or "production"
"tags": [
"dataprofiler",
"dataassistance",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@stevensecreti", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
diagnostics_report = (
ExpectProfileNumericColumnsPercentDiffGreaterThanOrEqualToThreshold().run_diagnostics()
)
print(diagnostics_report.generate_checklist())
|
ExpectProfileNumericColumnsPercentDiffGreaterThanOrEqualToThreshold
|
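The expectation above describes "*" wildcards for columns and statistics in `limit_check_report_keys`. An illustrative helper (not part of the expectation itself; the function name and resolution order are assumptions) showing how such wildcards could resolve to a per-column, per-statistic threshold:

```python
# A hypothetical sketch of the wildcard semantics described in the docstring above.
def resolve_threshold(limit_check_report_keys, column, statistic):
    column_limits = limit_check_report_keys.get(
        column, limit_check_report_keys.get("*", {})
    )
    return column_limits.get(statistic, column_limits.get("*"))

limits = {"column_one": {"min": 0.5}, "*": {"*": 0.25}}
assert resolve_threshold(limits, "column_one", "min") == 0.5   # explicit threshold wins
assert resolve_threshold(limits, "column_two", "max") == 0.25  # falls back to the wildcards
```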
python
|
jazzband__django-oauth-toolkit
|
tests/test_authorization_code.py
|
{
"start": 20771,
"end": 26701
}
|
class ____(BaseTest):
def test_login(self):
"""
Test login page is rendered if user is not authenticated
"""
self.oauth2_settings.PKCE_REQUIRED = False
query_data = {
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
path = reverse("oauth2_provider:authorize")
response = self.client.get(path, data=query_data)
# The authorization view redirects to the login page with the original request preserved in the "next" query parameter
self.assertEqual(response.status_code, 302)
scheme, netloc, path, params, query, fragment = urlparse(response["Location"])
self.assertEqual(path, settings.LOGIN_URL)
parsed_query = parse_qs(query)
next = parsed_query["next"][0]
self.assertIn(f"client_id={self.application.client_id}", next)
self.assertIn("response_type=code", next)
self.assertIn("state=random_state_string", next)
self.assertIn("scope=openid", next)
self.assertIn("redirect_uri=http%3A%2F%2Fexample.org", next)
def test_id_token_skip_authorization_completely(self):
"""
If application.skip_authorization = True, should skip the authorization page.
"""
self.client.login(username="test_user", password="123456")
self.application.skip_authorization = True
self.application.save()
query_data = {
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
def test_id_token_pre_auth_valid_client(self):
"""
Test response for a valid client_id with response_type: code
"""
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 200)
# check form is in context and form params are valid
self.assertIn("form", response.context)
form = response.context["form"]
self.assertEqual(form["redirect_uri"].value(), "http://example.org")
self.assertEqual(form["state"].value(), "random_state_string")
self.assertEqual(form["scope"].value(), "openid")
self.assertEqual(form["client_id"].value(), self.application.client_id)
def test_id_token_code_post_auth_allow(self):
"""
Test authorization code is given for an allowed request with response_type: code
"""
self.client.login(username="test_user", password="123456")
form_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "openid",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=form_data)
self.assertEqual(response.status_code, 302)
self.assertIn("http://example.org?", response["Location"])
self.assertIn("state=random_state_string", response["Location"])
self.assertIn("code=", response["Location"])
def test_prompt_login(self):
"""
Test response for redirect when supplied with prompt: login
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
query_data = {
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"prompt": "login",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
scheme, netloc, path, params, query, fragment = urlparse(response["Location"])
self.assertEqual(path, settings.LOGIN_URL)
parsed_query = parse_qs(query)
next = parsed_query["next"][0]
self.assertIn("redirect_uri=http%3A%2F%2Fexample.org", next)
self.assertIn("state=random_state_string", next)
self.assertIn("scope=read+write", next)
self.assertIn(f"client_id={self.application.client_id}", next)
self.assertNotIn("prompt=login", next)
def test_prompt_none_unauthorized(self):
"""
Test response for redirect when supplied with prompt: none
Should redirect to redirect_uri with an error of login_required
"""
self.oauth2_settings.PKCE_REQUIRED = False
query_data = {
"client_id": self.application.client_id,
"response_type": "code",
"state": "random_state_string",
"scope": "read write",
"redirect_uri": "http://example.org",
"prompt": "none",
}
response = self.client.get(reverse("oauth2_provider:authorize"), data=query_data)
self.assertEqual(response.status_code, 302)
scheme, netloc, path, params, query, fragment = urlparse(response["Location"])
parsed_query = parse_qs(query)
self.assertIn("login_required", parsed_query["error"])
self.assertIn("random_state_string", parsed_query["state"])
|
TestOIDCAuthorizationCodeView
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/naming_test.py
|
{
"start": 819,
"end": 1750
}
|
class ____(test.TestCase):
def test_new_symbol_tracks_names(self):
namer = naming.Namer({})
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp',), namer.generated_names)
def test_new_symbol_avoids_duplicates(self):
namer = naming.Namer({})
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp', 'temp_1'), namer.generated_names)
def test_new_symbol_avoids_conflicts(self):
namer = naming.Namer({'temp': 1})
# temp is reserved in the global namespace
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
# temp_2 is reserved in the local namespace
self.assertEqual('temp_3', namer.new_symbol('temp', set(('temp_2',))))
self.assertItemsEqual(('temp_1', 'temp_3'), namer.generated_names)
if __name__ == '__main__':
test.main()
|
NamerTest
|
python
|
huggingface__transformers
|
tests/models/segformer/test_modeling_segformer.py
|
{
"start": 13684,
"end": 18634
}
|
class ____(unittest.TestCase):
@slow
def test_inference_image_segmentation_ade(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
torch_device
)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
],
("cuda", 8): [
[[-4.6310, -5.5232, -6.2361], [-5.1918, -6.1445, -6.5996], [-5.4427, -6.2792, -6.7580]],
[[-12.1397, -13.3124, -13.9551], [-12.8736, -13.9347, -14.3569], [-12.9440, -13.8222, -14.2514]],
[[-12.5135, -13.4682, -14.4913], [-12.8670, -14.4339, -14.7766], [-13.2519, -14.5800, -15.0685]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@slow
def test_inference_image_segmentation_city(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
).to(torch_device)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([]).to(torch_device)
expectations = Expectations(
{
(None, None): [
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
],
("cuda", 8): [
[[-13.5728, -13.9089, -12.6492], [-14.3478, -15.3656, -14.2309], [-14.7512, -16.0394, -15.6065]],
[[-17.1642, -15.8704, -12.9641], [-17.2572, -17.3701, -14.8214], [-16.6043, -16.8761, -16.7425]],
[[-3.6444, -3.0189, -1.4195], [-3.0787, -3.1953, -1.9993], [-1.8755, -1.9219, -1.7002]],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3, :3, :3], expected_slice, rtol=1e-1, atol=1e-1)
@slow
def test_post_processing_semantic_segmentation(self):
# only resize + normalize
image_processor = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
)
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
torch_device
)
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
expected_shape = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape, expected_shape)
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
expected_shape = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape, expected_shape)
|
SegformerModelIntegrationTest
|
python
|
sphinx-doc__sphinx
|
sphinx/addnodes.py
|
{
"start": 7635,
"end": 8243
}
|
class ____(nodes.Part, nodes.Inline, nodes.FixedTextElement):
"""Node for a general type parameter list.
By default the type parameter list is written in line with the rest of the signature.
Set ``multi_line_parameter_list = True`` to describe a multi-line type parameters list.
In that case each type parameter will then be written on its own, indented line.
A trailing comma will be added on the last line
if ``multi_line_trailing_comma`` is True.
"""
child_text_separator = ', '
def astext(self) -> str:
return f'[{super().astext()}]'
|
desc_type_parameter_list
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 561479,
"end": 561804
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteRef"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id",)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
|
DeleteRefPayload
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/events.py
|
{
"start": 22310,
"end": 24272
}
|
class ____(
NamedTuple(
"_ExpectationResult",
[
("success", PublicAttr[bool]),
("label", PublicAttr[Optional[str]]),
("description", PublicAttr[Optional[str]]),
("metadata", PublicAttr[Mapping[str, MetadataValue]]),
],
)
):
"""Event corresponding to a data quality test.
Op compute functions may yield events of this type whenever they wish to indicate to the
Dagster framework (and the end user) that a data quality test has produced a (positive or
negative) result.
Args:
success (bool): Whether the expectation passed or not.
label (Optional[str]): Short display name for expectation. Defaults to "result".
description (Optional[str]): A longer human-readable description of the expectation.
metadata (Optional[Dict[str, RawMetadataValue]]):
Arbitrary metadata about the failure. Keys are displayed string labels, and values are
one of the following: string, float, int, JSON-serializable dict, JSON-serializable
list, and one of the data classes returned by a MetadataValue static method.
"""
def __new__(
cls,
success: bool,
label: Optional[str] = None,
description: Optional[str] = None,
metadata: Optional[Mapping[str, RawMetadataValue]] = None,
):
normed_metadata = normalize_metadata(
check.opt_mapping_param(metadata, "metadata", key_type=str),
)
return super().__new__(
cls,
success=check.bool_param(success, "success"),
label=check.opt_str_param(label, "label", "result"),
description=check.opt_str_param(description, "description"),
metadata=normed_metadata,
)
@whitelist_for_serdes(
storage_field_names={"metadata": "metadata_entries"},
field_serializers={"metadata": MetadataFieldSerializer},
)
|
ExpectationResult
|
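The row above documents that op compute functions may yield events of this type. A minimal usage sketch based on that docstring, assuming the masked class is the `ExpectationResult` named in the target column; the op body and metadata values are illustrative:

```python
# A minimal sketch, assuming the masked class is dagster's ExpectationResult.
from dagster import ExpectationResult, MetadataValue, Output, op

@op
def validate_rows(rows: list):
    # Report a data quality check alongside the regular output.
    yield ExpectationResult(
        success=len(rows) > 0,
        label="non_empty",
        description="Upstream rows should not be empty.",
        metadata={"row_count": MetadataValue.int(len(rows))},
    )
    yield Output(rows)
```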
python
|
aio-libs__aiohttp
|
tests/test_proxy_functional.py
|
{
"start": 670,
"end": 32209
}
|
class ____(TypedDict):
status: int
headers: dict[str, str] | None
body: bytes | None
if sys.version_info >= (3, 11) and TYPE_CHECKING:
from typing import Unpack
async def get_request(
method: str = "GET",
*,
url: str | URL,
trust_env: bool = False,
**kwargs: Unpack[_RequestOptions],
) -> ClientResponse: ...
else:
from typing import Any
async def get_request(
method: str = "GET",
*,
url: str | URL,
trust_env: bool = False,
**kwargs: Any,
) -> ClientResponse:
connector = aiohttp.TCPConnector(ssl=False)
async with aiohttp.ClientSession(
connector=connector, trust_env=trust_env
) as client:
async with client.request(method, url, **kwargs) as resp:
return resp
@pytest.fixture
def secure_proxy_url(tls_certificate_pem_path: str) -> Iterator[URL]:
"""Return the URL of an instance of a running secure proxy.
This fixture also spawns that instance and tears it down after the test.
"""
proxypy_args = [
# --threadless does not work on windows, see
# https://github.com/abhinavsingh/proxy.py/issues/492
"--threaded" if os.name == "nt" else "--threadless",
"--num-workers",
"1", # the tests only send one query anyway
"--hostname",
"127.0.0.1", # network interface to listen to
"--port",
"0", # ephemeral port, so that kernel allocates a free one
"--cert-file",
tls_certificate_pem_path, # contains both key and cert
"--key-file",
tls_certificate_pem_path, # contains both key and cert
]
with proxy.Proxy(input_args=proxypy_args) as proxy_instance:
yield URL.build(
scheme="https",
host=str(proxy_instance.flags.hostname),
port=proxy_instance.flags.port,
)
@pytest.fixture
def web_server_endpoint_payload() -> str:
return str(uuid4())
@pytest.fixture(params=("http", "https"))
def web_server_endpoint_type(request: pytest.FixtureRequest) -> str:
return request.param # type: ignore[no-any-return]
@pytest.fixture
async def web_server_endpoint_url(
aiohttp_server: AiohttpServer,
ssl_ctx: ssl.SSLContext,
web_server_endpoint_payload: str,
web_server_endpoint_type: str,
) -> URL:
async def handler(request: web.Request) -> web.Response:
return web.Response(text=web_server_endpoint_payload)
app = web.Application()
app.router.add_route("GET", "/", handler)
if web_server_endpoint_type == "https":
server = await aiohttp_server(app, ssl=ssl_ctx)
else:
server = await aiohttp_server(app)
return URL.build(
scheme=web_server_endpoint_type,
host=server.host,
port=server.port,
)
@pytest.mark.skipif(
not ASYNCIO_SUPPORTS_TLS_IN_TLS,
reason="asyncio on this python does not support TLS in TLS",
)
@pytest.mark.parametrize("web_server_endpoint_type", ("http", "https"))
@pytest.mark.filterwarnings(r"ignore:.*ssl.OP_NO_SSL*")
# Filter out the warning from
# https://github.com/abhinavsingh/proxy.py/blob/30574fd0414005dfa8792a6e797023e862bdcf43/proxy/common/utils.py#L226
# otherwise this test will fail because the proxy will die with an error.
@pytest.mark.usefixtures("loop")
async def test_secure_https_proxy_absolute_path(
client_ssl_ctx: ssl.SSLContext,
secure_proxy_url: URL,
web_server_endpoint_url: URL,
web_server_endpoint_payload: str,
) -> None:
"""Ensure HTTP(S) sites are accessible through a secure proxy."""
conn = aiohttp.TCPConnector()
sess = aiohttp.ClientSession(connector=conn)
async with sess.get(
web_server_endpoint_url,
proxy=secure_proxy_url,
ssl=client_ssl_ctx, # used for both proxy and endpoint connections
) as response:
assert response.status == 200
assert await response.text() == web_server_endpoint_payload
await sess.close()
await conn.close()
await asyncio.sleep(0.1)
@pytest.mark.parametrize("web_server_endpoint_type", ("https",))
@pytest.mark.usefixtures("loop")
@pytest.mark.skipif(
ASYNCIO_SUPPORTS_TLS_IN_TLS, reason="asyncio on this python supports TLS in TLS"
)
@pytest.mark.filterwarnings(r"ignore:.*ssl.OP_NO_SSL*")
# Filter out the warning from
# https://github.com/abhinavsingh/proxy.py/blob/30574fd0414005dfa8792a6e797023e862bdcf43/proxy/common/utils.py#L226
# otherwise this test will fail because the proxy will die with an error.
async def test_https_proxy_unsupported_tls_in_tls(
client_ssl_ctx: ssl.SSLContext,
secure_proxy_url: URL,
web_server_endpoint_type: str,
) -> None:
"""Ensure connecting to TLS endpoints w/ HTTPS proxy needs patching.
This also checks that a helpful warning on how to patch the env
is displayed.
"""
url = URL.build(scheme=web_server_endpoint_type, host="python.org")
assert url.host is not None
escaped_host_port = ":".join((url.host.replace(".", r"\."), str(url.port)))
escaped_proxy_url = str(secure_proxy_url).replace(".", r"\.")
conn = aiohttp.TCPConnector()
sess = aiohttp.ClientSession(connector=conn)
expected_warning_text = (
r"^"
r"An HTTPS request is being sent through an HTTPS proxy\. "
"This support for TLS in TLS is known to be disabled "
r"in the stdlib asyncio\. This is why you'll probably see "
r"an error in the log below\.\n\n"
r"It is possible to enable it via monkeypatching\. "
r"For more details, see:\n"
r"\* https://bugs\.python\.org/issue37179\n"
r"\* https://github\.com/python/cpython/pull/28073\n\n"
r"You can temporarily patch this as follows:\n"
r"\* https://docs\.aiohttp\.org/en/stable/client_advanced\.html#proxy-support\n"
r"\* https://github\.com/aio-libs/aiohttp/discussions/6044\n$"
)
type_err = (
r"transport <asyncio\.sslproto\._SSLProtocolTransport object at "
r"0x[\d\w]+> is not supported by start_tls\(\)"
)
expected_exception_reason = (
r"^"
"Cannot initialize a TLS-in-TLS connection to host "
f"{escaped_host_port!s} through an underlying connection "
f"to an HTTPS proxy {escaped_proxy_url!s} ssl:{client_ssl_ctx!s} "
f"[{type_err!s}]"
r"$"
)
with (
pytest.warns(
RuntimeWarning,
match=expected_warning_text,
),
pytest.raises(
ClientConnectionError,
match=expected_exception_reason,
) as conn_err,
):
async with sess.get(url, proxy=secure_proxy_url, ssl=client_ssl_ctx):
pass
assert isinstance(conn_err.value.__cause__, TypeError)
assert match_regex(f"^{type_err!s}$", str(conn_err.value.__cause__))
await sess.close()
await conn.close()
await asyncio.sleep(0.1)
@pytest.mark.skipif(
platform.system() == "Windows" or sys.implementation.name != "cpython",
reason="uvloop is not supported on Windows and non-CPython implementations",
)
@pytest.mark.filterwarnings(r"ignore:.*ssl.OP_NO_SSL*")
# Filter out the warning from
# https://github.com/abhinavsingh/proxy.py/blob/30574fd0414005dfa8792a6e797023e862bdcf43/proxy/common/utils.py#L226
# otherwise this test will fail because the proxy will die with an error.
async def test_uvloop_secure_https_proxy(
client_ssl_ctx: ssl.SSLContext,
secure_proxy_url: URL,
uvloop_loop: asyncio.AbstractEventLoop,
) -> None:
"""Ensure HTTPS sites are accessible through a secure proxy without warning when using uvloop."""
conn = aiohttp.TCPConnector(force_close=True)
sess = aiohttp.ClientSession(connector=conn)
try:
url = URL("https://example.com")
async with sess.get(
url, proxy=secure_proxy_url, ssl=client_ssl_ctx
) as response:
assert response.status == 200
# Ensure response body is read to completion
await response.read()
finally:
await sess.close()
await conn.close()
await asyncio.sleep(0)
await asyncio.sleep(0.1)
@pytest.fixture
def proxy_test_server(
aiohttp_raw_server: AiohttpRawServer,
loop: asyncio.AbstractEventLoop,
monkeypatch: pytest.MonkeyPatch,
) -> Callable[[], Awaitable[mock.Mock]]:
# Handle all proxy requests and imitate remote server response.
_patch_ssl_transport(monkeypatch)
default_response = _ResponseArgs(status=200, headers=None, body=None)
proxy_mock = mock.Mock()
async def proxy_handler(request: web.Request) -> web.Response:
proxy_mock.request = request
proxy_mock.requests_list.append(request)
response = default_response.copy()
if isinstance(proxy_mock.return_value, dict):
response.update(proxy_mock.return_value) # type: ignore[typeddict-item]
headers = response["headers"]
if not headers:
headers = {}
if request.method == "CONNECT":
response["body"] = None
response["headers"] = headers
resp = web.Response(**response)
await resp.prepare(request)
await resp.write_eof()
return resp
async def proxy_server() -> mock.Mock:
proxy_mock.request = None
proxy_mock.auth = None
proxy_mock.requests_list = []
server = await aiohttp_raw_server(proxy_handler) # type: ignore[arg-type]
proxy_mock.server = server
proxy_mock.url = server.make_url("/")
return proxy_mock
return proxy_server
async def test_proxy_http_absolute_path(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io/path?query=yes"
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path?query=yes"
async def test_proxy_http_raw_path(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io:2561/space sheep?q=can:fly"
raw_url = "/space%20sheep?q=can:fly"
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == raw_url
async def test_proxy_http_idna_support(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://éé.com/"
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
assert proxy.request.host == "éé.com"
assert proxy.request.path_qs == "/"
async def test_proxy_http_connection_error() -> None:
url = "http://aiohttp.io/path"
proxy_url = "http://localhost:2242/"
with pytest.raises(aiohttp.ClientConnectorError):
await get_request(url=url, proxy=proxy_url)
async def test_proxy_http_bad_response(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
proxy.return_value = dict(status=502, headers={"Proxy-Agent": "TestProxy"})
resp = await get_request(url=url, proxy=proxy.url)
assert resp.status == 502
assert resp.headers["Proxy-Agent"] == "TestProxy"
async def test_proxy_http_auth(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
assert "Authorization" not in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
auth = aiohttp.BasicAuth("user", "pass")
await get_request(url=url, auth=auth, proxy=proxy.url)
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
await get_request(url=url, proxy_auth=auth, proxy=proxy.url)
assert "Authorization" not in proxy.request.headers
assert "Proxy-Authorization" in proxy.request.headers
await get_request(url=url, auth=auth, proxy_auth=auth, proxy=proxy.url)
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" in proxy.request.headers
async def test_proxy_http_auth_utf8(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io/path"
auth = aiohttp.BasicAuth("юзер", "пасс", "utf-8")
proxy = await proxy_test_server()
await get_request(url=url, auth=auth, proxy=proxy.url)
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
async def test_proxy_http_auth_from_url(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
auth_url = URL(url).with_user("user").with_password("pass")
await get_request(url=auth_url, proxy=proxy.url)
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
proxy_url = URL(proxy.url).with_user("user").with_password("pass")
await get_request(url=url, proxy=proxy_url)
assert "Authorization" not in proxy.request.headers
assert "Proxy-Authorization" in proxy.request.headers
async def test_proxy_http_acquired_cleanup(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "http://aiohttp.io/path"
conn = aiohttp.TCPConnector()
sess = aiohttp.ClientSession(connector=conn)
proxy = await proxy_test_server()
assert 0 == len(conn._acquired)
async with sess.get(url, proxy=proxy.url) as resp:
pass
assert resp.closed
assert 0 == len(conn._acquired)
await sess.close()
await conn.close()
@pytest.mark.skip("we need to reconsider how we test this")
async def test_proxy_http_acquired_cleanup_force(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "http://aiohttp.io/path"
conn = aiohttp.TCPConnector(force_close=True)
sess = aiohttp.ClientSession(connector=conn)
proxy = await proxy_test_server()
assert 0 == len(conn._acquired)
async def request() -> None:
async with sess.get(url, proxy=proxy.url):
assert 1 == len(conn._acquired)
await request()
assert 0 == len(conn._acquired)
await sess.close()
await conn.close()
@pytest.mark.skip("we need to reconsider how we test this")
async def test_proxy_http_multi_conn_limit(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "http://aiohttp.io/path"
limit, multi_conn_num = 1, 5
conn = aiohttp.TCPConnector(limit=limit)
sess = aiohttp.ClientSession(connector=conn)
proxy = await proxy_test_server()
current_pid = None
async def request(pid: int) -> ClientResponse:
# process requests only one by one
nonlocal current_pid
async with sess.get(url, proxy=proxy.url) as resp:
current_pid = pid
await asyncio.sleep(0.2)
assert current_pid == pid
return resp
requests = [request(pid) for pid in range(multi_conn_num)]
responses = await asyncio.gather(*requests)
assert len(responses) == multi_conn_num
assert {resp.status for resp in responses} == {200}
await sess.close()
await conn.close()
@pytest.mark.xfail
async def test_proxy_https_connect(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
proxy = await proxy_test_server()
url = "https://www.google.com.ua/search?q=aiohttp proxy"
await get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == "CONNECT"
assert connect.path == "www.google.com.ua:443"
assert connect.host == "www.google.com.ua"
assert proxy.request.host == "www.google.com.ua"
assert proxy.request.path_qs == "/search?q=aiohttp+proxy"
@pytest.mark.xfail
async def test_proxy_https_connect_with_port(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
proxy = await proxy_test_server()
url = "https://secure.aiohttp.io:2242/path"
await get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == "CONNECT"
assert connect.path == "secure.aiohttp.io:2242"
assert connect.host == "secure.aiohttp.io:2242"
assert proxy.request.host == "secure.aiohttp.io:2242"
assert proxy.request.path_qs == "/path"
@pytest.mark.xfail
async def test_proxy_https_send_body(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
sess = aiohttp.ClientSession()
try:
proxy = await proxy_test_server()
proxy.return_value = {"status": 200, "body": b"1" * (2**20)}
url = "https://www.google.com.ua/search?q=aiohttp proxy"
async with sess.get(url, proxy=proxy.url) as resp:
body = await resp.read()
assert body == b"1" * (2**20)
finally:
await sess.close()
@pytest.mark.xfail
async def test_proxy_https_idna_support(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "https://éé.com/"
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == "CONNECT"
assert connect.path == "xn--9caa.com:443"
assert connect.host == "xn--9caa.com"
async def test_proxy_https_connection_error() -> None:
url = "https://secure.aiohttp.io/path"
proxy_url = "http://localhost:2242/"
with pytest.raises(aiohttp.ClientConnectorError):
await get_request(url=url, proxy=proxy_url)
async def test_proxy_https_bad_response(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "https://secure.aiohttp.io/path"
proxy = await proxy_test_server()
proxy.return_value = dict(status=502, headers={"Proxy-Agent": "TestProxy"})
with pytest.raises(aiohttp.ClientHttpProxyError):
await get_request(url=url, proxy=proxy.url)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "CONNECT"
# The following check fails on MacOS
# assert proxy.request.path == 'secure.aiohttp.io:443'
@pytest.mark.xfail
async def test_proxy_https_auth(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
) -> None:
url = "https://secure.aiohttp.io/path"
auth = aiohttp.BasicAuth("user", "pass")
proxy = await proxy_test_server()
await get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert "Authorization" not in connect.headers
assert "Proxy-Authorization" not in connect.headers
assert "Authorization" not in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
proxy = await proxy_test_server()
await get_request(url=url, auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert "Authorization" not in connect.headers
assert "Proxy-Authorization" not in connect.headers
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
proxy = await proxy_test_server()
await get_request(url=url, proxy_auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert "Authorization" not in connect.headers
assert "Proxy-Authorization" in connect.headers
assert "Authorization" not in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
proxy = await proxy_test_server()
await get_request(url=url, auth=auth, proxy_auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert "Authorization" not in connect.headers
assert "Proxy-Authorization" in connect.headers
assert "Authorization" in proxy.request.headers
assert "Proxy-Authorization" not in proxy.request.headers
@pytest.mark.xfail
async def test_proxy_https_acquired_cleanup(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "https://secure.aiohttp.io/path"
conn = aiohttp.TCPConnector()
sess = aiohttp.ClientSession(connector=conn)
try:
proxy = await proxy_test_server()
assert 0 == len(conn._acquired)
async def request() -> None:
async with sess.get(url, proxy=proxy.url):
assert 1 == len(conn._acquired)
await request()
assert 0 == len(conn._acquired)
finally:
await sess.close()
await conn.close()
@pytest.mark.xfail
async def test_proxy_https_acquired_cleanup_force(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "https://secure.aiohttp.io/path"
conn = aiohttp.TCPConnector(force_close=True)
sess = aiohttp.ClientSession(connector=conn)
try:
proxy = await proxy_test_server()
assert 0 == len(conn._acquired)
async def request() -> None:
async with sess.get(url, proxy=proxy.url):
assert 1 == len(conn._acquired)
await request()
assert 0 == len(conn._acquired)
finally:
await sess.close()
await conn.close()
@pytest.mark.xfail
async def test_proxy_https_multi_conn_limit(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
loop: asyncio.AbstractEventLoop,
) -> None:
url = "https://secure.aiohttp.io/path"
limit, multi_conn_num = 1, 5
conn = aiohttp.TCPConnector(limit=limit)
sess = aiohttp.ClientSession(connector=conn)
proxy = await proxy_test_server()
try:
current_pid = None
async def request(pid: int) -> ClientResponse:
            # process requests one at a time
nonlocal current_pid
async with sess.get(url, proxy=proxy.url) as resp:
current_pid = pid
await asyncio.sleep(0.2)
assert current_pid == pid
return resp
requests = [request(pid) for pid in range(multi_conn_num)]
responses = await asyncio.gather(*requests, return_exceptions=True)
# Filter out exceptions to count actual responses
actual_responses = [r for r in responses if isinstance(r, ClientResponse)]
assert len(actual_responses) == multi_conn_num
assert {resp.status for resp in actual_responses} == {200}
finally:
await sess.close()
await conn.close()
def _patch_ssl_transport(monkeypatch: pytest.MonkeyPatch) -> None:
    # Substitute the ssl transport to prevent the ssl handshake.
def _make_ssl_transport_dummy(
self: asyncio.selector_events.BaseSelectorEventLoop,
rawsock: object,
protocol: object,
sslcontext: object,
waiter: object = None,
**kwargs: object,
) -> object:
return self._make_socket_transport( # type: ignore[attr-defined]
rawsock,
protocol,
waiter,
extra=kwargs.get("extra"),
server=kwargs.get("server"),
)
monkeypatch.setattr(
"asyncio.selector_events.BaseSelectorEventLoop._make_ssl_transport",
_make_ssl_transport_dummy,
)
original_is_file = pathlib.Path.is_file
def mock_is_file(self: pathlib.Path) -> bool:
# make real netrc file invisible in home dir
if self.name in ["_netrc", ".netrc"] and self.parent == self.home():
return False
else:
return original_is_file(self)
async def test_proxy_from_env_http(
proxy_test_server: Callable[[], Awaitable[mock.Mock]], mocker: MockerFixture
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
mocker.patch.dict(os.environ, {"http_proxy": str(proxy.url)})
mocker.patch("pathlib.Path.is_file", mock_is_file)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert "Proxy-Authorization" not in proxy.request.headers
async def test_proxy_from_env_http_with_auth(
proxy_test_server: Callable[[], Awaitable[mock.Mock]], mocker: MockerFixture
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
auth = aiohttp.BasicAuth("user", "pass")
mocker.patch.dict(
os.environ,
{
"http_proxy": str(
proxy.url.with_user(auth.login).with_password(auth.password)
)
},
)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert proxy.request.headers["Proxy-Authorization"] == auth.encode()
async def test_proxy_from_env_http_with_auth_from_netrc(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
tmp_path: pathlib.Path,
mocker: MockerFixture,
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
auth = aiohttp.BasicAuth("user", "pass")
netrc_file = tmp_path / "test_netrc"
netrc_file_data = f"machine 127.0.0.1 login {auth.login} password {auth.password}"
with netrc_file.open("w") as f:
f.write(netrc_file_data)
mocker.patch.dict(
os.environ, {"http_proxy": str(proxy.url), "NETRC": str(netrc_file)}
)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert proxy.request.headers["Proxy-Authorization"] == auth.encode()
async def test_proxy_from_env_http_without_auth_from_netrc(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
tmp_path: pathlib.Path,
mocker: MockerFixture,
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
auth = aiohttp.BasicAuth("user", "pass")
netrc_file = tmp_path / "test_netrc"
netrc_file_data = f"machine 127.0.0.2 login {auth.login} password {auth.password}"
with netrc_file.open("w") as f:
f.write(netrc_file_data)
mocker.patch.dict(
os.environ, {"http_proxy": str(proxy.url), "NETRC": str(netrc_file)}
)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert "Proxy-Authorization" not in proxy.request.headers
async def test_proxy_from_env_http_without_auth_from_wrong_netrc(
proxy_test_server: Callable[[], Awaitable[mock.Mock]],
tmp_path: pathlib.Path,
mocker: MockerFixture,
) -> None:
url = "http://aiohttp.io/path"
proxy = await proxy_test_server()
auth = aiohttp.BasicAuth("user", "pass")
netrc_file = tmp_path / "test_netrc"
invalid_data = f"machine 127.0.0.1 {auth.login} pass {auth.password}"
with netrc_file.open("w") as f:
f.write(invalid_data)
mocker.patch.dict(
os.environ, {"http_proxy": str(proxy.url), "NETRC": str(netrc_file)}
)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 1
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert "Proxy-Authorization" not in proxy.request.headers
@pytest.mark.xfail
async def test_proxy_from_env_https(
proxy_test_server: Callable[[], Awaitable[mock.Mock]], mocker: MockerFixture
) -> None:
url = "https://aiohttp.io/path"
proxy = await proxy_test_server()
mocker.patch.dict(os.environ, {"https_proxy": str(proxy.url)})
mocker.patch("pathlib.Path.is_file", mock_is_file)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 2
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert "Proxy-Authorization" not in proxy.request.headers
@pytest.mark.xfail
async def test_proxy_from_env_https_with_auth(
proxy_test_server: Callable[[], Awaitable[mock.Mock]], mocker: MockerFixture
) -> None:
url = "https://aiohttp.io/path"
proxy = await proxy_test_server()
auth = aiohttp.BasicAuth("user", "pass")
mocker.patch.dict(
os.environ,
{
"https_proxy": str(
proxy.url.with_user(auth.login).with_password(auth.password)
)
},
)
await get_request(url=url, trust_env=True)
assert len(proxy.requests_list) == 2
assert proxy.request.method == "GET"
assert proxy.request.host == "aiohttp.io"
assert proxy.request.path_qs == "/path"
assert "Proxy-Authorization" not in proxy.request.headers
r2 = proxy.requests_list[0]
assert r2.method == "CONNECT"
assert r2.host == "aiohttp.io"
assert r2.path_qs == "/path"
assert r2.headers["Proxy-Authorization"] == auth.encode()
async def test_proxy_auth() -> None:
async with aiohttp.ClientSession() as session:
with pytest.raises(
ValueError, match=r"proxy_auth must be None or BasicAuth\(\) tuple"
):
async with session.get(
"http://python.org",
proxy="http://proxy.example.com",
proxy_auth=("user", "pass"), # type: ignore[arg-type]
):
pass
async def test_https_proxy_connect_tunnel_session_close_no_hang(
aiohttp_server: AiohttpServer,
) -> None:
"""Test that CONNECT tunnel connections are not pooled."""
# Regression test for issue #11273.
# Create a minimal proxy server
# The CONNECT method is handled at the protocol level, not by the handler
proxy_app = web.Application()
proxy_server = await aiohttp_server(proxy_app)
proxy_url = f"http://{proxy_server.host}:{proxy_server.port}"
# Create session and make HTTPS request through proxy
session = aiohttp.ClientSession()
try:
# This will fail during TLS upgrade because proxy doesn't establish tunnel
with suppress(aiohttp.ClientError):
async with session.get("https://example.com/test", proxy=proxy_url) as resp:
await resp.read()
# The critical test: Check if any connections were pooled with proxy=None
# This is the root cause of the hang - CONNECT tunnel connections
# should NOT be pooled
connector = session.connector
assert connector is not None
# Count connections with proxy=None in the pool
proxy_none_keys = [key for key in connector._conns if key.proxy is None]
proxy_none_count = len(proxy_none_keys)
# Before the fix, there would be a connection with proxy=None
# After the fix, CONNECT tunnel connections are not pooled
assert proxy_none_count == 0, (
f"Found {proxy_none_count} connections with proxy=None in pool. "
f"CONNECT tunnel connections should not be pooled - this is bug #11273"
)
finally:
# Clean close
await session.close()
|
_ResponseArgs
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/events.py
|
{
"start": 29129,
"end": 31270
}
|
class ____(Response):
"""
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
:param metrics: Debug image events grouped by tasks and iterations
:type metrics: Sequence[DebugImagesResponseTaskMetrics]
"""
_service = "events"
_action = "debug_images"
_version = "2.20"
_schema = {
"properties": {
"metrics": {
"description": "Debug image events grouped by tasks and iterations",
"items": {"$ref": "#/definitions/debug_images_response_task_metrics"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, scroll_id: Optional[str] = None, metrics: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(DebugImagesResponse, self).__init__(**kwargs)
self.scroll_id = scroll_id
self.metrics = metrics
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [DebugImagesResponseTaskMetrics.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", DebugImagesResponseTaskMetrics, is_array=True)
self._property_metrics = value
|
DebugImagesResponse
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-default_ini/plugin_success.py
|
{
"start": 6696,
"end": 6953
}
|
class ____(BaseModel):
model_config = ConfigDict(validate_by_alias=False, validate_by_name=True)
my_field: str = Field(alias='my_alias')
m1 = Model1(my_field='foo')
# MYPY: error: Unexpected keyword argument "my_field" for "Model1" [call-arg]
|
Model1
|
python
|
django__django
|
tests/template_tests/test_base.py
|
{
"start": 1614,
"end": 2151
}
|
class ____(SimpleTestCase):
def test_lazy_template_string(self):
template_string = gettext_lazy("lazy string")
self.assertEqual(Template(template_string).render(Context()), template_string)
def test_repr(self):
template = Template(
"<html><body>\n"
"{% if test %}<h1>{{ varvalue }}</h1>{% endif %}"
"</body></html>"
)
self.assertEqual(
repr(template),
'<Template template_string="<html><body>{% if t...">',
)
|
TemplateTests
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/typing.py
|
{
"start": 8802,
"end": 8912
}
|
class ____(ProxyTypingValue, _TypingClassMixin):
index_class = TypingClassWithGenerics
|
ProxyTypingClassValue
|
python
|
realpython__materials
|
python-import/finders_and_loaders/ban_importer.py
|
{
"start": 57,
"end": 273
}
|
class ____:
@classmethod
def find_spec(cls, name, path, target=None):
if name in BANNED_MODULES:
raise ModuleNotFoundError(f"{name!r} is banned")
sys.meta_path.insert(0, BanFinder)
|
BanFinder
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_I.py
|
{
"start": 65,
"end": 1082
}
|
class ____(Benchmark):
r"""
Infinity objective function.
This class defines the Infinity [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Infinity}}(x) = \sum_{i=1}^{n} x_i^{6}
\left [ \sin\left ( \frac{1}{x_i} \right ) + 2 \right ]
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N))
self.global_optimum = [[1e-16 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(x ** 6.0 * (sin(1.0 / x) + 2.0))
|
Infinity
|
python
|
conda__conda
|
conda/core/path_actions.py
|
{
"start": 2447,
"end": 5421
}
|
class ____:
"""Base class for path manipulation actions, including linking, unlinking, and others.
Pre and post-transaction plugins should inherit this class to implement their
own verification, execution, reversing, and cleanup steps. These methods are
guaranteed to be called in the following order:
1. ``verify``
2. ``execute``
3. ``reverse`` (only if ``execute`` raises an exception)
4. ``cleanup``
:param transaction_context: Mapping between target prefixes and PrefixActions
instances
:param target_prefix: Target prefix for the action
:param unlink_precs: Package records to be unlinked
:param link_precs: Package records to link
:param remove_specs: Specs to be removed
:param update_specs: Specs to be updated
:param neutered_specs: Specs to be neutered
"""
_verified = False
def __init__(
self,
transaction_context: dict[str, str] | None = None,
target_prefix: str | None = None,
unlink_precs: Iterable[PackageRecord] | None = None,
link_precs: Iterable[PackageRecord] | None = None,
remove_specs: Iterable[MatchSpec] | None = None,
update_specs: Iterable[MatchSpec] | None = None,
neutered_specs: Iterable[MatchSpec] | None = None,
):
self.transaction_context = transaction_context
self.target_prefix = target_prefix
self.unlink_precs = unlink_precs
self.link_precs = link_precs
self.remove_specs = remove_specs
self.update_specs = update_specs
self.neutered_specs = neutered_specs
@abstractmethod
def verify(self) -> Exception | None:
"""Carry out any pre-execution verification.
Should set self._verified = True upon success.
:return: On failure, this function should return (not raise!) an exception
object. At the end of the verification run, all errors will be raised as a
CondaMultiError.
"""
@abstractmethod
def execute(self) -> None:
"""Execute the action.
Called after ``self.verify()``. If this function raises an exception,
``self.reverse()`` will be called.
"""
@abstractmethod
def reverse(self) -> None:
"""Reverse what was done in execute.
Called only if ``self.execute()`` raises an exception.
"""
pass
@abstractmethod
def cleanup(self) -> None:
"""Carry out any post-execution tasks."""
pass
@property
def verified(self):
return self._verified
def __repr__(self):
args = (
f"{key}={value!r}"
for key, value in vars(self).items()
if key not in REPR_IGNORE_KWARGS
)
return "{}({})".format(self.__class__.__name__, ", ".join(args))
deprecated.constant(
"25.9",
"26.3",
"_Action",
Action,
addendum="Use `conda.core.path_actions.Action` instead.",
)
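# A hedged sketch, not conda's own code: TouchFileAction and its marker file are
# hypothetical, and the base class above (masked as ____) is referred to by its
# original name, Action. It only illustrates the verify -> execute -> reverse ->
# cleanup contract described in the class docstring.
import os  # assumed available; imported here to keep the sketch self-contained


class TouchFileAction(Action):
    def __init__(self, target_prefix, filename):
        super().__init__(target_prefix=target_prefix)
        self._path = os.path.join(target_prefix, filename)

    def verify(self):
        # Per the docstring: return (do not raise) an exception object on failure.
        if not os.path.isdir(self.target_prefix):
            return NotADirectoryError(self.target_prefix)
        self._verified = True
        return None

    def execute(self):
        with open(self._path, "w"):
            pass  # create an empty marker file

    def reverse(self):
        # Called only if execute() raised; undo the side effect.
        if os.path.exists(self._path):
            os.remove(self._path)

    def cleanup(self):
        pass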
|
Action
|
python
|
google__jax
|
tests/pallas/tpu_sparsecore_pallas_test.py
|
{
"start": 2733,
"end": 2995
}
|
class ____():
COMPILER_OPTIONS = {"xla_tpu_use_tc_device_shape_on_sc": "true"}
def setUp(self):
super().setUp()
if jtu.is_cloud_tpu():
# TODO(apaszke,slebedev): Fix those.
self.skipTest("Many tests are failing on Cloud TPUs")
|
TCTilingMixin
|
python
|
doocs__leetcode
|
solution/2900-2999/2966.Divide Array Into Arrays With Max Difference/Solution.py
|
{
"start": 0,
"end": 314
}
|
class ____:
def divideArray(self, nums: List[int], k: int) -> List[List[int]]:
nums.sort()
ans = []
n = len(nums)
for i in range(0, n, 3):
t = nums[i : i + 3]
if t[2] - t[0] > k:
return []
ans.append(t)
return ans
|
Solution
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/aten.py
|
{
"start": 1875,
"end": 2396
}
|
class ____(ATenConfigHeuristics):
def get_extra_kwargs(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> dict[str, Any]:
kwargs = super().get_extra_kwargs(kernel_inputs, op_name)
alpha = kernel_inputs.get_scalar("alpha")
beta = kernel_inputs.get_scalar("beta")
return {
**kwargs,
"alpha": alpha,
"beta": beta,
}
@register_template_heuristic(aten_bias_addmm.uid, None, op_name="addmm")
|
ATenAddMMConfigHeuristics
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/configs.py
|
{
"start": 824,
"end": 1612
}
|
class ____(RegexLexer):
"""
Lexer for configuration files in INI style.
"""
name = 'INI'
aliases = ['ini', 'cfg', 'dosini']
filenames = ['*.ini', '*.cfg', '*.inf']
mimetypes = ['text/x-ini', 'text/inf']
tokens = {
'root': [
(r'\s+', Text),
(r'[;#].*', Comment.Single),
(r'\[.*?\]$', Keyword),
(r'(.*?)([ \t]*)(=)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Text, Operator, Text, String)),
# standalone option, supported by some INI parsers
(r'(.+?)$', Name.Attribute),
],
}
def analyse_text(text):
npos = text.find('\n')
if npos < 3:
return False
return text[0] == '[' and text[npos-1] == ']'
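# A hedged usage sketch, not part of the vendored module: it drives the lexer
# above (masked as ____; IniLexer in the original file) through the standard
# pygments entry point over a tiny INI snippet.
def _example_ini_lexing():
    from pygments import lex

    sample = "[server]\n; local development only\nhost = 127.0.0.1\n"
    for token_type, value in lex(sample, IniLexer()):
        print(token_type, repr(value))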
|
IniLexer
|
python
|
python-openxml__python-docx
|
src/docx/opc/parts/coreprops.py
|
{
"start": 458,
"end": 1775
}
|
class ____(XmlPart):
"""Corresponds to part named ``/docProps/core.xml``.
The "core" is short for "Dublin Core" and contains document metadata relatively common across
documents of all types, not just DOCX.
"""
@classmethod
def default(cls, package: OpcPackage):
"""Return a new |CorePropertiesPart| object initialized with default values for
its base properties."""
core_properties_part = cls._new(package)
core_properties = core_properties_part.core_properties
core_properties.title = "Word Document"
core_properties.last_modified_by = "python-docx"
core_properties.revision = 1
core_properties.modified = dt.datetime.now(dt.timezone.utc)
return core_properties_part
@property
def core_properties(self):
"""A |CoreProperties| object providing read/write access to the core properties
contained in this core properties part."""
return CoreProperties(self.element)
@classmethod
def _new(cls, package: OpcPackage) -> CorePropertiesPart:
partname = PackURI("/docProps/core.xml")
content_type = CT.OPC_CORE_PROPERTIES
coreProperties = CT_CoreProperties.new()
return CorePropertiesPart(partname, content_type, coreProperties, package)
|
CorePropertiesPart
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/resource_requirement.py
|
{
"start": 3235,
"end": 3445
}
|
class ____(ABC):
@abstractmethod
def with_resources(
self, resource_defs: Mapping[str, "ResourceDefinition"]
) -> "ResourceAddable":
raise NotImplementedError()
@record
|
ResourceAddable
|
python
|
openai__openai-python
|
src/openai/resources/beta/threads/messages.py
|
{
"start": 1149,
"end": 13584
}
|
class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return MessagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> MessagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return MessagesWithStreamingResponse(self)
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def create(
self,
thread_id: str,
*,
content: Union[str, Iterable[MessageContentPartParam]],
role: Literal["user", "assistant"],
attachments: Optional[Iterable[message_create_params.Attachment]] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message:
"""
Create a message.
Args:
content: The text contents of the message.
role:
The role of the entity that is creating the message. Allowed values include:
- `user`: Indicates the message is sent by an actual user and should be used in
most cases to represent user-generated messages.
- `assistant`: Indicates the message is generated by the assistant. Use this
value to insert messages from the assistant into the conversation.
attachments: A list of files attached to the message, and the tools they should be added to.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages",
body=maybe_transform(
{
"content": content,
"role": role,
"attachments": attachments,
"metadata": metadata,
},
message_create_params.MessageCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def retrieve(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message:
"""
Retrieve a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def update(
self,
message_id: str,
*,
thread_id: str,
metadata: Optional[Metadata] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Message:
"""
Modifies a message.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._post(
f"/threads/{thread_id}/messages/{message_id}",
body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=Message,
)
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def list(
self,
thread_id: str,
*,
after: str | Omit = omit,
before: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
run_id: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[Message]:
"""
Returns a list of messages for a given thread.
Args:
after: A cursor for use in pagination. `after` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
ending with obj_foo, your subsequent call can include after=obj_foo in order to
fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place
in the list. For instance, if you make a list request and receive 100 objects,
starting with obj_foo, your subsequent call can include before=obj_foo in order
to fetch the previous page of the list.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending
order and `desc` for descending order.
run_id: Filter messages by the run ID that generated them.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._get_api_list(
f"/threads/{thread_id}/messages",
page=SyncCursorPage[Message],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"before": before,
"limit": limit,
"order": order,
"run_id": run_id,
},
message_list_params.MessageListParams,
),
),
model=Message,
)
@typing_extensions.deprecated("The Assistants API is deprecated in favor of the Responses API")
def delete(
self,
message_id: str,
*,
thread_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> MessageDeleted:
"""
Deletes a message.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not thread_id:
raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}")
if not message_id:
raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}")
extra_headers = {"OpenAI-Beta": "assistants=v2", **(extra_headers or {})}
return self._delete(
f"/threads/{thread_id}/messages/{message_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=MessageDeleted,
)
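# A hedged usage sketch, not part of this module: it assumes a client configured
# via the OPENAI_API_KEY environment variable and an already-created thread whose
# ID below is only a placeholder. The docstrings above mark this whole Assistants
# surface as deprecated in favor of the Responses API.
def _example_messages_usage() -> None:
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    created = client.beta.threads.messages.create(
        thread_id="thread_abc123",  # placeholder thread ID
        role="user",
        content="Summarize the attached report in three bullet points.",
        metadata={"source": "example"},  # up to 16 key/value pairs
    )
    page = client.beta.threads.messages.list(thread_id="thread_abc123", limit=20)
    for message in page:
        print(message.id, message.role, created.id)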
|
Messages
|
python
|
getsentry__sentry
|
tests/sentry/integrations/pagerduty/test_integration.py
|
{
"start": 784,
"end": 9903
}
|
class ____(IntegrationTestCase):
provider = PagerDutyIntegrationProvider
base_url = "https://app.pagerduty.com"
def setUp(self) -> None:
super().setUp()
self.app_id = "app_1"
self.account_slug = "test-app"
self._stub_pagerduty()
def _stub_pagerduty(self):
options.set("pagerduty.app-id", self.app_id)
responses.reset()
responses.add(
responses.GET,
self.base_url
+ "/install/integration?app_id=%sredirect_url=%s&version=1"
% (self.app_id, self.setup_path),
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def assert_setup_flow(self, mock_record):
resp = self.client.get(self.init_path)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "app.pagerduty.com"
assert redirect.path == "/install/integration"
config = {
"integration_keys": [
{
"integration_key": "key1",
"name": "Super Cool Service",
"id": "PD12345",
"type": "service",
},
{
"integration_key": "key3",
"name": "B Team's Rules",
"id": "PDBCDEF",
"type": "team_rule_set",
},
],
"account": {"subdomain": "test-app", "name": "Test App"},
}
resp = self.client.get(
"{}?{}".format(self.setup_path, urlencode({"config": orjson.dumps(config).decode()}))
)
self.assertDialogSuccess(resp)
# SLO assertions
# INSTALLATION_REDIRECT (success) -> POST_INSTALL (success) -> FINISH_PIPELINE (success) -> INSTALLATION_REDIRECT (success)
# The first INSTALLATION_REDIRECT exits early because we redirect the user, the second INSTALLATION_REDIRECT is the last layer of the onion
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=4
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=4
)
assert_success_metric(mock_record)
return resp
def assert_add_service_flow(self, integration):
query_param = "?account=%s" % (integration.metadata["domain_name"])
init_path_with_account = f"{self.init_path}{query_param}"
resp = self.client.get(init_path_with_account)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "%s.pagerduty.com" % integration.metadata["domain_name"]
assert redirect.path == "/install/integration"
config = {
"integration_keys": [
{
"integration_key": "additional-service",
"name": "Additional Service",
"id": "PD123467",
"type": "service",
}
],
"account": {"subdomain": "test-app", "name": "Test App"},
}
resp = self.client.get(
"{}?{}".format(self.setup_path, urlencode({"config": orjson.dumps(config).decode()}))
)
self.assertDialogSuccess(resp)
return resp
@responses.activate
def test_basic_flow(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == self.account_slug
assert integration.name == "Test App"
assert integration.metadata["services"] == [
{
"integration_key": "key1",
"name": "Super Cool Service",
"id": "PD12345",
"type": "service",
}
]
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
services = get_services(oi)
assert services[0]["service_name"] == "Super Cool Service"
@responses.activate
def test_add_services_flow(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
oi = OrganizationIntegration.objects.get(
integration_id=integration.id, organization_id=self.organization.id
)
service = get_services(oi)[0]
url = "https://%s.pagerduty.com" % (integration.metadata["domain_name"])
responses.add(
responses.GET,
url
+ "/install/integration?app_id=%sredirect_url=%s&version=1"
% (self.app_id, self.setup_path),
)
with self.tasks():
self.assert_add_service_flow(integration)
oi.refresh_from_db()
services = get_services(oi)
assert services[1]["id"]
del services[1]["id"] # type: ignore[misc]
assert services == [
service,
dict(
integration_id=integration.id,
integration_key="additional-service",
service_name="Additional Service",
),
]
@responses.activate
def test_update_organization_config(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
oi = OrganizationIntegration.objects.get(
integration_id=integration.id, organization_id=self.organization.id
)
service_id = get_services(oi)[0]["id"]
config_data = {
"service_table": [
{"service": "Mleep", "integration_key": "xxxxxxxxxxxxxxxx", "id": service_id},
{"service": "new_service", "integration_key": "new_key", "id": None},
]
}
integration.get_installation(self.organization.id).update_organization_config(config_data)
oi.refresh_from_db()
services = get_services(oi)
del services[1]["id"] # type: ignore[misc]
assert services == [
dict(
id=service_id,
integration_key="xxxxxxxxxxxxxxxx",
integration_id=oi.integration_id,
service_name="Mleep",
),
dict(
integration_key="new_key",
integration_id=oi.integration_id,
service_name="new_service",
),
]
@responses.activate
def test_delete_pagerduty_service(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
oi = OrganizationIntegration.objects.get(
integration_id=integration.id, organization_id=self.organization.id
)
services = get_services(oi)
assert len(services) == 1
service_id = services[0]["id"]
config_data = {
"service_table": [{"service": "new_service", "integration_key": "new_key", "id": None}]
}
integration.get_installation(self.organization.id).update_organization_config(config_data)
oi.refresh_from_db()
services = get_services(oi)
assert len(services) == 1
assert services[0]["id"] != service_id
@responses.activate
def test_no_name(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
service = get_services(oi)[0]
service_id = service["id"]
config_data = {
"service_table": [{"service": "new_service", "integration_key": "", "id": service_id}]
}
with pytest.raises(IntegrationError) as error:
integration.get_installation(self.organization.id).update_organization_config(
config_data
)
assert str(error.value) == "Name and key are required"
@responses.activate
def test_get_config_data(self) -> None:
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
oi = OrganizationIntegration.objects.get(
integration=integration, organization_id=self.organization.id
)
service = get_services(oi)[0]
config = integration.get_installation(self.organization.id).get_config_data()
assert config == {
"service_table": [
{
"id": service["id"],
"service": service["service_name"],
"integration_key": service["integration_key"],
}
]
}
|
PagerDutyIntegrationTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/bincount_ops_test.py
|
{
"start": 13107,
"end": 13785
}
|
class ____(test.TestCase, parameterized.TestCase):
@test_util.run_v1_only("Test security error")
def testSparseCountSparseOutputBadIndicesShapeTooSmall(self):
indices = [1]
values = [[1]]
weights = []
dense_shape = [10]
with self.assertRaisesRegex(ValueError,
"Shape must be rank 2 but is rank 1 for"):
self.evaluate(
gen_count_ops.SparseCountSparseOutput(
indices=indices,
values=values,
dense_shape=dense_shape,
weights=weights,
binary_output=True))
@test_util.run_all_in_graph_and_eager_modes
@test_util.disable_tfrt
|
RawOpsHeapOobTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linalg_ops_test.py
|
{
"start": 16068,
"end": 16156
}
|
class ____(test.TestCase, _LUReconstruct):
use_static_shape = False
|
LUReconstructDynamic
|
python
|
HIPS__autograd
|
examples/convnet.py
|
{
"start": 4810,
"end": 4899
}
|
class ____(full_layer):
def nonlinearity(self, x):
return np.tanh(x)
|
tanh_layer
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/attrs/test_attrs.py
|
{
"start": 1100,
"end": 2097
}
|
class ____:
n = attr.ib()
def test_jsonable_attrs():
obj = AttrsClass(n=10)
assert to_jsonable(obj, avoid_realization=False) == {"n": 10}
def test_hypothesis_is_not_the_first_to_import_attrs(testdir):
# We only import attrs if the user did so first.
test_path = testdir.makepyfile(
"""
import os
# don't load hypothesis plugins, which might transitively import attrs
os.environ["HYPOTHESIS_NO_PLUGINS"] = "1"
import sys
assert "attrs" not in sys.modules
from hypothesis import given, strategies as st
assert "attrs" not in sys.modules
@given(st.integers() | st.floats() | st.sampled_from(["a", "b"]))
def test_no_attrs_import(x):
assert "attrs" not in sys.modules
"""
)
# don't load pytest plugins, which might transitively import attrs
result = testdir.runpytest(test_path, "--disable-plugin-autoload")
result.assert_outcomes(passed=1, failed=0)
|
AttrsClass
|
python
|
apache__airflow
|
scripts/ci/testing/summarize_captured_warnings.py
|
{
"start": 3143,
"end": 10810
}
|
class ____:
category: str
message: str
filename: str
lineno: int
when: str
node_id: str | None
@property
def unique_warning(self) -> str:
return _unique_key(self.category, self.message, self.filename, str(self.lineno))
@property
def unique_key(self) -> str:
return _unique_key(self.node_id, self.category, self.message, self.filename, str(self.lineno))
@classmethod
def from_dict(cls, d: dict) -> CapturedWarnings:
fields_names = [f.name for f in fields(CapturedWarnings)]
return cls(**{k: v for k, v in d.items() if k in fields_names})
def output(self) -> str:
return json.dumps(asdict(self))
def find_files(directory: Path, glob_pattern: str) -> Iterator[tuple[Path, str]]:
print(f" Process directory {directory} with pattern {glob_pattern!r} ".center(CONSOLE_SIZE, "="))
directory = Path(directory)
for filepath in directory.rglob(glob_pattern):
yield from resolve_file(filepath, directory)
def resolve_file(filepath: Path, directory: Path | None = None) -> Iterator[tuple[Path, str]]:
if not filepath.is_file():
raise SystemExit("Provided path {filepath} is not a file.")
if directory:
source_path = filepath.relative_to(directory).as_posix()
else:
source_path = filepath.as_posix()
yield filepath, source_path
def merge_files(files: Iterator[tuple[Path, str]], output_directory: Path) -> Path:
output_file = output_directory.joinpath(WARNINGS_ALL)
output_bad = output_directory.joinpath(WARNINGS_BAD)
records = bad_records = 0
processed_files = 0
with output_file.open(mode="w") as wfp, output_bad.open(mode="w") as badwfp:
for filepath, source_filename in files:
print(f"Process file: {filepath.as_posix()}")
with open(filepath) as fp:
for lineno, line in enumerate(fp, start=1):
if not (line := line.strip()):
continue
try:
record = json.loads(line)
if not isinstance(record, dict):
raise TypeError
if not all(field in record for field in REQUIRED_FIELDS):
raise ValueError
except Exception:
bad_records += 1
dump = json.dumps({"source": source_filename, "lineno": lineno, "record": line})
badwfp.write(f"{dump}\n")
else:
records += 1
record["source"] = source_filename
wfp.write(f"{json.dumps(record)}\n")
processed_files += 1
print()
print(
f" Total processed lines {records + bad_records:,} in {processed_files:,} file(s) ".center(
CONSOLE_SIZE, "-"
)
)
print(f"Good Records: {records:,}. Saved into file {output_file.as_posix()}")
if bad_records:
print(f"Bad Records: {bad_records:,}. Saved into file {output_file.as_posix()}")
else:
output_bad.unlink()
return output_file
def group_report_warnings(group, when: str, group_records, output_directory: Path) -> None:
output_filepath = output_directory / warnings_filename(f"{group}-{when}")
group_warnings: dict[str, CapturedWarnings] = {}
unique_group_warnings: dict[str, CapturedWarnings] = {}
for record in group_records:
cw = CapturedWarnings.from_dict(record)
if cw.unique_key not in group_warnings:
group_warnings[cw.unique_key] = cw
if cw.unique_warning not in unique_group_warnings:
unique_group_warnings[cw.unique_warning] = cw
print(f" Group {group!r} on {when!r} ".center(CONSOLE_SIZE, "="))
with output_filepath.open(mode="w") as fp:
for cw in group_warnings.values():
fp.write(f"{cw.output()}\n")
print(f"Saved into file: {output_filepath.as_posix()}\n")
if when == "runtest": # Node id exists only during runtest
print(f"Unique warnings within the test cases: {len(group_warnings):,}\n")
print("Top 10 Tests Cases:")
it = count_groups(group_warnings.values(), grouping_key=lambda cw: (cw.category, cw.node_id), top=10)
for (category, node_id), count in it:
if suffix := IMPORTANT_WARNING_SIGN.get(category, ""):
suffix = f" ({suffix})"
print(f" {category} {node_id} - {count:,}{suffix}")
print()
print(f"Unique warnings: {len(unique_group_warnings):,}\n")
print("Warnings grouped by category:")
for category, count in count_groups(unique_group_warnings.values(), grouping_key=lambda cw: cw.category):
if suffix := IMPORTANT_WARNING_SIGN.get(category, ""):
suffix = f" ({suffix})"
print(f" {category} - {count:,}{suffix}")
print()
print("Top 10 Warnings:")
it = count_groups(
unique_group_warnings.values(), grouping_key=lambda cw: (cw.category, cw.filename, cw.lineno), top=10
)
for (category, filename, lineno), count in it:
if suffix := IMPORTANT_WARNING_SIGN.get(category, ""):
suffix = f" ({suffix})"
print(f" {filename}:{lineno}:{category} - {count:,}{suffix}")
print()
always = list(filter(lambda w: w.category in ALWAYS_SHOW_WARNINGS, unique_group_warnings.values()))
if always:
print(f" Always reported warnings {len(always):,}".center(CONSOLE_SIZE, "-"))
for cw in always:
print(f"{cw.filename}:{cw.lineno}")
print(f" {cw.category} - {cw.message}")
print()
def split_by_groups(output_file: Path, output_directory: Path) -> None:
records: list[dict] = []
with output_file.open() as fp:
records.extend(map(json.loads, fp))
for (group, when), group_records in sorted_groupby(
records, grouping_key=lambda record: (record["group"], record["when"])
):
group_report_warnings(group, when, group_records, output_directory)
def main(_input: str, _output: str | None, pattern: str | None) -> int | str:
cwd = Path(".").resolve()
print(f"Current Working Directory: {cwd.as_posix()}")
try:
input_path = Path(os.path.expanduser(os.path.expandvars(_input))).resolve(strict=True)
except FileNotFoundError:
print(f"The path {_input!r} does not exist. Skipping it.")
return 0
except OSError as ex:
return f"Unable to resolve {_input!r} path. {type(ex).__name__}: {ex}"
if not pattern:
print(f" Process file {input_path} ".center(CONSOLE_SIZE, "="))
if not input_path.is_file():
return f"{input_path} is not a file."
files = resolve_file(input_path, cwd if not input_path.is_absolute() else None)
else:
if not input_path.is_dir():
return f"{input_path} is not a file."
files = find_files(input_path, pattern)
output_directory = Path(_output or cwd).resolve()
output_directory.mkdir(parents=True, exist_ok=True)
output_file = merge_files(files, output_directory)
split_by_groups(output_file, output_directory)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Capture Warnings Summarizer")
parser.add_argument("input", help="Input file/or directory path")
parser.add_argument("-g", "--pattern", help="Glob pattern to filter warnings files")
parser.add_argument("-o", "--output", help="Output directory")
args = parser.parse_args()
raise SystemExit(main(args.input, args.output, args.pattern))
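# A hedged illustration, not part of the script: one JSON-lines record of the
# shape merge_files() expects. The REQUIRED_FIELDS constant is not shown above,
# so this sample simply carries every field the code reads (the CapturedWarnings
# fields plus "group"); a "source" field is added during merging. For example:
#
#   {"category": "DeprecationWarning", "message": "distutils is deprecated",
#    "filename": "airflow/example_module.py", "lineno": 42, "when": "runtest",
#    "node_id": "tests/test_example.py::test_something", "group": "core"}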
|
CapturedWarnings
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/streams.py
|
{
"start": 56604,
"end": 56929
}
|
class ____(SemiIncrementalMixin, GithubStream):
"""
API docs: https://docs.github.com/en/rest/deployments/deployments?apiVersion=2022-11-28#list-deployments
"""
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"repos/{stream_slice['repository']}/deployments"
|
Deployments
|
python
|
walkccc__LeetCode
|
solutions/329. Longest Increasing Path in a Matrix/329.py
|
{
"start": 0,
"end": 584
}
|
class ____:
def longestIncreasingPath(self, matrix: list[list[int]]) -> int:
m = len(matrix)
n = len(matrix[0])
@functools.lru_cache(None)
def dfs(i: int, j: int, prev: int) -> int:
if i < 0 or i == m or j < 0 or j == n:
return 0
if matrix[i][j] <= prev:
return 0
curr = matrix[i][j]
return 1 + max(dfs(i + 1, j, curr),
dfs(i - 1, j, curr),
dfs(i, j + 1, curr),
dfs(i, j - 1, curr))
return max(dfs(i, j, -math.inf) for i in range(m) for j in range(n))
|
Solution
|
python
|
coleifer__peewee
|
playhouse/pool.py
|
{
"start": 2360,
"end": 9700
}
|
class ____(object):
def __init__(self, database, max_connections=20, stale_timeout=None,
timeout=None, **kwargs):
self._max_connections = make_int(max_connections)
self._stale_timeout = make_int(stale_timeout)
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
self._pool_lock = threading.RLock()
# Available / idle connections stored in a heap, sorted oldest first.
self._connections = []
# Mapping of connection id to PoolConnection. Ordinarily we would want
# to use something like a WeakKeyDictionary, but Python typically won't
# allow us to create weak references to connection objects.
self._in_use = {}
# Use the memory address of the connection as the key in the event the
# connection object is not hashable. Connections will not get
# garbage-collected, however, because a reference to them will persist
# in "_in_use" as long as the conn has not been closed.
self.conn_key = id
super(PooledDatabase, self).__init__(database, **kwargs)
def init(self, database, max_connections=None, stale_timeout=None,
timeout=None, **connect_kwargs):
super(PooledDatabase, self).init(database, **connect_kwargs)
if max_connections is not None:
self._max_connections = make_int(max_connections)
if stale_timeout is not None:
self._stale_timeout = make_int(stale_timeout)
if timeout is not None:
self._wait_timeout = make_int(timeout)
if self._wait_timeout == 0:
self._wait_timeout = float('inf')
def connect(self, reuse_if_open=False):
if not self._wait_timeout:
return super(PooledDatabase, self).connect(reuse_if_open)
expires = time.time() + self._wait_timeout
while expires > time.time():
try:
ret = super(PooledDatabase, self).connect(reuse_if_open)
except MaxConnectionsExceeded:
time.sleep(0.1)
else:
return ret
raise MaxConnectionsExceeded('Max connections exceeded, timed out '
'attempting to connect.')
@locked
def _connect(self):
while True:
try:
# Remove the oldest connection from the heap.
ts, _, c_conn = heapq.heappop(self._connections)
conn = c_conn
key = self.conn_key(conn)
except IndexError:
ts = conn = None
logger.debug('No connection available in pool.')
break
else:
if self._is_closed(conn):
                # This connection was closed, but since it was not stale
# it got added back to the queue of available conns. We
# then closed it and marked it as explicitly closed, so
# it's safe to throw it away now.
# (Because Database.close() calls Database._close()).
logger.debug('Connection %s was closed.', key)
ts = conn = None
elif self._stale_timeout and self._is_stale(ts):
# If we are attempting to check out a stale connection,
# then close it. We don't need to mark it in the "closed"
# set, because it is not in the list of available conns
# anymore.
logger.debug('Connection %s was stale, closing.', key)
self._close(conn, True)
ts = conn = None
else:
break
if conn is None:
if self._max_connections and (
len(self._in_use) >= self._max_connections):
raise MaxConnectionsExceeded('Exceeded maximum connections.')
conn = super(PooledDatabase, self)._connect()
ts = time.time()
key = self.conn_key(conn)
logger.debug('Created new connection %s.', key)
self._in_use[key] = PoolConnection(ts, conn, time.time())
return conn
def _is_stale(self, timestamp):
# Called on check-out and check-in to ensure the connection has
# not outlived the stale timeout.
return (time.time() - timestamp) > self._stale_timeout
def _is_closed(self, conn):
return False
def _can_reuse(self, conn):
# Called on check-in to make sure the connection can be re-used.
return True
@locked
def _close(self, conn, close_conn=False):
key = self.conn_key(conn)
if close_conn:
super(PooledDatabase, self)._close(conn)
elif key in self._in_use:
pool_conn = self._in_use.pop(key)
if self._stale_timeout and self._is_stale(pool_conn.timestamp):
logger.debug('Closing stale connection %s.', key)
super(PooledDatabase, self)._close(conn)
elif self._can_reuse(conn):
logger.debug('Returning %s to pool.', key)
heapq.heappush(self._connections,
(pool_conn.timestamp, _sentinel(), conn))
else:
logger.debug('Closed %s.', key)
@locked
def manual_close(self):
"""
Close the underlying connection without returning it to the pool.
"""
if self.is_closed():
return False
# Obtain reference to the connection in-use by the calling thread.
conn = self.connection()
# A connection will only be re-added to the available list if it is
# marked as "in use" at the time it is closed. We will explicitly
# remove it from the "in use" list, call "close()" for the
# side-effects, and then explicitly close the connection.
self._in_use.pop(self.conn_key(conn), None)
self.close()
self._close(conn, close_conn=True)
@locked
def close_idle(self):
# Close any open connections that are not currently in-use.
for _, _, conn in self._connections:
self._close(conn, close_conn=True)
self._connections = []
@locked
def close_stale(self, age=600):
# Close any connections that are in-use but were checked out quite some
# time ago and can be considered stale.
in_use = {}
cutoff = time.time() - age
n = 0
for key, pool_conn in self._in_use.items():
if pool_conn.checked_out < cutoff:
self._close(pool_conn.connection, close_conn=True)
n += 1
else:
in_use[key] = pool_conn
self._in_use = in_use
return n
@locked
def close_all(self):
# Close all connections -- available and in-use. Warning: may break any
# active connections used by other threads.
self.close()
for _, _, conn in self._connections:
self._close(conn, close_conn=True)
for pool_conn in self._in_use.values():
self._close(pool_conn.connection, close_conn=True)
self._connections = []
self._in_use = {}
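# A hedged usage sketch, not part of this module: PooledSqliteDatabase is one of
# the concrete pooled backends shipped alongside this base class; the database
# path and the limits below are arbitrary example values chosen to exercise the
# knobs the pool manages.
def _example_pool_usage():
    from playhouse.pool import PooledSqliteDatabase

    db = PooledSqliteDatabase(
        '/tmp/app.db',
        max_connections=8,   # beyond this, connect() raises MaxConnectionsExceeded
        stale_timeout=300,   # idle connections older than 5 minutes are recycled
        timeout=10)          # wait up to 10 seconds for a free slot, then give up
    db.connect()
    # ... run queries against db here ...
    db.close()       # returns the connection to the pool unless it went stale
    db.close_idle()  # explicitly drop any connections left idling in the pool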
|
PooledDatabase
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 103699,
"end": 104139
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseServerInstallationOrderField),
graphql_name="field",
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
|
EnterpriseServerInstallationOrder
|
python
|
pandas-dev__pandas
|
pandas/core/computation/pytables.py
|
{
"start": 12849,
"end": 13453
}
|
class ____(ops.UnaryOp):
def prune(self, klass):
if self.op != "~":
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None and (
(issubclass(klass, ConditionBinOp) and operand.condition is not None)
or (
not issubclass(klass, ConditionBinOp)
and issubclass(klass, FilterBinOp)
and operand.filter is not None
)
):
return operand.invert()
return None
|
UnaryOp
|
python
|
kamyu104__LeetCode-Solutions
|
Python/matrix-cells-in-distance-order.py
|
{
"start": 33,
"end": 698
}
|
class ____(object):
def allCellsDistOrder(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
def append(R, C, r, c, result):
if 0 <= r < R and 0 <= c < C:
result.append([r, c])
result = [[r0, c0]]
for d in xrange(1, R+C):
append(R, C, r0-d, c0, result)
for x in xrange(-d+1, d):
append(R, C, r0+x, c0+abs(x)-d, result)
append(R, C, r0+x, c0+d-abs(x), result)
append(R, C, r0+d, c0, result)
return result
|
Solution
|
python
|
fluentpython__example-code-2e
|
24-class-metaprog/persistent/persistlib.py
|
{
"start": 1800,
"end": 4112
}
|
class ____:
_TABLE_NAME: ClassVar[str]
_TABLE_READY: ClassVar[bool] = False
@classmethod
def _fields(cls) -> dict[str, type]:
return {
name: py_type
for name, py_type in get_type_hints(cls).items()
if not name.startswith('_')
}
def __init_subclass__(cls, *, table: str = '', **kwargs: Any):
super().__init_subclass__(**kwargs) # type:ignore
cls._TABLE_NAME = table if table else cls.__name__.lower() + 's'
for name, py_type in cls._fields().items():
setattr(cls, name, Field(name, py_type))
def __init__(self, *, _pk=None, **kwargs):
field_names = self._asdict().keys()
for name, arg in kwargs.items():
if name not in field_names:
msg = f'{self.__class__.__name__!r} has no attribute {name!r}'
raise AttributeError(msg)
setattr(self, name, arg)
self._pk = _pk
def __repr__(self) -> str:
kwargs = ', '.join(
f'{key}={value!r}' for key, value in self._asdict().items()
)
cls_name = self.__class__.__name__
if self._pk is None:
return f'{cls_name}({kwargs})'
return f'{cls_name}({kwargs}, _pk={self._pk})'
def _asdict(self) -> dict[str, Any]:
return {
name: getattr(self, name)
for name, attr in self.__class__.__dict__.items()
if isinstance(attr, Field)
}
# database methods
@staticmethod
def _connect(db_path: str = db.DEFAULT_DB_PATH):
return db.connect(db_path)
@classmethod
def _ensure_table(cls) -> str:
if not cls._TABLE_READY:
db.ensure_table(cls._TABLE_NAME, cls._fields())
cls._TABLE_READY = True
return cls._TABLE_NAME
def __class_getitem__(cls, pk: int) -> 'Persistent':
field_names = ['_pk'] + list(cls._fields())
values = db.fetch_record(cls._TABLE_NAME, pk)
return cls(**dict(zip(field_names, values)))
def _save(self) -> int:
table = self.__class__._ensure_table()
if self._pk is None:
self._pk = db.insert_record(table, self._asdict())
else:
db.update_record(table, self._pk, self._asdict())
return self._pk
|
Persistent
|
python
|
getsentry__sentry
|
tests/sentry/search/test_utils.py
|
{
"start": 2739,
"end": 6111
}
|
class ____(TestCase):
def test_ms(self) -> None:
assert parse_duration("123", "ms") == 123
def test_sec(self) -> None:
assert parse_duration("456", "s") == 456000
def test_minutes(self) -> None:
assert parse_duration("789", "min") == 789 * 60 * 1000
assert parse_duration("789", "m") == 789 * 60 * 1000
def test_hours(self) -> None:
assert parse_duration("234", "hr") == 234 * 60 * 60 * 1000
assert parse_duration("234", "h") == 234 * 60 * 60 * 1000
def test_days(self) -> None:
assert parse_duration("567", "day") == 567 * 24 * 60 * 60 * 1000
assert parse_duration("567", "d") == 567 * 24 * 60 * 60 * 1000
def test_weeks(self) -> None:
assert parse_duration("890", "wk") == 890 * 7 * 24 * 60 * 60 * 1000
assert parse_duration("890", "w") == 890 * 7 * 24 * 60 * 60 * 1000
def test_errors(self) -> None:
with pytest.raises(InvalidQuery):
parse_duration("test", "ms")
with pytest.raises(InvalidQuery):
parse_duration("123", "test")
def test_large_durations(self) -> None:
max_duration = 999999999 * 24 * 60 * 60 * 1000
assert parse_duration("999999999", "d") == max_duration
assert parse_duration(str(999999999 * 24), "h") == max_duration
assert parse_duration(str(999999999 * 24 * 60), "m") == max_duration
assert parse_duration(str(999999999 * 24 * 60 * 60), "s") == max_duration
assert parse_duration(str(999999999 * 24 * 60 * 60 * 1000), "ms") == max_duration
def test_overflow_durations(self) -> None:
with pytest.raises(InvalidQuery):
assert parse_duration(str(999999999 + 1), "d")
with pytest.raises(InvalidQuery):
assert parse_duration(str((999999999 + 1) * 24), "h")
with pytest.raises(InvalidQuery):
assert parse_duration(str((999999999 + 1) * 24 * 60 + 1), "m")
with pytest.raises(InvalidQuery):
assert parse_duration(str((999999999 + 1) * 24 * 60 * 60 + 1), "s")
with pytest.raises(InvalidQuery):
assert parse_duration(str((999999999 + 1) * 24 * 60 * 60 * 1000 + 1), "ms")
def test_tokenize_query_only_keyed_fields() -> None:
tests = [
("a:a", {"a": ["a"]}),
("(a:a AND b:b)", {"a": ["a"], "b": ["b"]}),
("( a:a AND (b:b OR c:c))", {"a": ["a"], "b": ["b"], "c": ["c"]}),
("( a:a AND (b:b OR c:c ) )", {"a": ["a"], "b": ["b"], "c": ["c"]}),
(
"(x y a:a AND (b:b OR c:c) z)",
{"a": ["a"], "b": ["b"], "c": ["c"], "query": ["x", "y", "z"]},
),
(
"((x y)) a:a AND (b:b OR c:c) z)",
{"a": ["a"], "b": ["b"], "c": ["c"], "query": ["x", "y", "z"]},
),
(
"((x y)) a():>a AND (!b:b OR c():<c) z)",
{"a()": [">a"], "!b": ["b"], "c()": ["<c"], "query": ["x", "y", "z"]},
),
('a:"\\"a\\""', {"a": ['\\"a\\"']}),
(
'a:"i \\" quote" b:"b\\"bb" c:"cc"',
{"a": ['i \\" quote'], "b": ['b\\"bb'], "c": ["cc"]},
),
]
for test in tests:
assert tokenize_query(test[0]) == test[1], test[0]
def test_get_numeric_field_value_invalid() -> None:
with pytest.raises(InvalidQuery):
get_numeric_field_value("foo", ">=1k")
|
TestParseDuration
|
python
|
django__django
|
tests/admin_views/test_nav_sidebar.py
|
{
"start": 4268,
"end": 9019
}
|
class ____(AdminSeleniumTestCase):
available_apps = ["admin_views"] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.superuser = User.objects.create_superuser(
username="super",
password="secret",
email="super@example.com",
)
self.admin_login(
username="super",
password="secret",
login_url=reverse("test_with_sidebar:index"),
)
self.selenium.execute_script(
"localStorage.removeItem('django.admin.navSidebarIsOpen')"
)
def test_sidebar_starts_open(self):
from selenium.webdriver.common.by import By
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
main_element = self.selenium.find_element(By.CSS_SELECTOR, "#main")
self.assertIn("shifted", main_element.get_attribute("class").split())
def test_sidebar_can_be_closed(self):
from selenium.webdriver.common.by import By
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
self.assertEqual(toggle_button.tag_name, "button")
self.assertEqual(toggle_button.get_attribute("aria-label"), "Toggle navigation")
nav_sidebar = self.selenium.find_element(By.ID, "nav-sidebar")
self.assertEqual(nav_sidebar.get_attribute("aria-expanded"), "true")
self.assertTrue(nav_sidebar.is_displayed())
toggle_button.click()
# Hidden sidebar is not visible.
nav_sidebar = self.selenium.find_element(By.ID, "nav-sidebar")
self.assertEqual(nav_sidebar.get_attribute("aria-expanded"), "false")
self.assertFalse(nav_sidebar.is_displayed())
main_element = self.selenium.find_element(By.CSS_SELECTOR, "#main")
self.assertNotIn("shifted", main_element.get_attribute("class").split())
def test_sidebar_state_persists(self):
from selenium.webdriver.common.by import By
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
self.assertIsNone(
self.selenium.execute_script(
"return localStorage.getItem('django.admin.navSidebarIsOpen')"
)
)
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
toggle_button.click()
self.assertEqual(
self.selenium.execute_script(
"return localStorage.getItem('django.admin.navSidebarIsOpen')"
),
"false",
)
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
main_element = self.selenium.find_element(By.CSS_SELECTOR, "#main")
self.assertNotIn("shifted", main_element.get_attribute("class").split())
toggle_button = self.selenium.find_element(
By.CSS_SELECTOR, "#toggle-nav-sidebar"
)
# Hidden sidebar is not visible.
nav_sidebar = self.selenium.find_element(By.ID, "nav-sidebar")
self.assertEqual(nav_sidebar.get_attribute("aria-expanded"), "false")
self.assertFalse(nav_sidebar.is_displayed())
toggle_button.click()
nav_sidebar = self.selenium.find_element(By.ID, "nav-sidebar")
self.assertEqual(nav_sidebar.get_attribute("aria-expanded"), "true")
self.assertTrue(nav_sidebar.is_displayed())
self.assertEqual(
self.selenium.execute_script(
"return localStorage.getItem('django.admin.navSidebarIsOpen')"
),
"true",
)
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
main_element = self.selenium.find_element(By.CSS_SELECTOR, "#main")
self.assertIn("shifted", main_element.get_attribute("class").split())
def test_sidebar_filter_persists(self):
from selenium.webdriver.common.by import By
self.selenium.get(
self.live_server_url + reverse("test_with_sidebar:auth_user_changelist")
)
filter_value_script = (
"return sessionStorage.getItem('django.admin.navSidebarFilterValue')"
)
self.assertIsNone(self.selenium.execute_script(filter_value_script))
filter_input = self.selenium.find_element(By.CSS_SELECTOR, "#nav-filter")
filter_input.send_keys("users")
self.assertEqual(self.selenium.execute_script(filter_value_script), "users")
|
SeleniumTests
|
python
|
pexpect__pexpect
|
tests/test_misc.py
|
{
"start": 1319,
"end": 13616
}
|
class ____(PexpectTestCase.PexpectTestCase):
def test_isatty(self):
" Test isatty() is True after spawning process on most platforms. "
child = pexpect.spawn('cat')
if not child.isatty() and sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
assert child.isatty()
def test_isatty_poll(self):
" Test isatty() is True after spawning process on most platforms. "
child = pexpect.spawn('cat', use_poll=True)
if not child.isatty() and sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
assert child.isatty()
def test_read(self):
" Test spawn.read by calls of various size. "
child = pexpect.spawn('cat')
child.sendline("abc")
child.sendeof()
self.assertEqual(child.read(0), b'')
self.assertEqual(child.read(1), b'a')
self.assertEqual(child.read(1), b'b')
self.assertEqual(child.read(1), b'c')
self.assertEqual(child.read(2), b'\r\n')
remaining = child.read().replace(_CAT_EOF, b'')
self.assertEqual(remaining, b'abc\r\n')
def test_read_poll(self):
" Test spawn.read by calls of various size. "
child = pexpect.spawn('cat', use_poll=True)
child.sendline("abc")
child.sendeof()
self.assertEqual(child.read(0), b'')
self.assertEqual(child.read(1), b'a')
self.assertEqual(child.read(1), b'b')
self.assertEqual(child.read(1), b'c')
self.assertEqual(child.read(2), b'\r\n')
remaining = child.read().replace(_CAT_EOF, b'')
self.assertEqual(remaining, b'abc\r\n')
def test_read_poll_timeout(self):
" Test use_poll properly times out "
child = pexpect.spawn('sleep 5', use_poll=True)
with self.assertRaises(pexpect.TIMEOUT):
child.expect(pexpect.EOF, timeout=1)
def test_readline_bin_echo(self):
" Test spawn('echo'). "
# given,
child = pexpect.spawn('echo', ['alpha', 'beta'])
# exercise,
assert child.readline() == b'alpha beta' + child.crlf
def test_readline(self):
" Test spawn.readline(). "
# when argument 0 is sent, nothing is returned.
# Otherwise the argument value is meaningless.
child = pexpect.spawn('cat', echo=False)
child.sendline("alpha")
child.sendline("beta")
child.sendline("gamma")
child.sendline("delta")
child.sendeof()
assert child.readline(0) == b''
assert child.readline().rstrip() == b'alpha'
assert child.readline(1).rstrip() == b'beta'
assert child.readline(2).rstrip() == b'gamma'
assert child.readline().rstrip() == b'delta'
child.expect(pexpect.EOF)
assert not child.isalive()
assert child.exitstatus == 0
def test_iter(self):
" iterating over lines of spawn.__iter__(). "
child = pexpect.spawn('cat', echo=False)
child.sendline("abc")
child.sendline("123")
child.sendeof()
# Don't use ''.join() because we want to test __iter__().
page = b''
for line in child:
page += line
page = page.replace(_CAT_EOF, b'')
assert page == b'abc\r\n123\r\n'
def test_readlines(self):
" reading all lines of spawn.readlines(). "
child = pexpect.spawn('cat', echo=False)
child.sendline("abc")
child.sendline("123")
child.sendeof()
page = b''.join(child.readlines()).replace(_CAT_EOF, b'')
assert page == b'abc\r\n123\r\n'
child.expect(pexpect.EOF)
assert not child.isalive()
assert child.exitstatus == 0
def test_write(self):
" write a character and return it in return. "
child = pexpect.spawn('cat', echo=False)
child.write('a')
child.write('\r')
self.assertEqual(child.readline(), b'a\r\n')
def test_writelines(self):
" spawn.writelines() "
child = pexpect.spawn('cat')
# notice that much like file.writelines, we do not delimit by newline
# -- it is equivalent to calling write(''.join([args,]))
child.writelines(['abc', '123', 'xyz', '\r'])
child.sendeof()
line = child.readline()
assert line == b'abc123xyz\r\n'
def test_eof(self):
" call to expect() after EOF is received raises pexpect.EOF "
child = pexpect.spawn('cat')
child.sendeof()
with self.assertRaises(pexpect.EOF):
child.expect('the unexpected')
def test_with(self):
"spawn can be used as a context manager"
with pexpect.spawn(self.PYTHONBIN + ' echo_w_prompt.py') as p:
p.expect('<in >')
p.sendline(b'alpha')
p.expect(b'<out>alpha')
assert p.isalive()
assert not p.isalive()
def test_terminate(self):
" test force terminate always succeeds (SIGKILL). "
child = pexpect.spawn('cat')
child.terminate(force=1)
assert child.terminated
def test_sighup(self):
" validate argument `ignore_sighup=True` and `ignore_sighup=False`. "
getch = self.PYTHONBIN + ' getch.py'
child = pexpect.spawn(getch, ignore_sighup=True)
child.expect('READY')
child.kill(signal.SIGHUP)
for _ in range(10):
if not child.isalive():
self.fail('Child process should not have exited.')
time.sleep(0.1)
child = pexpect.spawn(getch, ignore_sighup=False)
child.expect('READY')
child.kill(signal.SIGHUP)
for _ in range(10):
if not child.isalive():
break
time.sleep(0.1)
else:
self.fail('Child process should have exited.')
def test_bad_child_pid(self):
" assert bad condition error in isalive(). "
expect_errmsg = re.escape("isalive() encountered condition where ")
child = pexpect.spawn('cat')
child.terminate(force=1)
# Force an invalid state to test isalive
child.ptyproc.terminated = 0
try:
with self.assertRaisesRegex(pexpect.ExceptionPexpect,
".*" + expect_errmsg):
child.isalive()
finally:
# Force valid state for child for __del__
child.terminated = 1
def test_bad_arguments_suggest_fdpsawn(self):
" assert custom exception for spawn(int). "
expect_errmsg = "maybe you want to use fdpexpect.fdspawn"
with self.assertRaisesRegex(pexpect.ExceptionPexpect,
".*" + expect_errmsg):
pexpect.spawn(1)
def test_bad_arguments_second_arg_is_list(self):
" Second argument to spawn, if used, must be only a list."
with self.assertRaises(TypeError):
pexpect.spawn('ls', '-la')
with self.assertRaises(TypeError):
# not even a tuple,
pexpect.spawn('ls', ('-la',))
def test_read_after_close_raises_value_error(self):
" Calling read_nonblocking after close raises ValueError. "
# as read_nonblocking underlies all other calls to read,
# ValueError should be thrown for all forms of read.
with self.assertRaises(ValueError):
p = pexpect.spawn('cat')
p.close()
p.read_nonblocking()
with self.assertRaises(ValueError):
p = pexpect.spawn('cat')
p.close()
p.read()
with self.assertRaises(ValueError):
p = pexpect.spawn('cat')
p.close()
p.readline()
with self.assertRaises(ValueError):
p = pexpect.spawn('cat')
p.close()
p.readlines()
def test_isalive(self):
" check isalive() before and after EOF. (True, False) "
child = pexpect.spawn('cat')
assert child.isalive() is True
child.sendeof()
child.expect(pexpect.EOF)
assert child.isalive() is False
def test_bad_type_in_expect(self):
" expect() does not accept dictionary arguments. "
child = pexpect.spawn('cat')
with self.assertRaises(TypeError):
child.expect({})
def test_cwd(self):
" check keyword argument `cwd=' of pexpect.run() "
tmp_dir = os.path.realpath(tempfile.gettempdir())
default = pexpect.run('pwd')
pwd_tmp = pexpect.run('pwd', cwd=tmp_dir).rstrip()
assert default != pwd_tmp
assert tmp_dir == _u(pwd_tmp)
def _test_searcher_as(self, searcher, plus=None):
# given,
given_words = ['alpha', 'beta', 'gamma', 'delta', ]
given_search = given_words
if searcher == pexpect.searcher_re:
given_search = [re.compile(word) for word in given_words]
if plus is not None:
given_search = given_search + [plus]
search_string = searcher(given_search)
basic_fmt = '\n {0}: {1}'
fmt = basic_fmt
if searcher is pexpect.searcher_re:
fmt = '\n {0}: re.compile({1})'
expected_output = '{0}:'.format(searcher.__name__)
idx = 0
for word in given_words:
expected_output += fmt.format(idx, "'{0}'".format(word))
idx += 1
if plus is not None:
if plus == pexpect.EOF:
expected_output += basic_fmt.format(idx, 'EOF')
elif plus == pexpect.TIMEOUT:
expected_output += basic_fmt.format(idx, 'TIMEOUT')
# exercise,
assert search_string.__str__() == expected_output
def test_searcher_as_string(self):
" check searcher_string(..).__str__() "
self._test_searcher_as(pexpect.searcher_string)
def test_searcher_as_string_with_EOF(self):
" check searcher_string(..).__str__() that includes EOF "
self._test_searcher_as(pexpect.searcher_string, plus=pexpect.EOF)
def test_searcher_as_string_with_TIMEOUT(self):
" check searcher_string(..).__str__() that includes TIMEOUT "
self._test_searcher_as(pexpect.searcher_string, plus=pexpect.TIMEOUT)
def test_searcher_re_as_string(self):
" check searcher_re(..).__str__() "
self._test_searcher_as(pexpect.searcher_re)
def test_searcher_re_as_string_with_EOF(self):
" check searcher_re(..).__str__() that includes EOF "
self._test_searcher_as(pexpect.searcher_re, plus=pexpect.EOF)
def test_searcher_re_as_string_with_TIMEOUT(self):
" check searcher_re(..).__str__() that includes TIMEOUT "
self._test_searcher_as(pexpect.searcher_re, plus=pexpect.TIMEOUT)
def test_nonnative_pty_fork(self):
" test forced self.__fork_pty() and __pty_make_controlling_tty "
# given,
class spawn_ourptyfork(pexpect.spawn):
def _spawn(self, command, args=[], preexec_fn=None,
dimensions=None):
self.use_native_pty_fork = False
pexpect.spawn._spawn(self, command, args, preexec_fn,
dimensions)
# exercise,
p = spawn_ourptyfork('cat', echo=False)
# verify,
p.sendline('abc')
p.expect('abc')
p.sendeof()
p.expect(pexpect.EOF)
assert not p.isalive()
def test_exception_tb(self):
" test get_trace() filters away pexpect/__init__.py calls. "
p = pexpect.spawn('sleep 1')
try:
p.expect('BLAH')
except pexpect.ExceptionPexpect as e:
# get_trace should filter out frames in pexpect's own code
tb = e.get_trace()
# exercise,
assert 'raise ' not in tb
assert 'pexpect/__init__.py' not in tb
else:
assert False, "Should have raised an exception."
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestCaseMisc)
|
TestCaseMisc
|
python
|
marshmallow-code__marshmallow
|
tests/base.py
|
{
"start": 1934,
"end": 3625
}
|
class ____:
SPECIES = "Homo sapiens"
def __init__(
self,
name,
*,
age=0,
id_=None,
homepage=None,
email=None,
registered=True,
time_registered=None,
birthdate=None,
birthtime=None,
balance=100,
sex=GenderEnum.male,
hair_color=HairColorEnum.black,
employer=None,
various_data=None,
):
self.name = name
self.age = age
# A naive datetime
self.created = dt.datetime(2013, 11, 10, 14, 20, 58)
# A TZ-aware datetime
self.updated = dt.datetime(2013, 11, 10, 14, 20, 58, tzinfo=central)
self.id = id_
self.homepage = homepage
self.email = email
self.balance = balance
self.registered = registered
self.hair_colors = list(HairColorEnum.__members__)
self.sex_choices = list(GenderEnum.__members__)
self.finger_count = 10
self.uid = uuid.uuid1()
self.time_registered = time_registered or dt.time(1, 23, 45, 6789)
self.birthdate = birthdate or dt.date(2013, 1, 23)
self.birthtime = birthtime or dt.time(0, 1, 2, 3333)
self.activation_date = dt.date(2013, 12, 11)
self.sex = sex
self.hair_color = hair_color
self.employer = employer
self.relatives = []
self.various_data = various_data or {
"pets": ["cat", "dog"],
"address": "1600 Pennsylvania Ave\nWashington, DC 20006",
}
@property
def since_created(self):
return dt.datetime(2013, 11, 24) - self.created
def __repr__(self):
return f"<User {self.name}>"
|
User
|
python
|
jina-ai__jina
|
tests/integration/hot_reload/my_executor_3_new.py
|
{
"start": 38,
"end": 169
}
|
class ____(Executor):
@requests
def x(self, docs, **kwargs):
for doc in docs:
doc.text = 'AAfterReload'
|
A
|
python
|
mlflow__mlflow
|
mlflow/entities/model_registry/model_version_search.py
|
{
"start": 58,
"end": 863
}
|
class ____(ModelVersion):
def __init__(self, *args, **kwargs):
kwargs["tags"] = []
kwargs["aliases"] = []
super().__init__(*args, **kwargs)
def tags(self):
raise Exception(
"UC Model Versions gathered through search_model_versions do not have tags. "
"Please use get_model_version to obtain an individual version's tags."
)
def aliases(self):
raise Exception(
"UC Model Versions gathered through search_model_versions do not have aliases. "
"Please use get_model_version to obtain an individual version's aliases."
)
def __eq__(self, other):
if type(other) in {type(self), ModelVersion}:
return self.__dict__ == other.__dict__
return False
|
ModelVersionSearch
|
python
|
huggingface__transformers
|
src/transformers/models/llava/image_processing_llava_fast.py
|
{
"start": 1181,
"end": 6233
}
|
class ____(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_pad = False
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
def pad_to_square(
self,
images: "torch.Tensor",
background_color: Union[int, tuple[int, int, int]] = 0,
) -> "torch.Tensor":
"""
Pads an image to a square based on the longest edge.
Args:
images (`np.ndarray`):
The images to pad.
background_color (`int` or `tuple[int, int, int]`, *optional*, defaults to 0):
The color to use for the padding. Can be an integer for single channel or a
tuple of integers representing for multi-channel images. If passed as integer
in multi-channel mode, it will default to `0` in subsequent channels.
Returns:
`torch.Tensor`: The padded images.
"""
height, width = get_image_size(images, ChannelDimension.FIRST)
if height == width:
return images
num_channels = images.shape[1] if len(images.shape) == 4 else images.shape[0]
if isinstance(background_color, int):
background_color = [background_color] + [0] * (num_channels - 1)
elif len(background_color) != num_channels:
raise ValueError(
f"background_color must have no more than {num_channels} elements to match the number of channels"
)
max_dim = max(height, width)
paste_x_left = (max_dim - width) // 2
paste_y_left = (max_dim - height) // 2
paste_x_right = max_dim - width - paste_x_left
paste_y_right = max_dim - height - paste_y_left
padded_images = F.pad(
images, padding=[paste_x_left, paste_y_left, paste_x_right, paste_y_right], fill=background_color
)
return padded_images
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_pad: bool,
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_pad:
stacked_images = self.pad_to_square(
images=stacked_images, background_color=tuple(int(x * 255) for x in self.image_mean)
)
resized_images_grouped[shape] = stacked_images
padded_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for batched resizing
# Needed in case do_pad is False, or padding returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(padded_images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
__all__ = ["LlavaImageProcessorFast"]
|
LlavaImageProcessorFast
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/stats.py
|
{
"start": 4727,
"end": 7032
}
|
class ____(StatsWriter):
def __init__(self):
self.training_start_time = time.time()
# If self-play, we want to print ELO as well as reward
self.self_play = False
self.self_play_team = -1
self.rank = get_rank()
def write_stats(
self, category: str, values: Dict[str, StatsSummary], step: int
) -> None:
is_training = "Not Training"
if "Is Training" in values:
stats_summary = values["Is Training"]
if stats_summary.aggregated_value > 0.0:
is_training = "Training"
elapsed_time = time.time() - self.training_start_time
log_info: List[str] = [category]
log_info.append(f"Step: {step}")
log_info.append(f"Time Elapsed: {elapsed_time:0.3f} s")
if "Environment/Cumulative Reward" in values:
stats_summary = values["Environment/Cumulative Reward"]
if self.rank is not None:
log_info.append(f"Rank: {self.rank}")
log_info.append(f"Mean Reward: {stats_summary.mean:0.3f}")
if "Environment/Group Cumulative Reward" in values:
group_stats_summary = values["Environment/Group Cumulative Reward"]
log_info.append(f"Mean Group Reward: {group_stats_summary.mean:0.3f}")
else:
log_info.append(f"Std of Reward: {stats_summary.std:0.3f}")
log_info.append(is_training)
if self.self_play and "Self-play/ELO" in values:
elo_stats = values["Self-play/ELO"]
log_info.append(f"ELO: {elo_stats.mean:0.3f}")
else:
log_info.append("No episode was completed since last summary")
log_info.append(is_training)
logger.info(". ".join(log_info) + ".")
def add_property(
self, category: str, property_type: StatsPropertyType, value: Any
) -> None:
if property_type == StatsPropertyType.HYPERPARAMETERS:
logger.info(
"""Hyperparameters for behavior name {}: \n{}""".format(
category, _dict_to_str(value, 0)
)
)
elif property_type == StatsPropertyType.SELF_PLAY:
assert isinstance(value, bool)
self.self_play = value
|
ConsoleWriter
|
python
|
Pylons__pyramid
|
src/pyramid/config/zca.py
|
{
"start": 55,
"end": 889
}
|
class ____:
def hook_zca(self):
"""Call :func:`zope.component.getSiteManager.sethook` with the
argument :data:`pyramid.threadlocal.get_current_registry`, causing
the :term:`Zope Component Architecture` 'global' APIs such as
:func:`zope.component.getSiteManager`,
:func:`zope.component.getAdapter` and others to use the
:app:`Pyramid` :term:`application registry` rather than the Zope
'global' registry."""
from zope.component import getSiteManager
getSiteManager.sethook(get_current_registry)
def unhook_zca(self):
"""Call :func:`zope.component.getSiteManager.reset` to undo the
action of :meth:`pyramid.config.Configurator.hook_zca`."""
from zope.component import getSiteManager
getSiteManager.reset()
|
ZCAConfiguratorMixin
|
python
|
PrefectHQ__prefect
|
src/prefect/settings/legacy.py
|
{
"start": 480,
"end": 5693
}
|
class ____:
"""Mimics the old Setting object for compatibility with existing code."""
def __init__(
self, name: str, default: Any, type_: Any, accessor: Optional[str] = None
):
self._name = name
self._default = default
self._type = type_
if accessor is None:
self.accessor: str = _env_var_to_accessor(name)
else:
self.accessor: str = accessor
@property
def name(self) -> str:
return self._name
@property
def is_secret(self) -> bool:
if self._type in _SECRET_TYPES:
return True
for secret_type in _SECRET_TYPES:
if secret_type in get_args(self._type):
return True
return False
def default(self) -> Any:
return self._default
def value(self: Self) -> Any:
if (
self.name == "PREFECT_TEST_SETTING"
or self.name == "PREFECT_TESTING_TEST_SETTING"
):
if (
"PREFECT_TEST_MODE" in os.environ
or "PREFECT_TESTING_TEST_MODE" in os.environ
):
return get_current_settings().testing.test_setting
else:
return None
return self.value_from(get_current_settings())
def value_from(self: Self, settings: Settings) -> Any:
path = self.accessor.split(".")
current_value = settings
for key in path:
current_value = getattr(current_value, key, None)
if isinstance(current_value, _SECRET_TYPES):
return current_value.get_secret_value() # type: ignore
return current_value
def __bool__(self) -> bool:
return bool(self.value())
def __str__(self) -> str:
return str(self.value())
def __repr__(self) -> str:
return f"<{self.name}: {self._type!r}>"
def __eq__(self, __o: object) -> bool:
return __o.__eq__(self.value())
def __hash__(self) -> int:
return hash((type(self), self.name))
def _env_var_to_accessor(env_var: str) -> str:
"""
Convert an environment variable name to a settings accessor.
"""
if (field := _get_settings_fields(Settings).get(env_var)) is not None:
return field.accessor
return env_var.replace("PREFECT_", "").lower()
@cache
def _get_valid_setting_names(cls: type[BaseSettings]) -> Set[str]:
"""
A set of valid setting names, e.g. "PREFECT_API_URL" or "PREFECT_API_KEY".
"""
settings_fields: set[str] = set()
for field_name, field in cls.model_fields.items():
if inspect.isclass(field.annotation) and issubclass(
field.annotation, PrefectBaseSettings
):
settings_fields.update(_get_valid_setting_names(field.annotation))
else:
if field.validation_alias and isinstance(
field.validation_alias, AliasChoices
):
for alias in field.validation_alias.choices:
if not isinstance(alias, str):
continue
settings_fields.add(alias.upper())
else:
settings_fields.add(
f"{cls.model_config.get('env_prefix')}{field_name.upper()}"
)
return settings_fields
@cache
def _get_settings_fields(
settings: Type[BaseSettings], accessor_prefix: Optional[str] = None
) -> Dict[str, "Setting"]:
"""Get the settings fields for the settings object"""
settings_fields: dict[str, Setting] = {}
for field_name, field in settings.model_fields.items():
if inspect.isclass(field.annotation) and issubclass(
field.annotation, PrefectBaseSettings
):
accessor = (
field_name
if accessor_prefix is None
else f"{accessor_prefix}.{field_name}"
)
settings_fields.update(_get_settings_fields(field.annotation, accessor))
else:
accessor = (
field_name
if accessor_prefix is None
else f"{accessor_prefix}.{field_name}"
)
if field.validation_alias and isinstance(
field.validation_alias, AliasChoices
):
for alias in field.validation_alias.choices:
if not isinstance(alias, str):
continue
setting = Setting(
name=alias.upper(),
default=field.default,
type_=field.annotation,
accessor=accessor,
)
settings_fields[setting.name] = setting
settings_fields[setting.accessor] = setting
else:
setting = Setting(
name=f"{settings.model_config.get('env_prefix')}{field_name.upper()}",
default=field.default,
type_=field.annotation,
accessor=accessor,
)
settings_fields[setting.name] = setting
settings_fields[setting.accessor] = setting
return settings_fields
|
Setting
|
python
|
pytorch__pytorch
|
torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py
|
{
"start": 3789,
"end": 12226
}
|
class ____(ActivationWrapper):
"""
An ``nn.Module`` that wraps another ``nn.Module`` with checkpointing.
Note that this module is not meant to be used directly but instead,
it is to be used through the ``checkpoint_wrapper`` function.
"""
def __init__(
self,
mod: torch.nn.Module,
checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT,
checkpoint_fn=None,
**checkpoint_fn_kwargs,
):
super().__init__(mod)
self.checkpoint_impl = checkpoint_impl
if checkpoint_fn is None:
# use torch.utils.checkpoint
self.checkpoint_fn = partial(
torch_utils_checkpoint,
use_reentrant=(self.checkpoint_impl == CheckpointImpl.REENTRANT),
**checkpoint_fn_kwargs,
)
else:
# Construct user-specified checkpoint function.
self.checkpoint_fn = partial(
checkpoint_fn,
**checkpoint_fn_kwargs,
)
def forward(self, *args, **kwargs):
# Support keyword arguments for reentrant checkpoint. Note that this
# only works if user has specified self.checkpoint_impl and is not
# using their own custom checkpoint_fn.
if self.checkpoint_impl == CheckpointImpl.REENTRANT and kwargs != {}:
# Pack the args and kwargs
flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs)
# Function that only takes (packed) args, but can unpack them
# into the original args and kwargs for the checkpointed
# function, and runs that function.
def my_function(*inputs):
# unpack back into args and kwargs
unpacked_args, unpacked_kwargs = _unpack_kwargs(inputs, kwarg_keys)
# run original module
return self._checkpoint_wrapped_module(
*unpacked_args, **unpacked_kwargs
)
# Pass the function that only takes packed args into reentrant
# checkpoint API.
return self.checkpoint_fn( # type: ignore[misc]
my_function,
*flat_args,
)
else:
return self.checkpoint_fn( # type: ignore[misc]
self._checkpoint_wrapped_module, *args, **kwargs
)
def offload_wrapper(module: torch.nn.Module) -> torch.nn.Module:
"""
Wrap a module for activation offloading to CPU.
Offloads intermediate activations to the CPU for modules wrapped with this function.
Wrappers with activation offload can be composed with ones that do recomputation-based
checkpoint to trade off increased compute versus increased CPU
memory usage and additional H2D transfers.
Usage::
offloaded_module = offload_wrapper(module)
outputs = checkpointed_module(inputs)
Args:
module (nn.Module):
The module to be wrapped
Returns:
(nn.Module):
Wrapped module
"""
return OffloadWrapper(module)
def checkpoint_wrapper(
module: torch.nn.Module,
checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT,
checkpoint_fn=None,
**checkpoint_fn_kwargs,
) -> torch.nn.Module:
"""
Wrap a module for activation checkpointing.
    If the module is wrapped with this function, all subsequent calls to the module will
    automatically perform checkpointing without the user having to explicitly call the ``checkpoint`` function.
Usage::
checkpointed_module = checkpoint_wrapper(module)
outputs = checkpointed_module(inputs)
Args:
module (nn.Module):
The module to be wrapped
checkpoint_impl (Optional[CheckpointImpl]):
The checkpointing implementation to use. Note that this will only
be passed into the ``torch.utils.checkpoint.checkpoint``
implementation, and is ignored if a custom ``checkpoint_fn`` is
specified. Note that for implementations using reentrant checkpoint
from ``torch.utils.checkpoint``, keyword arguments will only be
            supported if ``checkpoint_impl`` is passed as ``CheckpointImpl.REENTRANT``.
checkpoint_fn (Optional[Callable]):
Functional checkpoint implementation to use. If this is specified,
it will be used over the default ``torch.utils.checkpoint.checkpoint``
implementation and the `checkpoint_impl` argument will be ignored.
**checkpoint_fn_kwargs: (Dict[str, Any]): Keyword arguments to pass into `checkpoint_fn`.
Returns:
(nn.Module):
Wrapped module
"""
if checkpoint_impl == CheckpointImpl.REENTRANT:
warnings.warn(
f"Please specify {CheckpointImpl.NO_REENTRANT} as "
f"{CheckpointImpl.REENTRANT} will soon be removed as "
"the default and eventually deprecated.",
FutureWarning,
stacklevel=2,
)
return CheckpointWrapper(
module,
checkpoint_impl,
checkpoint_fn,
**checkpoint_fn_kwargs,
)
def apply_activation_checkpointing(
model,
checkpoint_wrapper_fn=checkpoint_wrapper,
check_fn=lambda _: True,
auto_wrap_policy: Callable[[nn.Module, bool, int], bool] | None = None,
):
"""
Apply :func:`checkpoint_wrapper` to modules within `model` based on a user-defined configuration.
For each module within `model`, the `check_fn` is used to decide
whether `module` should be wrapped with :func:`checkpoint_wrapper` or not.
Note::
This function modifies `model` in place and replaces appropriate layers with
their checkpoint-wrapped modules.
Note::
This function will not wrap the overall root module. If this is needed, please directly use
:func:`checkpoint_wrapper` or :func:`offload_wrapper`.
Usage::
model = nn.Sequential(
nn.Linear(10, 10), nn.Linear(10, 10), nn.Linear(10, 10)
)
check_fn = lambda l: isinstance(l, nn.Linear)
# checkpoint activations
apply_activation_checkpointing(model, checkpoint_wrapper_fn=checkpoint_wrapper, check_fn=check_fn)
# Or offload activations to CPU
apply_activation_checkpointing(model, checkpoint_wrapper_fn=offload_wrapper, check_fn=check_fn)
Args:
model (nn.Module):
The model whose submodules should be wrapped with activation checkpointing.
checkpoint_wrapper_fn (Optional[Callable[nn.Module]])
A ``Callable`` which will wrap modules
check_fn (Optional[Callable[nn.Module, nn.Module]])
A lambda function which will be passed each child submodule of ``model`` and returns
``True`` or ``False`` depending on whether the submodule should be wrapped.
auto_wrap_policy (Optional[Callable[[nn.Module, bool, int], bool]]): A policy to wrap model's
submodules with AC. Note that if this is specified, it takes precedence over ``check_fn``.
Returns: None (`model` is modified inplace)
"""
# TODO: Importing inside function to avoid circular import issue between FSDP and
# checkpoint_wrapper. This can be resolved once wrap() APIs are decoupled from FSDP code.
from torch.distributed.fsdp._wrap_utils import _construct_wrap_fn, _post_order_apply
from torch.distributed.fsdp.wrap import (
_Policy,
_recursive_wrap,
lambda_auto_wrap_policy,
)
policy = (
auto_wrap_policy
if auto_wrap_policy is not None
else partial(lambda_auto_wrap_policy, lambda_fn=check_fn)
)
if not callable(policy):
if not isinstance(policy, _Policy):
raise ValueError(
f"Expected {policy} to be callable or be a pre-defined wrap policy"
)
target_module_to_kwargs = policy._run_policy(
model, ignored_modules=set(), root_kwargs={}
)
wrap_fn = _construct_wrap_fn(
model, target_module_to_kwargs, checkpoint_wrapper_fn
)
_post_order_apply(model, wrap_fn)
return
_recursive_wrap(
module=model,
auto_wrap_policy=policy, # type: ignore[arg-type]
wrapper_cls=checkpoint_wrapper_fn,
ignored_modules=set(),
ignored_params=set(),
only_wrap_children=True,
)
|
CheckpointWrapper
|
python
|
readthedocs__readthedocs.org
|
readthedocs/invitations/views.py
|
{
"start": 808,
"end": 1597
}
|
class ____(PrivateViewMixin, UserPassesTestMixin, DeleteView):
"""
Revoke invitation view.
An invitation is revoked by simple deleting it.
"""
model = Invitation
pk_url_kwarg = "invitation_pk"
http_method_names = ["post"]
def form_valid(self, form):
invitation = self.get_object()
invitation.create_audit_log(
action=AuditLog.INVITATION_REVOKED,
request=self.request,
user=self.request.user,
)
return super().form_valid(form)
def test_func(self):
invitation = self.get_object()
return invitation.can_revoke_invitation(self.request.user)
def get_success_url(self):
invitation = self.get_object()
return invitation.get_origin_url()
|
RevokeInvitation
|
python
|
tiangolo__fastapi
|
tests/test_skip_defaults.py
|
{
"start": 363,
"end": 2071
}
|
class ____(BaseModel):
w: Optional[str] = None
x: Optional[str] = None
y: str = "y"
z: str = "z"
@app.get("/", response_model=Model, response_model_exclude_unset=True)
def get_root() -> ModelSubclass:
return ModelSubclass(sub={}, y=1, z=0)
@app.get(
"/exclude_unset", response_model=ModelDefaults, response_model_exclude_unset=True
)
def get_exclude_unset() -> ModelDefaults:
return ModelDefaults(x=None, y="y")
@app.get(
"/exclude_defaults",
response_model=ModelDefaults,
response_model_exclude_defaults=True,
)
def get_exclude_defaults() -> ModelDefaults:
return ModelDefaults(x=None, y="y")
@app.get(
"/exclude_none", response_model=ModelDefaults, response_model_exclude_none=True
)
def get_exclude_none() -> ModelDefaults:
return ModelDefaults(x=None, y="y")
@app.get(
"/exclude_unset_none",
response_model=ModelDefaults,
response_model_exclude_unset=True,
response_model_exclude_none=True,
)
def get_exclude_unset_none() -> ModelDefaults:
return ModelDefaults(x=None, y="y")
client = TestClient(app)
def test_return_defaults():
response = client.get("/")
assert response.json() == {"sub": {}}
def test_return_exclude_unset():
response = client.get("/exclude_unset")
assert response.json() == {"x": None, "y": "y"}
def test_return_exclude_defaults():
response = client.get("/exclude_defaults")
assert response.json() == {}
def test_return_exclude_none():
response = client.get("/exclude_none")
assert response.json() == {"y": "y", "z": "z"}
def test_return_exclude_unset_none():
response = client.get("/exclude_unset_none")
assert response.json() == {"y": "y"}
|
ModelDefaults
|
python
|
doocs__leetcode
|
solution/2300-2399/2369.Check if There is a Valid Partition For The Array/Solution.py
|
{
"start": 0,
"end": 559
}
|
class ____:
def validPartition(self, nums: List[int]) -> bool:
@cache
def dfs(i: int) -> bool:
if i >= n:
return True
a = i + 1 < n and nums[i] == nums[i + 1]
b = i + 2 < n and nums[i] == nums[i + 1] == nums[i + 2]
c = (
i + 2 < n
and nums[i + 1] - nums[i] == 1
and nums[i + 2] - nums[i + 1] == 1
)
return (a and dfs(i + 2)) or ((b or c) and dfs(i + 3))
n = len(nums)
return dfs(0)
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
|
{
"start": 12887,
"end": 13417
}
|
class ____(graphene.Mutation):
"""Cancels a set of partition backfill runs."""
Output = graphene.NonNull(GrapheneCancelBackfillResult)
class Arguments:
backfillId = graphene.NonNull(graphene.String)
class Meta:
name = "CancelBackfillMutation"
@capture_error
@require_permission_check(Permissions.CANCEL_PARTITION_BACKFILL)
def mutate(self, graphene_info: ResolveInfo, backfillId: str):
return cancel_partition_backfill(graphene_info, backfillId)
|
GrapheneCancelBackfillMutation
|
python
|
kamyu104__LeetCode-Solutions
|
Python/evaluate-division.py
|
{
"start": 2483,
"end": 3041
}
|
class ____(object):
def calcEquation(self, equations, values, queries):
"""
:type equations: List[List[str]]
:type values: List[float]
:type queries: List[List[str]]
:rtype: List[float]
"""
union_find = UnionFind()
for (a, b), k in itertools.izip(equations, values):
union_find.union_set(a, b, k)
return [union_find.query_set(a, b) for a, b in queries]
# Time: O(e + q * n), at most O(n^3 + q)
# Space: O(n^2)
# bfs solution
import collections
import itertools
|
Solution
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/store/query_store.py
|
{
"start": 884,
"end": 5436
}
|
class ____(Store):
"""SqlAlchemyQueryStore stores queries by name, and makes it possible to retrieve the resulting value by query
name.""" # noqa: E501 # FIXME CoP
_key_class: ClassVar[Type] = StringKey
def __init__(
self,
credentials,
queries=None,
store_backend=None,
runtime_environment=None,
store_name=None,
) -> None:
if not sa:
raise gx_exceptions.DataContextError( # noqa: TRY003 # FIXME CoP
"sqlalchemy module not found, but is required for SqlAlchemyQueryStore"
)
super().__init__(
store_backend=store_backend,
runtime_environment=runtime_environment,
store_name=store_name,
)
if queries:
# If queries are defined in configuration, then we load them into an InMemoryStoreBackend # noqa: E501 # FIXME CoP
try:
assert isinstance(queries, dict), (
"SqlAlchemyQueryStore queries must be defined as a dictionary"
)
assert (
store_backend is None or store_backend["class_name"] == "InMemoryStoreBackend"
), (
"If queries are provided in configuration, then store_backend must be empty or an " # noqa: E501 # FIXME CoP
"InMemoryStoreBackend"
)
for k, v in queries.items():
self._store_backend.set(tuple([k]), v)
except (AssertionError, KeyError) as e:
raise gx_exceptions.InvalidConfigError(str(e))
if "engine" in credentials:
self.engine = credentials["engine"]
elif "url" in credentials:
self.engine = sa.create_engine(credentials["url"])
elif "connection_string" in credentials:
self.engine = sa.create_engine(credentials["connection_string"])
else:
drivername = credentials.pop("drivername")
options = url_create_fn(drivername, **credentials)
self.engine = sa.create_engine(options)
# Gather the call arguments of the present function (include the "module_name" and add the "class_name"), filter # noqa: E501 # FIXME CoP
# out the Falsy values, and set the instance "_config" variable equal to the resulting dictionary. # noqa: E501 # FIXME CoP
self._config = {
"credentials": credentials,
"queries": queries,
"store_backend": store_backend,
"runtime_environment": runtime_environment,
"store_name": store_name,
"module_name": self.__class__.__module__,
"class_name": self.__class__.__name__,
}
filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
def _convert_key(self, key):
if isinstance(key, str):
return StringKey(key)
return key
def get(self, key): # type: ignore[explicit-override] # FIXME
return super().get(self._convert_key(key))
def set(self, key, value): # type: ignore[explicit-override] # FIXME
return super().set(self._convert_key(key), value)
def get_query_result(self, key, query_parameters=None):
if query_parameters is None:
query_parameters = {}
result = self._store_backend.get(self._convert_key(key).to_tuple())
if isinstance(result, dict):
query = result.get("query")
return_type = result.get("return_type", "list")
if return_type not in ["list", "scalar"]:
raise ValueError( # noqa: TRY003 # FIXME CoP
"The return_type of a SqlAlchemyQueryStore query must be one of either 'list' "
"or 'scalar'"
)
else:
query = result
return_type = None
assert query, "Query must be specified to use SqlAlchemyQueryStore"
query = Template(query).safe_substitute(query_parameters)
with self.engine.begin() as connection:
res = connection.execute(sa.text(query)).fetchall()
# NOTE: 20200617 - JPC: this approach is probably overly opinionated, but we can
# adjust based on specific user requests
res = [val for row in res for val in row]
if return_type == "scalar":
[res] = res
return res
@property
@override
def config(self) -> dict:
return self._config
|
SqlAlchemyQueryStore
|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2/modeling_wav2vec2.py
|
{
"start": 18865,
"end": 22235
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[Wav2Vec2Config] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
current_states = key_value_states if is_cross_attention else hidden_states
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights, None
|
Wav2Vec2Attention
|
python
|
pydantic__pydantic
|
tests/test_forward_ref.py
|
{
"start": 42770,
"end": 44839
}
|
class ____(Base):
pass
"""
)
ta = TypeAdapter(mod_2.Sub)
assert ta.validate_python({'f': '1'}) == {'f': 1}
def test_parameterized_with_annotated_forward_refs() -> None:
T = TypeVar('T')
class Parent(BaseModel, Generic[T]):
a: T
b: 'MyAnnotated[T, 1]'
c: Annotated[T, 2] = Field(gt=2)
M = Parent[Annotated['MyInt', 3]]
assert not M.__pydantic_fields_complete__
MyAnnotated = Annotated
MyInt = int
M.model_rebuild()
assert M.__pydantic_fields_complete__
assert M.model_fields['a'].annotation is int
assert M.model_fields['b'].annotation is int
assert M.model_fields['c'].annotation is int
assert M.model_fields['a'].metadata == [3]
assert M.model_fields['b'].metadata == [3, 1]
assert M.model_fields['c'].metadata == [Gt(2), 3, 2]
@pytest.mark.xfail(
reason=(
'Similar to `test_uses_the_correct_globals_to_resolve_model_forward_refs()`,'
"the NsResolver used for the `M.model_rebuild()` call doesn't make use of `Parent`, "
"so its `__type_params__` aren't available (they contain `T`)."
)
)
@pytest.mark.skipif(sys.version_info < (3, 12), reason='Test related to PEP 695 syntax.')
def test_parameterized_pep695_generic_with_annotated_forward_refs(create_module) -> None:
mod = create_module(
"""
from typing import Annotated
from pydantic import BaseModel
class Parent[T](BaseModel):
a: T
b: 'MyAnnotated[T, 1]'
c: Annotated[T, 2] = Field(gt=2)
M = Parent[Annotated['MyInt', 3]]
MyAnnotated = Annotated
MyInt = int
M.model_rebuild()
"""
)
M = mod.M
assert M.__pydantic_fields_complete__
assert M.model_fields['a'].annotation is int
assert M.model_fields['b'].annotation is int
assert M.model_fields['c'].annotation is int
assert M.model_fields['a'].metadata == [3]
assert M.model_fields['b'].metadata == [3, 1]
assert M.model_fields['c'].metadata == [Gt(2), 3, 2]
|
Sub
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/init_ops_v2.py
|
{
"start": 22952,
"end": 26343
}
|
class ____(Initializer):
"""Initializer that generates an orthogonal matrix.
Initializers allow you to pre-specify an initialization strategy, encoded in
the Initializer object, without knowing the shape and dtype of the variable
being initialized.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Examples:
>>> def make_variables(k, initializer):
... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),
... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))
>>> v1, v2 = make_variables(3, tf.initializers.Orthogonal())
>>> v1
<tf.Variable ... shape=(3, 3) ...
>>> v2
<tf.Variable ... shape=(3, 3, 3) ...
>>> make_variables(4, tf.initializers.Orthogonal(gain=0.5))
(<tf.Variable ... shape=(4, 4) dtype=float32...
<tf.Variable ... shape=(4, 4, 4) dtype=float32...
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
`tf.random.set_seed` for behavior.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point or the input shape is not
valid.
"""
self._validate_kwargs(kwargs, support_partition=False)
dtype = _assert_float_dtype(dtype)
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize, specified by argument `shape`"
" must be at least two-dimensional. Received shape="
f"{shape}")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = self._random_generator.random_normal(flat_shape, dtype=dtype)
# Compute the qr factorization
q, r = gen_linalg_ops.qr(a, full_matrices=False)
# Make Q uniform
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {"gain": self.gain, "seed": self.seed}
|
Orthogonal
|
python
|
pytorch__pytorch
|
torch/utils/benchmark/utils/fuzzer.py
|
{
"start": 14125,
"end": 18778
}
|
class ____:
def __init__(
self,
parameters: list[FuzzedParameter | list[FuzzedParameter]],
tensors: list[FuzzedTensor | list[FuzzedTensor]],
constraints: list[Callable] | None = None,
seed: int | None = None
) -> None:
"""
Args:
parameters:
List of FuzzedParameters which provide specifications
for generated parameters. Iterable elements will be
unpacked, though arbitrary nested structures will not.
tensors:
List of FuzzedTensors which define the Tensors which
will be created each step based on the parameters for
that step. Iterable elements will be unpacked, though
arbitrary nested structures will not.
constraints:
List of callables. They will be called with params
as kwargs, and if any of them return False the current
set of parameters will be rejected.
seed:
Seed for the RandomState used by the Fuzzer. This will
also be used to set the PyTorch random seed so that random
ops will create reproducible Tensors.
"""
import numpy as np
if seed is None:
seed = int(np.random.RandomState().randint(0, 2 ** 32 - 1, dtype=np.int64))
self._seed = seed
self._parameters = Fuzzer._unpack(parameters, FuzzedParameter)
self._tensors = Fuzzer._unpack(tensors, FuzzedTensor)
self._constraints = constraints or ()
p_names = {p.name for p in self._parameters}
t_names = {t.name for t in self._tensors}
name_overlap = p_names.intersection(t_names)
if name_overlap:
raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}")
self._rejections = 0
self._total_generated = 0
@staticmethod
def _unpack(values, cls):
return tuple(it.chain.from_iterable(
[[i] if isinstance(i, cls) else i for i in values]
))
def take(self, n):
import numpy as np
state = np.random.RandomState(self._seed)
torch.manual_seed(state.randint(low=0, high=2 ** 63, dtype=np.int64))
for _ in range(n):
params = self._generate(state)
tensors = {}
tensor_properties = {}
for t in self._tensors:
tensor, properties = t._make_tensor(params, state)
tensors[t.name] = tensor
tensor_properties[t.name] = properties
yield tensors, tensor_properties, params
@property
def rejection_rate(self):
if not self._total_generated:
return 0.
return self._rejections / self._total_generated
def _generate(self, state):
strict_params: dict[str, float | int | ParameterAlias] = {}
for _ in range(1000):
candidate_params: dict[str, float | int | ParameterAlias] = {}
for p in self._parameters:
if p.strict:
if p.name in strict_params:
candidate_params[p.name] = strict_params[p.name]
else:
candidate_params[p.name] = p.sample(state)
strict_params[p.name] = candidate_params[p.name]
else:
candidate_params[p.name] = p.sample(state)
candidate_params = self._resolve_aliases(candidate_params)
self._total_generated += 1
if not all(f(candidate_params) for f in self._constraints):
self._rejections += 1
continue
if not all(t.satisfies_constraints(candidate_params) for t in self._tensors):
self._rejections += 1
continue
return candidate_params
raise ValueError("Failed to generate a set of valid parameters.")
@staticmethod
def _resolve_aliases(params):
params = dict(params)
alias_count = sum(isinstance(v, ParameterAlias) for v in params.values())
keys = list(params.keys())
while alias_count:
for k in keys:
v = params[k]
if isinstance(v, ParameterAlias):
params[k] = params[v.alias_to]
alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values())
if alias_count == alias_count_new:
raise ValueError(f"ParameterAlias cycle detected\n{params}")
alias_count = alias_count_new
return params
|
Fuzzer
|
python
|
ray-project__ray
|
rllib/core/models/base.py
|
{
"start": 6446,
"end": 10138
}
|
class ____(Model, abc.ABC):
"""The framework-agnostic base class for all RLlib encoders.
Encoders are used to transform observations to a latent space.
Therefore, their `input_specs` contains the observation space dimensions.
Similarly, their `output_specs` contains the latent space dimensions.
Encoders can be recurrent, in which case the state should be part of input- and
output_specs. The latent vectors produced by an encoder are fed into subsequent
"heads". Any implementation of Encoder should also be callable. This should be done
by also inheriting from a framework-specific model base-class, s.a. TorchModel or
TfModel.
Abstract illustration of typical flow of tensors:
Inputs
|
Encoder
| \
Head1 Head2
| /
Outputs
Outputs of encoders are generally of shape (B, latent_dim) or (B, T, latent_dim).
That is, for time-series data, we encode into the latent space for each time step.
This should be reflected in the `output_specs`.
Usage example together with a ModelConfig:
.. testcode::
from dataclasses import dataclass
import numpy as np
from ray.rllib.core.columns import Columns
from ray.rllib.core.models.base import Encoder, ENCODER_OUT
from ray.rllib.core.models.configs import ModelConfig
from ray.rllib.policy.sample_batch import SampleBatch
class NumpyEncoder(Encoder):
def __init__(self, config):
super().__init__(config)
self.factor = config.factor
def __call__(self, *args, **kwargs):
# This is a dummy method to do checked forward passes.
return self._forward(*args, **kwargs)
def _forward(self, input_dict, **kwargs):
obs = input_dict[Columns.OBS]
return {
ENCODER_OUT: np.array(obs) * self.factor,
Columns.STATE_OUT: (
np.array(input_dict[Columns.STATE_IN])
* self.factor
),
}
@dataclass
class NumpyEncoderConfig(ModelConfig):
factor: int = None
def build(self, framework: str):
return NumpyEncoder(self)
config = NumpyEncoderConfig(factor=2)
encoder = NumpyEncoder(config)
print(encoder({Columns.OBS: 1, Columns.STATE_IN: 2}))
.. testoutput::
{'encoder_out': 2, 'state_out': 4}
"""
@abc.abstractmethod
def _forward(self, input_dict: dict, **kwargs) -> dict:
"""Returns the latent of the encoder for the given inputs.
This method is called by the forwarding method of the respective framework
that is itself wrapped by RLlib in order to check model inputs and outputs.
The input dict contains at minimum the observation and the state of the encoder
(None for stateless encoders).
The output dict contains at minimum the latent and the state of the encoder
(None for stateless encoders).
To establish an agreement between the encoder and RLModules, these values
have the fixed keys `Columns.OBS` for the `input_dict`,
and `ACTOR` and `CRITIC` for the returned dict.
Args:
input_dict: The input tensors. Must contain at a minimum the keys
Columns.OBS and Columns.STATE_IN (which might be None for stateless
encoders).
**kwargs: Forward compatibility kwargs.
Returns:
The output tensors. Must contain at a minimum the key ENCODER_OUT.
"""
@ExperimentalAPI
|
Encoder
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/tables.py
|
{
"start": 4541,
"end": 5318
}
|
class ____(StringFormatter):
''' Display numeric values from continuous ranges as "basic numbers",
using scientific notation when appropriate by default.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
precision = Int(10, help="""
How many digits of precision to display.
""")
power_limit_high = Int(5, help="""
Limit the use of scientific notation to when::
log(x) >= power_limit_high
""")
power_limit_low = Int(-3, help="""
Limit the use of scientific notation to when::
log(x) <= power_limit_low
""")
nan_format = Override(default="-")
null_format = Override(default="-")
|
ScientificFormatter
|
python
|
doocs__leetcode
|
solution/1100-1199/1168.Optimize Water Distribution in a Village/Solution2.py
|
{
"start": 624,
"end": 1076
}
|
class ____:
def minCostToSupplyWater(
self, n: int, wells: List[int], pipes: List[List[int]]
) -> int:
for i, w in enumerate(wells, 1):
pipes.append([0, i, w])
pipes.sort(key=lambda x: x[2])
uf = UnionFind(n + 1)
ans = 0
for a, b, c in pipes:
if uf.union(a, b):
ans += c
n -= 1
if n == 0:
return ans
|
Solution
|
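The Solution above depends on a UnionFind helper that lies outside the recorded span. A minimal sketch matching the interface it uses (construct with a size; union(a, b) returns True only when a merge actually happens):
class UnionFind:
    def __init__(self, n: int):
        self.p = list(range(n))

    def find(self, x: int) -> int:
        # Path compression keeps later lookups close to O(1).
        if self.p[x] != x:
            self.p[x] = self.find(self.p[x])
        return self.p[x]

    def union(self, a: int, b: int) -> bool:
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False  # already in the same component, no merge
        self.p[ra] = rb
        return True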
python
|
pandas-dev__pandas
|
pandas/plotting/_misc.py
|
{
"start": 22046,
"end": 24868
}
|
class ____(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to break down into groups later.
See Also
--------
plotting.register_matplotlib_converters : Register pandas formatters and
converters with matplotlib.
plotting.bootstrap_plot : Bootstrap plot on mean, median and mid-range statistics.
plotting.autocorrelation_plot : Autocorrelation plot for time series.
plotting.lag_plot : Lag plot for time series.
Examples
--------
.. plot::
:context: close-figs
>>> np.random.seed(42)
>>> df = pd.DataFrame(
... {"A": np.random.randn(10), "B": np.random.randn(10)},
... index=pd.date_range("1/1/2000", freq="4MS", periods=10),
... )
>>> with pd.plotting.plot_params.use("x_compat", True):
... _ = df["A"].plot(color="r")
... _ = df["B"].plot(color="g")
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {"x_compat": "xaxis.compat"}
_DEFAULT_KEYS = ["xaxis.compat"]
def __init__(self) -> None:
super().__setitem__("xaxis.compat", False)
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError(f"{key} is not a valid pandas plotting option")
return super().__getitem__(key)
def __setitem__(self, key, value) -> None:
key = self._get_canonical_key(key)
super().__setitem__(key, value)
def __delitem__(self, key) -> None:
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError(f"Cannot remove default parameter {key}")
super().__delitem__(key)
def __contains__(self, key) -> bool:
key = self._get_canonical_key(key)
return super().__contains__(key)
def reset(self) -> None:
"""
Reset the option store to its initial state
Returns
-------
None
"""
# error: Cannot access "__init__" directly
self.__init__() # type: ignore[misc]
def _get_canonical_key(self, key: str) -> str:
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value) -> Generator[_Options]:
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
plot_params.__module__ = "pandas.plotting"
|
_Options
|
python
|
django__django
|
django/core/files/storage/memory.py
|
{
"start": 649,
"end": 977
}
|
class ____:
def _initialize_times(self):
self.created_time = now()
self.accessed_time = self.created_time
self.modified_time = self.created_time
def _update_accessed_time(self):
self.accessed_time = now()
def _update_modified_time(self):
self.modified_time = now()
|
TimingMixin
|
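For illustration only (not from the original module), a hypothetical consumer of the mixin would call the hooks like this, assuming now() comes from django.utils.timezone as in the mixin itself:
class InMemoryNode(TimingMixin):  # hypothetical example class
    def __init__(self):
        self._initialize_times()

    def read(self):
        self._update_accessed_time()

    def write(self):
        self._update_modified_time()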
python
|
keras-team__keras
|
keras/src/ops/symbolic_arguments_test.py
|
{
"start": 164,
"end": 3623
}
|
class ____(testing.TestCase):
# Testing multiple args and empty kwargs
def test_args(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{},
)
self.assertEqual(args.keras_tensors, [a, b])
self.assertEqual(args._flat_arguments, [a, b])
self.assertEqual(args._single_positional_tensor, None)
# Testing single arg and single position tensor
def test_args_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
args = SymbolicArguments((a))
self.assertEqual(args.keras_tensors, [a])
self.assertEqual(args._flat_arguments, [a])
self.assertEqual(len(args.kwargs), 0)
self.assertEqual(isinstance(args.args[0], KerasTensor), True)
self.assertEqual(args._single_positional_tensor, a)
# Testing kwargs
def test_kwargs(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
self.assertEqual(args.keras_tensors, [a, b, c])
self.assertEqual(args._flat_arguments, [a, b, c])
self.assertEqual(args._single_positional_tensor, None)
# Testing conversion function with args and kwargs
def test_conversion_fn(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
c = KerasTensor(shape=shape)
sym_args = SymbolicArguments(
(
a,
b,
),
{1: c},
)
(value, _) = sym_args.convert(lambda x: x**2)
args1 = value[0][0]
self.assertIsInstance(args1, KerasTensor)
mapped_value = tree.map_structure(lambda x: x**2, a)
self.assertEqual(mapped_value.shape, args1.shape)
self.assertEqual(mapped_value.dtype, args1.dtype)
# Testing fill in function with single args only
def test_fill_in_single_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
tensor_dict = {id(a): 3}
sym_args = SymbolicArguments((a))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, (3,))
# Testing fill in function with multiple args
def test_fill_in_multiple_arg(self):
shape = (2, 3, 4)
a = KerasTensor(shape=shape)
b = KerasTensor(shape=shape)
tensor_dict = {id(b): 2}
sym_args = SymbolicArguments((a, b))
# Call the method to be tested
result, _ = sym_args.fill_in(tensor_dict)
self.assertEqual(result, ((None, 2),))
# Testing fill in function for args and kwargs
def test_fill_in(self):
shape1 = (2, 3, 4)
shape2 = (3, 2, 4)
a = KerasTensor(shape=shape1)
b = KerasTensor(shape=shape2)
c = KerasTensor(shape=shape2)
dictionary = {id(a): 3, id(c): 2}
sym_args = SymbolicArguments(
(
a,
b,
),
{"1": c},
)
(values, _) = sym_args.fill_in(dictionary)
self.assertEqual(values, ((3, None), {"1": 2}))
|
SymbolicArgumentsTest
|
python
|
getsentry__sentry
|
src/sentry/db/models/fields/bounded.py
|
{
"start": 2396,
"end": 2806
}
|
class ____(models.PositiveBigIntegerField):
description = _("Positive big integer")
MAX_VALUE = I64_MAX
def get_internal_type(self) -> str:
return "PositiveBigIntegerField"
def get_prep_value(self, value: int) -> int:
if value:
value = int(value)
assert value <= self.MAX_VALUE
return super().get_prep_value(value)
|
BoundedPositiveBigIntegerField
|
python
|
more-itertools__more-itertools
|
tests/test_more.py
|
{
"start": 198432,
"end": 199840
}
|
class ____(TestCase):
def test_basic(self):
self.assertTrue(mi.iequals("abc", iter("abc")))
self.assertTrue(mi.iequals(range(3), [0, 1, 2]))
self.assertFalse(mi.iequals("abc", [0, 1, 2]))
def test_no_iterables(self):
self.assertTrue(mi.iequals())
def test_one_iterable(self):
self.assertTrue(mi.iequals("abc"))
def test_more_than_two_iterable(self):
self.assertTrue(mi.iequals("abc", iter("abc"), ['a', 'b', 'c']))
self.assertFalse(mi.iequals("abc", iter("abc"), ['a', 'b', 'd']))
def test_order_matters(self):
self.assertFalse(mi.iequals("abc", "acb"))
def test_not_equal_lengths(self):
self.assertFalse(mi.iequals("abc", "ab"))
self.assertFalse(mi.iequals("abc", "bc"))
self.assertFalse(mi.iequals("aaa", "aaaa"))
def test_empty_iterables(self):
self.assertTrue(mi.iequals([], ""))
def test_none_is_not_a_sentinel(self):
# See https://stackoverflow.com/a/900444
self.assertFalse(mi.iequals([1, 2], [1, 2, None]))
self.assertFalse(mi.iequals([1, 2], [None, 1, 2]))
def test_not_identical_but_equal(self):
self.assertEqual([1, True], [1.0, complex(1, 0)])
def test_fillvalue_not_fakeable(self):
# See https://github.com/more-itertools/more-itertools/issues/900
self.assertFalse(mi.iequals([], [mock.ANY]))
|
IequalsTests
|
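The behaviours exercised above (order matters, unequal lengths fail, None is not treated as a sentinel, and the fill value cannot be faked even by mock.ANY) all follow from comparing the iterables position by position against a private sentinel checked by identity. A rough sketch, not necessarily the library's actual implementation:
from itertools import zip_longest

_SENTINEL = object()

def iequals_sketch(*iterables) -> bool:
    for group in zip_longest(*iterables, fillvalue=_SENTINEL):
        # Identity check: even mock.ANY cannot impersonate the sentinel.
        if any(item is _SENTINEL for item in group):
            return False
        first = group[0]
        if not all(item == first for item in group[1:]):
            return False
    return True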
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autosummary-imported_members/autosummary_dummy_package/autosummary_dummy_module.py
|
{
"start": 0,
"end": 86
}
|
class ____:
"""Bar class"""
pass
def foo():
"""Foo function"""
pass
|
Bar
|
python
|
spyder-ide__spyder
|
spyder/config/user.py
|
{
"start": 984,
"end": 4536
}
|
class ____(cp.ConfigParser, object):
"""
Class used to save defaults to a file and as UserConfig base class.
"""
def __init__(self, name, path):
"""
Class used to save defaults to a file and as UserConfig base class.
"""
super().__init__(interpolation=None)
self._name = name
self._path = path
if not osp.isdir(osp.dirname(self._path)):
os.makedirs(osp.dirname(self._path))
def _write(self, fp):
"""
Write method for Python 2.
The one from configparser fails for non-ascii Windows accounts.
"""
if self._defaults:
fp.write('[{}]\n'.format(cp.DEFAULTSECT))
for (key, value) in self._defaults.items():
value_plus_end_of_line = str(value).replace('\n', '\n\t')
fp.write('{} = {}\n'.format(key, value_plus_end_of_line))
fp.write('\n')
for section in self._sections:
fp.write('[{}]\n'.format(section))
for (key, value) in self._sections[section].items():
if key == '__name__':
continue
if (value is not None) or (self._optcre == self.OPTCRE):
value = str(value)
value_plus_end_of_line = value.replace('\n', '\n\t')
key = ' = '.join((key, value_plus_end_of_line))
fp.write('{}\n'.format(key))
fp.write('\n')
def _set(self, section, option, value, verbose):
"""Set method."""
if not self.has_section(section):
self.add_section(section)
if not isinstance(value, str):
value = repr(value)
if verbose:
text = '[{}][{}] = {}'.format(section, option, value)
print(text) # spyder: test-skip
super().set(section, option, value)
def _save(self):
"""Save config into the associated .ini file."""
fpath = self.get_config_fpath()
def _write_file(fpath):
with io.open(fpath, 'w', encoding='utf-8') as configfile:
self.write(configfile)
# See spyder-ide/spyder#1086 and spyder-ide/spyder#1242 for background
# on why this method contains all the exception handling.
try:
# The "easy" way
_write_file(fpath)
except EnvironmentError:
try:
# The "delete and sleep" way
if osp.isfile(fpath):
os.remove(fpath)
time.sleep(0.05)
_write_file(fpath)
except Exception as e:
print('Failed to write user configuration file to disk, with '
'the exception shown below') # spyder: test-skip
print(e) # spyder: test-skip
def get_config_fpath(self):
"""Return the ini file where this configuration is stored."""
path = self._path
config_file = osp.join(path, '{}.ini'.format(self._name))
return config_file
def set_defaults(self, defaults):
"""Set default values and save to defaults folder location."""
for section, options in defaults:
for option in options:
new_value = options[option]
self._set(section, option, new_value, verbose=False)
# ============================================================================
# User config class
# ============================================================================
|
DefaultsConfig
|
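For illustration, a minimal way to exercise the class above (the name and path are hypothetical):
import tempfile

defaults = [
    ("main", {"check_updates": True, "window_size": (1200, 800)}),
]

cfg = DefaultsConfig(name="defaults-example", path=tempfile.mkdtemp())
cfg.set_defaults(defaults)
cfg._save()  # writes <path>/defaults-example.ini via get_config_fpath()
print(cfg.get_config_fpath())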
python
|
getsentry__sentry
|
src/sentry/new_migrations/monkey/fields.py
|
{
"start": 894,
"end": 3916
}
|
class ____(RemoveField):
def __init__(self, *args, deletion_action: DeletionAction, **kwargs):
super().__init__(*args, **kwargs)
self.deletion_action = deletion_action
def state_forwards(self, app_label: str, state: SentryProjectState) -> None: # type: ignore[override]
if self.deletion_action == DeletionAction.MOVE_TO_PENDING:
field = state.apps.get_model(app_label, self.model_name_lower)._meta.get_field(
self.name_lower
)
if getattr(field, "db_constraint", False):
raise UnsafeOperationException(
f"Foreign key db constraint must be removed before dropping {app_label}.{self.model_name_lower}.{self.name}. "
"More info: https://develop.sentry.dev/api-server/application-domains/database-migrations/#deleting-columns"
)
if (
not isinstance(field, ManyToManyField)
and not field.null
and field.db_default is NOT_PROVIDED
):
raise UnsafeOperationException(
f"Field {app_label}.{self.model_name_lower}.{self.name} must either be nullable or have a db_default before dropping. "
"More info: https://develop.sentry.dev/api-server/application-domains/database-migrations/#deleting-columns"
)
state.remove_field(
app_label, self.model_name_lower, self.name_lower, deletion_action=self.deletion_action
)
def database_forwards(
self,
app_label: str,
schema_editor: SafePostgresDatabaseSchemaEditor, # type: ignore[override]
from_state: SentryProjectState, # type: ignore[override]
to_state: SentryProjectState, # type: ignore[override]
) -> None:
if self.deletion_action == DeletionAction.MOVE_TO_PENDING:
return
field = from_state.get_pending_deletion_field(app_label, self.model_name, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, field.model):
schema_editor.remove_field(field.model, field, is_safe=True)
def database_backwards(
self,
app_label: str,
schema_editor: SafePostgresDatabaseSchemaEditor, # type: ignore[override]
from_state: SentryProjectState, # type: ignore[override]
to_state: SentryProjectState, # type: ignore[override]
) -> None:
if self.deletion_action == DeletionAction.MOVE_TO_PENDING:
return
field = to_state.get_pending_deletion_field(app_label, self.model_name, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, field.model):
schema_editor.add_field(field.model, field)
def describe(self) -> str:
if self.deletion_action == DeletionAction.MOVE_TO_PENDING:
return f"Moved {self.model_name}.{self.name} field to pending deletion state"
else:
return super().describe()
|
SafeRemoveField
|
python
|
MorvanZhou__Reinforcement-learning-with-tensorflow
|
experiments/Solve_BipedalWalker/DDPG.py
|
{
"start": 1418,
"end": 4406
}
|
class ____(object):
def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
self.sess = sess
self.a_dim = action_dim
self.action_bound = action_bound
self.lr = learning_rate
self.t_replace_iter = t_replace_iter
self.t_replace_counter = 0
with tf.variable_scope('Actor'):
# input s, output a
self.a = self._build_net(S, scope='eval_net', trainable=True)
# input s_, output a, get a_ for critic
self.a_ = self._build_net(S_, scope='target_net', trainable=False)
self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
def _build_net(self, s, scope, trainable):
with tf.variable_scope(scope):
init_w = tf.random_normal_initializer(0., 0.01)
init_b = tf.constant_initializer(0.01)
net = tf.layers.dense(s, 500, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l1', trainable=trainable)
net = tf.layers.dense(net, 200, activation=tf.nn.relu,
kernel_initializer=init_w, bias_initializer=init_b, name='l2', trainable=trainable)
with tf.variable_scope('a'):
actions = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, kernel_initializer=init_w,
bias_initializer=init_b, name='a', trainable=trainable)
scaled_a = tf.multiply(actions, self.action_bound, name='scaled_a') # Scale output to -action_bound to action_bound
return scaled_a
def learn(self, s): # batch update
self.sess.run(self.train_op, feed_dict={S: s})
if self.t_replace_counter % self.t_replace_iter == 0:
self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
self.t_replace_counter += 1
def choose_action(self, s):
s = s[np.newaxis, :] # single state
return self.sess.run(self.a, feed_dict={S: s})[0] # single action
def add_grad_to_graph(self, a_grads):
with tf.variable_scope('policy_grads'):
# ys = policy;
# xs = policy's parameters;
# self.a_grads = the gradients of the policy to get more Q
# tf.gradients will calculate dys/dxs with an initial gradient for ys, so this is dq/da * da/dparams
self.policy_grads_and_vars = tf.gradients(ys=self.a, xs=self.e_params, grad_ys=a_grads)
with tf.variable_scope('A_train'):
opt = tf.train.RMSPropOptimizer(-self.lr) # (- learning rate) for ascent policy
self.train_op = opt.apply_gradients(zip(self.policy_grads_and_vars, self.e_params), global_step=GLOBAL_STEP)
############################### Critic ####################################
|
Actor
|
python
|
pytorch__pytorch
|
test/distributed/_pycute/test_right_inverse.py
|
{
"start": 2093,
"end": 3505
}
|
class ____(TestCase):
def helper_test_right_inverse(self, layout):
inv_layout = right_inverse(layout)
_LOGGER.debug(f"{layout} => {inv_layout}")
for i in range(size(inv_layout)):
self.assertEqual(layout(inv_layout(i)), i)
def test_right_inverse(self):
test = Layout(1, 0)
self.helper_test_right_inverse(test)
test = Layout((1, 1), (0, 0))
self.helper_test_right_inverse(test)
test = Layout((3, 7), (0, 0))
self.helper_test_right_inverse(test)
test = Layout(1, 1)
self.helper_test_right_inverse(test)
test = Layout(4, 0)
self.helper_test_right_inverse(test)
test = Layout(4, 1)
self.helper_test_right_inverse(test)
test = Layout(4, 2)
self.helper_test_right_inverse(test)
test = Layout((2, 4), (0, 2))
self.helper_test_right_inverse(test)
test = Layout((8, 4), (1, 8))
self.helper_test_right_inverse(test)
test = Layout((8, 4), (4, 1))
self.helper_test_right_inverse(test)
test = Layout((2, 4, 6), (1, 2, 8))
self.helper_test_right_inverse(test)
test = Layout((2, 4, 6), (4, 1, 8))
self.helper_test_right_inverse(test)
test = Layout((4, 2), (1, 16))
self.helper_test_right_inverse(test)
if __name__ == "__main__":
run_tests()
|
TestRightInverse
|
python
|
django-haystack__django-haystack
|
test_haystack/solr_tests/test_solr_backend.py
|
{
"start": 59420,
"end": 61152
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
self.raw_solr = pysolr.Solr(settings.HAYSTACK_CONNECTIONS["solr"]["URL"])
clear_solr_index()
# Stow.
self.old_ui = connections["solr"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SolrBoostMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["solr"]._index = self.ui
self.sb = connections["solr"].get_backend()
self.sample_objs = []
for i in range(1, 5):
mock = AFourthMockModel()
mock.id = i
if i % 2:
mock.author = "daniel"
mock.editor = "david"
else:
mock.author = "david"
mock.editor = "daniel"
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections["solr"]._index = self.old_ui
super().tearDown()
def test_boost(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_solr.search("*:*").hits, 4)
results = SearchQuerySet("solr").filter(
SQ(author="daniel") | SQ(editor="daniel")
)
self.assertEqual(
[result.id for result in results],
[
"core.afourthmockmodel.1",
"core.afourthmockmodel.3",
"core.afourthmockmodel.2",
"core.afourthmockmodel.4",
],
)
@unittest.skipIf(
Version(pysolr.__version__) < Version("3.1.1"),
"content extraction requires pysolr > 3.1.1",
)
|
SolrBoostBackendTestCase
|
python
|
huggingface__transformers
|
src/transformers/models/chameleon/modeling_chameleon.py
|
{
"start": 8238,
"end": 10794
}
|
class ____(nn.LayerNorm):
"""
LayerNorm but computes stats only over the last dim because Chameleon applies gamma and beta
from each shard separately to each head, instead of reducing. We can apply each head's own
gamma/beta by repeat-interleaving weights from each shard, but the stats have to be computed
in the last dimension. This module applies gamma/beta manually to fulfill this requirement.
"""
def __init__(self, hidden_size, *args, **kwargs):
super().__init__(hidden_size, *args, **kwargs)
self.normalized_shape = (hidden_size[-1],)
def forward(self, hidden_states):
hidden_states = F.layer_norm(hidden_states, self.normalized_shape, None, None, eps=1e-5)
hidden_states = hidden_states * self.weight + self.bias
return hidden_states
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
# Copied from transformers.models.llama.modeling_llama.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
|
ChameleonLayerNorm
|
python
|
joke2k__faker
|
faker/providers/date_time/pt_BR/__init__.py
|
{
"start": 46,
"end": 803
}
|
class ____(DateTimeProvider):
DAY_NAMES = {
"0": "domingo",
"1": "segunda-feira",
"2": "terΓ§a-feira",
"3": "quarta-feira",
"4": "quinta-feira",
"5": "sexta-feira",
"6": "sΓ‘bado",
}
MONTH_NAMES = {
"01": "janeiro",
"02": "fevereiro",
"03": "marΓ§o",
"04": "abril",
"05": "maio",
"06": "junho",
"07": "julho",
"08": "agosto",
"09": "setembro",
"10": "outubro",
"11": "novembro",
"12": "dezembro",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
|
Provider
|
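A quick way to see the localized provider in action (standard Faker usage; the surrounding package wiring is not shown in the record):
from faker import Faker

fake = Faker("pt_BR")
print(fake.day_of_week())  # e.g. "segunda-feira"
print(fake.month_name())   # e.g. "março"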
python
|
streamlit__streamlit
|
lib/tests/streamlit/web/server/routes_test.py
|
{
"start": 8808,
"end": 9629
}
|
class ____(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
return tornado.web.Application(
[
(
r"^/(?!/)(.*)",
RemoveSlashHandler,
)
]
)
def test_parse_url_path_301(self):
paths = ["/page1/", "/page2/page3/"]
responses = [self.fetch(path, follow_redirects=False) for path in paths]
for idx, r in enumerate(responses):
assert r.code == 301
assert r.headers["Location"] == paths[idx].rstrip("/")
def test_parse_url_path_404(self):
paths = ["//page1/", "//page2/page3/"]
responses = [self.fetch(path, follow_redirects=False) for path in paths]
for r in responses:
assert r.code == 404
|
RemoveSlashHandlerTest
|
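The RemoveSlashHandler under test is outside the recorded span. A hypothetical Tornado handler with the behaviour the assertions describe (a 301 to the same path minus its trailing slash, while double-slash paths never match the route and fall through to a 404) might look roughly like:
import tornado.web

class RemoveSlashHandlerSketch(tornado.web.RequestHandler):  # illustrative only
    def get(self, path: str) -> None:
        uri = self.request.uri or ""
        if uri != "/" and uri.endswith("/"):
            # Permanent redirect to the path without the trailing slash.
            self.redirect(uri.rstrip("/"), permanent=True)
            return
        self.finish(path)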
python
|
zarr-developers__zarr-python
|
src/zarr/core/array_spec.py
|
{
"start": 902,
"end": 2727
}
|
class ____:
"""
A model of the runtime configuration of an array.
Parameters
----------
order : MemoryOrder
The memory layout of the arrays returned when reading data from the store.
write_empty_chunks : bool
If True, empty chunks will be written to the store.
"""
order: MemoryOrder
write_empty_chunks: bool
def __init__(self, order: MemoryOrder, write_empty_chunks: bool) -> None:
order_parsed = parse_order(order)
write_empty_chunks_parsed = parse_bool(write_empty_chunks)
object.__setattr__(self, "order", order_parsed)
object.__setattr__(self, "write_empty_chunks", write_empty_chunks_parsed)
@classmethod
def from_dict(cls, data: ArrayConfigParams) -> Self:
"""
Create an ArrayConfig from a dict. The keys of that dict are a subset of the
attributes of the ArrayConfig class. Any keys missing from that dict will be set to
the values in the ``array`` namespace of ``zarr.config``.
"""
kwargs_out: ArrayConfigParams = {}
for f in fields(ArrayConfig):
field_name = cast("Literal['order', 'write_empty_chunks']", f.name)
if field_name not in data:
kwargs_out[field_name] = zarr_config.get(f"array.{field_name}")
else:
kwargs_out[field_name] = data[field_name]
return cls(**kwargs_out)
ArrayConfigLike = ArrayConfig | ArrayConfigParams
def parse_array_config(data: ArrayConfigLike | None) -> ArrayConfig:
"""
Convert various types of data to an ArrayConfig.
"""
if data is None:
return ArrayConfig.from_dict({})
elif isinstance(data, ArrayConfig):
return data
else:
return ArrayConfig.from_dict(data)
@dataclass(frozen=True)
|
ArrayConfig
|
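A short usage sketch of the config model above; the value filled in for the missing key comes from whatever zarr.config currently holds:
# Fully explicit construction:
cfg = ArrayConfig(order="C", write_empty_chunks=False)

# Partial dict: "write_empty_chunks" is pulled from zarr.config's "array" namespace.
cfg2 = parse_array_config({"order": "F"})
print(cfg2.order, cfg2.write_empty_chunks)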
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/queues.py
|
{
"start": 61173,
"end": 63892
}
|
class ____(Response):
"""
Response of queues.get_next_task endpoint.
:param entry: Entry information
:type entry: Entry
:param task_info: Info about the returned task. Returned only if get_task_info
is set to True
:type task_info: dict
"""
_service = "queues"
_action = "get_next_task"
_version = "2.23"
_schema = {
"definitions": {
"entry": {
"properties": {
"added": {
"description": "Time this entry was added to the queue",
"format": "date-time",
"type": ["string", "null"],
},
"task": {
"description": "Queued task ID",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"entry": {
"description": "Entry information",
"oneOf": [{"$ref": "#/definitions/entry"}, {"type": "null"}],
},
"task_info": {
"description": "Info about the returned task. Returned only if get_task_info is set to True",
"properties": {
"company": {"description": "Task company ID", "type": "string"},
"user": {
"description": "ID of the user who created the task",
"type": "string",
},
},
"type": ["object", "null"],
},
},
"type": "object",
}
def __init__(self, entry: Any = None, task_info: Optional[dict] = None, **kwargs: Any) -> None:
super(GetNextTaskResponse, self).__init__(**kwargs)
self.entry = entry
self.task_info = task_info
@schema_property("entry")
def entry(self) -> Any:
return self._property_entry
@entry.setter
def entry(self, value: Any) -> None:
if value is None:
self._property_entry = None
return
if isinstance(value, dict):
value = Entry.from_dict(value)
else:
self.assert_isinstance(value, "entry", Entry)
self._property_entry = value
@schema_property("task_info")
def task_info(self) -> Optional[dict]:
return self._property_task_info
@task_info.setter
def task_info(self, value: Optional[dict]) -> None:
if value is None:
self._property_task_info = None
return
self.assert_isinstance(value, "task_info", (dict,))
self._property_task_info = value
|
GetNextTaskResponse
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py
|
{
"start": 7537,
"end": 8051
}
|
class ____:
cursor_field = "date"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
params["from"] = stream_slice.get(self.cursor_field).to_date_string()
params["to"] = stream_slice.get(self.cursor_field + "_end").to_date_string()
return params
|
AggregateDataMixin
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/_response.py
|
{
"start": 20588,
"end": 21727
}
|
class ____(AnthropicError):
"""
Attempted to read or stream content, but the content has already
been streamed.
This can happen if you use a method like `.iter_lines()` and then attempt
to read the entire response body afterwards, e.g.
```py
response = await client.post(...)
async for line in response.iter_lines():
... # do something with `line`
content = await response.read()
# ^ error
```
If you want this behaviour you'll need to either manually accumulate the response
content or call `await response.read()` before iterating over the stream.
"""
def __init__(self) -> None:
message = (
"Attempted to read or stream some content, but the content has "
"already been streamed. "
"This could be due to attempting to stream the response "
"content more than once."
"\n\n"
"You can fix this by manually accumulating the response content while streaming "
"or by calling `.read()` before starting to stream."
)
super().__init__(message)
|
StreamAlreadyConsumed
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_bigquery.py
|
{
"start": 11979,
"end": 12582
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute(self, mock_hook):
operator = BigQueryDeleteDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
delete_contents=TEST_DELETE_CONTENTS,
)
operator.execute(None)
mock_hook.return_value.delete_dataset.assert_called_once_with(
dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID, delete_contents=TEST_DELETE_CONTENTS
)
|
TestBigQueryDeleteDatasetOperator
|
python
|
pytorch__pytorch
|
torchgen/api/autograd.py
|
{
"start": 8860,
"end": 9554
}
|
class ____:
name: str
type: Type
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
cpp_type: str
# Represents a differentiable `Return`.
# How is it different from the `Return` type?
# - The name in `Return` is optional. Here it is always populated using the same
# `cpp.return_names()` method.
# TODO: some cpp naming logic (e.g. resolving name conflict) might be irrelevant?
# - It's processed Returns which are differentiable, in compliance with the
# `output_differentiability` field defined in derivatives.yaml (if specified),
# and are only used in the context of the autograd codegen;
@dataclass(frozen=True)
|
DifferentiableInput
|
python
|
pola-rs__polars
|
py-polars/src/polars/expr/list.py
|
{
"start": 622,
"end": 50636
}
|
class ____:
"""Namespace for list related expressions."""
_accessor = "list"
def __init__(self, expr: Expr) -> None:
self._pyexpr = expr._pyexpr
def __getitem__(self, item: int) -> Expr:
return self.get(item)
def all(self) -> Expr:
"""
Evaluate whether all boolean values in a list are true.
Notes
-----
If there are no non-null elements in a row, the output is `True`.
Examples
--------
>>> df = pl.DataFrame(
... {"a": [[True, True], [False, True], [False, False], [None], [], None]}
... )
>>> df.with_columns(all=pl.col("a").list.all())
shape: (6, 2)
┌────────────────┬───────┐
│ a              ┆ all   │
│ ---            ┆ ---   │
│ list[bool]     ┆ bool  │
╞════════════════╪═══════╡
│ [true, true]   ┆ true  │
│ [false, true]  ┆ false │
│ [false, false] ┆ false │
│ [null]         ┆ true  │
│ []             ┆ true  │
│ null           ┆ null  │
└────────────────┴───────┘
"""
return wrap_expr(self._pyexpr.list_all())
def any(self) -> Expr:
"""
Evaluate whether any boolean value in a list is true.
Notes
-----
If there are no non-null elements in a row, the output is `False`.
Examples
--------
>>> df = pl.DataFrame(
... {"a": [[True, True], [False, True], [False, False], [None], [], None]}
... )
>>> df.with_columns(any=pl.col("a").list.any())
shape: (6, 2)
┌────────────────┬───────┐
│ a              ┆ any   │
│ ---            ┆ ---   │
│ list[bool]     ┆ bool  │
╞════════════════╪═══════╡
│ [true, true]   ┆ true  │
│ [false, true]  ┆ true  │
│ [false, false] ┆ false │
│ [null]         ┆ false │
│ []             ┆ false │
│ null           ┆ null  │
└────────────────┴───────┘
"""
return wrap_expr(self._pyexpr.list_any())
def len(self) -> Expr:
"""
Return the number of elements in each list.
Null values count towards the total.
Returns
-------
Expr
Expression of data type :class:`UInt32`.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, 2, None], [5]]})
>>> df.with_columns(len=pl.col("a").list.len())
shape: (2, 2)
┌──────────────┬─────┐
│ a            ┆ len │
│ ---          ┆ --- │
│ list[i64]    ┆ u32 │
╞══════════════╪═════╡
│ [1, 2, null] ┆ 3   │
│ [5]          ┆ 1   │
└──────────────┴─────┘
"""
return wrap_expr(self._pyexpr.list_len())
def drop_nulls(self) -> Expr:
"""
Drop all null values in the list.
The original order of the remaining elements is preserved.
Examples
--------
>>> df = pl.DataFrame({"values": [[None, 1, None, 2], [None], [3, 4]]})
>>> df.with_columns(drop_nulls=pl.col("values").list.drop_nulls())
shape: (3, 2)
┌────────────────┬────────────┐
│ values         ┆ drop_nulls │
│ ---            ┆ ---        │
│ list[i64]      ┆ list[i64]  │
╞════════════════╪════════════╡
│ [null, 1, … 2] ┆ [1, 2]     │
│ [null]         ┆ []         │
│ [3, 4]         ┆ [3, 4]     │
└────────────────┴────────────┘
"""
return wrap_expr(self._pyexpr.list_drop_nulls())
def sample(
self,
n: int | IntoExprColumn | None = None,
*,
fraction: float | IntoExprColumn | None = None,
with_replacement: bool = False,
shuffle: bool = False,
seed: int | None = None,
) -> Expr:
"""
Sample from this list.
Parameters
----------
n
Number of items to return. Cannot be used with `fraction`. Defaults to 1 if
`fraction` is None.
fraction
Fraction of items to return. Cannot be used with `n`.
with_replacement
Allow values to be sampled more than once.
shuffle
Shuffle the order of sampled data points.
seed
Seed for the random number generator. If set to None (default), a
random seed is generated for each sample operation.
Examples
--------
>>> df = pl.DataFrame({"values": [[1, 2, 3], [4, 5]], "n": [2, 1]})
>>> df.with_columns(sample=pl.col("values").list.sample(n=pl.col("n"), seed=1))
shape: (2, 3)
┌───────────┬─────┬───────────┐
│ values    ┆ n   ┆ sample    │
│ ---       ┆ --- ┆ ---       │
│ list[i64] ┆ i64 ┆ list[i64] │
╞═══════════╪═════╪═══════════╡
│ [1, 2, 3] ┆ 2   ┆ [2, 3]    │
│ [4, 5]    ┆ 1   ┆ [5]       │
└───────────┴─────┴───────────┘
"""
if n is not None and fraction is not None:
msg = "cannot specify both `n` and `fraction`"
raise ValueError(msg)
if fraction is not None:
fraction_pyexpr = parse_into_expression(fraction)
return wrap_expr(
self._pyexpr.list_sample_fraction(
fraction_pyexpr, with_replacement, shuffle, seed
)
)
if n is None:
n = 1
n_pyexpr = parse_into_expression(n)
return wrap_expr(
self._pyexpr.list_sample_n(n_pyexpr, with_replacement, shuffle, seed)
)
def sum(self) -> Expr:
"""
Sum all the lists in the array.
Notes
-----
If there are no non-null elements in a row, the output is `0`.
Examples
--------
>>> df = pl.DataFrame({"values": [[1], [2, 3]]})
>>> df.with_columns(sum=pl.col("values").list.sum())
shape: (2, 2)
┌───────────┬─────┐
│ values    ┆ sum │
│ ---       ┆ --- │
│ list[i64] ┆ i64 │
╞═══════════╪═════╡
│ [1]       ┆ 1   │
│ [2, 3]    ┆ 5   │
└───────────┴─────┘
"""
return wrap_expr(self._pyexpr.list_sum())
def max(self) -> Expr:
"""
Compute the max value of the lists in the array.
Examples
--------
>>> df = pl.DataFrame({"values": [[1], [2, 3]]})
>>> df.with_columns(max=pl.col("values").list.max())
shape: (2, 2)
┌───────────┬─────┐
│ values    ┆ max │
│ ---       ┆ --- │
│ list[i64] ┆ i64 │
╞═══════════╪═════╡
│ [1]       ┆ 1   │
│ [2, 3]    ┆ 3   │
└───────────┴─────┘
"""
return wrap_expr(self._pyexpr.list_max())
def min(self) -> Expr:
"""
Compute the min value of the lists in the array.
Examples
--------
>>> df = pl.DataFrame({"values": [[1], [2, 3]]})
>>> df.with_columns(min=pl.col("values").list.min())
shape: (2, 2)
┌───────────┬─────┐
│ values    ┆ min │
│ ---       ┆ --- │
│ list[i64] ┆ i64 │
╞═══════════╪═════╡
│ [1]       ┆ 1   │
│ [2, 3]    ┆ 2   │
└───────────┴─────┘
"""
return wrap_expr(self._pyexpr.list_min())
def mean(self) -> Expr:
"""
Compute the mean value of the lists in the array.
Examples
--------
>>> df = pl.DataFrame({"values": [[1], [2, 3]]})
>>> df.with_columns(mean=pl.col("values").list.mean())
shape: (2, 2)
┌───────────┬──────┐
│ values    ┆ mean │
│ ---       ┆ ---  │
│ list[i64] ┆ f64  │
╞═══════════╪══════╡
│ [1]       ┆ 1.0  │
│ [2, 3]    ┆ 2.5  │
└───────────┴──────┘
"""
return wrap_expr(self._pyexpr.list_mean())
def median(self) -> Expr:
"""
Compute the median value of the lists in the array.
Examples
--------
>>> df = pl.DataFrame({"values": [[-1, 0, 1], [1, 10]]})
>>> df.with_columns(pl.col("values").list.median().alias("median"))
shape: (2, 2)
┌────────────┬────────┐
│ values     ┆ median │
│ ---        ┆ ---    │
│ list[i64]  ┆ f64    │
╞════════════╪════════╡
│ [-1, 0, 1] ┆ 0.0    │
│ [1, 10]    ┆ 5.5    │
└────────────┴────────┘
"""
return wrap_expr(self._pyexpr.list_median())
def std(self, ddof: int = 1) -> Expr:
"""
Compute the std value of the lists in the array.
Parameters
----------
ddof
“Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
where N represents the number of elements.
By default ddof is 1.
Examples
--------
>>> df = pl.DataFrame({"values": [[-1, 0, 1], [1, 10]]})
>>> df.with_columns(pl.col("values").list.std().alias("std"))
shape: (2, 2)
┌────────────┬──────────┐
│ values     ┆ std      │
│ ---        ┆ ---      │
│ list[i64]  ┆ f64      │
╞════════════╪══════════╡
│ [-1, 0, 1] ┆ 1.0      │
│ [1, 10]    ┆ 6.363961 │
└────────────┴──────────┘
"""
return wrap_expr(self._pyexpr.list_std(ddof))
def var(self, ddof: int = 1) -> Expr:
"""
Compute the var value of the lists in the array.
Parameters
----------
ddof
“Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
where N represents the number of elements.
By default ddof is 1.
Examples
--------
>>> df = pl.DataFrame({"values": [[-1, 0, 1], [1, 10]]})
>>> df.with_columns(pl.col("values").list.var().alias("var"))
shape: (2, 2)
┌────────────┬──────┐
│ values     ┆ var  │
│ ---        ┆ ---  │
│ list[i64]  ┆ f64  │
╞════════════╪══════╡
│ [-1, 0, 1] ┆ 1.0  │
│ [1, 10]    ┆ 40.5 │
└────────────┴──────┘
"""
return wrap_expr(self._pyexpr.list_var(ddof))
def sort(self, *, descending: bool = False, nulls_last: bool = False) -> Expr:
"""
Sort the lists in this column.
Parameters
----------
descending
Sort in descending order.
nulls_last
Place null values last.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[3, 2, 1], [9, 1, 2]],
... }
... )
>>> df.with_columns(sort=pl.col("a").list.sort())
shape: (2, 2)
βββββββββββββ¬ββββββββββββ
β a β sort β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺββββββββββββ‘
β [3, 2, 1] β [1, 2, 3] β
β [9, 1, 2] β [1, 2, 9] β
βββββββββββββ΄ββββββββββββ
>>> df.with_columns(sort=pl.col("a").list.sort(descending=True))
shape: (2, 2)
βββββββββββββ¬ββββββββββββ
β a β sort β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺββββββββββββ‘
β [3, 2, 1] β [3, 2, 1] β
β [9, 1, 2] β [9, 2, 1] β
βββββββββββββ΄ββββββββββββ
"""
return wrap_expr(self._pyexpr.list_sort(descending, nulls_last))
def reverse(self) -> Expr:
"""
Reverse the arrays in the list.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[3, 2, 1], [9, 1, 2]],
... }
... )
>>> df.with_columns(reverse=pl.col("a").list.reverse())
shape: (2, 2)
βββββββββββββ¬ββββββββββββ
β a β reverse β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺββββββββββββ‘
β [3, 2, 1] β [1, 2, 3] β
β [9, 1, 2] β [2, 1, 9] β
βββββββββββββ΄ββββββββββββ
"""
return wrap_expr(self._pyexpr.list_reverse())
def unique(self, *, maintain_order: bool = False) -> Expr:
"""
Get the unique/distinct values in the list.
Parameters
----------
maintain_order
Maintain order of data. This requires more work.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 1, 2]],
... }
... )
>>> df.with_columns(unique=pl.col("a").list.unique())
shape: (1, 2)
βββββββββββββ¬ββββββββββββ
β a β unique β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺββββββββββββ‘
β [1, 1, 2] β [1, 2] β
βββββββββββββ΄ββββββββββββ
"""
return wrap_expr(self._pyexpr.list_unique(maintain_order))
def n_unique(self) -> Expr:
"""
Count the number of unique values in every sublist.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 1, 2], [2, 3, 4]],
... }
... )
>>> df.with_columns(n_unique=pl.col("a").list.n_unique())
shape: (2, 2)
βββββββββββββ¬βββββββββββ
β a β n_unique β
β --- β --- β
β list[i64] β u32 β
βββββββββββββͺβββββββββββ‘
β [1, 1, 2] β 2 β
β [2, 3, 4] β 3 β
βββββββββββββ΄βββββββββββ
"""
return wrap_expr(self._pyexpr.list_n_unique())
def concat(self, other: list[Expr | str] | Expr | str | Series | list[Any]) -> Expr:
"""
Concat the arrays in a Series dtype List in linear time.
Parameters
----------
other
Columns to concat into a List Series
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [["a"], ["x"]],
... "b": [["b", "c"], ["y", "z"]],
... }
... )
>>> df.with_columns(concat=pl.col("a").list.concat("b"))
shape: (2, 3)
βββββββββββββ¬βββββββββββββ¬ββββββββββββββββββ
β a β b β concat β
β --- β --- β --- β
β list[str] β list[str] β list[str] β
βββββββββββββͺβββββββββββββͺββββββββββββββββββ‘
β ["a"] β ["b", "c"] β ["a", "b", "c"] β
β ["x"] β ["y", "z"] β ["x", "y", "z"] β
βββββββββββββ΄βββββββββββββ΄ββββββββββββββββββ
"""
if isinstance(other, list) and (
not isinstance(other[0], (pl.Expr, str, pl.Series))
):
return self.concat(pl.Series([other]))
other_list: list[Expr | str | Series]
other_list = [other] if not isinstance(other, list) else copy.copy(other) # type: ignore[arg-type]
other_list.insert(0, wrap_expr(self._pyexpr))
return F.concat_list(other_list)
def get(
self,
index: int | Expr | str,
*,
null_on_oob: bool = False,
) -> Expr:
"""
Get the value by index in the sublists.
So index `0` would return the first item of every sublist
and index `-1` would return the last item of every sublist.
If an index is out of bounds, the result is `null` or an error, depending on `null_on_oob`.
Parameters
----------
index
Index to return per sublist
null_on_oob
Behavior if an index is out of bounds:
* True -> set as null
* False -> raise an error
Examples
--------
>>> df = pl.DataFrame({"a": [[3, 2, 1], [], [1, 2]]})
>>> df.with_columns(get=pl.col("a").list.get(0, null_on_oob=True))
shape: (3, 2)
βββββββββββββ¬βββββββ
β a β get β
β --- β --- β
β list[i64] β i64 β
βββββββββββββͺβββββββ‘
β [3, 2, 1] β 3 β
β [] β null β
β [1, 2] β 1 β
βββββββββββββ΄βββββββ
"""
index_pyexpr = parse_into_expression(index)
return wrap_expr(self._pyexpr.list_get(index_pyexpr, null_on_oob))
def gather(
self,
indices: Expr | Series | list[int] | list[list[int]],
*,
null_on_oob: bool = False,
) -> Expr:
"""
Take sublists by multiple indices.
The indices may be defined in a single column, or by sublists in another
column of dtype `List`.
Parameters
----------
indices
Indices to return per sublist
null_on_oob
Behavior if an index is out of bounds:
True -> set as null
False -> raise an error
Note that defaulting to raising an error is much cheaper
Examples
--------
>>> df = pl.DataFrame({"a": [[3, 2, 1], [], [1, 2, 3, 4, 5]]})
>>> df.with_columns(gather=pl.col("a").list.gather([0, 4], null_on_oob=True))
shape: (3, 2)
βββββββββββββββ¬βββββββββββββββ
β a β gather β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺβββββββββββββββ‘
β [3, 2, 1] β [3, null] β
β [] β [null, null] β
β [1, 2, β¦ 5] β [1, 5] β
βββββββββββββββ΄βββββββββββββββ
"""
indices_pyexpr = parse_into_expression(indices)
return wrap_expr(self._pyexpr.list_gather(indices_pyexpr, null_on_oob))
def gather_every(
self,
n: int | IntoExprColumn,
offset: int | IntoExprColumn = 0,
) -> Expr:
"""
Take every n-th value, starting from offset, in sublists.
Parameters
----------
n
Gather every n-th element.
offset
Starting index.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2, 3, 4, 5], [6, 7, 8], [9, 10, 11, 12]],
... "n": [2, 1, 3],
... "offset": [0, 1, 0],
... }
... )
>>> df.with_columns(
... gather_every=pl.col("a").list.gather_every(
... n=pl.col("n"), offset=pl.col("offset")
... )
... )
shape: (3, 4)
βββββββββββββββββ¬ββββββ¬βββββββββ¬βββββββββββββββ
β a β n β offset β gather_every β
β --- β --- β --- β --- β
β list[i64] β i64 β i64 β list[i64] β
βββββββββββββββββͺββββββͺβββββββββͺβββββββββββββββ‘
β [1, 2, β¦ 5] β 2 β 0 β [1, 3, 5] β
β [6, 7, 8] β 1 β 1 β [7, 8] β
β [9, 10, β¦ 12] β 3 β 0 β [9, 12] β
βββββββββββββββββ΄ββββββ΄βββββββββ΄βββββββββββββββ
"""
n_pyexpr = parse_into_expression(n)
offset_pyexpr = parse_into_expression(offset)
return wrap_expr(self._pyexpr.list_gather_every(n_pyexpr, offset_pyexpr))
def first(self) -> Expr:
"""
Get the first value of the sublists.
Examples
--------
>>> df = pl.DataFrame({"a": [[3, 2, 1], [], [1, 2]]})
>>> df.with_columns(first=pl.col("a").list.first())
shape: (3, 2)
βββββββββββββ¬ββββββββ
β a β first β
β --- β --- β
β list[i64] β i64 β
βββββββββββββͺββββββββ‘
β [3, 2, 1] β 3 β
β [] β null β
β [1, 2] β 1 β
βββββββββββββ΄ββββββββ
"""
return self.get(0, null_on_oob=True)
def last(self) -> Expr:
"""
Get the last value of the sublists.
Examples
--------
>>> df = pl.DataFrame({"a": [[3, 2, 1], [], [1, 2]]})
>>> df.with_columns(last=pl.col("a").list.last())
shape: (3, 2)
βββββββββββββ¬βββββββ
β a β last β
β --- β --- β
β list[i64] β i64 β
βββββββββββββͺβββββββ‘
β [3, 2, 1] β 1 β
β [] β null β
β [1, 2] β 2 β
βββββββββββββ΄βββββββ
"""
return self.get(-1, null_on_oob=True)
@unstable()
def item(self, *, allow_empty: bool = False) -> Expr:
"""
Get the single value of the sublists.
This errors if the sublist length is not exactly one.
Parameters
----------
allow_empty
Allow having no values to return `null`.
See Also
--------
:meth:`Expr.list.get` : Get the value by index in the sublists.
Examples
--------
>>> df = pl.DataFrame({"a": [[3], [1], [2]]})
>>> df.with_columns(item=pl.col("a").list.item())
shape: (3, 2)
βββββββββββββ¬βββββββ
β a β item β
β --- β --- β
β list[i64] β i64 β
βββββββββββββͺβββββββ‘
β [3] β 3 β
β [1] β 1 β
β [2] β 2 β
βββββββββββββ΄βββββββ
>>> df = pl.DataFrame({"a": [[3, 2, 1], [1], [2]]})
>>> df.select(pl.col("a").list.item())
Traceback (most recent call last):
...
polars.exceptions.ComputeError: aggregation 'item' expected a single value, got 3 values
>>> df = pl.DataFrame({"a": [[], [1], [2]]})
>>> df.select(pl.col("a").list.item(allow_empty=True))
shape: (3, 1)
ββββββββ
β a β
β --- β
β i64 β
ββββββββ‘
β null β
β 1 β
β 2 β
ββββββββ
""" # noqa: W505
return self.agg(F.element().item(allow_empty=allow_empty))
def contains(self, item: IntoExpr, *, nulls_equal: bool = True) -> Expr:
"""
Check if sublists contain the given item.
Parameters
----------
item
Item that will be checked for membership
nulls_equal : bool, default True
If True, treat null as a distinct value. Null values will not propagate.
Returns
-------
Expr
Expression of data type :class:`Boolean`.
Examples
--------
>>> df = pl.DataFrame({"a": [[3, 2, 1], [], [1, 2]]})
>>> df.with_columns(contains=pl.col("a").list.contains(1))
shape: (3, 2)
βββββββββββββ¬βββββββββββ
β a β contains β
β --- β --- β
β list[i64] β bool β
βββββββββββββͺβββββββββββ‘
β [3, 2, 1] β true β
β [] β false β
β [1, 2] β true β
βββββββββββββ΄βββββββββββ
"""
item_pyexpr = parse_into_expression(item, str_as_lit=True)
return wrap_expr(self._pyexpr.list_contains(item_pyexpr, nulls_equal))
def join(self, separator: IntoExprColumn, *, ignore_nulls: bool = True) -> Expr:
"""
Join all string items in a sublist and place a separator between them.
This errors if inner type of list `!= String`.
Parameters
----------
separator
string to separate the items with
ignore_nulls
Ignore null values (default).
If set to ``False``, null values will be propagated.
If the sub-list contains any null values, the output is ``None``.
Returns
-------
Expr
Expression of data type :class:`String`.
Examples
--------
>>> df = pl.DataFrame({"s": [["a", "b", "c"], ["x", "y"]]})
>>> df.with_columns(join=pl.col("s").list.join(" "))
shape: (2, 2)
βββββββββββββββββββ¬ββββββββ
β s β join β
β --- β --- β
β list[str] β str β
βββββββββββββββββββͺββββββββ‘
β ["a", "b", "c"] β a b c β
β ["x", "y"] β x y β
βββββββββββββββββββ΄ββββββββ
>>> df = pl.DataFrame(
... {"s": [["a", "b", "c"], ["x", "y"]], "separator": ["*", "_"]}
... )
>>> df.with_columns(join=pl.col("s").list.join(pl.col("separator")))
shape: (2, 3)
βββββββββββββββββββ¬ββββββββββββ¬ββββββββ
β s β separator β join β
β --- β --- β --- β
β list[str] β str β str β
βββββββββββββββββββͺββββββββββββͺββββββββ‘
β ["a", "b", "c"] β * β a*b*c β
β ["x", "y"] β _ β x_y β
βββββββββββββββββββ΄ββββββββββββ΄ββββββββ
"""
separator_pyexpr = parse_into_expression(separator, str_as_lit=True)
return wrap_expr(self._pyexpr.list_join(separator_pyexpr, ignore_nulls))
def arg_min(self) -> Expr:
"""
Retrieve the index of the minimal value in every sublist.
Returns
-------
Expr
Expression of data type :class:`UInt32` or :class:`UInt64`
(depending on compilation).
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2], [2, 1]],
... }
... )
>>> df.with_columns(arg_min=pl.col("a").list.arg_min())
shape: (2, 2)
βββββββββββββ¬ββββββββββ
β a β arg_min β
β --- β --- β
β list[i64] β u32 β
βββββββββββββͺββββββββββ‘
β [1, 2] β 0 β
β [2, 1] β 1 β
βββββββββββββ΄ββββββββββ
"""
return wrap_expr(self._pyexpr.list_arg_min())
def arg_max(self) -> Expr:
"""
Retrieve the index of the maximum value in every sublist.
Returns
-------
Expr
Expression of data type :class:`UInt32` or :class:`UInt64`
(depending on compilation).
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2], [2, 1]],
... }
... )
>>> df.with_columns(arg_max=pl.col("a").list.arg_max())
shape: (2, 2)
βββββββββββββ¬ββββββββββ
β a β arg_max β
β --- β --- β
β list[i64] β u32 β
βββββββββββββͺββββββββββ‘
β [1, 2] β 1 β
β [2, 1] β 0 β
βββββββββββββ΄ββββββββββ
"""
return wrap_expr(self._pyexpr.list_arg_max())
def diff(self, n: int = 1, null_behavior: NullBehavior = "ignore") -> Expr:
"""
Calculate the first discrete difference between shifted items of every sublist.
Parameters
----------
n
Number of slots to shift.
null_behavior : {'ignore', 'drop'}
How to handle null values.
Examples
--------
>>> df = pl.DataFrame({"n": [[1, 2, 3, 4], [10, 2, 1]]})
>>> df.with_columns(diff=pl.col("n").list.diff())
shape: (2, 2)
βββββββββββββββ¬βββββββββββββββββ
β n β diff β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺβββββββββββββββββ‘
β [1, 2, β¦ 4] β [null, 1, β¦ 1] β
β [10, 2, 1] β [null, -8, -1] β
βββββββββββββββ΄βββββββββββββββββ
>>> df.with_columns(diff=pl.col("n").list.diff(n=2))
shape: (2, 2)
βββββββββββββββ¬ββββββββββββββββββββ
β n β diff β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺββββββββββββββββββββ‘
β [1, 2, β¦ 4] β [null, null, β¦ 2] β
β [10, 2, 1] β [null, null, -9] β
βββββββββββββββ΄ββββββββββββββββββββ
>>> df.with_columns(diff=pl.col("n").list.diff(n=2, null_behavior="drop"))
shape: (2, 2)
βββββββββββββββ¬ββββββββββββ
β n β diff β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺββββββββββββ‘
β [1, 2, β¦ 4] β [2, 2] β
β [10, 2, 1] β [-9] β
βββββββββββββββ΄ββββββββββββ
"""
return wrap_expr(self._pyexpr.list_diff(n, null_behavior))
def shift(self, n: int | IntoExprColumn = 1) -> Expr:
"""
Shift list values by the given number of indices.
Parameters
----------
n
Number of indices to shift forward. If a negative value is passed, values
are shifted in the opposite direction instead.
Notes
-----
This method is similar to the `LAG` operation in SQL when the value for `n`
is positive. With a negative value for `n`, it is similar to `LEAD`.
Examples
--------
By default, list values are shifted forward by one index.
>>> df = pl.DataFrame({"a": [[1, 2, 3], [4, 5]]})
>>> df.with_columns(shift=pl.col("a").list.shift())
shape: (2, 2)
βββββββββββββ¬βββββββββββββββ
β a β shift β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺβββββββββββββββ‘
β [1, 2, 3] β [null, 1, 2] β
β [4, 5] β [null, 4] β
βββββββββββββ΄βββββββββββββββ
Pass a negative value to shift in the opposite direction instead.
>>> df.with_columns(shift=pl.col("a").list.shift(-2))
shape: (2, 2)
βββββββββββββ¬ββββββββββββββββββ
β a β shift β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββͺββββββββββββββββββ‘
β [1, 2, 3] β [3, null, null] β
β [4, 5] β [null, null] β
βββββββββββββ΄ββββββββββββββββββ
"""
n_pyexpr = parse_into_expression(n)
return wrap_expr(self._pyexpr.list_shift(n_pyexpr))
def slice(
self, offset: int | str | Expr, length: int | str | Expr | None = None
) -> Expr:
"""
Slice every sublist.
Parameters
----------
offset
Start index. Negative indexing is supported.
length
Length of the slice. If set to `None` (default), the slice is taken to the
end of the list.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, 2, 3, 4], [10, 2, 1]]})
>>> df.with_columns(slice=pl.col("a").list.slice(1, 2))
shape: (2, 2)
βββββββββββββββ¬ββββββββββββ
β a β slice β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺββββββββββββ‘
β [1, 2, β¦ 4] β [2, 3] β
β [10, 2, 1] β [2, 1] β
βββββββββββββββ΄ββββββββββββ
"""
if isinstance(offset, Collection) and not isinstance(offset, str):
msg = f"'offset' must be an integer, string, or expression, not {type(offset).__name__}"
raise TypeError(msg)
if (
length is not None
and isinstance(length, Collection)
and not isinstance(length, str)
):
msg = f"'length' must be an integer, string, or expression, not {type(length).__name__}"
raise TypeError(msg)
offset_pyexpr = parse_into_expression(offset)
length_pyexpr = parse_into_expression(length)
return wrap_expr(self._pyexpr.list_slice(offset_pyexpr, length_pyexpr))
def head(self, n: int | str | Expr = 5) -> Expr:
"""
Slice the first `n` values of every sublist.
Parameters
----------
n
Number of values to return for each sublist.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, 2, 3, 4], [10, 2, 1]]})
>>> df.with_columns(head=pl.col("a").list.head(2))
shape: (2, 2)
βββββββββββββββ¬ββββββββββββ
β a β head β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺββββββββββββ‘
β [1, 2, β¦ 4] β [1, 2] β
β [10, 2, 1] β [10, 2] β
βββββββββββββββ΄ββββββββββββ
"""
return self.slice(0, n)
def tail(self, n: int | str | Expr = 5) -> Expr:
"""
Slice the last `n` values of every sublist.
Parameters
----------
n
Number of values to return for each sublist.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, 2, 3, 4], [10, 2, 1]]})
>>> df.with_columns(tail=pl.col("a").list.tail(2))
shape: (2, 2)
βββββββββββββββ¬ββββββββββββ
β a β tail β
β --- β --- β
β list[i64] β list[i64] β
βββββββββββββββͺββββββββββββ‘
β [1, 2, β¦ 4] β [3, 4] β
β [10, 2, 1] β [2, 1] β
βββββββββββββββ΄ββββββββββββ
"""
n_pyexpr = parse_into_expression(n)
return wrap_expr(self._pyexpr.list_tail(n_pyexpr))
def explode(self, *, empty_as_null: bool = True, keep_nulls: bool = True) -> Expr:
"""
Returns a column with a separate row for every list element.
Parameters
----------
empty_as_null
Explode an empty list into a `null`.
keep_nulls
Explode a `null` list into a `null`.
Returns
-------
Expr
Expression with the data type of the list elements.
See Also
--------
Expr.reshape: Reshape this Expr to a flat Series or a Series of Lists.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, 2, 3], [4, 5, 6]]})
>>> df.select(pl.col("a").list.explode())
shape: (6, 1)
┌─────┐
│ a   │
│ --- │
│ i64 │
╞═════╡
│ 1   │
│ 2   │
│ 3   │
│ 4   │
│ 5   │
│ 6   │
└─────┘
"""
return wrap_expr(
self._pyexpr.explode(empty_as_null=empty_as_null, keep_nulls=keep_nulls)
)
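# A minimal sketch of the keyword arguments documented above (assuming this polars
# version exposes `empty_as_null` and `keep_nulls` as shown in the signature):
# >>> df = pl.DataFrame({"a": [[1, 2], [], None]})
# >>> df.select(pl.col("a").list.explode(empty_as_null=True, keep_nulls=True))
# With both flags set, the empty list and the null list each explode into a single
# null row, per the parameter descriptions above.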
def count_matches(self, element: IntoExpr) -> Expr:
"""
Count how often the value produced by `element` occurs.
Parameters
----------
element
An expression that produces a single value.
Examples
--------
>>> df = pl.DataFrame({"a": [[0], [1], [1, 2, 3, 2], [1, 2, 1], [4, 4]]})
>>> df.with_columns(number_of_twos=pl.col("a").list.count_matches(2))
shape: (5, 2)
┌─────────────┬────────────────┐
│ a           ┆ number_of_twos │
│ ---         ┆ ---            │
│ list[i64]   ┆ u32            │
╞═════════════╪════════════════╡
│ [0]         ┆ 0              │
│ [1]         ┆ 0              │
│ [1, 2, … 2] ┆ 2              │
│ [1, 2, 1]   ┆ 1              │
│ [4, 4]      ┆ 0              │
└─────────────┴────────────────┘
"""
element_pyexpr = parse_into_expression(element, str_as_lit=True)
return wrap_expr(self._pyexpr.list_count_matches(element_pyexpr))
def to_array(self, width: int) -> Expr:
"""
Convert a List column into an Array column with the same inner data type.
Parameters
----------
width
Width of the resulting Array column.
Returns
-------
Expr
Expression of data type :class:`Array`.
Examples
--------
>>> df = pl.DataFrame(
... data={"a": [[1, 2], [3, 4]]},
... schema={"a": pl.List(pl.Int8)},
... )
>>> df.with_columns(array=pl.col("a").list.to_array(2))
shape: (2, 2)
┌──────────┬──────────────┐
│ a        ┆ array        │
│ ---      ┆ ---          │
│ list[i8] ┆ array[i8, 2] │
╞══════════╪══════════════╡
│ [1, 2]   ┆ [1, 2]       │
│ [3, 4]   ┆ [3, 4]       │
└──────────┴──────────────┘
"""
return wrap_expr(self._pyexpr.list_to_array(width))
def to_struct(
self,
n_field_strategy: ListToStructWidthStrategy | None = None,
fields: Sequence[str] | Callable[[int], str] | None = None,
upper_bound: int | None = None,
) -> Expr:
"""
Convert the Series of type `List` to a Series of type `Struct`.
Parameters
----------
n_field_strategy : {'first_non_null', 'max_width'}
Deprecated and ignored.
fields
If the names and number of the desired fields are known in advance,
a list of field names can be given; they will be assigned by index.
Otherwise, a custom function can be used to dynamically assign field
names. If neither is set, fields will be named `field_0, field_1 .. field_n`.
upper_bound
A polars expression needs to be able to evaluate the output datatype at all
times, so the caller must provide an upper bound of the number of struct
fields that will be created if `fields` is not a sequence of field names.
.. versionchanged:: 1.33.0
The `n_field_strategy` parameter is deprecated and ignored. `fields` needs to
be a sequence of field names, or else `upper_bound` is regarded as the
ground truth.
Examples
--------
Convert list to struct with default field name assignment:
>>> df = pl.DataFrame({"n": [[0, 1], [0, 1, 2]]})
>>> df.with_columns(
... struct=pl.col("n").list.to_struct(upper_bound=2)
... ) # doctest: +SKIP
shape: (2, 2)
┌───────────┬───────────┐
│ n         ┆ struct    │
│ ---       ┆ ---       │
│ list[i64] ┆ struct[2] │ # <- struct with 2 fields
╞═══════════╪═══════════╡
│ [0, 1]    ┆ {0,1}     │ # OK
│ [0, 1, 2] ┆ {0,1}     │ # NOT OK - last value missing
└───────────┴───────────┘
Convert list to struct with field name assignment by function/index:
>>> df = pl.DataFrame({"n": [[0, 1], [2, 3]]})
>>> df.select(
... pl.col("n").list.to_struct(fields=lambda idx: f"n{idx}", upper_bound=2)
... ).rows(named=True) # doctest: +SKIP
[{'n': {'n0': 0, 'n1': 1}}, {'n': {'n0': 2, 'n1': 3}}]
Convert list to struct with field name assignment by index from a list of names:
>>> df.select(pl.col("n").list.to_struct(fields=["one", "two"])).rows(
... named=True
... )
[{'n': {'one': 0, 'two': 1}}, {'n': {'one': 2, 'two': 3}}]
"""
if n_field_strategy is not None:
issue_warning(
"`Expr.list.to_struct` with `n_field_strategy` is deprecated and has no effect on execution.",
DeprecationWarning,
)
if not isinstance(fields, Sequence):
if upper_bound is None:
msg = "`Expr.list.to_struct` requires either `fields` to be a sequence or `upper_bound` to be set.\n\nThis used to be allowed but produced unpredictable results."
raise exceptions.InvalidOperationError(msg)
if fields is None:
fields = [f"field_{i}" for i in range(upper_bound)]
else:
fields = [fields(i) for i in range(upper_bound)]
return wrap_expr(self._pyexpr.list_to_struct(fields))
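# A minimal sketch of the validation above: with neither a sequence of `fields` nor
# an `upper_bound`, the call raises InvalidOperationError because the output dtype
# cannot be determined up front.
# >>> pl.DataFrame({"n": [[0, 1]]}).select(pl.col("n").list.to_struct())  # raises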
def eval(self, expr: Expr, *, parallel: bool = False) -> Expr:
"""
Run any polars expression against the lists' elements.
Parameters
----------
expr
Expression to run. Note that you can select an element with `pl.element()`.
parallel
Run all expressions in parallel. Don't activate this blindly; parallelism
is only worth it if there is enough work to do per thread. This likely
should not be used in the group-by context, because execution is already
parallelized per group.
Examples
--------
>>> df = pl.DataFrame({"a": [1, 8, 3], "b": [4, 5, 2]})
>>> df.with_columns(
... rank=pl.concat_list("a", "b").list.eval(pl.element().rank())
... )
shape: (3, 3)
┌─────┬─────┬────────────┐
│ a   ┆ b   ┆ rank       │
│ --- ┆ --- ┆ ---        │
│ i64 ┆ i64 ┆ list[f64]  │
╞═════╪═════╪════════════╡
│ 1   ┆ 4   ┆ [1.0, 2.0] │
│ 8   ┆ 5   ┆ [2.0, 1.0] │
│ 3   ┆ 2   ┆ [2.0, 1.0] │
└─────┴─────┴────────────┘
See Also
--------
polars.Expr.list.agg: Evaluate any expression and automatically explode.
polars.Expr.arr.eval: Same for the Array datatype.
"""
return wrap_expr(self._pyexpr.list_eval(expr._pyexpr, parallel))
def agg(self, expr: Expr) -> Expr:
"""
Run any polars aggregation expression against the lists' elements.
Parameters
----------
expr
Expression to run. Note that you can select an element with `pl.element()`.
Examples
--------
>>> df = pl.DataFrame({"a": [[1, None], [42, 13], [None, None]]})
>>> df.with_columns(null_count=pl.col.a.list.agg(pl.element().null_count()))
shape: (3, 2)
┌──────────────┬────────────┐
│ a            ┆ null_count │
│ ---          ┆ ---        │
│ list[i64]    ┆ u32        │
╞══════════════╪════════════╡
│ [1, null]    ┆ 1          │
│ [42, 13]     ┆ 0          │
│ [null, null] ┆ 2          │
└──────────────┴────────────┘
>>> df.with_columns(no_nulls=pl.col.a.list.agg(pl.element().drop_nulls()))
shape: (3, 2)
┌──────────────┬───────────┐
│ a            ┆ no_nulls  │
│ ---          ┆ ---       │
│ list[i64]    ┆ list[i64] │
╞══════════════╪═══════════╡
│ [1, null]    ┆ [1]       │
│ [42, 13]     ┆ [42, 13]  │
│ [null, null] ┆ []        │
└──────────────┴───────────┘
See Also
--------
polars.Expr.list.eval: Evaluates expressions without automatically exploding.
polars.Expr.arr.agg: Same for the Array datatype.
"""
return wrap_expr(self._pyexpr.list_agg(expr._pyexpr))
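# A minimal sketch contrasting `eval` and `agg`, following the See Also notes above:
# the same aggregation stays wrapped in a one-element list under `eval`, while `agg`
# explodes it to a plain scalar per row (sketch only, outputs not shown).
# >>> df = pl.DataFrame({"a": [[1, None], [42, 13]]})
# >>> df.with_columns(
# ...     as_list=pl.col("a").list.eval(pl.element().null_count()),
# ...     as_scalar=pl.col("a").list.agg(pl.element().null_count()),
# ... )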
def filter(self, predicate: Expr) -> Expr:
"""
Filter elements in each list by a boolean expression.
Parameters
----------
predicate
A boolean expression that is evaluated per list element.
You can refer to the current element with `pl.element()`.
Examples
--------
>>> import polars as pl
>>> df = pl.DataFrame({"a": [1, 8, 3], "b": [4, 5, 2]})
>>> df.with_columns(
... evens=pl.concat_list("a", "b").list.filter(pl.element() % 2 == 0)
... )
shape: (3, 3)
┌─────┬─────┬───────────┐
│ a   ┆ b   ┆ evens     │
│ --- ┆ --- ┆ ---       │
│ i64 ┆ i64 ┆ list[i64] │
╞═════╪═════╪═══════════╡
│ 1   ┆ 4   ┆ [4]       │
│ 8   ┆ 5   ┆ [8]       │
│ 3   ┆ 2   ┆ [2]       │
└─────┴─────┴───────────┘
"""
return wrap_expr(self._pyexpr.list_filter(predicate._pyexpr))
def set_union(self, other: IntoExpr | Collection[Any]) -> Expr:
"""
Compute the SET UNION between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2, 3], [], [None, 3], [5, 6, 7]],
... "b": [[2, 3, 4], [3], [3, 4, None], [6, 8]],
... }
... )
>>> df.with_columns(
... union=pl.col("a").list.set_union("b")
... ) # doctest: +IGNORE_RESULT
shape: (4, 3)
┌───────────┬──────────────┬──────────────┐
│ a         ┆ b            ┆ union        │
│ ---       ┆ ---          ┆ ---          │
│ list[i64] ┆ list[i64]    ┆ list[i64]    │
╞═══════════╪══════════════╪══════════════╡
│ [1, 2, 3] ┆ [2, 3, 4]    ┆ [1, 2, 3, 4] │
│ []        ┆ [3]          ┆ [3]          │
│ [null, 3] ┆ [3, 4, null] ┆ [null, 3, 4] │
│ [5, 6, 7] ┆ [6, 8]       ┆ [5, 6, 7, 8] │
└───────────┴──────────────┴──────────────┘
""" # noqa: W505
if isinstance(other, Collection) and not isinstance(other, str):
if not isinstance(other, (Sequence, pl.Series, pl.DataFrame)):
other = list(other) # eg: set, frozenset, etc
other_pyexpr = F.lit(other)._pyexpr
else:
other_pyexpr = parse_into_expression(other)
return wrap_expr(self._pyexpr.list_set_operation(other_pyexpr, "union"))
def set_difference(self, other: IntoExpr | Collection[Any]) -> Expr:
"""
Compute the SET DIFFERENCE between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2, 3], [], [None, 3], [5, 6, 7]],
... "b": [[2, 3, 4], [3], [3, 4, None], [6, 8]],
... }
... )
>>> df.with_columns(difference=pl.col("a").list.set_difference("b"))
shape: (4, 3)
┌───────────┬──────────────┬────────────┐
│ a         ┆ b            ┆ difference │
│ ---       ┆ ---          ┆ ---        │
│ list[i64] ┆ list[i64]    ┆ list[i64]  │
╞═══════════╪══════════════╪════════════╡
│ [1, 2, 3] ┆ [2, 3, 4]    ┆ [1]        │
│ []        ┆ [3]          ┆ []         │
│ [null, 3] ┆ [3, 4, null] ┆ []         │
│ [5, 6, 7] ┆ [6, 8]       ┆ [5, 7]     │
└───────────┴──────────────┴────────────┘
See Also
--------
polars.Expr.list.diff: Calculates the n-th discrete difference of every sublist.
""" # noqa: W505
if isinstance(other, Collection) and not isinstance(other, str):
if not isinstance(other, (Sequence, pl.Series, pl.DataFrame)):
other = list(other) # eg: set, frozenset, etc
other_pyexpr = F.lit(other)._pyexpr
else:
other_pyexpr = parse_into_expression(other)
return wrap_expr(self._pyexpr.list_set_operation(other_pyexpr, "difference"))
def set_intersection(self, other: IntoExpr | Collection[Any]) -> Expr:
"""
Compute the SET INTERSECTION between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2, 3], [], [None, 3], [5, 6, 7]],
... "b": [[2, 3, 4], [3], [3, 4, None], [6, 8]],
... }
... )
>>> df.with_columns(intersection=pl.col("a").list.set_intersection("b"))
shape: (4, 3)
┌───────────┬──────────────┬──────────────┐
│ a         ┆ b            ┆ intersection │
│ ---       ┆ ---          ┆ ---          │
│ list[i64] ┆ list[i64]    ┆ list[i64]    │
╞═══════════╪══════════════╪══════════════╡
│ [1, 2, 3] ┆ [2, 3, 4]    ┆ [2, 3]       │
│ []        ┆ [3]          ┆ []           │
│ [null, 3] ┆ [3, 4, null] ┆ [null, 3]    │
│ [5, 6, 7] ┆ [6, 8]       ┆ [6]          │
└───────────┴──────────────┴──────────────┘
""" # noqa: W505
if isinstance(other, Collection) and not isinstance(other, str):
if not isinstance(other, (Sequence, pl.Series, pl.DataFrame)):
other = list(other) # eg: set, frozenset, etc
other_pyexpr = F.lit(other)._pyexpr
else:
other_pyexpr = parse_into_expression(other)
return wrap_expr(self._pyexpr.list_set_operation(other_pyexpr, "intersection"))
def set_symmetric_difference(self, other: IntoExpr | Collection[Any]) -> Expr:
"""
Compute the SET SYMMETRIC DIFFERENCE between the elements in this list and the elements of `other`.
Parameters
----------
other
Right hand side of the set operation.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [[1, 2, 3], [], [None, 3], [5, 6, 7]],
... "b": [[2, 3, 4], [3], [3, 4, None], [6, 8]],
... }
... )
>>> df.with_columns(sdiff=pl.col("b").list.set_symmetric_difference("a"))
shape: (4, 3)
┌───────────┬──────────────┬───────────┐
│ a         ┆ b            ┆ sdiff     │
│ ---       ┆ ---          ┆ ---       │
│ list[i64] ┆ list[i64]    ┆ list[i64] │
╞═══════════╪══════════════╪═══════════╡
│ [1, 2, 3] ┆ [2, 3, 4]    ┆ [4, 1]    │
│ []        ┆ [3]          ┆ [3]       │
│ [null, 3] ┆ [3, 4, null] ┆ [4]       │
│ [5, 6, 7] ┆ [6, 8]       ┆ [8, 5, 7] │
└───────────┴──────────────┴───────────┘
""" # noqa: W505
if isinstance(other, Collection) and not isinstance(other, str):
if not isinstance(other, (Sequence, pl.Series, pl.DataFrame)):
other = list(other) # eg: set, frozenset, etc
other_pyexpr = F.lit(other)._pyexpr
else:
other_pyexpr = parse_into_expression(other)
return wrap_expr(
self._pyexpr.list_set_operation(other_pyexpr, "symmetric_difference")
)
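# A minimal sketch tying the set operations together (reusing `df` from the examples
# above): per the usual identity, the symmetric difference should match the union
# minus the intersection, row by row (hedged sketch, element order not guaranteed).
# >>> df.with_columns(
# ...     check=pl.col("a").list.set_union("b").list.set_difference(
# ...         pl.col("a").list.set_intersection("b")
# ...     )
# ... )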
|
ExprListNameSpace
|
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 2056,
"end": 4009
}
|
class ____(object):
def __init__(self):
self.functions = OrderedDict()
self.dependencies = OrderedDict()
self.values = OrderedDict()
def clear(self):
self.values = OrderedDict()
def __iter__(self):
return self.functions.__iter__()
def __getitem__(self, item):
return self.functions.__getitem__(item)
def __setitem__(self, key, value):
return self.functions.__setitem__(key, value)
def __delitem__(self, key):
return self.functions.__delitem__(key)
def __contains__(self, item):
return self.functions.__contains__(item)
def get_value(self, key):
return self.values[key].value
def set_value(self, key, item):
self.values[key] = item
def is_calculated(self, key):
"""Return if a helper function has already been executed.
Necessary as get_value() can return None if the helper function hasn't
been executed or if it returned None."""
return key in self.values
def get_dependency(self, name):
"""Return the dependency of metafeature "name"."""
return self.dependencies.get(name)
def define(self, name, dependency=None):
"""Decorator for adding metafeature functions to a "dictionary" of
metafeatures. This behaves like a function decorating a function,
not a class decorating a function"""
def wrapper(metafeature_class):
instance = metafeature_class()
self.__setitem__(name, instance)
self.dependencies[name] = dependency
return instance
return wrapper
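# A minimal sketch of how `define` is used below (the base class name and
# `_calculate` signature are assumptions for illustration, not taken from this file):
# @metafeatures.define("NumberOfFeatures", dependency="MissingValues")
# class NumberOfFeatures(MetaFeature):
#     def _calculate(self, X, y, categorical):
#         return X.shape[1]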
metafeatures = MetafeatureFunctions()
helper_functions = HelperFunctions()
################################################################################
# Simple features
################################################################################
@metafeatures.define("NumberOfInstances")
|
MetafeatureFunctions
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_exclude_lists_audit.py
|
{
"start": 371,
"end": 2887
}
|
class ____:
"""Test suite for _audit_exclude_missing_public function."""
def test_audit_exclude_missing_public_all_valid(self):
"""Test audit when all EXCLUDE_MISSING_PUBLIC entries are still valid."""
# Mock validator with no symbols having @public decorators
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = []
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MISSING_PUBLIC",
{"symbol.one", "symbol.two"},
):
with patch("automation.dagster_docs.commands.check.click.echo") as mock_echo:
result = _audit_exclude_missing_public(mock_validator)
assert result == 0
mock_echo.assert_called_with(
"β All entries in EXCLUDE_MISSING_PUBLIC are still valid (symbols still missing @public decorators)"
)
def test_audit_exclude_missing_public_some_have_public(self):
"""Test audit when some EXCLUDE_MISSING_PUBLIC entries now have @public decorators."""
# Mock validator with some symbols having @public decorators
mock_symbols = [
PublicSymbol(
module_path="test.module",
symbol_name="one",
symbol_type="function",
is_exported=False,
source_file="/path/to/test/module.py",
),
PublicSymbol(
module_path="test.module",
symbol_name="three",
symbol_type="class",
is_exported=False,
source_file="/path/to/test/module.py",
),
]
mock_validator = Mock()
mock_validator.find_public_symbols.return_value = mock_symbols
with patch(
"automation.dagster_docs.commands.check.EXCLUDE_MISSING_PUBLIC",
{"test.module.one", "test.module.two", "test.module.three"},
):
with patch("automation.dagster_docs.commands.check.click.echo") as mock_echo:
result = _audit_exclude_missing_public(mock_validator)
assert result == 1
# Should report the symbols that can be removed
call_args = [str(call.args[0]) for call in mock_echo.call_args_list if call.args]
output_text = "\n".join(call_args)
assert "test.module.one" in output_text
assert "test.module.three" in output_text
assert "test.module.two" not in output_text # This one doesn't have @public
|
TestAuditExcludeMissingPublic
|
python
|
kamyu104__LeetCode-Solutions
|
Python/lowest-common-ancestor-of-a-binary-search-tree.py
|
{
"start": 29,
"end": 458
}
|
class ____(object):
# @param {TreeNode} root
# @param {TreeNode} p
# @param {TreeNode} q
# @return {TreeNode}
def lowestCommonAncestor(self, root, p, q):
s, b = sorted([p.val, q.val])
while not s <= root.val <= b:
# Keep searching since root is outside of [s, b].
root = root.left if s <= root.val else root.right
# s <= root.val <= b.
return root
|
Solution
|
python
|
doocs__leetcode
|
solution/2900-2999/2936.Number of Equal Numbers Blocks/Solution.py
|
{
"start": 145,
"end": 539
}
|
class ____(object):
def countBlocks(self, nums: Optional["BigArray"]) -> int:
i, n = 0, nums.size()
ans = 0
while i < n:
ans += 1
x = nums.at(i)
if i + 1 < n and nums.at(i + 1) != x:
i += 1
else:
i += bisect_left(range(i, n), True, key=lambda j: nums.at(j) != x)
return ans
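# Note on the bisect call above: `bisect_left(range(i, n), True, key=...)` binary
# searches the indices j in [i, n) for the first position where nums.at(j) != x,
# i.e. the offset past the current block of equal values; adding it to i jumps to
# the start of the next block. This relies on equal values forming contiguous blocks.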
|
Solution
|
python
|
pydata__xarray
|
xarray/core/treenode.py
|
{
"start": 349,
"end": 461
}
|
class ____(Exception):
"""Raised when user attempts to create an invalid tree in some way."""
|
InvalidTreeError
|
python
|
pallets__werkzeug
|
src/werkzeug/sansio/multipart.py
|
{
"start": 318,
"end": 383
}
|
class ____(Event):
data: bytes
@dataclass(frozen=True)
|
Preamble
|
python
|
getsentry__sentry
|
src/sentry/users/api/serializers/authenticator.py
|
{
"start": 2819,
"end": 3736
}
|
class ____(AuthenticatorInterfaceSerializer):
def serialize(
self,
obj: AuthenticatorInterface,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> SmsInterfaceSerializerResponse:
data = cast(SmsInterfaceSerializerResponse, super().serialize(obj, attrs, user))
assert isinstance(
obj, SmsInterface
), "Interface must be SmsInterface to serialize phone number"
data["phone"] = obj.phone_number
return data
for interface in RecoveryCodeInterface, TotpInterface, U2fInterface:
register(interface)(AuthenticatorInterfaceSerializer)
def get_interface_serializer(interface: AuthenticatorInterface) -> AuthenticatorInterfaceSerializer:
if isinstance(interface, SmsInterface):
return SmsInterfaceSerializer()
return AuthenticatorInterfaceSerializer()
|
SmsInterfaceSerializer
|