| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_blocks/test_details.py
|
{
"start": 62,
"end": 5266
}
|
class ____(util.MdCase):
"""Test Blocks details cases."""
extension = ['pymdownx.blocks.details']
extension_configs = {
'pymdownx.blocks.details': {
'types': [
'custom',
{'name': 'custom2'},
{'name': 'custom3', 'class': 'different'},
{'name': 'custom4', 'class': 'different', 'title': 'Default'},
{'name': 'custom5', 'title': 'Default'}
]
}
}
def test_optional_title(self):
"""Test that tab is not processed if title is omitted."""
self.check_markdown(
R'''
/// details
Some *content*
///
''',
r'''
<details>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_type_no_title(self):
"""Test test type as title."""
self.check_markdown(
R'''
/// details
type: note
attrs: {class: other}
Some *content*
///
''',
r'''
<details class="note other">
<summary>Note</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_type_empty_title(self):
"""Test test empty title."""
self.check_markdown(
R'''
/// details |
type: note
attrs: {class: other}
Some *content*
///
''',
r'''
<details class="note other">
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_details(self):
"""Test details with title."""
self.check_markdown(
R'''
/// details | A Title
Some *content*
///
''',
r'''
<details>
<summary>A Title</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_details_open(self):
"""Test details forced open."""
self.check_markdown(
R'''
/// details | A Title
open: true
Some *content*
///
''',
r'''
<details open="open">
<summary>A Title</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom(self):
"""Test custom type (one not shipped by default)."""
self.check_markdown(
R'''
/// custom | A Title
Some *content*
///
''',
r'''
<details class="custom">
<summary>A Title</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom_title(self):
"""Test custom title."""
self.check_markdown(
R'''
/// custom
Some *content*
///
''',
r'''
<details class="custom">
<summary>Custom</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom_dict_title(self):
"""Test custom title with dictionary form."""
self.check_markdown(
R'''
/// custom2
Some *content*
///
''',
r'''
<details class="custom2">
<summary>Custom2</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom_explicit_title(self):
"""Test custom with an explicit, default title."""
self.check_markdown(
R'''
/// custom5
Some *content*
///
''',
r'''
<details class="custom5">
<summary>Default</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom_with_class(self):
"""Test custom title with configured custom class."""
self.check_markdown(
R'''
/// custom3
Some *content*
///
''',
r'''
<details class="different">
<summary>Different</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
def test_custom_with_class_and_title(self):
"""Test custom title with configured custom class and title."""
self.check_markdown(
R'''
/// custom4
Some *content*
///
''',
r'''
<details class="different">
<summary>Default</summary>
<p>Some <em>content</em></p>
</details>
''',
True
)
|
TestBlocksDetails
|
python
|
django-haystack__django-haystack
|
test_haystack/whoosh_tests/test_whoosh_backend.py
|
{
"start": 32743,
"end": 41795
}
|
class ____(WhooshTestCase):
def setUp(self):
super().setUp()
# Stow.
self.old_ui = connections["whoosh"].get_unified_index()
self.ui = UnifiedIndex()
self.wmmi = WhooshMockSearchIndex()
self.ui.build(indexes=[self.wmmi])
self.sb = connections["whoosh"].get_backend()
connections["whoosh"]._index = self.ui
self.sb.setup()
self.raw_whoosh = self.sb.index
self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
self.sb.delete_index()
self.sample_objs = []
for i in range(1, 4):
mock = MockModel()
mock.id = i
mock.author = "daniel%s" % i
mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
self.sample_objs.append(mock)
self.sq = connections["whoosh"].get_query()
self.sqs = SearchQuerySet("whoosh")
def tearDown(self):
connections["whoosh"]._index = self.old_ui
super().tearDown()
def test_various_searchquerysets(self):
self.sb.update(self.wmmi, self.sample_objs)
sqs = self.sqs.filter(content="Index")
self.assertEqual(sqs.query.build_query(), "(Index)")
self.assertEqual(len(sqs), 3)
sqs = self.sqs.auto_query("Indexed!")
self.assertEqual(sqs.query.build_query(), "('Indexed!')")
self.assertEqual(len(sqs), 3)
sqs = self.sqs.auto_query("Indexed!").filter(pub_date__lte=date(2009, 8, 31))
self.assertEqual(
sqs.query.build_query(), "(('Indexed!') AND pub_date:([to 20090831000000]))"
)
self.assertEqual(len(sqs), 3)
sqs = self.sqs.auto_query("Indexed!").filter(pub_date__lte=date(2009, 2, 23))
self.assertEqual(
sqs.query.build_query(), "(('Indexed!') AND pub_date:([to 20090223000000]))"
)
self.assertEqual(len(sqs), 2)
sqs = (
self.sqs.auto_query("Indexed!")
.filter(pub_date__lte=date(2009, 2, 25))
.filter(django_id__in=[1, 2])
.exclude(name="daniel1")
)
self.assertEqual(
sqs.query.build_query(),
"(('Indexed!') AND pub_date:([to 20090225000000]) AND django_id:(1 OR 2) AND NOT (name:(daniel1)))",
)
self.assertEqual(len(sqs), 1)
sqs = self.sqs.auto_query("re-inker")
self.assertEqual(sqs.query.build_query(), "('re-inker')")
self.assertEqual(len(sqs), 0)
sqs = self.sqs.auto_query("0.7 wire")
self.assertEqual(sqs.query.build_query(), "('0.7' wire)")
self.assertEqual(len(sqs), 0)
sqs = self.sqs.auto_query("daler-rowney pearlescent 'bell bronze'")
self.assertEqual(
sqs.query.build_query(), "('daler-rowney' pearlescent 'bell bronze')"
)
self.assertEqual(len(sqs), 0)
sqs = self.sqs.models(MockModel)
self.assertEqual(sqs.query.build_query(), "*")
self.assertEqual(len(sqs), 3)
def test_all_regression(self):
sqs = SearchQuerySet("whoosh")
self.assertEqual([result.pk for result in sqs], [])
self.sb.update(self.wmmi, self.sample_objs)
self.assertTrue(self.sb.index.doc_count() > 0)
sqs = SearchQuerySet("whoosh")
self.assertEqual(len(sqs), 3)
self.assertEqual(sorted([result.pk for result in sqs]), ["1", "2", "3"])
try:
sqs = repr(SearchQuerySet("whoosh"))
except:
self.fail()
def test_regression_space_query(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertTrue(self.sb.index.doc_count() > 0)
sqs = SearchQuerySet("whoosh").auto_query(" ")
self.assertEqual(len(sqs), 3)
sqs = SearchQuerySet("whoosh").filter(content=" ")
self.assertEqual(len(sqs), 0)
def test_iter(self):
self.sb.update(self.wmmi, self.sample_objs)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
sqs = self.sqs.auto_query("Indexed!")
results = [int(result.pk) for result in iter(sqs)]
self.assertEqual(sorted(results), [1, 2, 3])
self.assertEqual(len(connections["whoosh"].queries), 1)
def test_slice(self):
self.sb.update(self.wmmi, self.sample_objs)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
results = self.sqs.auto_query("Indexed!")
self.assertEqual(sorted([int(result.pk) for result in results[1:3]]), [1, 2])
self.assertEqual(len(connections["whoosh"].queries), 1)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
results = self.sqs.auto_query("Indexed!")
self.assertEqual(int(results[0].pk), 1)
self.assertEqual(len(connections["whoosh"].queries), 1)
def test_values_slicing(self):
self.sb.update(self.wmmi, self.sample_objs)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
# TODO: this would be a good candidate for refactoring into a TestCase subclass shared across backends
# The values will come back as strings because Haystack doesn't assume PKs are integers.
# We'll prepare this set once since we're going to query the same results in multiple ways:
expected_pks = ["3", "2", "1"]
results = self.sqs.all().order_by("pub_date").values("pk")
self.assertListEqual([i["pk"] for i in results[1:11]], expected_pks)
results = self.sqs.all().order_by("pub_date").values_list("pk")
self.assertListEqual([i[0] for i in results[1:11]], expected_pks)
results = self.sqs.all().order_by("pub_date").values_list("pk", flat=True)
self.assertListEqual(results[1:11], expected_pks)
self.assertEqual(len(connections["whoosh"].queries), 3)
def test_manual_iter(self):
self.sb.update(self.wmmi, self.sample_objs)
results = self.sqs.auto_query("Indexed!")
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
results = [int(result.pk) for result in results._manual_iter()]
self.assertEqual(sorted(results), [1, 2, 3])
self.assertEqual(len(connections["whoosh"].queries), 1)
def test_fill_cache(self):
self.sb.update(self.wmmi, self.sample_objs)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
results = self.sqs.auto_query("Indexed!")
self.assertEqual(len(results._result_cache), 0)
self.assertEqual(len(connections["whoosh"].queries), 0)
results._fill_cache(0, 10)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 3
)
self.assertEqual(len(connections["whoosh"].queries), 1)
results._fill_cache(10, 20)
self.assertEqual(
len([result for result in results._result_cache if result is not None]), 3
)
self.assertEqual(len(connections["whoosh"].queries), 2)
def test_cache_is_full(self):
self.sb.update(self.wmmi, self.sample_objs)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
self.assertEqual(self.sqs._cache_is_full(), False)
results = self.sqs.auto_query("Indexed!")
result_list = [i for i in iter(results)]
self.assertEqual(results._cache_is_full(), True)
self.assertEqual(len(connections["whoosh"].queries), 1)
def test_count(self):
more_samples = []
for i in range(1, 50):
mock = MockModel()
mock.id = i
mock.author = "daniel%s" % i
mock.pub_date = date(2009, 2, 25) - timedelta(days=i)
more_samples.append(mock)
self.sb.update(self.wmmi, more_samples)
reset_search_queries()
self.assertEqual(len(connections["whoosh"].queries), 0)
results = self.sqs.all()
self.assertEqual(len(results), 49)
self.assertEqual(results._cache_is_full(), False)
self.assertEqual(len(connections["whoosh"].queries), 1)
def test_query_generation(self):
sqs = self.sqs.filter(
SQ(content=AutoQuery("hello world")) | SQ(title=AutoQuery("hello world"))
)
self.assertEqual(
sqs.query.build_query(), "((hello world) OR title:(hello world))"
)
def test_result_class(self):
self.sb.update(self.wmmi, self.sample_objs)
# Assert that we're defaulting to ``SearchResult``.
sqs = self.sqs.all()
self.assertTrue(isinstance(sqs[0], SearchResult))
# Custom class.
sqs = self.sqs.result_class(MockSearchResult).all()
self.assertTrue(isinstance(sqs[0], MockSearchResult))
# Reset to default.
sqs = self.sqs.result_class(None).all()
self.assertTrue(isinstance(sqs[0], SearchResult))
|
LiveWhooshSearchQuerySetTestCase
|
python
|
plotly__plotly.py
|
plotly/graph_objs/contour/_stream.py
|
{
"start": 233,
"end": 3511
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour"
_path_str = "contour.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Stream
|
python
|
networkx__networkx
|
benchmarks/benchmarks/benchmark_neighbors.py
|
{
"start": 178,
"end": 1193
}
|
class ____:
param_names = ["num_nodes"]
params = [10, 100, 1000]
def setup(self, num_nodes):
self.star_graph = nx.star_graph(num_nodes)
self.complete_graph = nx.complete_graph(num_nodes)
self.path_graph = nx.path_graph(num_nodes)
def time_star_center(self, num_nodes):
set(nx.non_neighbors(self.star_graph, 0))
def time_star_rim(self, num_nodes):
set(nx.non_neighbors(self.star_graph, 5))
def time_complete(self, num_nodes):
set(nx.non_neighbors(self.complete_graph, 0))
def time_path_first(self, num_nodes):
set(nx.non_neighbors(self.path_graph, 0))
def time_path_last(self, num_nodes):
set(nx.non_neighbors(self.path_graph, num_nodes - 1))
def time_path_center(self, num_nodes):
set(nx.non_neighbors(self.path_graph, num_nodes // 2))
# NOTE: explicit set construction in benchmarks is required for meaningful
# comparisons due to change in return type from generator -> set. See gh-7244.
|
NonNeighbors
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/cloud_formation.py
|
{
"start": 1250,
"end": 3030
}
|
class ____(AwsBaseSensor[CloudFormationHook]):
"""
Waits for a stack to be created successfully on AWS CloudFormation.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:CloudFormationCreateStackSensor`
:param stack_name: The name of the stack to wait for (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = CloudFormationHook
template_fields: Sequence[str] = aws_template_fields("stack_name")
ui_color = "#C5CAE9"
def __init__(self, *, stack_name, **kwargs):
super().__init__(**kwargs)
self.stack_name = stack_name
def poke(self, context: Context):
stack_status = self.hook.get_stack_status(self.stack_name)
if stack_status == "CREATE_COMPLETE":
return True
if stack_status in ("CREATE_IN_PROGRESS", None):
return False
raise ValueError(f"Stack {self.stack_name} in bad state: {stack_status}")
|
CloudFormationCreateStackSensor
|
python
|
pytorch__pytorch
|
torch/fx/graph.py
|
{
"start": 7180,
"end": 7970
}
|
class ____:
"""
Represents all the information necessary to exec or save a graph as Python code.
"""
# Python source code for the forward function definition.
src: str
# Values in global scope during execution of `src_def`.
globals: dict[str, Any]
# Optional mapping from the forward function's line number to
# node index. Line number starts at the prologue (i.e. forward()).
_lineno_map: Optional[dict[int, Optional[int]]]
# The line number of prologue in fn_code
_prologue_start: int = 0
def _format_target(base: str, target: str) -> str:
elems = target.split(".")
r = base
for e in elems:
if not e.isidentifier():
r = f'getattr({r}, "{e}")'
else:
r = f"{r}.{e}"
return r
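# For example (hypothetical call): _format_target("self", "sub.0.weight") returns
# 'getattr(self.sub, "0").weight', since "0" is not a valid Python identifier.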
|
PythonCode
|
python
|
keras-team__keras
|
keras/src/initializers/random_initializers.py
|
{
"start": 17358,
"end": 18769
}
|
class ____(VarianceScaling):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0 with
`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in
the weight tensor.
Examples:
>>> # Standalone usage:
>>> initializer = HeNormal()
>>> values = initializer(shape=(2, 2))
>>> # Usage in a Keras layer:
>>> initializer = HeNormal()
>>> layer = Dense(3, kernel_initializer=initializer)
Args:
seed: A Python integer or instance of
`keras.backend.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or `None` (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.backend.SeedGenerator`.
Reference:
- [He et al., 2015](https://arxiv.org/abs/1502.01852)
"""
def __init__(self, seed=None):
super().__init__(
scale=2.0, mode="fan_in", distribution="truncated_normal", seed=seed
)
def get_config(self):
return {
"seed": serialization_lib.serialize_keras_object(self._init_seed)
}
@keras_export(["keras.initializers.HeUniform", "keras.initializers.he_uniform"])
|
HeNormal
|
python
|
modin-project__modin
|
modin/core/io/column_stores/column_store_dispatcher.py
|
{
"start": 1316,
"end": 8699
}
|
class ____(FileDispatcher):
"""
Class handles utils for reading columnar store format files.
Inherits some util functions for processing files from `FileDispatcher` class.
"""
@classmethod
def call_deploy(cls, fname, col_partitions, **kwargs):
"""
Deploy remote tasks to the workers with passed parameters.
Parameters
----------
fname : str, path object or file-like object
Name of the file to read.
col_partitions : list
List of arrays with columns names that should be read
by each partition.
**kwargs : dict
Parameters of deploying read_* function.
Returns
-------
np.ndarray
Array with references to the task deploy result for each partition.
"""
return np.array(
[
cls.deploy(
func=cls.parse,
f_kwargs={
"fname": fname,
"columns": cols,
"num_splits": NPartitions.get(),
**kwargs,
},
num_returns=NPartitions.get() + 2,
)
for cols in col_partitions
]
).T
@classmethod
def build_partition(cls, partition_ids, row_lengths, column_widths):
"""
Build array with partitions of `cls.frame_partition_cls` class.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
row_lengths : list
Partitions rows lengths.
column_widths : list
Number of columns in each partition.
Returns
-------
np.ndarray
Array with the same shape as `partition_ids`, filled with partition objects.
"""
return np.array(
[
[
cls.frame_partition_cls(
partition_ids[i][j],
length=row_lengths[i],
width=column_widths[j],
)
for j in range(len(partition_ids[i]))
]
for i in range(len(partition_ids))
]
)
@classmethod
def build_index(cls, partition_ids):
"""
Compute index and its split sizes of resulting Modin DataFrame.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
Returns
-------
index : pandas.Index
Index of resulting Modin DataFrame.
row_lengths : list
List with lengths of index chunks.
"""
index_len = (
0 if len(partition_ids) == 0 else cls.materialize(partition_ids[-2][0])
)
if isinstance(index_len, int):
index = pandas.RangeIndex(index_len)
else:
index = index_len
index_len = len(index)
num_partitions = NPartitions.get()
min_block_size = MinRowPartitionSize.get()
index_chunksize = compute_chunksize(index_len, num_partitions, min_block_size)
if index_chunksize > index_len:
row_lengths = [index_len] + [0 for _ in range(num_partitions - 1)]
else:
row_lengths = [
(
index_chunksize
if (i + 1) * index_chunksize < index_len
else max(0, index_len - (index_chunksize * i))
)
for i in range(num_partitions)
]
return index, row_lengths
@classmethod
def build_columns(cls, columns, num_row_parts=None):
"""
Split columns into chunks that should be read by workers.
Parameters
----------
columns : list
List of columns that should be read from file.
num_row_parts : int, optional
Number of parts the dataset is split into. This parameter is used
to align the column partitioning with it so we won't end up with an
over partitioned frame.
Returns
-------
col_partitions : list
List of lists with columns for reading by workers.
column_widths : list
List with lengths of `col_partitions` subarrays
(number of columns that should be read by workers).
"""
columns_length = len(columns)
if columns_length == 0:
return [], []
if num_row_parts is None:
# in column formats we mostly read columns in parallel rather than rows,
# so we try to chunk columns as much as possible
min_block_size = 1
else:
num_remaining_parts = round(NPartitions.get() / num_row_parts)
min_block_size = min(
columns_length // num_remaining_parts, MinColumnPartitionSize.get()
)
column_splits = compute_chunksize(
columns_length, NPartitions.get(), max(1, min_block_size)
)
col_partitions = [
columns[i : i + column_splits]
for i in range(0, columns_length, column_splits)
]
column_widths = [len(c) for c in col_partitions]
return col_partitions, column_widths
@classmethod
def build_dtypes(cls, partition_ids, columns):
"""
Compute, for each DataFrame column, a dtype that is common across all partitions.
Parameters
----------
partition_ids : list
Array with references to the partitions data.
columns : list
List of columns that should be read from file.
Returns
-------
dtypes : pandas.Series
Series with dtypes for columns.
"""
dtypes = pandas.concat(cls.materialize(list(partition_ids)), axis=0)
dtypes.index = columns
return dtypes
@classmethod
def build_query_compiler(cls, path, columns, **kwargs):
"""
Build query compiler from deployed tasks outputs.
Parameters
----------
path : str, path object or file-like object
Path to the file to read.
columns : list
List of columns that should be read from file.
**kwargs : dict
Parameters of deploying read_* function.
Returns
-------
new_query_compiler : BaseQueryCompiler
Query compiler with imported data for further processing.
"""
col_partitions, column_widths = cls.build_columns(columns)
partition_ids = cls.call_deploy(path, col_partitions, **kwargs)
index, row_lens = cls.build_index(partition_ids)
remote_parts = cls.build_partition(partition_ids[:-2], row_lens, column_widths)
dtypes = (
cls.build_dtypes(partition_ids[-1], columns)
if len(partition_ids) > 0
else None
)
new_query_compiler = cls.query_compiler_cls(
cls.frame_cls(
remote_parts,
index,
columns,
row_lens,
column_widths,
dtypes=dtypes,
)
)
return new_query_compiler
|
ColumnStoreDispatcher
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/prompts/chat.py
|
{
"start": 1299,
"end": 6446
}
|
class ____(BaseMessagePromptTemplate):
"""Prompt template that assumes variable is already list of messages.
A placeholder which can be used to pass in a list of messages.
Direct usage:
```python
from langchain_core.prompts import MessagesPlaceholder
prompt = MessagesPlaceholder("history")
prompt.format_messages() # raises KeyError
prompt = MessagesPlaceholder("history", optional=True)
prompt.format_messages() # returns empty list []
prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
# SystemMessage(content="You are an AI assistant."),
# HumanMessage(content="Hello!"),
# ]
```
Building a prompt with chat history:
```python
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
MessagesPlaceholder("history"),
("human", "{question}"),
]
)
prompt.invoke(
{
"history": [("human", "what's 5 + 2"), ("ai", "5 + 2 is 7")],
"question": "now multiply that by 4",
}
)
# -> ChatPromptValue(messages=[
# SystemMessage(content="You are a helpful assistant."),
# HumanMessage(content="what's 5 + 2"),
# AIMessage(content="5 + 2 is 7"),
# HumanMessage(content="now multiply that by 4"),
# ])
```
Limiting the number of messages:
```python
from langchain_core.prompts import MessagesPlaceholder
prompt = MessagesPlaceholder("history", n_messages=1)
prompt.format_messages(
history=[
("system", "You are an AI assistant."),
("human", "Hello!"),
]
)
# -> [
# HumanMessage(content="Hello!"),
# ]
```
"""
variable_name: str
"""Name of variable to use as messages."""
optional: bool = False
"""If `True` format_messages can be called with no arguments and will return an
empty list. If `False` then a named argument with name `variable_name` must be
passed in, even if the value is an empty list."""
n_messages: PositiveInt | None = None
"""Maximum number of messages to include. If `None`, then will include all.
"""
def __init__(
self, variable_name: str, *, optional: bool = False, **kwargs: Any
) -> None:
"""Create a messages placeholder.
Args:
variable_name: Name of variable to use as messages.
optional: If `True` format_messages can be called with no arguments and will
return an empty list. If `False` then a named argument with name
`variable_name` must be passed in, even if the value is an empty list.
"""
# mypy can't detect the init which is defined in the parent class
# b/c these are BaseModel classes.
super().__init__(variable_name=variable_name, optional=optional, **kwargs)
def format_messages(self, **kwargs: Any) -> list[BaseMessage]:
"""Format messages from kwargs.
Args:
**kwargs: Keyword arguments to use for formatting.
Returns:
List of BaseMessage.
Raises:
ValueError: If variable is not a list of messages.
"""
value = (
kwargs.get(self.variable_name, [])
if self.optional
else kwargs[self.variable_name]
)
if not isinstance(value, list):
msg = (
f"variable {self.variable_name} should be a list of base messages, "
f"got {value} of type {type(value)}"
)
raise ValueError(msg) # noqa: TRY004
value = convert_to_messages(value)
if self.n_messages:
value = value[-self.n_messages :]
return value
@property
def input_variables(self) -> list[str]:
"""Input variables for this prompt template.
Returns:
List of input variable names.
"""
return [self.variable_name] if not self.optional else []
@override
def pretty_repr(self, html: bool = False) -> str:
"""Human-readable representation.
Args:
html: Whether to format as HTML.
Returns:
Human-readable representation.
"""
var = "{" + self.variable_name + "}"
if html:
title = get_msg_title_repr("Messages Placeholder", bold=True)
var = get_colored_text(var, "yellow")
else:
title = get_msg_title_repr("Messages Placeholder")
return f"{title}\n\n{var}"
MessagePromptTemplateT = TypeVar(
"MessagePromptTemplateT", bound="BaseStringMessagePromptTemplate"
)
"""Type variable for message prompt templates."""
|
MessagesPlaceholder
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/users/main.py
|
{
"start": 788,
"end": 1413
}
|
class ____(webapp2.RequestHandler):
def get(self):
# [START gae_users_get_details]
user = users.get_current_user()
if user:
nickname = user.nickname()
logout_url = users.create_logout_url("/")
greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format(
nickname, logout_url
)
else:
login_url = users.create_login_url("/")
greeting = '<a href="{}">Sign in</a>'.format(login_url)
# [END gae_users_get_details]
self.response.write("<html><body>{}</body></html>".format(greeting))
|
MainPage
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 18073,
"end": 18130
}
|
class ____(BaseInheritTracking2):
pass
|
InheritTracking2
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-python-http-tutorial/source_python_http_tutorial/source.py
|
{
"start": 355,
"end": 4012
}
|
class ____(HttpStream):
url_base = "https://api.apilayer.com/exchangerates_data/"
cursor_field = "date"
primary_key = "date"
def __init__(self, config: Mapping[str, Any], start_date: datetime, **kwargs):
super().__init__()
self.base = config["base"]
self.access_key = config["access_key"]
self.start_date = start_date
self._cursor_value = None
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
# The API does not offer pagination, so we return None to indicate there are no more pages in the response
return None
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return stream_slice["date"]
def request_headers(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> Mapping[str, Any]:
# The api requires that we include apikey as a header so we do that in this method
return {"apikey": self.apikey}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
# The api requires that we include the base currency as a query param so we do that in this method
return {"base": self.base}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
# The response is a simple JSON whose schema matches our stream's schema exactly,
# so we just return a list containing the response
return [response.json()]
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, any]:
# This method is called once for each record returned from the API to compare the cursor field value in that record with the current state;
# we then return an updated state object. If this is the first time we run a sync or no state was passed, current_stream_state will be None.
if current_stream_state is not None and "date" in current_stream_state:
current_parsed_date = datetime.strptime(current_stream_state["date"], "%Y-%m-%d")
latest_record_date = datetime.strptime(latest_record["date"], "%Y-%m-%d")
return {"date": max(current_parsed_date, latest_record_date).strftime("%Y-%m-%d")}
else:
return {"date": self.start_date.strftime("%Y-%m-%d")}
def _chunk_date_range(self, start_date: datetime) -> List[Mapping[str, any]]:
"""
Returns a list of each day between the start date and now.
The return value is a list of dicts {'date': date_string}.
"""
dates = []
while start_date < datetime.now():
self.logger.info(start_date.strftime("%Y-%m-%d"))
dates.append({"date": start_date.strftime("%Y-%m-%d")})
start_date += timedelta(days=1)
return dates
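# For example (hypothetical dates): with start_date = 2022-01-01 and "now" falling on
# 2022-01-03, this returns [{"date": "2022-01-01"}, {"date": "2022-01-02"}, {"date": "2022-01-03"}].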
def stream_slices(
self, sync_mode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, any]]]:
start_date = datetime.strptime(stream_state["date"], "%Y-%m-%d") if stream_state and "date" in stream_state else self.start_date
return self._chunk_date_range(start_date)
|
ExchangeRates
|
python
|
ray-project__ray
|
release/ray_release/exception.py
|
{
"start": 3156,
"end": 3239
}
|
class ____(ReleaseTestError):
exit_code = ExitCode.COMMAND_TIMEOUT
|
CommandTimeout
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/network/darwin.py
|
{
"start": 1853,
"end": 1958
}
|
class ____(NetworkCollector):
_fact_class = DarwinNetwork
_platform = 'Darwin'
|
DarwinNetworkCollector
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_have_elevation.py
|
{
"start": 1894,
"end": 4713
}
|
class ____(ColumnMapExpectation):
"""Expect the column values to be points that have elevation."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"elevated": [
mapping(
Point(1, 1, 1),
),
mapping(
Point(2, 2, 2),
),
mapping(Point(3, 3, 3)),
],
"not_elevated": [
mapping(Point(1, 1)),
mapping(Point(2, 2)),
mapping(Point(3, 3)),
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "elevated",
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "not_elevated",
},
"out": {
"success": False,
},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"geospatial",
"hackathon-22",
], # Tags for this Expectation in the gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73",
],
"requirements": ["geopandas", "shapely"],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.elevated"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"mostly": 1.0,
}
if __name__ == "__main__":
ExpectColumnValuesToHaveElevation().print_diagnostic_checklist()
|
ExpectColumnValuesToHaveElevation
|
python
|
PyCQA__pylint
|
doc/data/messages/n/no-member/good.py
|
{
"start": 60,
"end": 128
}
|
class ____:
def meow(self):
print("Meow")
Cat().meow()
|
Cat
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-code-hierarchy/llama_index/packs/code_hierarchy/code_hierarchy.py
|
{
"start": 6736,
"end": 6937
}
|
class ____(BaseModel):
"""The output of a chunk_node call."""
this_document: Optional[TextNode]
upstream_children_documents: List[TextNode]
all_documents: List[TextNode]
|
_ChunkNodeOutput
|
python
|
openai__gym
|
tests/test_core.py
|
{
"start": 1692,
"end": 1996
}
|
class ____(core.Env):
"""This environment doesn't accept any arguments in reset, ideally we want to support this too (for now)"""
def __init__(self):
pass
def reset(self):
super().reset()
return 0
def step(self, action):
return 0, 0, False, {}
|
OldStyleEnv
|
python
|
davidhalter__jedi
|
jedi/inference/lazy_value.py
|
{
"start": 803,
"end": 1498
}
|
class ____(AbstractLazyValue):
def __init__(self, context, node, min=1, max=1):
super().__init__(node, min, max)
self.context = context
# We need to save the predefined names. It's an unfortunate side effect
# that needs to be tracked otherwise results will be wrong.
self._predefined_names = dict(context.predefined_names)
def infer(self):
with monkeypatch(self.context, 'predefined_names', self._predefined_names):
return self.context.infer_node(self.data)
def get_merged_lazy_value(lazy_values):
if len(lazy_values) > 1:
return MergedLazyValues(lazy_values)
else:
return lazy_values[0]
|
LazyTreeValue
|
python
|
huggingface__transformers
|
src/transformers/models/deepseek_vl/image_processing_deepseek_vl.py
|
{
"start": 1934,
"end": 2232
}
|
class ____(ImagesKwargs, total=False):
r"""
min_size (`int`, *optional*, defaults to 14):
The minimum allowed size for the resized image. Ensures that neither the height nor width
falls below this value after resizing.
"""
min_size: int
|
DeepseekVLImageProcessorKwargs
|
python
|
has2k1__plotnine
|
plotnine/geoms/geom_jitter.py
|
{
"start": 333,
"end": 2291
}
|
class ____(geom_point):
"""
Scatter plot with points jittered to reduce overplotting
{usage}
Parameters
----------
{common_parameters}
width : float, default=None
Proportion to jitter in horizontal direction.
The default value is that from
[](`~plotnine.positions.position_jitter`)
height : float, default=None
Proportion to jitter in vertical direction.
The default value is that from
[](`~plotnine.positions.position_jitter`).
random_state : int | ~numpy.random.RandomState, default=None
Seed or Random number generator to use. If `None`, then
numpy global generator [](`numpy.random`) is used.
See Also
--------
plotnine.position_jitter
plotnine.geom_point
"""
DEFAULT_PARAMS = {
"stat": "identity",
"position": "jitter",
"na_rm": False,
"width": None,
"height": None,
"random_state": None,
}
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
if {"width", "height", "random_state"} & set(kwargs):
if "position" in kwargs:
raise PlotnineError(
"Specify either 'position' or "
"'width'/'height'/'random_state'"
)
try:
width = kwargs.pop("width")
except KeyError:
width = None
try:
height = kwargs.pop("height")
except KeyError:
height = None
try:
random_state = kwargs.pop("random_state")
except KeyError:
random_state = None
kwargs["position"] = position_jitter(
width=width, height=height, random_state=random_state
)
geom_point.__init__(self, mapping, data, **kwargs)
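# For example (hypothetical usage): geom_jitter(width=0.1, height=0) is equivalent to
# geom_point(position=position_jitter(width=0.1, height=0, random_state=None)).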
|
geom_jitter
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/internal/decoder_test.py
|
{
"start": 771,
"end": 4787
}
|
class ____(parameterized.TestCase):
def test_decode_varint_bytes(self):
(size, pos) = decoder._DecodeVarint(_INPUT_BYTES, 0)
self.assertEqual(size, _EXPECTED[0])
self.assertEqual(pos, 2)
(size, pos) = decoder._DecodeVarint(_INPUT_BYTES, 2)
self.assertEqual(size, _EXPECTED[1])
self.assertEqual(pos, 3)
def test_decode_varint_bytes_empty(self):
with self.assertRaises(IndexError) as context:
decoder._DecodeVarint(b'', 0)
self.assertIn('index out of range', str(context.exception))
def test_decode_varint_bytesio(self):
index = 0
input_io = io.BytesIO(_INPUT_BYTES)
while True:
size = decoder._DecodeVarint(input_io)
if size is None:
break
self.assertEqual(size, _EXPECTED[index])
index += 1
self.assertEqual(index, len(_EXPECTED))
def test_decode_varint_bytesio_empty(self):
input_io = io.BytesIO(b'')
size = decoder._DecodeVarint(input_io)
self.assertIsNone(size)
def test_decode_unknown_group_field(self):
data = memoryview(b'\013\020\003\014\040\005')
parsed, pos = decoder._DecodeUnknownField(
data, 1, len(data), 1, wire_format.WIRETYPE_START_GROUP
)
self.assertEqual(pos, 4)
self.assertEqual(len(parsed), 1)
self.assertEqual(parsed[0].field_number, 2)
self.assertEqual(parsed[0].data, 3)
def test_decode_unknown_group_field_nested(self):
data = memoryview(b'\013\023\013\030\004\014\024\014\050\006')
parsed, pos = decoder._DecodeUnknownField(
data, 1, len(data), 1, wire_format.WIRETYPE_START_GROUP
)
self.assertEqual(pos, 8)
self.assertEqual(len(parsed), 1)
self.assertEqual(parsed[0].field_number, 2)
self.assertEqual(len(parsed[0].data), 1)
self.assertEqual(parsed[0].data[0].field_number, 1)
self.assertEqual(len(parsed[0].data[0].data), 1)
self.assertEqual(parsed[0].data[0].data[0].field_number, 3)
self.assertEqual(parsed[0].data[0].data[0].data, 4)
def test_decode_unknown_group_field_too_many_levels(self):
data = memoryview(b'\023' * 5_000_000)
self.assertRaisesRegex(
message.DecodeError,
'Error parsing message',
decoder._DecodeUnknownField,
data,
1,
len(data),
1,
wire_format.WIRETYPE_START_GROUP,
)
def test_decode_unknown_mismatched_end_group(self):
self.assertRaisesRegex(
message.DecodeError,
'Missing group end tag.*',
decoder._DecodeUnknownField,
memoryview(b'\013\024'),
1,
2,
1,
wire_format.WIRETYPE_START_GROUP,
)
def test_decode_unknown_mismatched_end_group_nested(self):
self.assertRaisesRegex(
message.DecodeError,
'Missing group end tag.*',
decoder._DecodeUnknownField,
memoryview(b'\013\023\034\024\014'),
1,
5,
1,
wire_format.WIRETYPE_START_GROUP,
)
def test_decode_message_set_unknown_mismatched_end_group(self):
proto = message_set_extensions_pb2.TestMessageSet()
self.assertRaisesRegex(
message.DecodeError,
'Unexpected end-group tag.'
if api_implementation.Type() == 'python'
else '.*',
proto.ParseFromString,
b'\013\054\014',
)
def test_unknown_message_set_decoder_mismatched_end_group(self):
# This behavior isn't actually reachable in practice, but it's good to
# test anyway.
decode = decoder.UnknownMessageSetItemDecoder()
self.assertRaisesRegex(
message.DecodeError,
'Unexpected end-group tag.',
decode,
memoryview(b'\054\014'),
)
@parameterized.parameters(int(0), float(0.0), False, '')
def test_default_scalar(self, value):
self.assertTrue(decoder.IsDefaultScalarValue(value))
@parameterized.parameters(int(1), float(-0.0), float(1.0), True, 'a')
def test_not_default_scalar(self, value):
self.assertFalse(decoder.IsDefaultScalarValue(value))
if __name__ == '__main__':
unittest.main()
|
DecoderTest
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 576368,
"end": 576805
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "identity_provider")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
identity_provider = sgqlc.types.Field(
"EnterpriseIdentityProvider", graphql_name="identityProvider"
)
|
RegenerateEnterpriseIdentityProviderRecoveryCodesPayload
|
python
|
walkccc__LeetCode
|
solutions/1648. Sell Diminishing-Valued Colored Balls/1648.py
|
{
"start": 0,
"end": 1032
}
|
class ____:
def maxProfit(self, inventory: list[int], orders: int) -> int:
MOD = 1_000_000_007
ans = 0
largestCount = 1
def trapezoid(a: int, b: int) -> int:
return (a + b) * (a - b + 1) // 2
for a, b in itertools.pairwise(sorted(inventory, reverse=True) + [0]):
if a > b:
# If we are at the last inventory, or inventory[i] > inventory[i + 1].
# In either case, we will pick inventory[i - largestCount + 1..i].
pick = a - b
# We have run out of orders, so we need to recalculate the number of
# balls that we actually pick for inventory[i - largestCount + 1..i].
if largestCount * pick >= orders:
actualPick, remaining = divmod(orders, largestCount)
return (ans +
largestCount * trapezoid(a, a - actualPick + 1) +
remaining * (a - actualPick)) % MOD
ans += largestCount * trapezoid(a, a - pick + 1)
ans %= MOD
orders -= largestCount * pick
largestCount += 1
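# Worked example (hypothetical input): inventory = [2, 5], orders = 4. Sorted descending
# with a 0 sentinel gives [5, 2, 0]; the first span sells trapezoid(5, 3) = 5 + 4 + 3 = 12
# with one order left, then divmod(1, 2) = (0, 1) sells one ball from a pile of 2, so the
# result is 14.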
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/multi/test_indexing.py
|
{
"start": 28824,
"end": 29389
}
|
class ____:
def test_where(self):
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
msg = r"\.where is not supported for MultiIndex operations"
with pytest.raises(NotImplementedError, match=msg):
i.where(True)
def test_where_array_like(self, listlike_box):
mi = MultiIndex.from_tuples([("A", 1), ("A", 2)])
cond = [False, True]
msg = r"\.where is not supported for MultiIndex operations"
with pytest.raises(NotImplementedError, match=msg):
mi.where(listlike_box(cond))
|
TestWhere
|
python
|
django__django
|
tests/forms_tests/widget_tests/test_multiwidget.py
|
{
"start": 998,
"end": 1608
}
|
class ____(MultiValueField):
def __init__(self, required=True, widget=None, label=None, initial=None):
fields = (
CharField(),
MultipleChoiceField(choices=WidgetTest.beatles),
SplitDateTimeField(),
)
super().__init__(
fields, required=required, widget=widget, label=label, initial=initial
)
def compress(self, data_list):
if data_list:
return "%s,%s,%s" % (
data_list[0],
"".join(data_list[1]),
data_list[2],
)
return None
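# For example (hypothetical data): compress(["some text", ["J", "P"], datetime(2007, 4, 25, 6, 24)])
# returns "some text,JP,2007-04-25 06:24:00".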
|
ComplexField
|
python
|
pytorch__pytorch
|
test/inductor/test_alignment.py
|
{
"start": 943,
"end": 8128
}
|
class ____:
def test_unaligned_input(self):
def fn(x):
return torch.nn.functional.relu(x)
x = torch.randn(1024 + 16, device=self.device)[1:-15]
# TODO (malfet): Investigate failures on MacOS-14
with (
contextlib.nullcontext()
if self.device != "mps" or MACOS_VERSION >= 15.0
else self.assertRaises(AssertionError)
):
self.common(fn, (x,), check_lowp=False)
def test_unaligned_input_2d(self):
def fn(x):
return torch.nn.functional.relu(x)
x = torch.randn(1024, 1024 + 16, device=self.device)[:, 1:-15]
self.common(fn, (x,), check_lowp=False)
def test_alignment_without_custom_op(self):
def fn(x):
a = torch.nn.functional.relu(x)
b = (3 * a)[1:-15]
c = torch.cos(b)
return c
x = torch.randn(1024 + 16, device=self.device)
self.common(fn, (x,), check_lowp=False)
@config.patch(implicit_fallbacks=True)
def test_no_align_for_custom_op(self):
def slice1d(x):
return (3 * x)[1:-15]
def slice1d_meta(x):
return torch.empty_like(x)[1:-15]
define_custom_op_for_test("slice1d", slice1d, slice1d_meta)
def fn(x):
a = torch.nn.functional.relu(x)
b = torch.ops.test.slice1d(a)
c = torch.cos(b)
return c
x = torch.randn(1024 + 16, device=self.device)
self.common(fn, (x,), check_lowp=False)
@config.patch(implicit_fallbacks=True)
def test_no_align_for_custom_op_2d(self):
def slice2d(x):
return (3 * x)[..., 1:-15]
def slice2d_meta(x):
return torch.empty_like(x)[..., 1:-15]
define_custom_op_for_test("slice2d", slice2d, slice2d_meta)
def fn(x):
a = torch.nn.functional.relu(x)
b = torch.ops.test.slice2d(a)
c = torch.cos(b)
return c
x = torch.randn(1024, 1024 + 16, device=self.device)
self.common(fn, (x,), check_lowp=False)
@config.patch(implicit_fallbacks=True, alignment_asserts=True)
@skip_if_cpp_wrapper(
"Inductor does not generate alignment assertion for cpp_wrapper right now"
)
def test_incorrect_meta_for_custom_op_2d(self):
def slice2d(x):
return (3 * x)[..., 1:-15]
def slice2d_meta(x):
return torch.empty_like(x)[..., 0:-16]
define_custom_op_for_test("slice2d_incorrect_meta", slice2d, slice2d_meta)
def fn(x):
a = torch.nn.functional.relu(x)
b = torch.ops.test.slice2d_incorrect_meta(a)
c = torch.cos(b)
return c
x = torch.randn(1024, 1024 + 16, device=self.device)
expected_error = "Expect the tensor to be 16 bytes aligned. Fail due to storage_offset=1 itemsize=4"
with self.assertRaisesRegex(AssertionError, expected_error):
self.common(fn, (x,), check_lowp=False)
def test_slice(self):
def f(x):
return x[1:] + 1
x = torch.randn(1025, device=self.device)
self.common(f, (x,))
def test_view_dtype_slice(self):
def f(x):
return x.view(dtype=torch.float32)[1:] + 1
x = torch.randn(1025 * 2, dtype=torch.bfloat16, device=self.device)
self.common(f, (x,), reference_in_float=False)
@parametrize(
"size",
(
# wrapper for size = 128: https://gist.github.com/shunting314/88f1e72957b9fc5e9826aaa346a0e652
# ptx: https://gist.github.com/shunting314/eb657ee8821eef9f0685b7b91e2ad5c2
# the ptx file uses ld.global.b32 to load input buffer
128,
# wrapper for size = 1024: https://gist.github.com/shunting314/d7f64e1f52f6b1e2ec25e1a51052ce43
# ptx: https://gist.github.com/shunting314/a24ff7563bb6b04523d11b119ab0f2b2
# the ptx file uses ld.global.v2.b32 to load input buffer
1024,
# wrapper for size = 1024 * 1024: https://gist.github.com/shunting314/016b95cf0b6e9a75c25f5c9d5ed0a2ba
# ptx: https://gist.github.com/shunting314/360112a4893c759b114c12fc99958297
# the ptx file uses ld.global.v4.b32 to load input buffer
1024 * 1024,
),
)
def test_slice_view_dtype(self, size):
offset = 1
def f(x):
return x[2:].view(dtype=torch.float32) + 1
x = torch.randn((size + offset) * 2, dtype=torch.bfloat16, device=self.device)
self.common(f, (x,), reference_in_float=False)
def test_Q4_K_dequantization(self):
"""
Test the alignment issue for Q4_K dequantization.
"""
QK_K = 256
K_SCALE_SIZE = 12
def get_scale_min(scales):
n_blocks = scales.shape[0]
scales = scales.view(torch.uint8)
scales = scales.reshape((n_blocks, 3, 4))
d, m, m_d = torch.split(scales, scales.shape[-2] // 3, dim=-2)
sc = torch.cat([d & 0x3F, (m_d & 0x0F) | ((d >> 2) & 0x30)], dim=-1)
min = torch.cat([m & 0x3F, (m_d >> 4) | ((m >> 2) & 0x30)], dim=-1)
return (sc.reshape((n_blocks, 8)), min.reshape((n_blocks, 8)))
def split_block_dims(blocks, *args):
n_max = blocks.shape[1]
dims = list(args) + [n_max - sum(args)]
return torch.split(blocks, dims, dim=1)
def dequantize_blocks_Q4_K(blocks, block_size, type_size):
n_blocks = blocks.shape[0]
d, dmin, scales, qs = split_block_dims(blocks, 2, 2, K_SCALE_SIZE)
d = d.view(torch.float16)
dmin = dmin.view(torch.float16)
sc, m = get_scale_min(scales)
d = (d * sc).reshape((n_blocks, -1, 1))
dm = (dmin * m).reshape((n_blocks, -1, 1))
qs = qs.reshape((n_blocks, -1, 1, 32)) >> torch.tensor(
[0, 4], device=d.device, dtype=torch.uint8
).reshape((1, 1, 2, 1))
qs = (qs & 0x0F).reshape((n_blocks, -1, 32))
return (d * qs - dm).reshape((n_blocks, QK_K))
data = torch.randint(
0, 16, (18432, 1728), device=self.device, dtype=torch.uint8
)
def dequantize(data):
block_size, type_size = 256, 144
rows = data.reshape((-1, data.shape[-1])).view(torch.uint8)
n_blocks = rows.numel() // type_size
blocks = rows.reshape((n_blocks, type_size))
blocks = dequantize_blocks_Q4_K(blocks, block_size, type_size)
return blocks.reshape(18432, 3072)
self.common(dequantize, (data,), check_lowp=False, atol=1e-3, rtol=1e-3)
if RUN_CPU:
class CpuTests(TestCase):
common = check_model
device = "cpu"
copy_tests(CommonTemplate, CpuTests, "cpu")
if RUN_GPU:
class GPUTests(TestCase):
common = check_model_gpu
device = GPU_TYPE
copy_tests(CommonTemplate, GPUTests, GPU_TYPE)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if RUN_CPU or RUN_GPU:
run_tests()
|
CommonTemplate
|
python
|
django-haystack__django-haystack
|
test_haystack/whoosh_tests/test_whoosh_backend.py
|
{
"start": 2734,
"end": 3345
}
|
class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True,
use_template=True,
template_name="search/indexes/core/mockmodel_template.txt",
)
author = indexes.CharField(model_attr="author", weight=2.0)
editor = indexes.CharField(model_attr="editor")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return AFourthMockModel
def prepare(self, obj):
data = super().prepare(obj)
if obj.pk % 2 == 0:
data["boost"] = 2.0
return data
|
WhooshBoostMockSearchIndex
|
python
|
celery__celery
|
t/unit/utils/test_platforms.py
|
{
"start": 14454,
"end": 16394
}
|
class ____:
@patch('multiprocessing.util._run_after_forkers')
@patch('os.fork')
@patch('os.setsid')
@patch('os._exit')
@patch('os.chdir')
@patch('os.umask')
@patch('os.close')
@patch('os.closerange')
@patch('os.open')
@patch('os.dup2')
@patch('celery.platforms.close_open_fds')
def test_open(self, _close_fds, dup2, open, close, closer, umask, chdir,
_exit, setsid, fork, run_after_forkers):
x = DaemonContext(workdir='/opt/workdir', umask=0o22)
x.stdfds = [0, 1, 2]
fork.return_value = 0
with x:
assert x._is_open
with x:
pass
assert fork.call_count == 2
setsid.assert_called_with()
_exit.assert_not_called()
chdir.assert_called_with(x.workdir)
umask.assert_called_with(0o22)
dup2.assert_called()
fork.reset_mock()
fork.return_value = 1
x = DaemonContext(workdir='/opt/workdir')
x.stdfds = [0, 1, 2]
with x:
pass
assert fork.call_count == 1
_exit.assert_called_with(0)
x = DaemonContext(workdir='/opt/workdir', fake=True)
x.stdfds = [0, 1, 2]
x._detach = Mock()
with x:
pass
x._detach.assert_not_called()
x.after_chdir = Mock()
with x:
pass
x.after_chdir.assert_called_with()
x = DaemonContext(workdir='/opt/workdir', umask='0755')
assert x.umask == 493
x = DaemonContext(workdir='/opt/workdir', umask='493')
assert x.umask == 493
x.redirect_to_null(None)
with patch('celery.platforms.mputil') as mputil:
x = DaemonContext(after_forkers=True)
x.open()
mputil._run_after_forkers.assert_called_with()
x = DaemonContext(after_forkers=False)
x.open()
@t.skip.if_win32
|
test_DaemonContext
|
python
|
ionelmc__pytest-benchmark
|
src/pytest_benchmark/stats.py
|
{
"start": 253,
"end": 4449
}
|
class ____:
fields = (
'min',
'max',
'mean',
'stddev',
'rounds',
'median',
'iqr',
'q1',
'q3',
'iqr_outliers',
'stddev_outliers',
'outliers',
'ld15iqr',
'hd15iqr',
'ops',
'total',
)
def __init__(self):
self.data = []
def __bool__(self):
return bool(self.data)
def __nonzero__(self):
return bool(self.data)
def as_dict(self):
return {field: getattr(self, field) for field in self.fields}
def update(self, duration):
self.data.append(duration)
@cached_property
def sorted_data(self):
return sorted(self.data)
@cached_property
def total(self):
return sum(self.data)
@cached_property
def min(self):
return min(self.data)
@cached_property
def max(self):
return max(self.data)
@cached_property
def mean(self):
return statistics.mean(self.data)
@cached_property
def stddev(self):
if len(self.data) > 1:
return statistics.stdev(self.data)
else:
return 0
@property
def stddev_outliers(self):
"""
Count of StdDev outliers: what's beyond (Mean - StdDev, Mean + StdDev)
"""
count = 0
q0 = self.mean - self.stddev
q4 = self.mean + self.stddev
for val in self.data:
if val < q0 or val > q4:
count += 1
return count
@cached_property
def rounds(self):
return len(self.data)
@cached_property
def median(self):
return statistics.median(self.data)
@cached_property
def ld15iqr(self):
"""
Tukey-style Lowest Datum within 1.5 IQR under Q1.
"""
if len(self.data) == 1:
return self.data[0]
else:
return self.sorted_data[bisect_left(self.sorted_data, self.q1 - 1.5 * self.iqr)]
@cached_property
def hd15iqr(self):
"""
Tukey-style Highest Datum within 1.5 IQR over Q3.
"""
if len(self.data) == 1:
return self.data[0]
else:
pos = bisect_right(self.sorted_data, self.q3 + 1.5 * self.iqr)
if pos == len(self.data):
return self.sorted_data[-1]
else:
return self.sorted_data[pos]
@cached_property
def q1(self):
rounds = self.rounds
data = self.sorted_data
# See: https://en.wikipedia.org/wiki/Quartile#Computing_methods
if rounds == 1:
return data[0]
elif rounds % 2: # Method 3
n, q = rounds // 4, rounds % 4
if q == 1:
return 0.25 * data[n - 1] + 0.75 * data[n]
else:
return 0.75 * data[n] + 0.25 * data[n + 1]
else: # Method 2
return statistics.median(data[: rounds // 2])
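# For example (hypothetical data): with sorted data [1, 2, 3, 4, 5] (rounds=5, remainder 1),
# Method 3 gives q1 = 0.25 * 1 + 0.75 * 2 = 1.75 and q3 = 0.75 * 4 + 0.25 * 5 = 4.25.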
@cached_property
def q3(self):
rounds = self.rounds
data = self.sorted_data
# See: https://en.wikipedia.org/wiki/Quartile#Computing_methods
if rounds == 1:
return data[0]
elif rounds % 2: # Method 3
n, q = rounds // 4, rounds % 4
if q == 1:
return 0.75 * data[3 * n] + 0.25 * data[3 * n + 1]
else:
return 0.25 * data[3 * n + 1] + 0.75 * data[3 * n + 2]
else: # Method 2
return statistics.median(data[rounds // 2 :])
@cached_property
def iqr(self):
return self.q3 - self.q1
@property
def iqr_outliers(self):
"""
Count of Tukey outliers: what's beyond (Q1 - 1.5IQR, Q3 + 1.5IQR)
"""
count = 0
q0 = self.q1 - 1.5 * self.iqr
q4 = self.q3 + 1.5 * self.iqr
for val in self.data:
if val < q0 or val > q4:
count += 1
return count
@cached_property
def outliers(self):
return f'{self.stddev_outliers};{self.iqr_outliers}'
@cached_property
def ops(self):
if self.total:
return self.rounds / self.total
return 0
|
Stats
|
python
|
spack__spack
|
lib/spack/spack/util/gpg.py
|
{
"start": 4656,
"end": 12070
}
|
class ____(spack.error.SpackError):
"""Class raised when GPG errors are detected."""
@_autoinit
def create(**kwargs):
"""Create a new key pair."""
r, w = os.pipe()
with contextlib.closing(os.fdopen(r, "r")) as r:
with contextlib.closing(os.fdopen(w, "w")) as w:
w.write(
"""
Key-Type: rsa
Key-Length: 4096
Key-Usage: sign
Name-Real: %(name)s
Name-Email: %(email)s
Name-Comment: %(comment)s
Expire-Date: %(expires)s
%%no-protection
%%commit
"""
% kwargs
)
GPG("--gen-key", "--batch", input=r)
@_autoinit
def signing_keys(*args) -> List[str]:
"""Return the keys that can be used to sign binaries."""
assert GPG
output: str = GPG("--list-secret-keys", "--with-colons", "--fingerprint", *args, output=str)
return _parse_secret_keys_output(output)
@_autoinit
def public_keys_to_fingerprint(*args):
"""Return the keys that can be used to verify binaries."""
output = GPG("--list-public-keys", "--with-colons", "--fingerprint", *args, output=str)
return _parse_public_keys_output(output)
@_autoinit
def public_keys(*args):
"""Return a list of fingerprints"""
keys_and_fpr = public_keys_to_fingerprint(*args)
return [key_and_fpr[1] for key_and_fpr in keys_and_fpr]
@_autoinit
def export_keys(location, keys, secret=False):
"""Export public keys to a location passed as argument.
Args:
location (str): where to export the keys
keys (list): keys to be exported
secret (bool): whether to export secret keys or not
"""
if secret:
GPG("--export-secret-keys", "--armor", "--output", location, *keys)
else:
GPG("--batch", "--yes", "--armor", "--export", "--output", location, *keys)
@_autoinit
def trust(keyfile):
"""Import a public key from a file and trust it.
Args:
keyfile (str): file with the public key
"""
# Get the public keys we are about to import
output = GPG("--with-colons", keyfile, output=str, error=str)
keys = _get_unimported_public_keys(output)
# Import them
GPG("--batch", "--import", keyfile)
# Set trust to ultimate
key_to_fpr = dict(public_keys_to_fingerprint())
for key in keys:
# Skip over keys we cannot find a fingerprint for.
if key not in key_to_fpr:
continue
fpr = key_to_fpr[key]
r, w = os.pipe()
with contextlib.closing(os.fdopen(r, "r")) as r:
with contextlib.closing(os.fdopen(w, "w")) as w:
w.write("{0}:6:\n".format(fpr))
GPG("--import-ownertrust", input=r)
@_autoinit
def untrust(signing, *keys):
"""Delete known keys.
Args:
signing (bool): if True deletes the secret keys
*keys: keys to be deleted
"""
if signing:
skeys = signing_keys(*keys)
GPG("--batch", "--yes", "--delete-secret-keys", *skeys)
pkeys = public_keys(*keys)
GPG("--batch", "--yes", "--delete-keys", *pkeys)
@_autoinit
def sign(key, file, output, clearsign=False):
"""Sign a file with a key.
Args:
key: key to be used to sign
file (str): file to be signed
output (str): output file (either the clearsigned file or
the detached signature)
clearsign (bool): if True wraps the document in an ASCII-armored
signature, if False creates a detached signature
"""
signopt = "--clearsign" if clearsign else "--detach-sign"
GPG(signopt, "--armor", "--local-user", key, "--output", output, file)
@_autoinit
def verify(signature, file=None, suppress_warnings=False):
"""Verify the signature on a file.
Args:
signature (str): signature of the file (or clearsigned file)
file (str): file to be verified. If None, then signature is
assumed to be a clearsigned file.
suppress_warnings (bool): whether or not to suppress warnings
from GnuPG
"""
args = [signature]
if file:
args.append(file)
kwargs = {"error": str} if suppress_warnings else {}
GPG("--verify", *args, **kwargs)
@_autoinit
def list(trusted, signing):
"""List known keys.
Args:
trusted (bool): if True list public keys
signing (bool): if True list private keys
"""
if trusted:
GPG("--list-public-keys")
if signing:
GPG("--list-secret-keys")
def _verify_exe_or_raise(exe):
msg = (
"Spack requires gpgconf version >= 2\n"
" To install a suitable version using Spack, run\n"
" spack install gnupg@2:\n"
" and load it by running\n"
" spack load gnupg@2:"
)
if not exe:
raise SpackGPGError(msg)
output = exe("--version", output=str)
match = re.search(r"^gpg(conf)? \(GnuPG(?:/MacGPG2)?\) (.*)$", output, re.M)
if not match:
raise SpackGPGError('Could not determine "{0}" version'.format(exe.name))
if spack.version.Version(match.group(2)) < spack.version.Version("2"):
raise SpackGPGError(msg)
def _gpgconf():
exe = spack.util.executable.which("gpgconf", "gpg2conf", "gpgconf2")
_verify_exe_or_raise(exe)
# ensure that the gpgconf we found can run "gpgconf --create-socketdir"
try:
exe("--dry-run", "--create-socketdir", output=os.devnull, error=os.devnull)
except spack.util.executable.ProcessError:
# no dice
exe = None
return exe
def _gpg():
exe = spack.util.executable.which("gpg2", "gpg")
_verify_exe_or_raise(exe)
return exe
def _socket_dir(gpgconf):
# Try to ensure that (/var)/run/user/$(id -u) exists so that
# `gpgconf --create-socketdir` can be run later.
#
# NOTE(opadron): This action helps prevent a large class of
# "file-name-too-long" errors in gpg.
# If there is no suitable gpgconf, don't even bother trying to
# pre-create a user run dir.
if not gpgconf:
return None
result = None
for var_run in ("/run", "/var/run"):
if not os.path.exists(var_run):
continue
var_run_user = os.path.join(var_run, "user")
try:
if not os.path.exists(var_run_user):
os.mkdir(var_run_user)
os.chmod(var_run_user, 0o777)
user_dir = os.path.join(var_run_user, str(spack.llnl.util.filesystem.getuid()))
if not os.path.exists(user_dir):
os.mkdir(user_dir)
os.chmod(user_dir, 0o700)
# If the above operation fails due to lack of permissions, then
# just carry on without running gpgconf and hope for the best.
#
# NOTE(opadron): Without a dir in which to create a socket for IPC,
# gnupg may fail if GNUPGHOME is set to a path that
# is too long, where "too long" in this context is
# actually quite short; somewhere in the
# neighborhood of more than 100 characters.
#
# TODO(opadron): Maybe a warning should be printed in this case?
except OSError as exc:
if exc.errno not in (errno.EPERM, errno.EACCES):
raise
user_dir = None
# return the last iteration that provides a usable user run dir
if user_dir is not None:
result = user_dir
return result
|
SpackGPGError
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/exceptions.py
|
{
"start": 321,
"end": 422
}
|
class ____(OAuthToolkitError):
"""
Class for critical errors
"""
pass
|
FatalClientError
|
python
|
sphinx-doc__sphinx
|
sphinx/directives/code.py
|
{
"start": 2991,
"end": 6347
}
|
class ____(SphinxDirective):
"""Directive for a code block with special highlighting or line numbering
settings.
"""
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {
'force': directives.flag,
'linenos': directives.flag,
'dedent': optional_int,
'lineno-start': int,
'emphasize-lines': directives.unchanged_required,
'caption': directives.unchanged_required,
'class': directives.class_option,
'name': directives.unchanged,
}
def run(self) -> list[Node]:
document = self.state.document
code = '\n'.join(self.content)
location = self.state_machine.get_source_and_line(self.lineno)
linespec = self.options.get('emphasize-lines')
if linespec:
try:
nlines = len(self.content)
hl_lines = parse_line_num_spec(linespec, nlines)
if any(i >= nlines for i in hl_lines):
logger.warning(
__('line number spec is out of range(1-%d): %r'),
nlines,
self.options['emphasize-lines'],
location=location,
)
hl_lines = [x + 1 for x in hl_lines if x < nlines]
except ValueError as err:
return [document.reporter.warning(err, line=self.lineno)]
else:
hl_lines = None
if 'dedent' in self.options:
location = self.state_machine.get_source_and_line(self.lineno)
lines = code.splitlines(True)
lines = dedent_lines(lines, self.options['dedent'], location=location)
code = ''.join(lines)
literal: Element = nodes.literal_block(code, code)
if 'linenos' in self.options or 'lineno-start' in self.options:
literal['linenos'] = True
literal['classes'] += self.options.get('class', [])
literal['force'] = 'force' in self.options
if self.arguments:
# highlight language specified
literal['language'] = self.arguments[0]
else:
# no highlight language specified. Then this directive refers the current
# highlight setting via ``highlight`` directive or ``highlight_language``
# configuration.
literal['language'] = (
self.env.current_document.highlight_language
or self.config.highlight_language
)
extra_args = literal['highlight_args'] = {}
if hl_lines is not None:
extra_args['hl_lines'] = hl_lines
if 'lineno-start' in self.options:
extra_args['linenostart'] = self.options['lineno-start']
self.set_source_info(literal)
caption = self.options.get('caption')
if caption:
try:
literal = container_wrapper(self, literal, caption)
except ValueError as exc:
return [document.reporter.warning(exc, line=self.lineno)]
# literal will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
self.add_name(literal)
return [literal]
|
CodeBlock
|
python
|
getsentry__sentry
|
tests/sentry/sentry_apps/api/bases/test_sentryapps.py
|
{
"start": 8661,
"end": 9550
}
|
class ____(TestCase):
def setUp(self) -> None:
self.endpoint = SentryAppInstallationBaseEndpoint()
self.request = drf_request_from_request(self.make_request(user=self.user, method="GET"))
self.sentry_app = self.create_sentry_app(name="foo", organization=self.organization)
self.installation = self.create_sentry_app_installation(
slug=self.sentry_app.slug, organization=self.organization, user=self.user
)
def test_retrieves_installation(self) -> None:
args, kwargs = self.endpoint.convert_args(self.request, self.installation.uuid)
assert kwargs["installation"].id == self.installation.id
def test_raises_when_sentry_app_not_found(self) -> None:
with pytest.raises(SentryAppError):
self.endpoint.convert_args(self.request, "1234")
@control_silo_test
|
SentryAppInstallationBaseEndpointTest
|
python
|
coleifer__peewee
|
tests/manytomany.py
|
{
"start": 827,
"end": 925
}
|
class ____(TestModel):
name = TextField()
CourseStudentDeferred = DeferredThroughModel()
|
Student
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/math_ops/reduction_ops_test_big.py
|
{
"start": 1154,
"end": 9690
}
|
class ____(BaseReductionTest):
"""Test reductions for sum and boolean all over a wide range of shapes."""
def _tf_reduce_max(self, x, reduction_axes, keepdims):
return math_ops.reduce_max(x, reduction_axes, keepdims)
def _tf_reduce_all(self, x, reduction_axes, keepdims):
return math_ops.reduce_all(x, reduction_axes, keepdims)
def _tf_reduce_mean(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _tf_reduce_sum(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
@test_util.run_deprecated_v1
def testFloat32Bfloat16Mean(self):
arrfp32 = np.random.normal(size=[4105, 4105]).astype(np.float32)
arrbf16 = arrfp32.astype(dtypes.bfloat16.as_numpy_dtype)
with self.session(graph=ops.Graph(), use_gpu=False) as sess:
arrfp32_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(4105, 4105)
)
arrbf16_placeholder = array_ops.placeholder(
dtype=dtypes.bfloat16.as_numpy_dtype, shape=(4105, 4105)
)
tf_full_mean_fp32 = self._tf_reduce_mean(
arrfp32_placeholder, [0, 1], False
)
tf_full_mean_bf16 = self._tf_reduce_mean(
arrbf16_placeholder, [0, 1], False
)
tf_full_mean_bf16_cast = math_ops.cast(tf_full_mean_bf16, dtypes.float32)
tf_out_full_f, tf_out_full_b = sess.run(
[tf_full_mean_fp32, tf_full_mean_bf16_cast],
{arrfp32_placeholder: arrfp32, arrbf16_placeholder: arrbf16},
)
self.assertAllClose(tf_out_full_f, tf_out_full_b)
@test_util.run_deprecated_v1
def testFloat32Sum(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.ones([4097, 4097], dtype=np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.float32) * size_x
row_sum = np.ones([size_x], dtype=np.float32) * size_y
full_sum = np.ones([], dtype=np.float32) * size_x * size_y
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_sum(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_sum(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_sum(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.float32)
sum_xz = np.ones([size_y], dtype=np.float32)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_mean(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_mean(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run([tf_sum_xz, tf_sum_y],
{arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat32Max(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.random.uniform(
low=-3, high=-1, size=[4105, 4105]).astype(np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_max = np.max(arr, axis=0)
row_max = np.max(arr, axis=1)
full_max = np.max(col_max)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_max = self._tf_reduce_max(arr_placeholder, 1, False)
tf_col_max = self._tf_reduce_max(arr_placeholder, 0, False)
tf_full_max = self._tf_reduce_max(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_max, tf_col_max, tf_full_max], {arr_placeholder: arr})
self.assertAllClose(col_max, tf_out_col)
self.assertAllClose(row_max, tf_out_row)
self.assertAllClose(full_max, tf_out_full)
arr_ = np.random.uniform(
low=-3, high=-1, size=[130, 130, 130]).astype(np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.max(arr, axis=1)
sum_xz = np.max(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_max(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_max(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testBooleanAll(self):
# make sure we test all possible kernel invocations
# test operation where T(0) is not the identity
arr_ = np.ones([4097, 4097], dtype=np.bool_)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.bool_)
row_sum = np.ones([size_x], dtype=np.bool_)
full_sum = np.ones([1], dtype=np.bool_).reshape([])
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.bool_, shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_all(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_all(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_all(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.bool_)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.bool_)
sum_xz = np.ones([size_y], dtype=np.bool_)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.bool_, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_all(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_all(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
if __name__ == "__main__":
test.main()
|
BigReductionTest
|
python
|
doocs__leetcode
|
solution/3100-3199/3190.Find Minimum Operations to Make All Elements Divisible by Three/Solution.py
|
{
"start": 0,
"end": 118
}
|
class ____:
def minimumOperations(self, nums: List[int]) -> int:
return sum(x % 3 != 0 for x in nums)
|
Solution
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_bigquery.py
|
{
"start": 17776,
"end": 21292
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute(self, mock_hook):
schema_field_updates = [
{
"name": "emp_name",
"description": "Name of employee",
}
]
operator = BigQueryUpdateTableSchemaOperator(
schema_fields_updates=schema_field_updates,
include_policy_tags=False,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
location=TEST_DATASET_LOCATION,
impersonation_chain=["service-account@myproject.iam.gserviceaccount.com"],
)
operator.execute(context=MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=["service-account@myproject.iam.gserviceaccount.com"],
location=TEST_DATASET_LOCATION,
)
mock_hook.return_value.update_table_schema.assert_called_once_with(
schema_fields_updates=schema_field_updates,
include_policy_tags=False,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_get_openlineage_facets_on_complete(self, mock_hook):
table_resource = {
"tableReference": {
"projectId": TEST_GCP_PROJECT_ID,
"datasetId": TEST_DATASET,
"tableId": TEST_TABLE_ID,
},
"description": "Table description.",
"schema": {
"fields": [
{"name": "field1", "type": "STRING", "description": "field1 description"},
{"name": "field2", "type": "INTEGER"},
]
},
}
mock_hook.return_value.update_table_schema.return_value = table_resource
schema_field_updates = [
{
"name": "emp_name",
"description": "Name of employee",
}
]
operator = BigQueryUpdateTableSchemaOperator(
schema_fields_updates=schema_field_updates,
include_policy_tags=False,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
location=TEST_DATASET_LOCATION,
impersonation_chain=["service-account@myproject.iam.gserviceaccount.com"],
)
operator.execute(context=MagicMock())
result = operator.get_openlineage_facets_on_complete(None)
assert not result.run_facets
assert not result.job_facets
assert not result.inputs
assert len(result.outputs) == 1
assert result.outputs[0].namespace == BIGQUERY_NAMESPACE
assert result.outputs[0].name == f"{TEST_GCP_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}"
assert result.outputs[0].facets == {
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="field1", type="STRING", description="field1 description"),
SchemaDatasetFacetFields(name="field2", type="INTEGER"),
]
),
"documentation": DocumentationDatasetFacet(description="Table description."),
}
|
TestBigQueryUpdateTableSchemaOperator
|
python
|
pytorch__pytorch
|
tools/test/test_codegen_model.py
|
{
"start": 5168,
"end": 6790
}
|
class ____(expecttest.TestCase):
def test_single_alias_no_write(self) -> None:
a = Annotation.parse("a")
self.assertEqual(a.alias_set, tuple("a"))
self.assertFalse(a.is_write)
self.assertEqual(a.alias_set_after, ())
def test_single_alias_is_write(self) -> None:
a = Annotation.parse("a!")
self.assertEqual(a.alias_set, tuple("a"))
self.assertTrue(a.is_write)
self.assertEqual(a.alias_set_after, ())
def test_single_alias_is_write_to_wildcard(self) -> None:
a = Annotation.parse("a! -> *")
self.assertEqual(a.alias_set, tuple("a"))
self.assertTrue(a.is_write)
self.assertEqual(a.alias_set_after, tuple("*"))
def test_alias_set(self) -> None:
a = Annotation.parse("a|b")
self.assertEqual(a.alias_set, ("a", "b"))
def test_alias_set_is_write_raises_exception(self) -> None:
with self.assertRaisesRegex(
AssertionError, r"alias set larger than 1 is not mutable"
):
Annotation.parse("a|b!")
def test_single_alias_is_write_to_alias_set(self) -> None:
a = Annotation.parse("a! -> a|b")
self.assertEqual(a.alias_set, tuple("a"))
self.assertTrue(a.is_write)
self.assertEqual(a.alias_set_after, ("a", "b"))
def test_before_and_after_alias_set_larger_than_1_raises_exception(self) -> None:
with self.assertRaisesRegex(
AssertionError,
r"before alias set and after alias set cannot be larger than 1 at the same time",
):
Annotation.parse("a|b -> c|d")
|
TestAnnotation
|
python
|
sympy__sympy
|
sympy/stats/random_matrix_models.py
|
{
"start": 9846,
"end": 15328
}
|
class ____(CircularEnsembleModel):
def joint_eigen_distribution(self):
return self._compute_joint_eigen_distribution(S(4))
def CircularEnsemble(sym, dim):
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularUnitaryEnsemble(sym, dim):
"""
Represents Circular Unitary Ensembles.
Examples
========
>>> from sympy.stats import CircularUnitaryEnsemble as CUE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CUE('U', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen above in the example, the density of CircularUnitaryEnsemble
is not evaluated because the exact definition is based on the Haar measure of
the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularUnitaryEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularOrthogonalEnsemble(sym, dim):
"""
Represents Circular Orthogonal Ensembles.
Examples
========
>>> from sympy.stats import CircularOrthogonalEnsemble as COE
>>> from sympy.stats import joint_eigen_distribution
>>> C = COE('O', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k])), (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen above in the example, the density of CircularOrthogonalEnsemble
is not evaluated because the exact definition is based on the Haar measure of
the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularOrthogonalEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def CircularSymplecticEnsemble(sym, dim):
"""
Represents Circular Symplectic Ensembles.
Examples
========
>>> from sympy.stats import CircularSymplecticEnsemble as CSE
>>> from sympy.stats import joint_eigen_distribution
>>> C = CSE('S', 1)
>>> joint_eigen_distribution(C)
Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))
Note
====
As can be seen above in the example, the density of CircularSymplecticEnsemble
is not evaluated because the exact definition is based on the Haar measure of
the unitary group, which is not unique.
"""
sym, dim = _symbol_converter(sym), _sympify(dim)
model = CircularSymplecticEnsembleModel(sym, dim)
rmp = RandomMatrixPSpace(sym, model=model)
return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def joint_eigen_distribution(mat):
"""
For obtaining joint probability distribution
of eigen values of random matrix.
Parameters
==========
mat: RandomMatrixSymbol
The matrix symbol whose eigen values are to be considered.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import joint_eigen_distribution
>>> U = GUE('U', 2)
>>> joint_eigen_distribution(U)
Lambda((l[1], l[2]), exp(-l[1]**2 - l[2]**2)*Product(Abs(l[_i] - l[_j])**2, (_j, _i + 1, 2), (_i, 1, 1))/pi)
"""
if not isinstance(mat, RandomMatrixSymbol):
raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
return mat.pspace.model.joint_eigen_distribution()
def JointEigenDistribution(mat):
"""
Creates joint distribution of eigen values of matrices with random
expressions.
Parameters
==========
mat: Matrix
The matrix under consideration.
Returns
=======
JointDistributionHandmade
Examples
========
>>> from sympy.stats import Normal, JointEigenDistribution
>>> from sympy import Matrix
>>> A = [[Normal('A00', 0, 1), Normal('A01', 0, 1)],
... [Normal('A10', 0, 1), Normal('A11', 0, 1)]]
>>> JointEigenDistribution(Matrix(A))
JointDistributionHandmade(-sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2
+ A00/2 + A11/2, sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2 + A00/2 + A11/2)
"""
eigenvals = mat.eigenvals(multiple=True)
if not all(is_random(eigenval) for eigenval in set(eigenvals)):
raise ValueError("Eigen values do not have any random expression, "
"joint distribution cannot be generated.")
return JointDistributionHandmade(*eigenvals)
def level_spacing_distribution(mat):
"""
For obtaining distribution of level spacings.
Parameters
==========
mat: RandomMatrixSymbol
The random matrix symbol whose eigen values are
to be considered for finding the level spacings.
Returns
=======
Lambda
Examples
========
>>> from sympy.stats import GaussianUnitaryEnsemble as GUE
>>> from sympy.stats import level_spacing_distribution
>>> U = GUE('U', 2)
>>> level_spacing_distribution(U)
Lambda(_s, 32*_s**2*exp(-4*_s**2/pi)/pi**2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Distribution_of_level_spacings
"""
return mat.pspace.model.level_spacing_distribution()
|
CircularSymplecticEnsembleModel
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/control_flow_ops.py
|
{
"start": 82844,
"end": 85399
}
|
class ____(ControlFlowContext):
"""Base class for XLA and TPU control flow contexts."""
def __init__(self):
super(XLAControlFlowContext, self).__init__()
self._name = "XLAControlFlowContext"
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(XLAControlFlowContext,
self).to_control_flow_context_def(context_def, export_scope)
def IsXLAContext(self):
return True
def AddOp(self, _):
pass
def AddValue(self, x):
return x
def RequiresUniqueFunctionRetracing(self):
"""Returns whether the tf.function should be retraced if the context changes.
"""
return False
@tf_export("__internal__.get_enclosing_xla_context", v1=[])
def get_enclosing_xla_context():
"""Recursively find and return the XLAControlFlowContext."""
graph = ops.get_default_graph()
while graph is not None:
# pylint: disable=protected-access
context_ = graph._get_control_flow_context()
# pylint: enable=protected-access
while context_ is not None:
if isinstance(context_, XLAControlFlowContext):
return context_
context_ = context_.outer_context
# This may be a FuncGraph due to defuns or v2 control flow. We need to
# find the original graph with the XLAControlFlowContext.
graph = getattr(graph, "outer_graph", None)
return None
def from_control_flow_context_def(context_def, import_scope=None):
"""Deserializes `context_def` into the appropriate ControlFlowContext.
Args:
context_def: ControlFlowContextDef proto
import_scope: Optional `string`. Name scope to add.
Returns:
A ControlFlowContext subclass
"""
if context_def.HasField("cond_ctxt"):
return CondContext.from_proto(
context_def.cond_ctxt, import_scope=import_scope)
if context_def.HasField("while_ctxt"):
return WhileContext.from_proto(
context_def.while_ctxt, import_scope=import_scope)
raise NotImplementedError("Unknown ControlFlowContextDef field: %s" %
context_def.WhichOneof("ctxt"))
ops.register_proto_function(
ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(
ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
|
XLAControlFlowContext
|
python
|
getsentry__sentry
|
src/social_auth/backends/visualstudio.py
|
{
"start": 1066,
"end": 3061
}
|
class ____(BaseOAuth2):
"""Slack OAuth authentication mechanism"""
AUTHORIZATION_URL = VISUALSTUDIO_AUTHORIZATION_URL
ACCESS_TOKEN_URL = VISUALSTUDIO_TOKEN_EXCHANGE_URL
AUTH_BACKEND = VisualStudioBackend
SETTINGS_KEY_NAME = "VISUALSTUDIO_APP_ID"
SETTINGS_SECRET_NAME = "VISUALSTUDIO_APP_SECRET"
SETTINGS_CLIENT_SECRET_NAME = "VISUALSTUDIO_CLIENT_SECRET"
REDIRECT_STATE = False
DEFAULT_SCOPE = settings.VISUALSTUDIO_SCOPES
RESPONSE_TYPE = "Assertion"
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
resp = requests.get(
VISUALSTUDIO_USER_DETAILS_URL,
headers={"Authorization": f"Bearer {access_token}"},
)
resp.raise_for_status()
content = resp.json()
return {
"id": content["id"],
"email": content["emailAddress"],
"full_name": content["displayName"],
}
def auth_complete_params(self, state=None):
secret = setting(self.SETTINGS_CLIENT_SECRET_NAME)
return {
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_assertion": secret,
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": self.data.get("code", ""),
"redirect_uri": self.get_redirect_uri(state),
}
@classmethod
def refresh_token_params(cls, token, provider):
secret = setting(cls.SETTINGS_CLIENT_SECRET_NAME)
return {
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_assertion": secret,
"grant_type": "refresh_token",
"redirect_uri": absolute_uri(
reverse("socialauth_associate_complete_auth_sso", args=[provider])
),
"assertion": token,
}
# Backend definition
BACKENDS = {"visualstudio": VisualStudioAuth}
|
VisualStudioAuth
|
python
|
plotly__plotly.py
|
plotly/graph_objs/volume/slices/_y.py
|
{
"start": 233,
"end": 5283
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.slices"
_path_str = "volume.slices.y"
_valid_props = {"fill", "locations", "locationssrc", "show"}
@property
def fill(self):
"""
Sets the fill ratio of the `slices`. The default fill value of
the `slices` is 1, meaning that they are entirely shaded. On the
other hand, applying a `fill` ratio less than one would allow
the creation of openings parallel to the edges.
The 'fill' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
@property
def locations(self):
"""
Specifies the location(s) of slices on the axis. When not
specified slices would be created for all points of the axis y
except start and end.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`locations`.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
@property
def show(self):
"""
Determines whether or not slice planes about the y dimension
are drawn.
The 'show' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["show"]
@show.setter
def show(self, val):
self["show"] = val
@property
def _prop_descriptions(self):
return """\
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1, meaning that they are
entirely shaded. On the other hand, applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis y except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the y
dimension are drawn.
"""
def __init__(
self,
arg=None,
fill=None,
locations=None,
locationssrc=None,
show=None,
**kwargs,
):
"""
Construct a new Y object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.slices.Y`
fill
Sets the fill ratio of the `slices`. The default fill
value of the `slices` is 1, meaning that they are
entirely shaded. On the other hand, applying a `fill`
ratio less than one would allow the creation of
openings parallel to the edges.
locations
Specifies the location(s) of slices on the axis. When
not specified slices would be created for all points of
the axis y except start and end.
locationssrc
Sets the source reference on Chart Studio Cloud for
`locations`.
show
Determines whether or not slice planes about the y
dimension are drawn.
Returns
-------
Y
"""
super().__init__("y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.slices.Y
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.slices.Y`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("fill", arg, fill)
self._set_property("locations", arg, locations)
self._set_property("locationssrc", arg, locationssrc)
self._set_property("show", arg, show)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Y
|
python
|
walkccc__LeetCode
|
solutions/2766. Relocate Marbles/2766.py
|
{
"start": 0,
"end": 281
}
|
class ____:
def relocateMarbles(
self,
nums: list[int],
moveFrom: list[int],
moveTo: list[int],
) -> list[int]:
numsSet = set(nums)
for f, t in zip(moveFrom, moveTo):
numsSet.remove(f)
numsSet.add(t)
return sorted(numsSet)
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType26.py
|
{
"start": 987,
"end": 1311
}
|
class ____(Generic[T, S]):
value: DC1[T] | DC2[S]
def method1(self, val: U) -> "ClassC[U, S]":
if isinstance(self.value, DC1):
# This should generate an error.
return ClassC(self.value)
else:
return ClassC(self.value)
T_co = TypeVar("T_co", covariant=True)
|
ClassC
|
python
|
xlwings__xlwings
|
xlwings/pro/reports/markdown.py
|
{
"start": 555,
"end": 1007
}
|
class ____:
def __init__(self, display_name=None):
if display_name:
self.display_name = display_name
else:
self.display_name = ""
def __repr__(self):
s = ""
for attribute in vars(self):
if getattr(self, attribute) and attribute != "display_name":
s += f"{self.display_name}.{attribute}: {getattr(self, attribute)}\n"
return s.replace("\n\n", "\n")
|
Style
|
python
|
openai__openai-python
|
src/openai/resources/realtime/realtime.py
|
{
"start": 26182,
"end": 28739
}
|
class ____(BaseRealtimeConnectionResource):
def clear(self, *, event_id: str | Omit = omit) -> None:
"""Send this event to clear the audio bytes in the buffer.
The server will
respond with an `input_audio_buffer.cleared` event.
"""
self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
)
def commit(self, *, event_id: str | Omit = omit) -> None:
"""
Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.
Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
"""
self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
)
def append(self, *, audio: str, event_id: str | Omit = omit) -> None:
"""Send this event to append audio bytes to the input audio buffer.
The audio
buffer is temporary storage you can write to and later commit. A "commit" will create a new
user message item in the conversation history from the buffer content and clear the buffer.
Input audio transcription (if enabled) will be generated when the buffer is committed.
If VAD is enabled the audio buffer is used to detect speech and the server will decide
when to commit. When Server VAD is disabled, you must commit the audio buffer
manually. Input audio noise reduction operates on writes to the audio buffer.
The client may choose how much audio to place in each event up to a maximum
of 15 MiB, for example streaming smaller chunks from the client may allow the
VAD to be more responsive. Unlike most other client events, the server will
not send a confirmation response to this event.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
)
)
|
RealtimeInputAudioBufferResource
|
python
|
huggingface__transformers
|
tests/utils/import_structures/import_structure_raw_register.py
|
{
"start": 686,
"end": 799
}
|
class ____:
def __init__(self):
pass
@requires()
def a0():
pass
@requires(backends=("torch",))
|
A0
|
python
|
eventlet__eventlet
|
tests/isolated/patcher_threading_subclass_done.py
|
{
"start": 32,
"end": 876
}
|
class ____(threading.Thread):
EXIT_SENTINEL = object()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.q = queue.Queue(maxsize=-1)
self.daemon = True
def run(self):
while True:
task = self.q.get()
if task == self.EXIT_SENTINEL:
break
print(f"Treating task {task}")
# Pretend to work
def submit(self, job):
self.q.put(job)
def terminate(self):
self.q.put(self.EXIT_SENTINEL)
self.join()
if __name__ == "__main__":
import eventlet
eventlet.patcher.monkey_patch()
worker = Worker()
assert not worker.is_alive()
worker.start()
assert worker.is_alive()
worker.submit(1)
worker.terminate()
assert not worker.is_alive()
print("pass")
|
Worker
|
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 4892,
"end": 5145
}
|
class ____(PythonLeaf):
"""
Simply here to optimize performance.
"""
__slots__ = ()
@property
def end_pos(self) -> Tuple[int, int]:
return self.line, self.column + len(self.value)
# Python base classes
|
_LeafWithoutNewlines
|
python
|
getsentry__sentry
|
tests/sentry/incidents/models/test_incidents.py
|
{
"start": 1898,
"end": 5262
}
|
class ____(TestCase):
def setUp(self) -> None:
self.alert_rule = self.create_alert_rule()
self.trigger = self.create_alert_rule_trigger(self.alert_rule)
def test_negative_cache(self) -> None:
subscription = self.alert_rule.snuba_query.subscriptions.get()
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
is None
)
Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
is False
)
self.create_incident(status=IncidentStatus.CLOSED.value)
self.alert_rule.save()
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
) is False
def test_cache(self) -> None:
subscription = self.alert_rule.snuba_query.subscriptions.get()
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
is None
)
active_incident = self.create_incident(
alert_rule=self.alert_rule, projects=[self.project], subscription=subscription
)
Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
== active_incident
)
active_incident = self.create_incident(
alert_rule=self.alert_rule, projects=[self.project], subscription=subscription
)
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
is None
)
Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
assert (
cache.get(
Incident.objects._build_active_incident_cache_key(
alert_rule_id=self.alert_rule.id,
project_id=self.project.id,
subscription_id=subscription.id,
)
)
== active_incident
)
|
ActiveIncidentClearCacheTest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_simple10.py
|
{
"start": 331,
"end": 1095
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("simple01.xlsx")
def test_close_file_twice(self):
"""Test warning when closing workbook more than once."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string(0, 0, "Hello")
worksheet.write_number(1, 0, 123)
workbook.close()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
workbook.close()
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
facebook__pyre-check
|
client/configuration/site_packages.py
|
{
"start": 1281,
"end": 1647
}
|
class ____:
name: str
path: Path # NOTE: parent of this path would be the site root
is_typed: bool = False
def to_search_path_element(self) -> search_path.SitePackageElement:
return search_path.SitePackageElement(
site_root=str(self.path.parent), package_name=self.name
)
@dataclasses.dataclass(frozen=True)
|
NonStubPackage
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_overrides.py
|
{
"start": 876,
"end": 1786
}
|
class ____(Benchmark):
def setup(self):
self.numpy_array = np.array(1)
self.numpy_arrays = [np.array(1), np.array(2)]
self.many_arrays = 500 * self.numpy_arrays
self.duck_array = DuckArray()
self.duck_arrays = [DuckArray(), DuckArray()]
self.mixed_arrays = [np.array(1), DuckArray()]
def time_mock_broadcast_to_numpy(self):
mock_broadcast_to(self.numpy_array, ())
def time_mock_broadcast_to_duck(self):
mock_broadcast_to(self.duck_array, ())
def time_mock_concatenate_numpy(self):
mock_concatenate(self.numpy_arrays, axis=0)
def time_mock_concatenate_many(self):
mock_concatenate(self.many_arrays, axis=0)
def time_mock_concatenate_duck(self):
mock_concatenate(self.duck_arrays, axis=0)
def time_mock_concatenate_mixed(self):
mock_concatenate(self.mixed_arrays, axis=0)
|
ArrayFunction
|
python
|
dask__distributed
|
distributed/tests/test_actor.py
|
{
"start": 501,
"end": 774
}
|
class ____:
n = 0
def __init__(self):
self.n = 0
def increment(self):
self.n += 1
return self.n
async def ainc(self):
self.n += 1
return self.n
def add(self, x):
self.n += x
return self.n
|
Counter
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_relationships.py
|
{
"start": 47109,
"end": 48823
}
|
class ____(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column("x", Integer, primary_key=True),
Column("y", Integer, primary_key=True),
Column("z", Integer),
)
Table(
"child",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
# note 'z' is not here
sa.ForeignKeyConstraint(["x", "y"], ["parent.x", "parent.y"]),
)
@classmethod
def setup_mappers(cls):
parent, child = cls.tables.parent, cls.tables.child
class Parent(cls.Comparable):
pass
class Child(cls.Comparable):
pass
cls.mapper_registry.map_imperatively(
Parent,
parent,
properties={
"children": relationship(
Child,
primaryjoin=and_(
parent.c.x == child.c.x,
parent.c.y == child.c.y,
parent.c.z == child.c.z,
),
)
},
)
cls.mapper_registry.map_imperatively(Child, child)
def test_joins_fully(self):
Parent = self.classes.Parent
self.assert_compile(
Parent.children.property.strategy._lazywhere,
":param_1 = child.x AND :param_2 = child.y AND :param_3 = child.z",
)
|
CompositeJoinPartialFK
|
python
|
dask__dask
|
dask/sizeof.py
|
{
"start": 1403,
"end": 8844
}
|
class ____:
"""Sentinel class to mark a class to be skipped by the dispatcher. This only
works if this sentinel mixin is first in the mro.
Examples
--------
>>> def _get_gc_overhead():
... class _CustomObject:
... def __sizeof__(self):
... return 0
...
... return sys.getsizeof(_CustomObject())
>>> class TheAnswer(SimpleSizeof):
... def __sizeof__(self):
... # Sizeof always add overhead of an object for GC
... return 42 - _get_gc_overhead()
>>> sizeof(TheAnswer())
42
"""
@sizeof.register(SimpleSizeof)
def sizeof_blocked(d):
return sys.getsizeof(d)
@sizeof.register(dict)
def sizeof_python_dict(d):
return (
sys.getsizeof(d)
+ sizeof(list(d.keys()))
+ sizeof(list(d.values()))
- 2 * sizeof(list())
)
@sizeof.register_lazy("cupy")
def register_cupy():
import cupy
@sizeof.register(cupy.ndarray)
def sizeof_cupy_ndarray(x):
return int(x.nbytes)
@sizeof.register_lazy("numba")
def register_numba():
import numba.cuda
@sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)
def sizeof_numba_devicendarray(x):
return int(x.nbytes)
@sizeof.register_lazy("rmm")
def register_rmm():
import rmm
# Only included in 0.11.0+
if hasattr(rmm, "DeviceBuffer"):
@sizeof.register(rmm.DeviceBuffer)
def sizeof_rmm_devicebuffer(x):
return int(x.nbytes)
@sizeof.register_lazy("numpy")
def register_numpy():
import numpy as np
@sizeof.register(np.ndarray)
def sizeof_numpy_ndarray(x):
if 0 in x.strides:
xs = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]
return xs.nbytes
return int(x.nbytes)
@sizeof.register_lazy("pandas")
def register_pandas():
import numpy as np
import pandas as pd
OBJECT_DTYPES = (object, pd.StringDtype("python"))
def object_size(*xs):
if not xs:
return 0
ncells = sum(len(x) for x in xs)
if not ncells:
return 0
# Deduplicate Series of references to the same objects,
# e.g. as produced by read_parquet
unique_samples = {}
for x in xs:
sample = np.random.choice(x, size=100, replace=True)
for i in sample.tolist():
unique_samples[id(i)] = i
nsamples = 100 * len(xs)
sample_nbytes = sum(sizeof(i) for i in unique_samples.values())
if len(unique_samples) / nsamples > 0.5:
# Less than half of the references are duplicated.
# Assume that, if we were to analyze twice the amount of random references,
# we would get twice the amount of unique objects too.
return int(sample_nbytes * ncells / nsamples)
else:
# Assume we've already found all unique objects and that all references that
# we have not yet analyzed are going to point to the same data.
return sample_nbytes
@sizeof.register(pd.DataFrame)
def sizeof_pandas_dataframe(df):
p = sizeof(df.index) + sizeof(df.columns)
object_cols = []
prev_dtype = None
# Unlike df.items(), df._series will not duplicate multiple views of the same
# column e.g. df[["x", "x", "x"]]
for col in df._series.values():
if prev_dtype is None or col.dtype != prev_dtype:
prev_dtype = col.dtype
# Contiguous columns of the same dtype share the same overhead
p += 1200
p += col.memory_usage(index=False, deep=False)
if col.dtype in OBJECT_DTYPES:
object_cols.append(col._values)
# Deduplicate references to the same objects appearing in different Series
p += object_size(*object_cols)
return max(1200, p)
@sizeof.register(pd.Series)
def sizeof_pandas_series(s):
# https://github.com/dask/dask/pull/9776#issuecomment-1359085962
p = 1200 + sizeof(s.index) + s.memory_usage(index=False, deep=False)
if s.dtype in OBJECT_DTYPES:
p += object_size(s._values)
return p
@sizeof.register(pd.Index)
def sizeof_pandas_index(i):
p = 400 + i.memory_usage(deep=False)
if i.dtype in OBJECT_DTYPES:
p += object_size(i)
return p
@sizeof.register(pd.MultiIndex)
def sizeof_pandas_multiindex(i):
return sum(sizeof(l) for l in i.levels) + sum(c.nbytes for c in i.codes)
@sizeof.register_lazy("scipy")
def register_spmatrix():
import scipy
from scipy import sparse
if Version(scipy.__version__) < Version("1.12.0.dev0"):
@sizeof.register(sparse.dok_matrix)
def sizeof_spmatrix_dok(s):
return s.__sizeof__()
@sizeof.register(sparse.spmatrix)
def sizeof_spmatrix(s):
return sum(sizeof(v) for v in s.__dict__.values())
@sizeof.register_lazy("pyarrow")
def register_pyarrow():
import pyarrow as pa
def _get_col_size(data):
p = 0
if not isinstance(data, pa.ChunkedArray):
data = data.data # pyarrow <0.15.0
for chunk in data.iterchunks():
for buffer in chunk.buffers():
if buffer:
p += buffer.size
return p
@sizeof.register(pa.Table)
def sizeof_pyarrow_table(table):
p = sizeof(table.schema.metadata)
for col in table.itercolumns():
p += _get_col_size(col)
return int(p) + 1000
@sizeof.register(pa.ChunkedArray)
def sizeof_pyarrow_chunked_array(data):
return int(_get_col_size(data)) + 1000
@sizeof.register_lazy("xarray")
def register_xarray():
import sys
import xarray as xr
XARRAY_VERSION = Version(xr.__version__)
XARRAY_GE_2024_02 = XARRAY_VERSION >= Version("2024.02.0")
@sizeof.register(xr.core.utils.Frozen)
def xarray_sizeof_frozen(obj):
return sys.getsizeof(obj) + sizeof(obj.mapping)
@sizeof.register(xr.DataArray)
@sizeof.register(xr.Variable)
def xarray_sizeof_da(obj):
return sys.getsizeof(obj) + sizeof(obj.data)
@sizeof.register(xr.Dataset)
def xarray_sizeof_ds(obj):
return sys.getsizeof(obj) + sizeof(obj.variables)
if XARRAY_GE_2024_02:
xarray_sizeof_da = sizeof.register(xr.NamedArray)(xarray_sizeof_da)
@sizeof.register(xr.core.indexes.Indexes)
def xarray_sizeof_indexes(obj):
return (
sys.getsizeof(obj)
+ sizeof(obj._index_type)
+ sizeof(obj._indexes)
+ sizeof(obj._variables)
+ sizeof(obj._dims)
)
@sizeof.register(xr.core.indexes.PandasIndex)
def xarray_sizeof_pd_index(obj):
return (
sys.getsizeof(obj)
+ sizeof(obj.index)
+ sizeof(obj.dim)
+ sizeof(obj.coord_dtype)
)
def _register_entry_point_plugins():
"""Register sizeof implementations exposed by the entry_point mechanism."""
for entry_point in importlib_metadata.entry_points(group="dask.sizeof"):
registrar = entry_point.load()
try:
registrar(sizeof)
except Exception:
logger.exception(
f"Failed to register sizeof entry point {entry_point.name}"
)
_register_entry_point_plugins()
|
SimpleSizeof
|
python
|
pytorch__pytorch
|
test/higher_order_ops/test_invoke_subgraph.py
|
{
"start": 79832,
"end": 83513
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[5]", L_y_: "f32[5]"):
l_x_ = L_x_
l_y_ = L_y_
x: "f32[5]" = l_x_.sin(); l_x_ = None
y: "f32[5]" = l_y_.sin(); l_y_ = None
subgraph_0 = self.subgraph_0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', x, y); subgraph_0 = x = None
z: "f32[5]" = invoke_subgraph[0]; invoke_subgraph = None
subgraph_1 = self.subgraph_1
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_1', z, y); subgraph_1 = z = y = None
getitem_1: "f32[5]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem_1,)
class subgraph_0(torch.nn.Module):
def forward(self, x: "f32[5]", y: "f32[5]"):
o: "f32[5]" = torch.zeros_like(x)
triton_kernel_wrapper_mutation = torch.ops.higher_order.triton_kernel_wrapper_mutation(kernel_idx = 0, constant_args_idx = 0, grid = [(5, 1, 1)], tma_descriptor_metadata = {}, kwargs = {'in_ptr0': x, 'in_ptr1': y, 'out_ptr': o}); x = y = triton_kernel_wrapper_mutation = None
sin: "f32[5]" = o.sin(); o = None
return (sin,)
class subgraph_1(torch.nn.Module):
def forward(self, z: "f32[5]", y: "f32[5]"):
o: "f32[5]" = torch.zeros_like(z)
triton_kernel_wrapper_mutation = torch.ops.higher_order.triton_kernel_wrapper_mutation(kernel_idx = 0, constant_args_idx = 1, grid = [(5, 1, 1)], tma_descriptor_metadata = {}, kwargs = {'in_ptr0': z, 'in_ptr1': y, 'out_ptr': o}); z = y = triton_kernel_wrapper_mutation = None
sin: "f32[5]" = o.sin(); o = None
return (sin,)
""",
)
@torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
def test_unbacked_symbol(self):
@nested_compile_region
def gn(x):
return torch.sin(torch.nonzero(x))
def fn(x):
return gn(x) + gn(x)
x = torch.randn(64, 1, requires_grad=True)
# Inductor fails with a lowering error
opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_different_strides_in_backward(self):
@nested_compile_region
def gn(x):
return torch.cos(x)
def fn(x):
a = gn(x)
a2 = gn(a)
b = torch.sin(a2)
c = gn(b)
c2 = gn(c)
return c.sum() + c2.sum()
opt_fn = torch.compile(fn, fullgraph=True)
x = torch.randn(8, 16, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
x_clone = x.detach().clone().requires_grad_(True)
torch._dynamo.mark_dynamic(x_clone, 0)
ref = fn(x)
res = opt_fn(x_clone)
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
torch.compiler.reset()
backend = AotEagerAndRecordGraphs()
opt_fn = torch.compile(fn, backend=backend, fullgraph=True)
x = torch.randn(8, 16, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
x_clone = x.detach().clone().requires_grad_(True)
torch._dynamo.mark_dynamic(x_clone, 0)
ref = fn(x)
res = opt_fn(x_clone)
ref.sum().backward()
res.sum().backward()
self.assertEqual(ref, res)
self.assertEqual(x.grad, x_clone.grad)
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.fw_graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
sdispater__pendulum
|
src/pendulum/mixins/default.py
|
{
"start": 107,
"end": 907
}
|
class ____:
_formatter: Formatter = _formatter
def format(self, fmt: str, locale: str | None = None) -> str:
"""
Formats the instance using the given format.
:param fmt: The format to use
:param locale: The locale to use
"""
return self._formatter.format(self, fmt, locale)
def for_json(self) -> str:
"""
Methods for automatic json serialization by simplejson.
"""
return self.isoformat()
def __format__(self, format_spec: str) -> str:
if len(format_spec) > 0:
if "%" in format_spec:
return self.strftime(format_spec)
return self.format(format_spec)
return str(self)
def __str__(self) -> str:
return self.isoformat()
|
FormattableMixin
|
python
|
cherrypy__cherrypy
|
cherrypy/_cptools.py
|
{
"start": 8370,
"end": 9037
}
|
class ____(Tool):
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
"""Initialize an error tool."""
Tool.__init__(self, None, callable, name)
def _wrapper(self):
self.callable(**self._merged_args())
def _setup(self):
"""Wire this tool into ``cherrypy.request``.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
|
ErrorTool
|
python
|
ethereum__web3.py
|
web3/exceptions.py
|
{
"start": 5071,
"end": 5219
}
|
class ____(Web3Exception):
"""
Raised when a method has not retrieved the desired
result within a specified timeout.
"""
|
TimeExhausted
|
python
|
getsentry__sentry
|
src/sentry/grouping/component.py
|
{
"start": 9275,
"end": 9362
}
|
class ____(BaseGroupingComponent[int]):
id: str = "code"
|
NSErrorCodeGroupingComponent
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linear_operator_util_test.py
|
{
"start": 11804,
"end": 12928
}
|
class ____(test.TestCase):
def test_one_is_explicitly_adjoint_of_other_returns_true(self):
x = linalg_lib.LinearOperatorFullMatrix(
[[1., 2.], [3., 4.]], is_self_adjoint=False)
self.assertTrue(linear_operator_util.is_adjoint_pair(x, x.H))
self.assertTrue(linear_operator_util.is_adjoint_pair(x.H, x))
def test_repeated_non_self_adjoint_operator_returns_false(self):
x = linalg_lib.LinearOperatorFullMatrix(
[[1., 2.], [3., 4.]], is_self_adjoint=False)
self.assertFalse(linear_operator_util.is_adjoint_pair(x, x))
def test_repeated_self_adjoint_operator_returns_true(self):
x = linalg_lib.LinearOperatorFullMatrix(
[[1., 2.], [2., 1.]], is_self_adjoint=True)
self.assertTrue(linear_operator_util.is_adjoint_pair(x, x))
def test_pair_of_non_self_adjoint_operator_returns_false(self):
x = linalg_lib.LinearOperatorFullMatrix(
[[1., 2.], [3., 4.]], is_self_adjoint=False)
y = linalg_lib.LinearOperatorFullMatrix(
[[10., 20.], [3., 4.]], is_self_adjoint=False)
self.assertFalse(linear_operator_util.is_adjoint_pair(x, y))
|
IsAdjointPairTest
|
python
|
boto__boto3
|
tests/unit/docs/test_docstring.py
|
{
"start": 639,
"end": 11450
}
|
class ____(BaseDocsTest):
def test_action_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.sample_operation)
action_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' **Request Syntax**',
' ::',
' response = myservice.sample_operation(',
' Foo=\'string\',',
' Bar=\'string\'',
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
' \'Foo\': \'string\',',
' \'Bar\': \'string\'',
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
],
action_docstring,
)
def test_load_help(self):
sub_resource = self.resource.Sample('Id')
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(sub_resource.load)
load_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
(
' Calls :py:meth:`MyService.Client.sample_operation` to update '
'the attributes of the Sample resource'
),
' **Request Syntax**',
' ::',
' sample.load()',
' :returns: None',
],
load_docstring,
)
def test_sub_resource_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.Sample)
sub_resource_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' Creates a Sample resource.::',
" sample = myservice.Sample('name')",
' :type name: string',
" :param name: The Sample's name identifier.",
' :rtype: :py:class:`MyService.Sample`',
' :returns: A Sample resource',
],
sub_resource_docstring,
)
def test_attribute_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.Sample('id').__class__.foo)
attribute_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[' - *(string) --* Documents Foo'], attribute_docstring
)
def test_identifier_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.Sample('id').__class__.name)
identifier_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
" *(string)* The Sample's name identifier. This "
"**must** be set."
],
identifier_docstring,
)
def test_reference_help(self):
sample_resource = self.resource.Sample('id')
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(sample_resource.__class__.related_sample)
reference_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
" (:py:class:`Sample`) The related related_sample "
"if set, otherwise ``None``."
],
reference_docstring,
)
def test_collection_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.__class__.samples)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[' A collection of Sample resources'],
collection_method_docstring,
)
def test_collection_all_method_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.samples.all)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
(
' Creates an iterable of all Sample resources in the '
'collection.'
),
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.all()',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
],
collection_method_docstring,
)
def test_collection_filter_method_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.samples.filter)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.filter(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
],
collection_method_docstring,
)
def test_collection_limit_method_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.samples.limit)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.limit(',
' count=123',
' )',
' :type count: integer',
(
' :param count: The limit to the number of resources '
'in the iterable.'
),
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
],
collection_method_docstring,
)
def test_collection_page_size_method_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.samples.page_size)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.page_size(',
' count=123',
' )',
' :type count: integer',
(
' :param count: The number of items returned by '
'each service call'
),
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
],
collection_method_docstring,
)
def test_collection_chaining_help(self):
collection = self.resource.samples.all()
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(collection.all)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
(
' Creates an iterable of all Sample resources in the '
'collection.'
),
' **Request Syntax**',
' ::',
' sample_iterator = myservice.samples.all()',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
],
collection_method_docstring,
)
def test_batch_action_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.samples.operate)
batch_action_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
' **Request Syntax**',
' ::',
' response = myservice.samples.operate(',
" Foo='string',",
" Bar='string'",
' )',
' :type Foo: string',
' :param Foo: Documents Foo',
' :type Bar: string',
' :param Bar: Documents Bar',
' :rtype: dict',
' :returns:',
' **Response Syntax**',
' ::',
' {',
" 'Foo': 'string',",
" 'Bar': 'string'",
' }',
' **Response Structure**',
' - *(dict) --*',
' - **Foo** *(string) --* Documents Foo',
' - **Bar** *(string) --* Documents Bar',
],
batch_action_docstring,
)
def test_resource_waiter_help(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
help(self.resource.Sample('id').wait_until_complete)
resource_waiter_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order(
[
(
' Waits until this Sample is complete. This method calls '
':py:meth:`MyService.Waiter.sample_operation_complete.wait` '
'which polls :py:meth:`MyService.Client.sample_operation` every '
'15 seconds until a successful state is reached. An error '
'is raised after 40 failed checks.'
),
' **Request Syntax**',
' ::',
' sample.wait_until_complete(',
" Bar='string'",
' )',
' :type Bar: string',
' :param Bar: Documents Bar',
' :returns: None',
],
resource_waiter_docstring,
)
|
TestResourceDocstrings
|
python
|
getsentry__sentry
|
tests/sentry/replays/endpoints/test_project_replay_jobs_delete.py
|
{
"start": 551,
"end": 15402
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-project-replay-deletion-jobs-index"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.organization = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.organization)
self.other_project = self.create_project() # Different organization
def test_get_no_jobs(self) -> None:
"""Test GET with no deletion jobs returns empty list"""
response = self.get_success_response(self.organization.slug, self.project.slug)
assert response.data == {"data": []}
def test_get_multiple_jobs(self) -> None:
"""Test GET returns multiple jobs in correct order (newest first)"""
# Create multiple jobs using the factory method
job1 = ReplayDeletionJobModel.objects.create(
project_id=self.project.id,
organization_id=self.organization.id,
range_start=datetime.datetime(2023, 1, 1, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 2, tzinfo=datetime.UTC),
query="test query 1",
environments=["prod"],
status="pending",
)
job2 = ReplayDeletionJobModel.objects.create(
project_id=self.project.id,
organization_id=self.organization.id,
range_start=datetime.datetime(2023, 1, 3, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 4, tzinfo=datetime.UTC),
query="test query 2",
environments=["staging"],
status="in-progress",
)
response = self.get_success_response(self.organization.slug, self.project.slug)
assert len(response.data["data"]) == 2
# Should be ordered by newest first (job2 then job1)
assert response.data["data"][0]["id"] == job2.id
assert response.data["data"][1]["id"] == job1.id
# Verify data structure
job_data = response.data["data"][0]
assert job_data["status"] == "in-progress"
assert job_data["environments"] == ["staging"]
assert job_data["query"] == "test query 2"
assert job_data["countDeleted"] == 0 # Default offset value
assert "dateCreated" in job_data
assert "dateUpdated" in job_data
assert "rangeStart" in job_data
assert "rangeEnd" in job_data
job_data = response.data["data"][1]
assert job_data["status"] == "pending"
assert job_data["environments"] == ["prod"]
assert job_data["query"] == "test query 1"
assert job_data["countDeleted"] == 0 # Default offset value
assert "dateCreated" in job_data
assert "dateUpdated" in job_data
assert "rangeStart" in job_data
assert "rangeEnd" in job_data
def test_get_only_accessible_projects(self) -> None:
"""Test GET only returns jobs for projects user has access to"""
# Create job for accessible project
accessible_job = ReplayDeletionJobModel.objects.create(
project_id=self.project.id,
organization_id=self.organization.id,
range_start=datetime.datetime(2023, 1, 1, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 2, tzinfo=datetime.UTC),
query="accessible",
environments=[],
status="pending",
)
# Create job for inaccessible project (different organization)
ReplayDeletionJobModel.objects.create(
project_id=self.other_project.id,
organization_id=self.other_project.organization_id,
range_start=datetime.datetime(2023, 1, 1, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 2, tzinfo=datetime.UTC),
query="inaccessible",
environments=[],
status="pending",
)
response = self.get_success_response(self.organization.slug, self.project.slug)
assert len(response.data["data"]) == 1
assert response.data["data"][0]["id"] == accessible_job.id
assert response.data["data"][0]["query"] == "accessible"
assert response.data["data"][0]["countDeleted"] == 0 # Default offset value
def test_get_count_deleted_reflects_offset(self) -> None:
"""Test that countDeleted field correctly reflects the offset value"""
# Create job with specific offset value
job = ReplayDeletionJobModel.objects.create(
project_id=self.project.id,
organization_id=self.organization.id,
range_start=datetime.datetime(2023, 1, 1, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 2, tzinfo=datetime.UTC),
query="test query",
environments=["prod"],
status="in-progress",
offset=42, # Set specific offset value
)
response = self.get_success_response(self.organization.slug, self.project.slug)
assert len(response.data["data"]) == 1
assert response.data["data"][0]["id"] == job.id
assert response.data["data"][0]["countDeleted"] == 42
def test_get_pagination(self) -> None:
"""Test GET pagination works correctly"""
# Create multiple jobs
for i in range(15):
ReplayDeletionJobModel.objects.create(
project_id=self.project.id,
organization_id=self.organization.id,
range_start=datetime.datetime(2023, 1, 1, tzinfo=datetime.UTC),
range_end=datetime.datetime(2023, 1, 2, tzinfo=datetime.UTC),
query=f"query {i}",
environments=[],
status="pending",
)
# Test first page
response = self.get_success_response(self.organization.slug, self.project.slug, per_page=10)
assert len(response.data["data"]) == 10
assert response.data["data"][0]["query"] == "query 14"
# Test second page
response = self.get_success_response(
self.organization.slug,
self.project.slug,
cursor=Cursor(10, 1),
)
assert len(response.data["data"]) == 5
assert response.data["data"][0]["query"] == "query 4"
@patch("sentry.replays.tasks.run_bulk_replay_delete_job.delay")
def test_post_success(self, mock_task: MagicMock) -> None:
"""Test successful POST creates job and schedules task"""
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": None,
}
}
response = self.get_success_response(
self.organization.slug, self.project.slug, method="post", **data, status_code=201
)
# Verify response structure
assert "data" in response.data
job_data = response.data["data"]
assert job_data["status"] == "pending"
assert job_data["environments"] == ["production"]
assert job_data["query"] == ""
assert job_data["countDeleted"] == 0 # Default offset value
# Verify job was created in database
job = ReplayDeletionJobModel.objects.get(id=job_data["id"])
assert job.project_id == self.project.id
assert job.status == "pending"
# Verify task was scheduled
mock_task.assert_called_once_with(job.id, offset=0, has_seer_data=False)
with assume_test_silo_mode(SiloMode.REGION):
RegionOutbox(
shard_scope=OutboxScope.AUDIT_LOG_SCOPE, shard_identifier=self.organization.id
).drain_shard()
with assume_test_silo_mode(SiloMode.CONTROL):
entry = AuditLogEntry.objects.get()
assert entry is not None
assert entry.event == 1156
def test_post_validation_errors(self) -> None:
"""Test POST validation errors"""
# Missing required fields
response = self.get_error_response(
self.organization.slug, self.project.slug, method="post", status_code=400, data={}
)
assert "environments" in response.data["data"]
assert "query" in response.data["data"]
# Invalid date range (end before start)
data = {
"data": {
"rangeStart": "2023-01-02T00:00:00Z",
"rangeEnd": "2023-01-01T00:00:00Z",
"query": "",
"environments": [],
}
}
response = self.get_error_response(
self.organization.slug, self.project.slug, method="post", **data, status_code=400
)
assert "rangeStart must be before rangeEnd" in str(response.data["data"])
def test_permission_denied_without_project_write(self) -> None:
"""Test that users without project:write permissions get 403 Forbidden"""
# Create a user with only member role (no project:write permissions)
user = self.create_user()
self.create_member(user=user, organization=self.organization, role="member")
self.login_as(user=user)
# Create a team but don't add the user to it
team = self.create_team(organization=self.organization)
project = self.create_project(organization=self.organization, teams=[team])
# GET should return 403
self.get_error_response(self.organization.slug, project.slug, status_code=403)
# POST should return 403
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": "test query",
}
}
self.get_error_response(
self.organization.slug, project.slug, method="post", **data, status_code=403
)
def test_permission_denied_with_api_token_insufficient_scope(self) -> None:
"""Test that API tokens without project:write scope get 403 Forbidden"""
with assume_test_silo_mode(SiloMode.CONTROL):
# Create API token with only project:read scope
token = ApiToken.objects.create(user=self.user, scope_list=["project:read"])
# GET should return 403
response = self.client.get(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 403
# POST should return 403
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": "test query",
}
}
response = self.client.post(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
data=data,
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 403
def test_permission_granted_with_project_write(self) -> None:
"""Test that users with project:write permissions can access endpoints"""
with assume_test_silo_mode(SiloMode.CONTROL):
# Create API token with project:write scope
token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
# GET should succeed
response = self.client.get(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 200
# POST should succeed
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": "test query",
}
}
with patch("sentry.replays.tasks.run_bulk_replay_delete_job.delay"):
response = self.client.post(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
data=data,
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 201
def test_permission_granted_with_project_admin(self) -> None:
"""Test that users with project:admin permissions can access endpoints"""
with assume_test_silo_mode(SiloMode.CONTROL):
# Create API token with project:admin scope
token = ApiToken.objects.create(user=self.user, scope_list=["project:admin"])
# GET should succeed
response = self.client.get(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 200
# POST should succeed
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": "test query",
}
}
with patch("sentry.replays.tasks.run_bulk_replay_delete_job.delay"):
response = self.client.post(
f"/api/0/projects/{self.organization.slug}/{self.project.slug}/replays/jobs/delete/",
data=data,
HTTP_AUTHORIZATION=f"Bearer {token.token}",
format="json",
)
assert response.status_code == 201
@patch("sentry.replays.tasks.run_bulk_replay_delete_job.delay")
def test_post_has_seer_data(self, mock_task: MagicMock) -> None:
"""Test POST with summaries enabled schedules task with has_seer_data=True."""
data = {
"data": {
"rangeStart": "2023-01-01T00:00:00Z",
"rangeEnd": "2023-01-02T00:00:00Z",
"environments": ["production"],
"query": None,
}
}
with self.feature({"organizations:replay-ai-summaries": True}):
response = self.get_success_response(
self.organization.slug, self.project.slug, method="post", **data, status_code=201
)
job_data = response.data["data"]
job = ReplayDeletionJobModel.objects.get(id=job_data["id"])
assert job.project_id == self.project.id
assert job.status == "pending"
mock_task.assert_called_once_with(job.id, offset=0, has_seer_data=True)
@region_silo_test
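# A hedged summary (derived only from the tests above) of the contract they
# exercise, for orientation:
#
#     GET  /api/0/projects/{org}/{project}/replays/jobs/delete/
#          -> 200, {"data": [jobs ordered newest first]}
#     POST same path with
#          {"data": {"rangeStart": ..., "rangeEnd": ..., "environments": [...], "query": ...}}
#          -> 201, job created as "pending" and run_bulk_replay_delete_job scheduled
#
# project:write (or project:admin) scope is required, and rangeStart must be
# before rangeEnd.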
|
ProjectReplayDeletionJobsIndexTest
|
python
|
pytorch__pytorch
|
torch/ao/pruning/scheduler/base_scheduler.py
|
{
"start": 191,
"end": 6625
}
|
class ____:
def __init__(self, sparsifier, last_epoch=-1, verbose=False):
# Attach sparsifier
if not isinstance(sparsifier, BaseSparsifier):
raise TypeError(
f"{type(sparsifier).__name__} is not an instance of torch.ao.pruning.BaseSparsifier"
)
self.sparsifier = sparsifier
# Initialize epoch and base sparsity levels
self.base_sl = [group["sparsity_level"] for group in sparsifier.groups]
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `scheduler.step()` is called after
# `sparsifier.step()`
def with_counter(method):
if getattr(method, "_with_counter", False):
# `sparsifier.step()` has already been replaced, return.
return method
# Keep a weak reference to the sparsifier instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1 # type: ignore[union-attr]
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True # type: ignore[attr-defined]
return wrapper
self.sparsifier.step = with_counter(self.sparsifier.step) # type: ignore[assignment]
self.sparsifier._step_count = 0 # type: ignore[attr-defined]
self._step_count: int = 0
self.verbose = verbose
# Housekeeping
self._get_sl_called_within_step: bool = False
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the sparsifier.
"""
return {
key: value for key, value in self.__dict__.items() if key != "sparsifier"
}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_sl(self):
"""Return last computed sparsity level by current scheduler."""
return self._last_sl
def get_sl(self):
# Compute sparsity level using chainable form of the scheduler
# Note: This method is not intended to be called directly, and is only
# used by the ".step" method. Use .get_last_sl() instead.
if not self._get_sl_called_within_step:
warnings.warn(
"To get the last sparsity level computed by the scheduler, "
"please use `get_last_sl()`.",
stacklevel=2,
)
raise NotImplementedError
def print_sl(self, is_verbose, group, sl, epoch=None):
"""Display the current sparsity level."""
if is_verbose:
if epoch is None:
print(f"Adjusting sparsity level of group {group} to {sl:.4e}.")
else:
print(
f"Epoch {epoch:5d}: adjusting sparsity level of group {group} to {sl:.4e}."
)
def __repr__(self):
format_string = self.__class__.__name__ + " ("
format_string += "\n"
format_string += f"Sparsifier {self.sparsifier}\n"
format_string += f" base_sl: {self.base_sl}\n"
format_string += ")"
return format_string
def step(self, epoch=None):
# Raise warning if trying to call scheduler step before the sparsifier.
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.sparsifier.step, "_with_counter"):
warnings.warn(
"Seems like `sparsifier.step()` has been overridden after sparsity scheduler "
"initialization. Please, make sure to call `sparsifier.step()` before "
"`scheduler.step()`.",
UserWarning,
stacklevel=2,
)
# Just check if there were two first scheduler.step() calls before sparsifier.step()
elif self.sparsifier._step_count < 1: # type: ignore[attr-defined]
warnings.warn(
"Detected call of `scheduler.step()` before `sparsifier.step()`. "
"You have to make sure you run the sparsifier.step() BEFORE any "
"calls to the scheduler.step().",
UserWarning,
stacklevel=2,
)
self._step_count += 1
class _enable_get_sl_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_sl_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_sl_called_within_step = False
with _enable_get_sl_call(self):
self.last_epoch += 1
values = self.get_sl()
for i, data in enumerate(zip(self.sparsifier.groups, values)):
param_group, sl = data
param_group["sparsity_level"] = sl
self.print_sl(self.verbose, i, sl, epoch)
self._last_sl = [group["sparsity_level"] for group in self.sparsifier.groups]
self.sparsifier.enable_mask_update = True
def _make_sure_a_list(self, var):
r"""Utility that extends it to the same length as the .groups, ensuring it is a list"""
n = len(self.sparsifier.groups)
if not isinstance(var, (list, tuple)):
return [var] * n
else:
if len(var) != n:
raise ValueError(f"Expected variable of length {n}, but got {len(var)}")
return list(var) # We want the result to be in a list, not tuple
|
BaseScheduler
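A minimal sketch of how a concrete scheduler might subclass BaseScheduler: only get_sl() needs to be supplied, and (per the warnings in step()) sparsifier.step() is expected to run before scheduler.step() each epoch. The subclass name and the linear ramp below are illustrative assumptions, not part of the excerpt.
from torch.ao.pruning.scheduler.base_scheduler import BaseScheduler
class LinearRampSL(BaseScheduler):
    """Hypothetical scheduler that ramps sparsity linearly over `total_epochs`."""
    def __init__(self, sparsifier, total_epochs, last_epoch=-1, verbose=False):
        self.total_epochs = total_epochs
        super().__init__(sparsifier, last_epoch, verbose)
    def get_sl(self):
        # Called from BaseScheduler.step(); scale each base level by training progress.
        progress = min(max(self.last_epoch, 0) / self.total_epochs, 1.0)
        return [sl * progress for sl in self.base_sl]
# Intended call order in a training loop:
#     sparsifier.step()
#     scheduler.step()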
|
python
|
cython__cython
|
Cython/Compiler/ExprNodes.py
|
{
"start": 416705,
"end": 419782
}
|
class ____(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# module_name EncodedString Name of defining module
# class_def_node PyClassDefNode PyClassDefNode defining this class
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
# force_type bool always create a "new style" class, even with no bases
subexprs = []
type = py_object_type
force_type = False
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def analyse_annotations(self, env):
from .AutoDocTransforms import AnnotationWriter
position = self.class_def_node.pos
dict_items = [
DictItemNode(
entry.pos,
key=IdentifierStringNode(entry.pos, value=entry.name),
value=entry.annotation.string
)
for entry in env.entries.values() if entry.annotation
]
# Annotations dict shouldn't exist for classes which don't declare any.
if dict_items:
annotations_dict = DictNode(position, key_value_pairs=dict_items)
lhs = NameNode(position, name=StringEncoding.EncodedString("__annotations__"))
lhs.entry = env.lookup_here(lhs.name) or env.declare_var(lhs.name, dict_type, position)
node = SingleAssignmentNode(position, lhs=lhs, rhs=annotations_dict)
node.analyse_declarations(env)
self.class_def_node.body.stats.insert(0, node)
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
class_def_node = self.class_def_node
mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL'
if class_def_node.metaclass:
metaclass = class_def_node.metaclass.py_result()
elif self.force_type:
metaclass = "((PyObject*)&PyType_Type)"
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
class_def_node.bases.py_result(),
class_def_node.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
code.put_make_object_deferred(self.result())
|
Py3ClassNode
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/math_ops_test.py
|
{
"start": 21170,
"end": 23182
}
|
class ____(test_util.TensorFlowTestCase):
def testApproximateEqual(self):
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.00009)
z = False
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = dtype(1)
y = dtype(1.000009)
z = True
with test_util.device(use_gpu=True):
# Default tolerance is 0.00001
z_tf = self.evaluate(math_ops.approximate_equal(x, y))
self.assertAllEqual(z, z_tf)
for dtype in [np.float32, np.double]:
x = np.array([[[[-1, 2.00009999], [-3, 4.01]]]], dtype=dtype)
y = np.array([[[[-1.001, 2], [-3.00009, 4]]]], dtype=dtype)
z = np.array([[[[False, True], [True, False]]]], dtype=np.bool_)
with test_util.device(use_gpu=True):
z_tf = self.evaluate(math_ops.approximate_equal(x, y, tolerance=0.0001))
self.assertAllEqual(z, z_tf)
def testApproximateEqualShape(self):
for dtype in [np.float32, np.double]:
x = np.array([1, 2], dtype=dtype)
y = np.array([[1, 2]], dtype=dtype)
# The inputs 'x' and 'y' must have the same shape.
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Shapes must be equal rank|must be of the same shape"):
math_ops.approximate_equal(x, y)
def testApproximateEqualShapeXla(self):
@def_function.function(jit_compile=True)
def approximate_equal(x, y):
return math_ops.approximate_equal(x, y)
for dtype in [np.float32, np.double]:
x = np.array([1, 2], dtype=dtype)
y = np.array([[1, 2]], dtype=dtype)
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Shapes must be equal rank|must be of the same shape"):
approximate_equal(x, y)
@test_util.run_all_in_graph_and_eager_modes
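# A hedged restatement (as comments, not original source) of what the tests
# above exercise, once the returned tensors are evaluated:
#
#     math_ops.approximate_equal(1.0, 1.000009)           # -> True  (default tolerance 1e-5)
#     math_ops.approximate_equal(1.0, 1.00009)            # -> False
#     math_ops.approximate_equal(x, y, tolerance=1e-4)    # elementwise; x and y must have the same shape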
|
ApproximateEqualTest
|
python
|
redis__redis-py
|
redis/asyncio/cluster.py
|
{
"start": 2470,
"end": 47126
}
|
class ____(AbstractRedis, AbstractRedisCluster, AsyncRedisClusterCommands):
"""
Create a new RedisCluster client.
Pass one of parameters:
- `host` & `port`
- `startup_nodes`
| Use ``await`` :meth:`initialize` to find cluster nodes & create connections.
| Use ``await`` :meth:`close` to disconnect connections & close client.
Many commands support the target_nodes kwarg. It can be one of the
:attr:`NODE_FLAGS`:
- :attr:`PRIMARIES`
- :attr:`REPLICAS`
- :attr:`ALL_NODES`
- :attr:`RANDOM`
- :attr:`DEFAULT_NODE`
Note: This client is not thread/process/fork safe.
:param host:
| Can be used to point to a startup node
:param port:
| Port used if **host** is provided
:param startup_nodes:
| :class:`~.ClusterNode` to be used as a startup node
:param require_full_coverage:
| When set to ``False``: the client will not require a full coverage of
the slots. However, if not all slots are covered, and at least one node
has ``cluster-require-full-coverage`` set to ``yes``, the server will throw
a :class:`~.ClusterDownError` for some key-based commands.
| When set to ``True``: all slots must be covered to construct the cluster
client. If not all slots are covered, :class:`~.RedisClusterException` will be
thrown.
| See:
https://redis.io/docs/manual/scaling/#redis-cluster-configuration-parameters
:param read_from_replicas:
| @deprecated - please use load_balancing_strategy instead
| Enable read from replicas in READONLY mode.
When set to true, read commands will be distributed between the primary and
its replicas in a Round-Robin manner.
The data read from replicas is eventually consistent with the data in primary nodes.
:param load_balancing_strategy:
| Enable read from replicas in READONLY mode and defines the load balancing
strategy that will be used for cluster node selection.
The data read from replicas is eventually consistent with the data in primary nodes.
:param dynamic_startup_nodes:
| Set the RedisCluster's startup nodes to all the discovered nodes.
If true (default value), the cluster's discovered nodes will be used to
determine the cluster nodes-slots mapping in the next topology refresh.
It will remove the initial passed startup nodes if their endpoints aren't
listed in the CLUSTER SLOTS output.
If you use dynamic DNS endpoints for startup nodes but CLUSTER SLOTS lists
specific IP addresses, it is best to set it to false.
:param reinitialize_steps:
| Specifies the number of MOVED errors that need to occur before reinitializing
the whole cluster topology. If a MOVED error occurs and the cluster does not
need to be reinitialized while handling the current error, only the MOVED slot
will be patched with the redirected node.
To reinitialize the cluster on every MOVED error, set reinitialize_steps to 1.
To avoid reinitializing the cluster on moved errors, set reinitialize_steps to
0.
:param cluster_error_retry_attempts:
| @deprecated - Please configure the 'retry' object instead
If a 'retry' object is set, this argument is ignored.
Number of times to retry before raising an error when :class:`~.TimeoutError`,
:class:`~.ConnectionError`, :class:`~.SlotNotCoveredError`
or :class:`~.ClusterDownError` are encountered
:param retry:
| A retry object that defines the retry strategy and the number of
retries for the cluster client.
In the current implementation of the cluster client (starting from redis-py version 6.0.0)
the retry object is not yet fully utilized; it is only used to determine
the number of retries for the cluster client.
In future releases the retry object will be used to handle the cluster client retries.
:param max_connections:
| Maximum number of connections per node. If there are no free connections & the
maximum number of connections are already created, a
:class:`~.MaxConnectionsError` is raised.
:param address_remap:
| An optional callable which, when provided with an internal network
address of a node, e.g. a `(host, port)` tuple, will return the address
where the node is reachable. This can be used to map the addresses at
which the nodes _think_ they are, to addresses at which a client may
reach them, such as when they sit behind a proxy.
| Rest of the arguments will be passed to the
:class:`~redis.asyncio.connection.Connection` instances when created
:raises RedisClusterException:
if any arguments are invalid or unknown. Eg:
- `db` != 0 or None
- `path` argument for unix socket connection
- none of the `host`/`port` & `startup_nodes` were provided
"""
@classmethod
def from_url(cls, url: str, **kwargs: Any) -> "RedisCluster":
"""
Return a Redis client object configured from the given URL.
For example::
redis://[[username]:[password]]@localhost:6379/0
rediss://[[username]:[password]]@localhost:6379/0
Three URL schemes are supported:
- `redis://` creates a TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/redis>
- `rediss://` creates a SSL wrapped TCP socket connection. See more at:
<https://www.iana.org/assignments/uri-schemes/prov/rediss>
The username, password, hostname, path and all querystring values are passed
through ``urllib.parse.unquote`` in order to replace any percent-encoded values
with their corresponding characters.
All querystring options are cast to their appropriate Python types. Boolean
arguments can be specified with string values "True"/"False" or "Yes"/"No".
Values that cannot be properly cast cause a ``ValueError`` to be raised. Once
parsed, the querystring arguments and keyword arguments are passed to
:class:`~redis.asyncio.connection.Connection` when created.
In the case of conflicting arguments, querystring arguments are used.
"""
kwargs.update(parse_url(url))
if kwargs.pop("connection_class", None) is SSLConnection:
kwargs["ssl"] = True
return cls(**kwargs)
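# A hedged usage sketch (illustrative, not part of the original source), per the
# docstring above:
#
#     rc = RedisCluster.from_url("redis://localhost:6379/0")
#     await rc.initialize()   # or: rc = await RedisCluster.from_url(...), via __await__
#     await rc.set("foo", "bar")
#     await rc.aclose()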
__slots__ = (
"_initialize",
"_lock",
"retry",
"command_flags",
"commands_parser",
"connection_kwargs",
"encoder",
"node_flags",
"nodes_manager",
"read_from_replicas",
"reinitialize_counter",
"reinitialize_steps",
"response_callbacks",
"result_callbacks",
)
@deprecated_args(
args_to_warn=["read_from_replicas"],
reason="Please configure the 'load_balancing_strategy' instead",
version="5.3.0",
)
@deprecated_args(
args_to_warn=[
"cluster_error_retry_attempts",
],
reason="Please configure the 'retry' object instead",
version="6.0.0",
)
def __init__(
self,
host: Optional[str] = None,
port: Union[str, int] = 6379,
# Cluster related kwargs
startup_nodes: Optional[List["ClusterNode"]] = None,
require_full_coverage: bool = True,
read_from_replicas: bool = False,
load_balancing_strategy: Optional[LoadBalancingStrategy] = None,
dynamic_startup_nodes: bool = True,
reinitialize_steps: int = 5,
cluster_error_retry_attempts: int = 3,
max_connections: int = 2**31,
retry: Optional["Retry"] = None,
retry_on_error: Optional[List[Type[Exception]]] = None,
# Client related kwargs
db: Union[str, int] = 0,
path: Optional[str] = None,
credential_provider: Optional[CredentialProvider] = None,
username: Optional[str] = None,
password: Optional[str] = None,
client_name: Optional[str] = None,
lib_name: Optional[str] = "redis-py",
lib_version: Optional[str] = get_lib_version(),
# Encoding related kwargs
encoding: str = "utf-8",
encoding_errors: str = "strict",
decode_responses: bool = False,
# Connection related kwargs
health_check_interval: float = 0,
socket_connect_timeout: Optional[float] = None,
socket_keepalive: bool = False,
socket_keepalive_options: Optional[Mapping[int, Union[int, bytes]]] = None,
socket_timeout: Optional[float] = None,
# SSL related kwargs
ssl: bool = False,
ssl_ca_certs: Optional[str] = None,
ssl_ca_data: Optional[str] = None,
ssl_cert_reqs: Union[str, VerifyMode] = "required",
ssl_include_verify_flags: Optional[List[VerifyFlags]] = None,
ssl_exclude_verify_flags: Optional[List[VerifyFlags]] = None,
ssl_certfile: Optional[str] = None,
ssl_check_hostname: bool = True,
ssl_keyfile: Optional[str] = None,
ssl_min_version: Optional[TLSVersion] = None,
ssl_ciphers: Optional[str] = None,
protocol: Optional[int] = 2,
address_remap: Optional[Callable[[Tuple[str, int]], Tuple[str, int]]] = None,
event_dispatcher: Optional[EventDispatcher] = None,
policy_resolver: AsyncPolicyResolver = AsyncStaticPolicyResolver(),
) -> None:
if db:
raise RedisClusterException(
"Argument 'db' must be 0 or None in cluster mode"
)
if path:
raise RedisClusterException(
"Unix domain socket is not supported in cluster mode"
)
if (not host or not port) and not startup_nodes:
raise RedisClusterException(
"RedisCluster requires at least one node to discover the cluster.\n"
"Please provide one of the following or use RedisCluster.from_url:\n"
' - host and port: RedisCluster(host="localhost", port=6379)\n'
" - startup_nodes: RedisCluster(startup_nodes=["
'ClusterNode("localhost", 6379), ClusterNode("localhost", 6380)])'
)
kwargs: Dict[str, Any] = {
"max_connections": max_connections,
"connection_class": Connection,
# Client related kwargs
"credential_provider": credential_provider,
"username": username,
"password": password,
"client_name": client_name,
"lib_name": lib_name,
"lib_version": lib_version,
# Encoding related kwargs
"encoding": encoding,
"encoding_errors": encoding_errors,
"decode_responses": decode_responses,
# Connection related kwargs
"health_check_interval": health_check_interval,
"socket_connect_timeout": socket_connect_timeout,
"socket_keepalive": socket_keepalive,
"socket_keepalive_options": socket_keepalive_options,
"socket_timeout": socket_timeout,
"protocol": protocol,
}
if ssl:
# SSL related kwargs
kwargs.update(
{
"connection_class": SSLConnection,
"ssl_ca_certs": ssl_ca_certs,
"ssl_ca_data": ssl_ca_data,
"ssl_cert_reqs": ssl_cert_reqs,
"ssl_include_verify_flags": ssl_include_verify_flags,
"ssl_exclude_verify_flags": ssl_exclude_verify_flags,
"ssl_certfile": ssl_certfile,
"ssl_check_hostname": ssl_check_hostname,
"ssl_keyfile": ssl_keyfile,
"ssl_min_version": ssl_min_version,
"ssl_ciphers": ssl_ciphers,
}
)
if read_from_replicas or load_balancing_strategy:
# Call our on_connect function to configure READONLY mode
kwargs["redis_connect_func"] = self.on_connect
if retry:
self.retry = retry
else:
self.retry = Retry(
backoff=ExponentialWithJitterBackoff(base=1, cap=10),
retries=cluster_error_retry_attempts,
)
if retry_on_error:
self.retry.update_supported_errors(retry_on_error)
kwargs["response_callbacks"] = _RedisCallbacks.copy()
if kwargs.get("protocol") in ["3", 3]:
kwargs["response_callbacks"].update(_RedisCallbacksRESP3)
else:
kwargs["response_callbacks"].update(_RedisCallbacksRESP2)
self.connection_kwargs = kwargs
if startup_nodes:
passed_nodes = []
for node in startup_nodes:
passed_nodes.append(
ClusterNode(node.host, node.port, **self.connection_kwargs)
)
startup_nodes = passed_nodes
else:
startup_nodes = []
if host and port:
startup_nodes.append(ClusterNode(host, port, **self.connection_kwargs))
if event_dispatcher is None:
self._event_dispatcher = EventDispatcher()
else:
self._event_dispatcher = event_dispatcher
self.startup_nodes = startup_nodes
self.nodes_manager = NodesManager(
startup_nodes,
require_full_coverage,
kwargs,
dynamic_startup_nodes=dynamic_startup_nodes,
address_remap=address_remap,
event_dispatcher=self._event_dispatcher,
)
self.encoder = Encoder(encoding, encoding_errors, decode_responses)
self.read_from_replicas = read_from_replicas
self.load_balancing_strategy = load_balancing_strategy
self.reinitialize_steps = reinitialize_steps
self.reinitialize_counter = 0
# For backward compatibility, mapping from existing policies to new one
self._command_flags_mapping: dict[str, Union[RequestPolicy, ResponsePolicy]] = {
self.__class__.RANDOM: RequestPolicy.DEFAULT_KEYLESS,
self.__class__.PRIMARIES: RequestPolicy.ALL_SHARDS,
self.__class__.ALL_NODES: RequestPolicy.ALL_NODES,
self.__class__.REPLICAS: RequestPolicy.ALL_REPLICAS,
self.__class__.DEFAULT_NODE: RequestPolicy.DEFAULT_NODE,
SLOT_ID: RequestPolicy.DEFAULT_KEYED,
}
self._policies_callback_mapping: dict[
Union[RequestPolicy, ResponsePolicy], Callable
] = {
RequestPolicy.DEFAULT_KEYLESS: lambda command_name: [
self.get_random_primary_or_all_nodes(command_name)
],
RequestPolicy.DEFAULT_KEYED: self.get_nodes_from_slot,
RequestPolicy.DEFAULT_NODE: lambda: [self.get_default_node()],
RequestPolicy.ALL_SHARDS: self.get_primaries,
RequestPolicy.ALL_NODES: self.get_nodes,
RequestPolicy.ALL_REPLICAS: self.get_replicas,
RequestPolicy.SPECIAL: self.get_special_nodes,
ResponsePolicy.DEFAULT_KEYLESS: lambda res: res,
ResponsePolicy.DEFAULT_KEYED: lambda res: res,
}
self._policy_resolver = policy_resolver
self.commands_parser = AsyncCommandsParser()
self._aggregate_nodes = None
self.node_flags = self.__class__.NODE_FLAGS.copy()
self.command_flags = self.__class__.COMMAND_FLAGS.copy()
self.response_callbacks = kwargs["response_callbacks"]
self.result_callbacks = self.__class__.RESULT_CALLBACKS.copy()
self.result_callbacks["CLUSTER SLOTS"] = (
lambda cmd, res, **kwargs: parse_cluster_slots(
list(res.values())[0], **kwargs
)
)
self._initialize = True
self._lock: Optional[asyncio.Lock] = None
# When used as an async context manager, we need to increment and decrement
# a usage counter so that we can close the connection pool when no one is
# using the client.
self._usage_counter = 0
self._usage_lock = asyncio.Lock()
async def initialize(self) -> "RedisCluster":
"""Get all nodes from startup nodes & creates connections if not initialized."""
if self._initialize:
if not self._lock:
self._lock = asyncio.Lock()
async with self._lock:
if self._initialize:
try:
await self.nodes_manager.initialize()
await self.commands_parser.initialize(
self.nodes_manager.default_node
)
self._initialize = False
except BaseException:
await self.nodes_manager.aclose()
await self.nodes_manager.aclose("startup_nodes")
raise
return self
async def aclose(self) -> None:
"""Close all connections & client if initialized."""
if not self._initialize:
if not self._lock:
self._lock = asyncio.Lock()
async with self._lock:
if not self._initialize:
self._initialize = True
await self.nodes_manager.aclose()
await self.nodes_manager.aclose("startup_nodes")
@deprecated_function(version="5.0.0", reason="Use aclose() instead", name="close")
async def close(self) -> None:
"""alias for aclose() for backwards compatibility"""
await self.aclose()
async def __aenter__(self) -> "RedisCluster":
"""
Async context manager entry. Increments a usage counter so that the
connection pool is only closed (via aclose()) when no context is using
the client.
"""
await self._increment_usage()
try:
# Initialize the client (i.e. establish connection, etc.)
return await self.initialize()
except Exception:
# If initialization fails, decrement the counter to keep it in sync
await self._decrement_usage()
raise
async def _increment_usage(self) -> int:
"""
Helper coroutine to increment the usage counter while holding the lock.
Returns the new value of the usage counter.
"""
async with self._usage_lock:
self._usage_counter += 1
return self._usage_counter
async def _decrement_usage(self) -> int:
"""
Helper coroutine to decrement the usage counter while holding the lock.
Returns the new value of the usage counter.
"""
async with self._usage_lock:
self._usage_counter -= 1
return self._usage_counter
async def __aexit__(self, exc_type, exc_value, traceback):
"""
Async context manager exit. Decrements a usage counter. If this is the
last exit (counter becomes zero), the client closes its connection pool.
"""
current_usage = await asyncio.shield(self._decrement_usage())
if current_usage == 0:
# This was the last active context, so disconnect the pool.
await asyncio.shield(self.aclose())
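# A hedged usage sketch (illustrative, not part of the original source): the
# usage counter lets nested "async with" blocks share the client, and only the
# outermost exit closes the connection pool:
#
#     async with rc:          # counter 0 -> 1, initialize()
#         async with rc:      # counter 1 -> 2
#             await rc.ping()
#         # counter 2 -> 1: pool stays open
#     # counter 1 -> 0: aclose() runs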
def __await__(self) -> Generator[Any, None, "RedisCluster"]:
return self.initialize().__await__()
_DEL_MESSAGE = "Unclosed RedisCluster client"
def __del__(
self,
_warn: Any = warnings.warn,
_grl: Any = asyncio.get_running_loop,
) -> None:
if hasattr(self, "_initialize") and not self._initialize:
_warn(f"{self._DEL_MESSAGE} {self!r}", ResourceWarning, source=self)
try:
context = {"client": self, "message": self._DEL_MESSAGE}
_grl().call_exception_handler(context)
except RuntimeError:
pass
async def on_connect(self, connection: Connection) -> None:
await connection.on_connect()
# Sending READONLY command to server to configure connection as
# readonly. Since each cluster node may change its server type due
# to a failover, we should establish a READONLY connection
# regardless of the server type. If this is a primary connection,
# READONLY would not affect executing write commands.
await connection.send_command("READONLY")
if str_if_bytes(await connection.read_response()) != "OK":
raise ConnectionError("READONLY command failed")
def get_nodes(self) -> List["ClusterNode"]:
"""Get all nodes of the cluster."""
return list(self.nodes_manager.nodes_cache.values())
def get_primaries(self) -> List["ClusterNode"]:
"""Get the primary nodes of the cluster."""
return self.nodes_manager.get_nodes_by_server_type(PRIMARY)
def get_replicas(self) -> List["ClusterNode"]:
"""Get the replica nodes of the cluster."""
return self.nodes_manager.get_nodes_by_server_type(REPLICA)
def get_random_node(self) -> "ClusterNode":
"""Get a random node of the cluster."""
return random.choice(list(self.nodes_manager.nodes_cache.values()))
def get_default_node(self) -> "ClusterNode":
"""Get the default node of the client."""
return self.nodes_manager.default_node
def set_default_node(self, node: "ClusterNode") -> None:
"""
Set the default node of the client.
:raises DataError: if None is passed or node does not exist in cluster.
"""
if not node or not self.get_node(node_name=node.name):
raise DataError("The requested node does not exist in the cluster.")
self.nodes_manager.default_node = node
def get_node(
self,
host: Optional[str] = None,
port: Optional[int] = None,
node_name: Optional[str] = None,
) -> Optional["ClusterNode"]:
"""Get node by (host, port) or node_name."""
return self.nodes_manager.get_node(host, port, node_name)
def get_node_from_key(
self, key: str, replica: bool = False
) -> Optional["ClusterNode"]:
"""
Get the cluster node corresponding to the provided key.
:param key:
:param replica:
| Indicates if a replica should be returned
|
None will be returned if no replica holds this key
:raises SlotNotCoveredError: if the key is not covered by any slot.
"""
slot = self.keyslot(key)
slot_cache = self.nodes_manager.slots_cache.get(slot)
if not slot_cache:
raise SlotNotCoveredError(f'Slot "{slot}" is not covered by the cluster.')
if replica:
if len(self.nodes_manager.slots_cache[slot]) < 2:
return None
node_idx = 1
else:
node_idx = 0
return slot_cache[node_idx]
def get_random_primary_or_all_nodes(self, command_name):
"""
Returns a random node or a random primary node, depending on READONLY mode.
"""
if self.read_from_replicas and command_name in READ_COMMANDS:
return self.get_random_node()
return self.get_random_primary_node()
def get_random_primary_node(self) -> "ClusterNode":
"""
Returns a random primary node
"""
return random.choice(self.get_primaries())
async def get_nodes_from_slot(self, command: str, *args):
"""
Returns a list of nodes that hold the specified keys' slots.
"""
# get the node that holds the key's slot
return [
self.nodes_manager.get_node_from_slot(
await self._determine_slot(command, *args),
self.read_from_replicas and command in READ_COMMANDS,
self.load_balancing_strategy if command in READ_COMMANDS else None,
)
]
def get_special_nodes(self) -> Optional[list["ClusterNode"]]:
"""
Returns a list of nodes for commands with a special policy.
"""
if not self._aggregate_nodes:
raise RedisClusterException(
"Cannot execute FT.CURSOR commands without FT.AGGREGATE"
)
return self._aggregate_nodes
def keyslot(self, key: EncodableT) -> int:
"""
Find the keyslot for a given key.
See: https://redis.io/docs/manual/scaling/#redis-cluster-data-sharding
"""
return key_slot(self.encoder.encode(key))
def get_encoder(self) -> Encoder:
"""Get the encoder object of the client."""
return self.encoder
def get_connection_kwargs(self) -> Dict[str, Optional[Any]]:
"""Get the kwargs passed to :class:`~redis.asyncio.connection.Connection`."""
return self.connection_kwargs
def set_retry(self, retry: Retry) -> None:
self.retry = retry
def set_response_callback(self, command: str, callback: ResponseCallbackT) -> None:
"""Set a custom response callback."""
self.response_callbacks[command] = callback
async def _determine_nodes(
self,
command: str,
*args: Any,
request_policy: RequestPolicy,
node_flag: Optional[str] = None,
) -> List["ClusterNode"]:
# Determine which nodes the command should be executed on.
# Returns a list of target nodes.
if not node_flag:
# get the nodes group for this command if it was predefined
node_flag = self.command_flags.get(command)
if node_flag in self._command_flags_mapping:
request_policy = self._command_flags_mapping[node_flag]
policy_callback = self._policies_callback_mapping[request_policy]
if request_policy == RequestPolicy.DEFAULT_KEYED:
nodes = await policy_callback(command, *args)
elif request_policy == RequestPolicy.DEFAULT_KEYLESS:
nodes = policy_callback(command)
else:
nodes = policy_callback()
if command.lower() == "ft.aggregate":
self._aggregate_nodes = nodes
return nodes
async def _determine_slot(self, command: str, *args: Any) -> int:
if self.command_flags.get(command) == SLOT_ID:
# The command contains the slot ID
return int(args[0])
# Get the keys in the command
# EVAL and EVALSHA are common enough that it's wasteful to go to the
# redis server to parse the keys. Besides, there is a bug in redis<7.0
# where `self._get_command_keys()` fails anyway. So, we special case
# EVAL/EVALSHA.
# - issue: https://github.com/redis/redis/issues/9493
# - fix: https://github.com/redis/redis/pull/9733
if command.upper() in ("EVAL", "EVALSHA"):
# command syntax: EVAL "script body" num_keys ...
if len(args) < 2:
raise RedisClusterException(
f"Invalid args in command: {command, *args}"
)
keys = args[2 : 2 + int(args[1])]
# if there are 0 keys, that means the script can be run on any node
# so we can just return a random slot
if not keys:
return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS)
else:
keys = await self.commands_parser.get_keys(command, *args)
if not keys:
# FCALL can call a function with 0 keys, that means the function
# can be run on any node so we can just return a random slot
if command.upper() in ("FCALL", "FCALL_RO"):
return random.randrange(0, REDIS_CLUSTER_HASH_SLOTS)
raise RedisClusterException(
"No way to dispatch this command to Redis Cluster. "
"Missing key.\nYou can execute the command by specifying "
f"target nodes.\nCommand: {args}"
)
# single key command
if len(keys) == 1:
return self.keyslot(keys[0])
# multi-key command; we need to make sure all keys are mapped to
# the same slot
slots = {self.keyslot(key) for key in keys}
if len(slots) != 1:
raise RedisClusterException(
f"{command} - all keys must map to the same key slot"
)
return slots.pop()
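# A hedged note (illustrative, not part of the original source): the usual way
# to satisfy the same-slot requirement for multi-key commands is a Redis hash
# tag -- only the substring inside {...} is hashed, e.g.
#
#     await rc.mset({"{user:1}:name": "a", "{user:1}:age": "30"})  # one slot
#     await rc.mset({"user:1:name": "a", "user:2:name": "b"})      # may span slots and raise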
def _is_node_flag(self, target_nodes: Any) -> bool:
return isinstance(target_nodes, str) and target_nodes in self.node_flags
def _parse_target_nodes(self, target_nodes: Any) -> List["ClusterNode"]:
if isinstance(target_nodes, list):
nodes = target_nodes
elif isinstance(target_nodes, ClusterNode):
# Supports passing a single ClusterNode as a variable
nodes = [target_nodes]
elif isinstance(target_nodes, dict):
# Supports dictionaries of the format {node_name: node}.
# It enables executing commands on multiple nodes as follows:
# rc.cluster_save_config(rc.get_primaries())
nodes = list(target_nodes.values())
else:
raise TypeError(
"target_nodes type can be one of the following: "
"node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES),"
"ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
f"The passed type is {type(target_nodes)}"
)
return nodes
async def execute_command(self, *args: EncodableT, **kwargs: Any) -> Any:
"""
Execute a raw command on the appropriate cluster node or target_nodes.
It will retry the command as specified by the retries property of
the :attr:`retry` & then raise an exception.
:param args:
| Raw command args
:param kwargs:
- target_nodes: :attr:`NODE_FLAGS` or :class:`~.ClusterNode`
or List[:class:`~.ClusterNode`] or Dict[Any, :class:`~.ClusterNode`]
- Rest of the kwargs are passed to the Redis connection
:raises RedisClusterException: if target_nodes is not provided & the command
can't be mapped to a slot
"""
command = args[0]
target_nodes = []
target_nodes_specified = False
retry_attempts = self.retry.get_retries()
passed_targets = kwargs.pop("target_nodes", None)
if passed_targets and not self._is_node_flag(passed_targets):
target_nodes = self._parse_target_nodes(passed_targets)
target_nodes_specified = True
retry_attempts = 0
command_policies = await self._policy_resolver.resolve(args[0].lower())
if not command_policies and not target_nodes_specified:
command_flag = self.command_flags.get(command)
if not command_flag:
# Fallback to default policy
if not self.get_default_node():
slot = None
else:
slot = await self._determine_slot(*args)
if not slot:
command_policies = CommandPolicies()
else:
command_policies = CommandPolicies(
request_policy=RequestPolicy.DEFAULT_KEYED,
response_policy=ResponsePolicy.DEFAULT_KEYED,
)
else:
if command_flag in self._command_flags_mapping:
command_policies = CommandPolicies(
request_policy=self._command_flags_mapping[command_flag]
)
else:
command_policies = CommandPolicies()
elif not command_policies and target_nodes_specified:
command_policies = CommandPolicies()
# Add one for the first execution
execute_attempts = 1 + retry_attempts
for _ in range(execute_attempts):
if self._initialize:
await self.initialize()
if (
len(target_nodes) == 1
and target_nodes[0] == self.get_default_node()
):
# Replace the default cluster node
self.replace_default_node()
try:
if not target_nodes_specified:
# Determine the nodes to execute the command on
target_nodes = await self._determine_nodes(
*args,
request_policy=command_policies.request_policy,
node_flag=passed_targets,
)
if not target_nodes:
raise RedisClusterException(
f"No targets were found to execute {args} command on"
)
if len(target_nodes) == 1:
# Return the processed result
ret = await self._execute_command(target_nodes[0], *args, **kwargs)
if command in self.result_callbacks:
ret = self.result_callbacks[command](
command, {target_nodes[0].name: ret}, **kwargs
)
return self._policies_callback_mapping[
command_policies.response_policy
](ret)
else:
keys = [node.name for node in target_nodes]
values = await asyncio.gather(
*(
asyncio.create_task(
self._execute_command(node, *args, **kwargs)
)
for node in target_nodes
)
)
if command in self.result_callbacks:
return self.result_callbacks[command](
command, dict(zip(keys, values)), **kwargs
)
return self._policies_callback_mapping[
command_policies.response_policy
](dict(zip(keys, values)))
except Exception as e:
if retry_attempts > 0 and type(e) in self.__class__.ERRORS_ALLOW_RETRY:
# The nodes and slots caches should be reinitialized.
# Try again with the new cluster setup.
retry_attempts -= 1
continue
else:
# raise the exception
raise e
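# A hedged usage sketch (illustrative, not part of the original source) of the
# target_nodes kwarg described in the docstring above:
#
#     await rc.execute_command("GET", "foo")                                # routed by key slot
#     await rc.execute_command("INFO", target_nodes=RedisCluster.PRIMARIES) # fan out to primaries
#     await rc.execute_command("PING", target_nodes=rc.get_random_node())   # explicit single node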
async def _execute_command(
self, target_node: "ClusterNode", *args: Union[KeyT, EncodableT], **kwargs: Any
) -> Any:
asking = moved = False
redirect_addr = None
ttl = self.RedisClusterRequestTTL
while ttl > 0:
ttl -= 1
try:
if asking:
target_node = self.get_node(node_name=redirect_addr)
await target_node.execute_command("ASKING")
asking = False
elif moved:
# MOVED occurred and the slots cache was updated,
# refresh the target node
slot = await self._determine_slot(*args)
target_node = self.nodes_manager.get_node_from_slot(
slot,
self.read_from_replicas and args[0] in READ_COMMANDS,
self.load_balancing_strategy
if args[0] in READ_COMMANDS
else None,
)
moved = False
return await target_node.execute_command(*args, **kwargs)
except BusyLoadingError:
raise
except MaxConnectionsError:
# MaxConnectionsError indicates client-side resource exhaustion
# (too many connections in the pool), not a node failure.
# Don't treat this as a node failure - just re-raise the error
# without reinitializing the cluster.
raise
except (ConnectionError, TimeoutError):
# Connection retries are being handled in the node's
# Retry object.
# Remove the failed node from the startup nodes before we try
# to reinitialize the cluster
self.nodes_manager.startup_nodes.pop(target_node.name, None)
# Force a hard reinitialization of the node/slots setup
# and try again with the new setup
await self.aclose()
raise
except (ClusterDownError, SlotNotCoveredError):
# ClusterDownError can occur during a failover and to get
# self-healed, we will try to reinitialize the cluster layout
# and retry executing the command
# SlotNotCoveredError can occur when the cluster is not fully
# initialized, or it can be a temporary issue.
# We will try to reinitialize the cluster topology
# and retry executing the command
await self.aclose()
await asyncio.sleep(0.25)
raise
except MovedError as e:
# First, we will try to patch the slots/nodes cache with the
# redirected node output and try again. If MovedError exceeds
# 'reinitialize_steps' number of times, we will force
# reinitializing the tables, and then try again.
# 'reinitialize_steps' counter will increase faster when
# the same client object is shared between multiple threads. To
# reduce the frequency you can set this variable in the
# RedisCluster constructor.
self.reinitialize_counter += 1
if (
self.reinitialize_steps
and self.reinitialize_counter % self.reinitialize_steps == 0
):
await self.aclose()
# Reset the counter
self.reinitialize_counter = 0
else:
self.nodes_manager._moved_exception = e
moved = True
except AskError as e:
redirect_addr = get_node_name(host=e.host, port=e.port)
asking = True
except TryAgainError:
if ttl < self.RedisClusterRequestTTL / 2:
await asyncio.sleep(0.05)
raise ClusterError("TTL exhausted.")
def pipeline(
self, transaction: Optional[Any] = None, shard_hint: Optional[Any] = None
) -> "ClusterPipeline":
"""
Create & return a new :class:`~.ClusterPipeline` object.
        The cluster implementation of pipeline does not support ``shard_hint``;
        the ``transaction`` flag is forwarded to the returned :class:`~.ClusterPipeline`.
        :raises RedisClusterException: if ``shard_hint`` is a truthy value
"""
if shard_hint:
raise RedisClusterException("shard_hint is deprecated in cluster mode")
return ClusterPipeline(self, transaction)
def lock(
self,
name: KeyT,
timeout: Optional[float] = None,
sleep: float = 0.1,
blocking: bool = True,
blocking_timeout: Optional[float] = None,
lock_class: Optional[Type[Lock]] = None,
thread_local: bool = True,
raise_on_release_error: bool = True,
) -> Lock:
"""
Return a new Lock object using key ``name`` that mimics
the behavior of threading.Lock.
If specified, ``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
``blocking`` indicates whether calling ``acquire`` should block until
the lock has been acquired or to fail immediately, causing ``acquire``
to return False and the lock not being acquired. Defaults to True.
Note this value can be overridden by passing a ``blocking``
argument to ``acquire``.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` indicates
continue trying forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``lock_class`` forces the specified lock implementation. Note that as
of redis-py 3.0, the only lock class we implement is ``Lock`` (which is
a Lua-based lock). So, it's unlikely you'll need this parameter, unless
you have created your own custom lock class.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
                  able to successfully release thread-2's lock.
``raise_on_release_error`` indicates whether to raise an exception when
the lock is no longer owned when exiting the context manager. By default,
this is True, meaning an exception will be raised. If False, the warning
will be logged and the exception will be suppressed.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage."""
if lock_class is None:
lock_class = Lock
return lock_class(
self,
name,
timeout=timeout,
sleep=sleep,
blocking=blocking,
blocking_timeout=blocking_timeout,
thread_local=thread_local,
raise_on_release_error=raise_on_release_error,
)
async def transaction(
self, func: Coroutine[None, "ClusterPipeline", Any], *watches, **kwargs
):
"""
Convenience method for executing the callable `func` as a transaction
while watching all keys specified in `watches`. The 'func' callable
should expect a single argument which is a Pipeline object.
"""
shard_hint = kwargs.pop("shard_hint", None)
value_from_callable = kwargs.pop("value_from_callable", False)
watch_delay = kwargs.pop("watch_delay", None)
async with self.pipeline(True, shard_hint) as pipe:
while True:
try:
if watches:
await pipe.watch(*watches)
func_value = await func(pipe)
exec_value = await pipe.execute()
return func_value if value_from_callable else exec_value
except WatchError:
if watch_delay is not None and watch_delay > 0:
                        # Use a non-blocking sleep so the event loop is not
                        # stalled between WATCH retries.
                        await asyncio.sleep(watch_delay)
continue
|
RedisCluster
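The excerpt above shows how RedisCluster retries commands, follows MOVED/ASK redirects, and exposes lock(), pipeline() and transaction() helpers. The following is a minimal usage sketch, not part of the excerpt, assuming redis-py's redis.asyncio.cluster.RedisCluster; the node address localhost:7000 and the key names are placeholders.

import asyncio

from redis.asyncio.cluster import RedisCluster


async def main():
    # Placeholder startup node; any reachable cluster node works.
    rc = RedisCluster(host="localhost", port=7000)

    # Plain commands go through execute_command(), which handles retries
    # and MOVED/ASK redirection as shown in the excerpt.
    await rc.set("greeting", "hello")
    print(await rc.get("greeting"))

    # lock() returns a Lock that mimics threading.Lock on top of a key.
    lock = rc.lock("resource-lock", timeout=10, blocking_timeout=5)
    if await lock.acquire():
        try:
            await rc.incr("resource-uses")
        finally:
            await lock.release()

    await rc.aclose()


asyncio.run(main())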
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/math_ops_test.py
|
{
"start": 15945,
"end": 19127
}
|
class ____(test_util.TensorFlowTestCase):
"""Test for sampled_addmm."""
SUPPORTED_DTYPES = [
dtypes.bfloat16,
dtypes.float16,
dtypes.float32,
dtypes.float64,
]
def sampledADDMMRef(
self,
indices,
values,
dense_shape,
mat1,
mat2,
beta=1.0,
alpha=1.0,
output_type=dtypes.float32,
):
dense = math_ops.matmul(mat1, mat2, output_type=output_type)
dense_vals = array_ops.gather_nd(dense, indices, batch_dims=dense.ndim - 2)
return alpha * dense_vals + beta * values
def testSampledADDMM2D(self):
for dtype in self.SUPPORTED_DTYPES:
indices = constant_op.constant([[0, 0], [1, 1]])
values = constant_op.constant([0.5, 0.3], dtype=dtype)
dense_shape = constant_op.constant([2, 2])
mat1 = constant_op.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=dtype)
mat2 = constant_op.constant(
[7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=dtype
)
alpha = 0.75
beta = 0.25
_, res, _ = math_ops.sampled_addmm(
indices,
values,
dense_shape,
mat1,
mat2,
beta=beta,
alpha=alpha,
output_type=dtype,
)
ref = self.sampledADDMMRef(
indices,
values,
dense_shape,
mat1,
mat2,
beta=beta,
alpha=alpha,
output_type=dtype,
)
self.assertAllClose(res, ref, atol=1e-2)
def testBatchSampledADDMM(self):
for dtype in self.SUPPORTED_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 0]]])
values = constant_op.constant([[3, 5], [2, 7]], dtype=dtype)
dense_shape = constant_op.constant([2, 2])
mat1 = constant_op.constant(
np.arange(1, 13), shape=[2, 2, 3], dtype=dtype
)
mat2 = constant_op.constant(
np.arange(13, 25), shape=[2, 3, 2], dtype=dtype
)
alpha = 0.4
beta = 0.6
_, res, _ = math_ops.sampled_addmm(
indices,
values,
dense_shape,
mat1,
mat2,
beta=beta,
alpha=alpha,
output_type=dtype,
)
ref = self.sampledADDMMRef(
indices,
values,
dense_shape,
mat1,
mat2,
beta=beta,
alpha=alpha,
output_type=dtype,
)
self.assertAllClose(res, ref, atol=1e-2)
def testInvalidDenseShape(self):
for dtype in self.SUPPORTED_DTYPES:
indices = constant_op.constant([[[0, 1], [1, 0]], [[0, 0], [1, 0]]])
values = constant_op.constant([[3, 5], [2, 7]], dtype=dtype)
dense_shape = constant_op.constant([1, 2])
mat1 = constant_op.constant(
np.arange(1, 13), shape=[2, 2, 3], dtype=dtype
)
mat2 = constant_op.constant(
np.arange(13, 25), shape=[2, 3, 2], dtype=dtype
)
with self.assertRaisesRegex(ValueError, "does not match output shape"):
math_ops.sampled_addmm(
indices, values, dense_shape, mat1, mat2, output_type=dtype
)
@test_util.run_all_in_graph_and_eager_modes
|
SampledADDMMTest
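As a sanity check on the reference formula used in sampledADDMMRef (gather the dense product at the sampled indices, then combine as alpha * gathered + beta * values), here is a small sketch reproducing the numbers from testSampledADDMM2D; NumPy stands in for the TensorFlow ops and is an assumption of this sketch.

import numpy as np

indices = np.array([[0, 0], [1, 1]])
values = np.array([0.5, 0.3])
mat1 = np.arange(1, 7, dtype=float).reshape(2, 3)
mat2 = np.arange(7, 13, dtype=float).reshape(3, 2)
alpha, beta = 0.75, 0.25

dense = mat1 @ mat2                              # [[58, 64], [139, 154]]
gathered = dense[indices[:, 0], indices[:, 1]]   # [58, 154]
print(alpha * gathered + beta * values)          # [ 43.625 115.575]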
|
python
|
astropy__astropy
|
astropy/coordinates/builtin_frames/ecliptic.py
|
{
"start": 7773,
"end": 8539
}
|
class ____(BaseEclipticFrame):
"""
    Heliocentric true ecliptic coordinates. The origin of these coordinates is the
center of the sun, with the x axis pointing in the direction of
the *true* (not mean) equinox as at the time specified by the ``equinox``
attribute (as seen from Earth), and the xy-plane in the plane of the
ecliptic for that date.
The frame attributes are listed under **Other Parameters**.
{params}
"""
equinox = TimeAttribute(default=EQUINOX_J2000, doc="The equinox time")
obstime = TimeAttribute(
default=DEFAULT_OBSTIME, doc="The reference time (e.g., time of observation)"
)
@format_doc(base_doc, components=doc_components_ecl.format("sun's center"), footer="")
|
HeliocentricTrueEcliptic
|
python
|
coleifer__peewee
|
tests/libs/mock.py
|
{
"start": 9053,
"end": 9546
}
|
class ____(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
|
_Sentinel
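Because __getattr__ caches each named object in self._sentinels, every access to the same attribute returns the identical object, which is what makes these values usable as "no value passed" markers. A small illustrative sketch, assuming the module-level sentinel instance created in the excerpt above:

_NOT_GIVEN = sentinel.NOT_GIVEN
assert _NOT_GIVEN is sentinel.NOT_GIVEN  # stable identity on every access


def lookup(mapping, key, default=_NOT_GIVEN):
    # Distinguish "no default supplied" from "default is None".
    if key in mapping:
        return mapping[key]
    if default is _NOT_GIVEN:
        raise KeyError(key)
    return default


print(lookup({"a": 1}, "a"))        # 1
print(lookup({}, "b", default=0))   # 0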
|
python
|
ray-project__ray
|
python/ray/train/examples/experiment_tracking/lightning_exp_tracking_model_dl.py
|
{
"start": 558,
"end": 1236
}
|
class ____(pl.LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(3, 1)
def forward(self, x):
return self.layer(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.binary_cross_entropy_with_logits(y_hat.flatten(), y.float())
# The metrics below will be reported to Loggers
self.log("train_loss", loss)
self.log_dict({
"metric_1": 1 / (batch_idx + 1), "metric_2": batch_idx * 100
})
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-3)
|
DummyModel
|
python
|
django__django
|
tests/gis_tests/geogapp/models.py
|
{
"start": 566,
"end": 685
}
|
class ____(NamedModel):
code = models.CharField(max_length=10)
poly = models.PolygonField(geography=True)
|
Zipcode
|
python
|
apache__avro
|
lang/py/avro/ipc.py
|
{
"start": 2140,
"end": 8568
}
|
class ____:
"""Base class for the client side of a protocol interaction."""
def __init__(self, local_protocol, transceiver):
self._local_protocol = local_protocol
self._transceiver = transceiver
self._remote_protocol = None
self._remote_hash = None
self._send_protocol = None
# read-only properties
@property
def local_protocol(self):
return self._local_protocol
@property
def transceiver(self):
return self._transceiver
# read/write properties
@property
def remote_protocol(self):
return self._remote_protocol
@remote_protocol.setter
def remote_protocol(self, new_remote_protocol):
self._remote_protocol = new_remote_protocol
REMOTE_PROTOCOLS[self.transceiver.remote_name] = self.remote_protocol
@property
def remote_hash(self):
return self._remote_hash
@remote_hash.setter
def remote_hash(self, new_remote_hash):
self._remote_hash = new_remote_hash
REMOTE_HASHES[self.transceiver.remote_name] = self.remote_hash
@property
def send_protocol(self):
return self._send_protocol
@send_protocol.setter
def send_protocol(self, new_send_protocol):
self._send_protocol = new_send_protocol
def request(self, message_name, request_datum):
"""
Writes a request message and reads a response or error message.
"""
# build handshake and call request
buffer_writer = io.BytesIO()
buffer_encoder = avro.io.BinaryEncoder(buffer_writer)
self.write_handshake_request(buffer_encoder)
self.write_call_request(message_name, request_datum, buffer_encoder)
# send the handshake and call request; block until call response
call_request = buffer_writer.getvalue()
return self.issue_request(call_request, message_name, request_datum)
def write_handshake_request(self, encoder):
local_hash = self.local_protocol.md5
remote_name = self.transceiver.remote_name
remote_hash = REMOTE_HASHES.get(remote_name)
if remote_hash is None:
remote_hash = local_hash
self.remote_protocol = self.local_protocol
request_datum = {}
request_datum["clientHash"] = local_hash
request_datum["serverHash"] = remote_hash
if self.send_protocol:
request_datum["clientProtocol"] = str(self.local_protocol)
HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)
def write_call_request(self, message_name, request_datum, encoder):
"""
The format of a call request is:
* request metadata, a map with values of type bytes
* the message name, an Avro string, followed by
* the message parameters. Parameters are serialized according to
the message's request declaration.
"""
# request metadata (not yet implemented)
request_metadata = {}
META_WRITER.write(request_metadata, encoder)
# message name
message = self.local_protocol.messages.get(message_name)
if message is None:
raise avro.errors.AvroException(f"Unknown message: {message_name}")
encoder.write_utf8(message.name)
# message parameters
self.write_request(message.request, request_datum, encoder)
def write_request(self, request_schema, request_datum, encoder):
datum_writer = avro.io.DatumWriter(request_schema)
datum_writer.write(request_datum, encoder)
def read_handshake_response(self, decoder):
handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
match = handshake_response.get("match")
if match == "BOTH":
self.send_protocol = False
return True
elif match == "CLIENT":
if self.send_protocol:
raise avro.errors.AvroException("Handshake failure.")
self.remote_protocol = avro.protocol.parse(handshake_response.get("serverProtocol"))
self.remote_hash = handshake_response.get("serverHash")
self.send_protocol = False
return True
elif match == "NONE":
if self.send_protocol:
raise avro.errors.AvroException("Handshake failure.")
self.remote_protocol = avro.protocol.parse(handshake_response.get("serverProtocol"))
self.remote_hash = handshake_response.get("serverHash")
self.send_protocol = True
return False
else:
raise avro.errors.AvroException(f"Unexpected match: {match}")
def read_call_response(self, message_name, decoder):
"""
The format of a call response is:
* response metadata, a map with values of type bytes
* a one-byte error flag boolean, followed by either:
o if the error flag is false,
the message response, serialized per the message's response schema.
o if the error flag is true,
the error, serialized per the message's error union schema.
"""
# response metadata
META_READER.read(decoder)
# remote response schema
remote_message_schema = self.remote_protocol.messages.get(message_name)
if remote_message_schema is None:
raise avro.errors.AvroException(f"Unknown remote message: {message_name}")
# local response schema
local_message_schema = self.local_protocol.messages.get(message_name)
if local_message_schema is None:
raise avro.errors.AvroException(f"Unknown local message: {message_name}")
# error flag
if not decoder.read_boolean():
writers_schema = remote_message_schema.response
readers_schema = local_message_schema.response
return self.read_response(writers_schema, readers_schema, decoder)
else:
writers_schema = remote_message_schema.errors
readers_schema = local_message_schema.errors
datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
raise avro.errors.AvroRemoteException(datum_reader.read(decoder))
def read_response(self, writers_schema, readers_schema, decoder):
datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
result = datum_reader.read(decoder)
return result
|
BaseRequestor
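The core of the client handshake is the decision table in read_handshake_response. The helper below is a hypothetical, standalone restatement of that table (no Avro I/O involved), returning whether the handshake is established and whether the next request must include clientProtocol.

def handshake_outcome(match, already_sent_protocol):
    """Return (established, send_protocol_next_time)."""
    if match == "BOTH":
        return True, False
    if match in ("CLIENT", "NONE"):
        if already_sent_protocol:
            # We already sent our protocol, so the server should know it.
            raise RuntimeError("Handshake failure.")
        # The server replied with its protocol; cache it, then either
        # proceed ("CLIENT") or resend with clientProtocol ("NONE").
        return match == "CLIENT", match == "NONE"
    raise ValueError(f"Unexpected match: {match}")


print(handshake_outcome("BOTH", False))   # (True, False)
print(handshake_outcome("NONE", False))   # (False, True)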
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-pairs-that-form-a-complete-day-i.py
|
{
"start": 48,
"end": 383
}
|
class ____(object):
def countCompleteDayPairs(self, hours):
"""
:type hours: List[int]
:rtype: int
"""
result = 0
cnt = [0]*24
for x in hours:
result += cnt[-x%24]
cnt[x%24] += 1
return result
# Time: O(n^2)
# Space: O(1)
# brute force
|
Solution
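The counting trick pairs each hour value x with previously seen values whose remainder is (-x) % 24. A quick check with a tiny input, assuming the masked class is instantiated under its label Solution; only (12, 12) and (24, 24) sum to a multiple of 24.

print(Solution().countCompleteDayPairs([12, 12, 30, 24, 24]))  # 2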
|
python
|
spyder-ide__spyder
|
spyder/plugins/editor/widgets/codeeditor/lsp_mixin.py
|
{
"start": 2085,
"end": 51221
}
|
class ____:
# -- LSP constants
    # Timeouts (in milliseconds) to synchronize symbols and folding after
# linting results arrive, according to the number of lines in the file.
SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS = {
# Lines: Timeout
500: 600,
1500: 800,
2500: 1000,
6500: 1500,
}
# Timeout (in milliseconds) to send pending requests to LSP server
LSP_REQUESTS_SHORT_DELAY = 50
LSP_REQUESTS_LONG_DELAY = 300
# -- LSP signals
#: Signal emitted when an LSP request is sent to the LSP manager
sig_perform_completion_request = Signal(str, str, dict)
#: Signal emitted when a response is received from the completion plugin
# For now it's only used on tests, but it could be used to track
# and profile completion diagnostics.
completions_response_signal = Signal(str, object)
#: Signal to display object information on the Help plugin
sig_display_object_info = Signal(str, bool)
#: Signal only used for tests
# TODO: Remove it!
sig_signature_invoked = Signal(dict)
#: Signal emitted when processing code analysis warnings is finished
sig_process_code_analysis = Signal()
#: Signal emitted to tell cloned editors that they need to update their
# code folding.
sig_update_code_folding = Signal(tuple)
# Used to start the status spinner in the editor
sig_start_operation_in_progress = Signal()
# Used to start the status spinner in the editor
sig_stop_operation_in_progress = Signal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Request symbols and folding after a timeout.
# See: process_diagnostics
# Connecting the timeout signal is performed in document_did_open()
self._timer_sync_symbols_and_folding = QTimer(self)
self._timer_sync_symbols_and_folding.setSingleShot(True)
self.blockCountChanged.connect(
self.set_sync_symbols_and_folding_timeout)
# LSP requests handling
# self.textChanged.connect(self._schedule_document_did_change)
self._pending_server_requests = []
self._server_requests_timer = QTimer(self)
self._server_requests_timer.setSingleShot(True)
self._server_requests_timer.setInterval(self.LSP_REQUESTS_SHORT_DELAY)
self._server_requests_timer.timeout.connect(
self._process_server_requests)
# Code Folding
self.code_folding = True
self.update_folding_thread = QThread(None)
self.update_folding_thread.finished.connect(
self._finish_update_folding)
# Autoformat on save
self.format_on_save = False
self.format_eventloop = QEventLoop(None)
self.format_timer = QTimer(self)
self.__cursor_position_before_format = 0
# Outline explorer
self.oe_proxy = None
# Diagnostics
self.update_diagnostics_thread = QThread(None)
self.update_diagnostics_thread.run = self.set_errors
self.update_diagnostics_thread.finished.connect(
self.finish_code_analysis)
self._diagnostics = []
# Text diffs across versions
self.differ = diff_match_patch()
self.previous_text = ''
self.patch = []
self.leading_whitespaces = {}
# Other attributes
self.filename = None
self.completions_available = False
self.text_version = 0
self.save_include_text = True
self.open_close_notifications = True
self.sync_mode = TextDocumentSyncKind.FULL
self.will_save_notify = False
self.will_save_until_notify = False
self.enable_hover = False
self.auto_completion_characters = []
self.resolve_completions_enabled = False
self.signature_completion_characters = []
self.go_to_definition_enabled = False
self.find_references_enabled = False
self.highlight_enabled = False
self.formatting_enabled = False
self.range_formatting_enabled = False
self.document_symbols_enabled = False
self.formatting_characters = []
self.completion_args = None
self.folding_supported = False
self._folding_info = None
self.is_cloned = False
self.operation_in_progress = False
self.formatting_in_progress = False
self.symbols_in_sync = False
self.folding_in_sync = False
self.pyflakes_linting_enabled = True
# ---- Helper private methods
# -------------------------------------------------------------------------
def _process_server_requests(self):
"""Process server requests."""
# Check if the document needs to be updated
if self._document_server_needs_update:
self.document_did_change()
self.do_automatic_completions()
self._document_server_needs_update = False
# Send pending requests
for method, params, requires_response in self._pending_server_requests:
self.emit_request(method, params, requires_response)
# Clear pending requests
self._pending_server_requests = []
# ---- Basic methods
# -------------------------------------------------------------------------
@Slot(str, dict)
def handle_response(self, method, params):
if method in self.handler_registry:
handler_name = self.handler_registry[method]
handler = getattr(self, handler_name)
handler(params)
# This signal is only used on tests.
# It could be used to track and profile LSP diagnostics.
self.completions_response_signal.emit(method, params)
def emit_request(self, method, params, requires_response):
"""Send request to LSP manager."""
params["requires_response"] = requires_response
params["response_instance"] = self
self.sig_perform_completion_request.emit(
self.language.lower(), method, params
)
def manage_lsp_handle_errors(self, message):
"""
Actions to take when we get errors while handling LSP responses.
"""
# Raise exception so that handle response errors can be reported to
# Github
raise LSPHandleError(message)
# ---- Configuration and start/stop
# -------------------------------------------------------------------------
def start_completion_services(self):
"""Start completion services for this instance."""
self.completions_available = True
if self.is_cloned:
additional_msg = "cloned editor"
else:
additional_msg = ""
self.document_did_open()
logger.debug(
"Completion services available for {0}: {1}".format(
additional_msg, self.filename
)
)
def register_completion_capabilities(self, capabilities):
"""
Register completion server capabilities.
Parameters
----------
capabilities: dict
Capabilities supported by a language server.
"""
sync_options = capabilities["textDocumentSync"]
completion_options = capabilities["completionProvider"]
signature_options = capabilities["signatureHelpProvider"]
range_formatting_options = capabilities[
"documentOnTypeFormattingProvider"
]
self.open_close_notifications = sync_options.get("openClose", False)
self.sync_mode = sync_options.get("change", TextDocumentSyncKind.NONE)
self.will_save_notify = sync_options.get("willSave", False)
self.will_save_until_notify = sync_options.get(
"willSaveWaitUntil", False
)
self.save_include_text = sync_options["save"]["includeText"]
self.enable_hover = capabilities["hoverProvider"]
self.folding_supported = capabilities.get(
"foldingRangeProvider", False
)
self.auto_completion_characters = completion_options[
"triggerCharacters"
]
self.resolve_completions_enabled = completion_options.get(
"resolveProvider", False
)
self.signature_completion_characters = signature_options[
"triggerCharacters"
] + [
"="
] # FIXME:
self.go_to_definition_enabled = capabilities["definitionProvider"]
self.find_references_enabled = capabilities["referencesProvider"]
self.highlight_enabled = capabilities["documentHighlightProvider"]
self.formatting_enabled = capabilities["documentFormattingProvider"]
self.range_formatting_enabled = capabilities[
"documentRangeFormattingProvider"
]
self.document_symbols_enabled = capabilities["documentSymbolProvider"]
self.formatting_characters.append(
range_formatting_options["firstTriggerCharacter"]
)
self.formatting_characters += range_formatting_options.get(
"moreTriggerCharacter", []
)
if self.formatting_enabled:
self.format_action.setEnabled(True)
self.sig_refresh_formatting.emit(True)
self.completions_available = True
def stop_completion_services(self):
logger.debug("Stopping completion services for %s" % self.filename)
self.completions_available = False
@request(
method=CompletionRequestTypes.DOCUMENT_DID_OPEN,
requires_response=False,
)
def document_did_open(self):
"""Send textDocument/didOpen request to the server."""
# We need to be sure that this signal is disconnected before trying to
# connect it below.
# Note: It can already be connected when the user requires a server
# restart or when the server failed to start.
# Fixes spyder-ide/spyder#20679
try:
self._timer_sync_symbols_and_folding.timeout.disconnect()
except (TypeError, RuntimeError):
pass
# The connect is performed here instead of in __init__() because
# notify_close() may have been called (which disconnects the signal).
# Qt.UniqueConnection is used to avoid duplicate signal-slot
# connections (just in case).
#
# Note: PyQt5 throws if the signal is not unique (= already connected).
# It is an error if this happens because as per LSP specification
# `didOpen` “must not be sent more than once without a corresponding
# close notification send before”.
self._timer_sync_symbols_and_folding.timeout.connect(
self.sync_symbols_and_folding, Qt.UniqueConnection
)
cursor = self.textCursor()
text = self.get_text_with_eol()
if self.is_ipython():
# Send valid python text to LSP as it doesn't support IPython
text = self.ipython_to_python(text)
params = {
"file": self.filename,
"language": self.language,
"version": self.text_version,
"text": text,
"codeeditor": self,
"offset": cursor.position(),
"selection_start": cursor.selectionStart(),
"selection_end": cursor.selectionEnd(),
}
return params
# ---- Symbols
# -------------------------------------------------------------------------
@schedule_request(method=CompletionRequestTypes.DOCUMENT_SYMBOL)
def request_symbols(self):
"""Request document symbols."""
if not self.document_symbols_enabled:
return
if self.oe_proxy is not None:
self.oe_proxy.emit_request_in_progress()
params = {"file": self.filename}
return params
@handles(CompletionRequestTypes.DOCUMENT_SYMBOL)
def process_symbols(self, params):
"""Handle symbols response."""
try:
symbols = params["params"]
self._update_classfuncdropdown(symbols)
if self.oe_proxy is not None:
self.oe_proxy.update_outline_info(symbols)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing symbols")
finally:
self.symbols_in_sync = True
def _update_classfuncdropdown(self, symbols):
"""Update class/function dropdown."""
symbols = [] if symbols is None else symbols
if self.classfuncdropdown.isVisible():
self.classfuncdropdown.update_data(symbols)
else:
self.classfuncdropdown.set_data(symbols)
# ---- Linting and didChange
# -------------------------------------------------------------------------
def _schedule_document_did_change(self):
"""Schedule a document update."""
self._document_server_needs_update = True
self._server_requests_timer.setInterval(self.LSP_REQUESTS_LONG_DELAY)
self._server_requests_timer.start()
@request(
method=CompletionRequestTypes.DOCUMENT_DID_CHANGE,
requires_response=False,
)
def document_did_change(self):
"""Send textDocument/didChange request to the server."""
# Cancel formatting
self.formatting_in_progress = False
self.symbols_in_sync = False
self.folding_in_sync = False
# Don't send request for cloned editors because it's not necessary.
# The original file should send the request.
if self.is_cloned:
return
# Get text
text = self.get_text_with_eol()
if self.is_ipython():
# Send valid python text to LSP
text = self.ipython_to_python(text)
self.text_version += 1
self.patch = self.differ.patch_make(self.previous_text, text)
self.previous_text = text
cursor = self.textCursor()
params = {
"file": self.filename,
"version": self.text_version,
"text": text,
"diff": self.patch,
"offset": cursor.position(),
"selection_start": cursor.selectionStart(),
"selection_end": cursor.selectionEnd(),
}
return params
@handles(CompletionRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS)
def process_diagnostics(self, params):
"""Handle linting response."""
# The LSP spec doesn't require that folding and symbols
# are treated in the same way as linting, i.e. to be
# recomputed on didChange, didOpen and didSave. However,
# we think that's necessary to maintain accurate folding
# and symbols all the time. Therefore, we decided to call
# those requests here, but after a certain timeout to
# avoid performance issues.
self._timer_sync_symbols_and_folding.start()
# Process results (runs in a thread)
self.process_code_analysis(params["params"])
def set_sync_symbols_and_folding_timeout(self):
"""
Set timeout to sync symbols and folding according to the file
size.
"""
current_lines = self.get_line_count()
timeout = None
for lines in self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS.keys():
if (current_lines // lines) == 0:
timeout = self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS[lines]
break
if not timeout:
timeouts = self.SYNC_SYMBOLS_AND_FOLDING_TIMEOUTS.values()
timeout = list(timeouts)[-1]
# Add a random number so that several files are not synced at the same
# time.
self._timer_sync_symbols_and_folding.setInterval(
timeout + random.randint(-100, 100)
)
def sync_symbols_and_folding(self):
"""
Synchronize symbols and folding after linting results arrive.
"""
if not self.folding_in_sync:
self.request_folding()
if not self.symbols_in_sync:
self.request_symbols()
def process_code_analysis(self, diagnostics):
"""Process code analysis results in a thread."""
self.cleanup_code_analysis()
self._diagnostics = diagnostics
# Process diagnostics in a thread to improve performance.
self.update_diagnostics_thread.start()
def cleanup_code_analysis(self):
"""Remove all code analysis markers"""
self.setUpdatesEnabled(False)
self.clear_extra_selections("code_analysis_highlight")
self.clear_extra_selections("code_analysis_underline")
for data in self.blockuserdata_list():
data.code_analysis = []
self.setUpdatesEnabled(True)
# When the new code analysis results are empty, it is necessary
# to update manually the scrollflag and linenumber areas (otherwise,
# the old flags will still be displayed):
self.sig_flags_changed.emit()
self.linenumberarea.update()
def set_errors(self):
"""Set errors and warnings in the line number area."""
try:
self._process_code_analysis(underline=False)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing linting")
def underline_errors(self):
"""Underline errors and warnings."""
try:
# Clear current selections before painting the new ones.
# This prevents accumulating them when moving around in or editing
# the file, which generated a memory leakage and sluggishness
# after some time.
self.clear_extra_selections("code_analysis_underline")
self._process_code_analysis(underline=True)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing linting")
def finish_code_analysis(self):
"""Finish processing code analysis results."""
self.linenumberarea.update()
if self.underline_errors_enabled:
self.underline_errors()
self.sig_process_code_analysis.emit()
self.sig_flags_changed.emit()
def errors_present(self):
"""
Return True if there are errors or warnings present in the file.
"""
return bool(len(self._diagnostics))
def _process_code_analysis(self, underline):
"""
Process all code analysis results.
Parameters
----------
underline: bool
Determines if errors and warnings are going to be set in
the line number area or underlined. It's better to separate
            these two processes for performance reasons. That's because
setting errors can be done in a thread whereas underlining
them can't.
"""
document = self.document()
if underline:
first_block, last_block = self.get_buffer_block_numbers()
for diagnostic in self._diagnostics:
if self.is_ipython() and (
diagnostic["message"] == "undefined name 'get_ipython'"
):
# get_ipython is defined in IPython files
continue
source = diagnostic.get("source", "")
msg_range = diagnostic["range"]
start = msg_range["start"]
end = msg_range["end"]
code = diagnostic.get("code", "E")
message = diagnostic["message"]
severity = diagnostic.get("severity", DiagnosticSeverity.ERROR)
block = document.findBlockByNumber(start["line"])
text = block.text()
# Skip messages according to certain criteria.
# This one works for any programming language
if "analysis:ignore" in text:
continue
# This only works for Python and it's only needed with pyflakes.
if self.language == "Python" and self.pyflakes_linting_enabled:
if NOQA_INLINE_REGEXP.search(text) is not None:
continue
data = block.userData()
if not data:
data = BlockUserData(self)
if underline:
block_nb = block.blockNumber()
if first_block <= block_nb <= last_block:
error = severity == DiagnosticSeverity.ERROR
color = self.error_color if error else self.warning_color
color = QColor(color)
color.setAlpha(255)
block.color = color
data.selection_start = start
data.selection_end = end
self.highlight_selection(
"code_analysis_underline",
data._selection(),
underline_color=block.color,
)
else:
# Don't append messages to data for cloned editors to avoid
# showing them twice or more times on hover.
# Fixes spyder-ide/spyder#15618
if not self.is_cloned:
data.code_analysis.append(
(source, code, severity, message)
)
block.setUserData(data)
# ---- Completion
# -------------------------------------------------------------------------
@schedule_request(method=CompletionRequestTypes.DOCUMENT_COMPLETION)
def do_completion(self, automatic=False):
"""Trigger completion."""
cursor = self.textCursor()
current_word = self.get_current_word(
completion=True, valid_python_variable=False
)
params = {
"file": self.filename,
"line": cursor.blockNumber(),
"column": cursor.columnNumber(),
"offset": cursor.position(),
"selection_start": cursor.selectionStart(),
"selection_end": cursor.selectionEnd(),
"current_word": current_word,
}
self.completion_args = (self.textCursor().position(), automatic)
return params
@handles(CompletionRequestTypes.DOCUMENT_COMPLETION)
def process_completion(self, params):
"""Handle completion response."""
args = self.completion_args
if args is None:
# This should not happen
return
self.completion_args = None
position, automatic = args
start_cursor = self.textCursor()
start_cursor.movePosition(QTextCursor.StartOfBlock)
line_text = self.get_text(start_cursor.position(), "eol")
leading_whitespace = self.compute_whitespace(line_text)
indentation_whitespace = " " * leading_whitespace
eol_char = self.get_line_separator()
try:
completions = params["params"]
completions = (
[]
if completions is None
else [
completion
for completion in completions
if completion.get("insertText")
or completion.get("textEdit", {}).get("newText")
]
)
prefix = self.get_current_word(
completion=True, valid_python_variable=False
)
if (
len(completions) == 1
and completions[0].get("insertText") == prefix
and not completions[0].get("textEdit", {}).get("newText")
):
completions.pop()
replace_end = self.textCursor().position()
under_cursor = self.get_current_word_and_position(completion=True)
if under_cursor:
word, replace_start = under_cursor
else:
word = ""
replace_start = replace_end
first_letter = ""
if len(word) > 0:
first_letter = word[0]
def sort_key(completion):
if "textEdit" in completion:
text_insertion = completion["textEdit"]["newText"]
else:
text_insertion = completion["insertText"]
first_insert_letter = text_insertion[0]
case_mismatch = (
first_letter.isupper() and first_insert_letter.islower()
) or (first_letter.islower() and first_insert_letter.isupper())
# False < True, so case matches go first
return (case_mismatch, completion["sortText"])
completion_list = sorted(completions, key=sort_key)
# Allow for textEdit completions to be filtered by Spyder
# if on-the-fly completions are disabled, only if the
# textEdit range matches the word under the cursor.
for completion in completion_list:
if "textEdit" in completion:
c_replace_start = completion["textEdit"]["range"]["start"]
c_replace_end = completion["textEdit"]["range"]["end"]
if (
c_replace_start == replace_start
and c_replace_end == replace_end
):
insert_text = completion["textEdit"]["newText"]
completion["filterText"] = insert_text
completion["insertText"] = insert_text
del completion["textEdit"]
if "insertText" in completion:
insert_text = completion["insertText"]
insert_text_lines = insert_text.splitlines()
reindented_text = [insert_text_lines[0]]
for insert_line in insert_text_lines[1:]:
insert_line = indentation_whitespace + insert_line
reindented_text.append(insert_line)
reindented_text = eol_char.join(reindented_text)
completion["insertText"] = reindented_text
self.completion_widget.show_list(
completion_list, position, automatic
)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing completions")
@schedule_request(method=CompletionRequestTypes.COMPLETION_RESOLVE)
def resolve_completion_item(self, item):
return {"file": self.filename, "completion_item": item}
@handles(CompletionRequestTypes.COMPLETION_RESOLVE)
def handle_completion_item_resolution(self, response):
try:
response = response["params"]
if not response:
return
self.completion_widget.augment_completion_info(response)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors(
"Error when handling completion item resolution"
)
# ---- Signature Hints
# -------------------------------------------------------------------------
@schedule_request(method=CompletionRequestTypes.DOCUMENT_SIGNATURE)
def request_signature(self):
"""Ask for signature."""
line, column = self.get_cursor_line_column()
offset = self.get_position("cursor")
params = {
"file": self.filename,
"line": line,
"column": column,
"offset": offset,
}
return params
@handles(CompletionRequestTypes.DOCUMENT_SIGNATURE)
def process_signatures(self, params):
"""Handle signature response."""
try:
signature_params = params["params"]
if signature_params is not None:
self.sig_signature_invoked.emit(signature_params)
signature_data = signature_params["signatures"]
documentation = signature_data["documentation"]
if isinstance(documentation, dict):
documentation = documentation["value"]
# The language server returns encoded text with
# spaces defined as `\xa0`
documentation = documentation.replace("\xa0", " ")
# Enable parsing signature's active parameter if available
# while allowing to show calltip for signatures without
# parameters.
# See spyder-ide/spyder#21660
parameter = None
if "activeParameter" in signature_params:
parameter_idx = signature_params["activeParameter"]
parameters = signature_data["parameters"]
if len(parameters) > 0 and parameter_idx < len(parameters):
parameter_data = parameters[parameter_idx]
parameter = parameter_data["label"]
signature = signature_data["label"]
# This method is part of spyder/widgets/mixins
self.show_calltip(
signature=signature,
parameter=parameter,
language=self.language,
documentation=documentation,
)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing signature")
# ---- Hover/Cursor
# -------------------------------------------------------------------------
@schedule_request(method=CompletionRequestTypes.DOCUMENT_CURSOR_EVENT)
def request_cursor_event(self):
text = self.get_text_with_eol()
cursor = self.textCursor()
params = {
"file": self.filename,
"version": self.text_version,
"text": text,
"offset": cursor.position(),
"selection_start": cursor.selectionStart(),
"selection_end": cursor.selectionEnd(),
}
return params
@schedule_request(method=CompletionRequestTypes.DOCUMENT_HOVER)
def request_hover(self, line, col, offset, show_hint=True, clicked=True):
"""Request hover information."""
params = {
"file": self.filename,
"line": line,
"column": col,
"offset": offset,
}
self._show_hint = show_hint
self._request_hover_clicked = clicked
return params
@handles(CompletionRequestTypes.DOCUMENT_HOVER)
def handle_hover_response(self, contents):
"""Handle hover response."""
if running_under_pytest():
from unittest.mock import Mock
# On some tests this is returning a Mock
if isinstance(contents, Mock):
return
try:
content = contents["params"]
# - Don't display hover if there's no content to display.
# - Prevent spurious errors when a client returns a list.
if not content or isinstance(content, list):
return
self.sig_display_object_info.emit(
content, self._request_hover_clicked
)
if content is not None and self._show_hint and self._last_point:
# This is located in spyder/widgets/mixins.py
word = self._last_hover_word
# Replace non-breaking spaces for real ones.
content = content.replace("\xa0", " ")
# Show hover
self.show_hint(
content,
inspect_word=word,
at_point=self._last_point,
vertical_position='top',
as_hover=True,
)
self._last_point = None
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing hover")
# ---- Go To Definition
# -------------------------------------------------------------------------
@Slot()
@schedule_request(method=CompletionRequestTypes.DOCUMENT_DEFINITION)
def go_to_definition_from_cursor(self, cursor=None):
"""Go to definition from cursor instance (QTextCursor)."""
if not self.go_to_definition_enabled or self.in_comment_or_string():
return
if cursor is None:
cursor = self.textCursor()
text = str(cursor.selectedText())
if len(text) == 0:
cursor.select(QTextCursor.WordUnderCursor)
text = str(cursor.selectedText())
if text is not None:
line, column = self.get_cursor_line_column()
params = {"file": self.filename, "line": line, "column": column}
return params
@handles(CompletionRequestTypes.DOCUMENT_DEFINITION)
def handle_go_to_definition(self, position):
"""Handle go to definition response."""
try:
position = position["params"]
if position is not None:
def_range = position["range"]
start = def_range["start"]
if self.filename == position["file"]:
self.go_to_line(
start["line"] + 1, start["character"], None, word=None
)
else:
self.go_to_definition.emit(
position["file"], start["line"] + 1, start["character"]
)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors(
"Error when processing go to definition"
)
# ---- Document/Selection formatting
# -------------------------------------------------------------------------
def format_document_or_range(self):
"""Format current document or selected text."""
if self.has_selected_text() and self.range_formatting_enabled:
self.format_document_range()
else:
self.format_document()
@schedule_request(method=CompletionRequestTypes.DOCUMENT_FORMATTING)
def format_document(self):
"""Format current document."""
self.__cursor_position_before_format = self.textCursor().position()
if not self.formatting_enabled:
return
if self.formatting_in_progress:
# Already waiting for a formatting
return
using_spaces = self.indent_chars != "\t"
tab_size = (
len(self.indent_chars)
if using_spaces
else self.tab_stop_width_spaces
)
params = {
"file": self.filename,
"options": {
"tab_size": tab_size,
"insert_spaces": using_spaces,
"trim_trailing_whitespace": self.remove_trailing_spaces,
"insert_final_new_line": self.add_newline,
"trim_final_new_lines": self.remove_trailing_newlines,
},
}
        # Set the document to read-only and update its corresponding
        # tab name to display the filename in parentheses
self.setReadOnly(True)
self.document().setModified(True)
self.sig_start_operation_in_progress.emit()
self.operation_in_progress = True
self.formatting_in_progress = True
return params
@schedule_request(method=CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def format_document_range(self):
"""Format selected text."""
self.__cursor_position_before_format = self.textCursor().position()
if not self.range_formatting_enabled or not self.has_selected_text():
return
if self.formatting_in_progress:
# Already waiting for a formatting
return
start, end = self.get_selection_start_end()
start_line, start_col = start
end_line, end_col = end
# Remove empty trailing newline from multiline selection
if end_line > start_line and end_col == 0:
end_line -= 1
fmt_range = {
"start": {"line": start_line, "character": start_col},
"end": {"line": end_line, "character": end_col},
}
using_spaces = self.indent_chars != "\t"
tab_size = (
len(self.indent_chars)
if using_spaces
else self.tab_stop_width_spaces
)
params = {
"file": self.filename,
"range": fmt_range,
"options": {
"tab_size": tab_size,
"insert_spaces": using_spaces,
"trim_trailing_whitespace": self.remove_trailing_spaces,
"insert_final_new_line": self.add_newline,
"trim_final_new_lines": self.remove_trailing_newlines,
},
}
        # Set the document to read-only and update its corresponding
        # tab name to display the filename in parentheses
self.setReadOnly(True)
self.document().setModified(True)
self.sig_start_operation_in_progress.emit()
self.operation_in_progress = True
self.formatting_in_progress = True
return params
@handles(CompletionRequestTypes.DOCUMENT_FORMATTING)
def handle_document_formatting(self, edits):
"""Handle document formatting response."""
try:
if self.formatting_in_progress:
self._apply_document_edits(edits)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors(
"Error when processing document formatting"
)
finally:
            # Remove the read-only parentheses and highlight the document modification
self.setReadOnly(False)
self.document().setModified(False)
self.document().setModified(True)
self.sig_stop_operation_in_progress.emit()
self.operation_in_progress = False
self.formatting_in_progress = False
@handles(CompletionRequestTypes.DOCUMENT_RANGE_FORMATTING)
def handle_document_range_formatting(self, edits):
"""Handle document range formatting response."""
try:
if self.formatting_in_progress:
self._apply_document_edits(edits)
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors(
"Error when processing document selection formatting"
)
finally:
            # Remove the read-only parentheses and highlight the document modification
self.setReadOnly(False)
self.document().setModified(False)
self.document().setModified(True)
self.sig_stop_operation_in_progress.emit()
self.operation_in_progress = False
self.formatting_in_progress = False
def _apply_document_edits(self, edits):
"""Apply a set of atomic document edits to the current editor text."""
edits = edits["params"]
if edits is None:
return
# We need to use here toPlainText (which returns text with '\n'
# for eols) and not get_text_with_eol, so that applying the
# text edits that come from the LSP in the way implemented below
# works as expected. That's because we assume eol chars of length
# one in our algorithm.
# Fixes spyder-ide/spyder#16180
text = self.toPlainText()
text_tokens = list(text)
merged_text = None
for edit in edits:
edit_range = edit["range"]
repl_text = edit["newText"]
start, end = edit_range["start"], edit_range["end"]
start_line, start_col = start["line"], start["character"]
end_line, end_col = end["line"], end["character"]
start_pos = self.get_position_line_number(start_line, start_col)
end_pos = self.get_position_line_number(end_line, end_col)
# Replace repl_text eols for '\n' to match the ones used in
# `text`.
repl_eol = sourcecode.get_eol_chars(repl_text)
if repl_eol is not None and repl_eol != "\n":
repl_text = repl_text.replace(repl_eol, "\n")
text_tokens = list(text_tokens)
this_edit = list(repl_text)
if end_line == self.document().blockCount():
end_pos = self.get_position("eof")
end_pos += 1
if (
end_pos == len(text_tokens)
and text_tokens[end_pos - 1] == "\n"
):
end_pos += 1
this_edition = (
text_tokens[: max(start_pos - 1, 0)]
+ this_edit
+ text_tokens[end_pos - 1:]
)
text_edit = "".join(this_edition)
if merged_text is None:
merged_text = text_edit
else:
merged_text = merge(text_edit, merged_text, text)
if merged_text is not None:
# Restore eol chars after applying edits.
merged_text = merged_text.replace("\n", self.get_line_separator())
cursor = self.textCursor()
# Save breakpoints here to restore them after inserting merged_text
# Fixes spyder-ide/spyder#16549
if getattr(self, "breakpoints_manager", False):
breakpoints = self.breakpoints_manager.get_breakpoints()
else:
breakpoints = None
# Begin text insertion
cursor.beginEditBlock()
# Select current text
cursor.movePosition(QTextCursor.Start)
cursor.movePosition(QTextCursor.End, QTextCursor.KeepAnchor)
# Insert formatted text in place of the previous one
cursor.insertText(merged_text)
# End text insertion
cursor.endEditBlock()
# Restore breakpoints
if breakpoints:
self.breakpoints_manager.set_breakpoints(breakpoints)
# Restore previous cursor position and center it.
# Fixes spyder-ide/spyder#19958
# Use QTextCursor.(position | setPosition) to restore the cursor
# position to be able to do it with any wrap mode.
# Fixes spyder-ide/spyder#20852
if self.__cursor_position_before_format:
self.moveCursor(QTextCursor.Start)
cursor = self.textCursor()
cursor.setPosition(self.__cursor_position_before_format)
self.setTextCursor(cursor)
self.centerCursor()
# ---- Code folding
# -------------------------------------------------------------------------
def compute_whitespace(self, line):
tab_size = self.tab_stop_width_spaces
whitespace_regex = re.compile(r"(\s+).*")
whitespace_match = whitespace_regex.match(line)
total_whitespace = 0
if whitespace_match is not None:
whitespace_chars = whitespace_match.group(1)
whitespace_chars = whitespace_chars.replace("\t", tab_size * " ")
total_whitespace = len(whitespace_chars)
return total_whitespace
def update_whitespace_count(self, line, column):
self.leading_whitespaces = {}
lines = str(self.toPlainText()).splitlines()
for i, text in enumerate(lines):
total_whitespace = self.compute_whitespace(text)
self.leading_whitespaces[i] = total_whitespace
def cleanup_folding(self):
"""Cleanup folding pane."""
self.folding_panel.folding_regions = {}
@schedule_request(method=CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def request_folding(self):
"""Request folding."""
if not self.folding_supported or not self.code_folding:
return
params = {"file": self.filename}
return params
@handles(CompletionRequestTypes.DOCUMENT_FOLDING_RANGE)
def handle_folding_range(self, response):
"""Handle folding response."""
ranges = response["params"]
if ranges is None:
return
# Update folding info in a thread
self.update_folding_thread.run = functools.partial(
self._update_folding_info, ranges)
self.update_folding_thread.start()
def _update_folding_info(self, ranges):
"""Update folding information with new data from the LSP."""
try:
lines = self.toPlainText().splitlines()
current_tree, root = merge_folding(
ranges, lines, self.get_line_separator(),
self.folding_panel.current_tree, self.folding_panel.root
)
self._folding_info = (current_tree, root, *collect_folding_regions(root))
except RuntimeError:
# This is triggered when a codeeditor instance was removed
# before the response can be processed.
return
except Exception:
self.manage_lsp_handle_errors("Error when processing folding")
def highlight_folded_regions(self):
self.folding_panel.highlight_folded_regions()
def _finish_update_folding(self):
"""Finish updating code folding."""
self.sig_update_code_folding.emit(self._folding_info)
self.apply_code_folding(self._folding_info)
def apply_code_folding(self, folding_info):
"""Apply code folding info."""
# Check if we actually have folding info to update before trying to do
# it.
# Fixes spyder-ide/spyder#19514
if folding_info is not None:
self.folding_panel.update_folding(folding_info)
self.highlight_folded_regions()
# Update indent guides, which depend on folding
if self.indent_guides._enabled:
line, column = self.get_cursor_line_column()
self.update_whitespace_count(line, column)
# This is necessary to repaint guides in cloned editors and the
# original one after making edits in any one of them.
# See spyder-ide/spyder#23297
self.update()
self.folding_in_sync = True
# ---- Save/close file
# -------------------------------------------------------------------------
@schedule_request(method=CompletionRequestTypes.DOCUMENT_DID_SAVE,
requires_response=False)
def notify_save(self):
"""Send save request."""
params = {'file': self.filename}
if self.save_include_text:
params['text'] = self.get_text_with_eol()
return params
@request(method=CompletionRequestTypes.DOCUMENT_DID_CLOSE,
requires_response=False)
def notify_close(self):
"""Send close request."""
self._pending_server_requests = []
# This is necessary to prevent an error when closing the file.
# Fixes spyder-ide/spyder#20071
try:
self._server_requests_timer.stop()
except RuntimeError:
pass
if self.completions_available:
# This is necessary to prevent an error in our tests.
try:
# Servers can send an empty publishDiagnostics reply to clear
# diagnostics after they receive a didClose request. Since
# we also ask for symbols and folding when processing
# diagnostics, we need to prevent it from happening
# before sending that request here.
self._timer_sync_symbols_and_folding.timeout.disconnect()
except (TypeError, RuntimeError):
pass
params = {
'file': self.filename,
'codeeditor': self
}
return params
|
LSPMixin
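One of the less obvious pieces above is how set_sync_symbols_and_folding_timeout picks a timeout bucket by file size: current_lines // lines == 0 is just current_lines < lines, and very large files fall through to the largest timeout. A standalone sketch of that lookup (illustrative only), using the same bucket table:

SYNC_TIMEOUTS = {500: 600, 1500: 800, 2500: 1000, 6500: 1500}


def pick_timeout(current_lines):
    for lines, timeout in SYNC_TIMEOUTS.items():
        if current_lines // lines == 0:   # i.e. current_lines < lines
            return timeout
    return list(SYNC_TIMEOUTS.values())[-1]  # files above 6500 lines


print(pick_timeout(200))     # 600
print(pick_timeout(3000))    # 1500
print(pick_timeout(10_000))  # 1500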
|
python
|
readthedocs__readthedocs.org
|
readthedocs/audit/apps.py
|
{
"start": 113,
"end": 245
}
|
class ____(AppConfig):
name = "readthedocs.audit"
def ready(self):
import readthedocs.audit.signals # noqa
|
AuditConfig
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 089. 房屋偷盗/Solution.py
|
{
"start": 0,
"end": 241
}
|
class ____:
def rob(self, nums: List[int]) -> int:
n = len(nums)
f = [0] * (n + 1)
f[1] = nums[0]
for i in range(2, n + 1):
f[i] = max(f[i - 1], f[i - 2] + nums[i - 1])
return f[n]
|
Solution
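A worked run of the DP above, assuming the excerpt's elided from typing import List and the masked class name Solution: for nums = [2, 7, 9, 3, 1] the table f grows as [0, 2, 7, 11, 11, 12], i.e. rob the houses worth 2, 9 and 1.

print(Solution().rob([2, 7, 9, 3, 1]))  # 12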
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-well-performing-interval.py
|
{
"start": 29,
"end": 697
}
|
class ____(object):
def longestWPI(self, hours):
"""
:type hours: List[int]
:rtype: int
"""
result, accu = 0, 0
lookup = {}
for i, h in enumerate(hours):
accu = accu+1 if h > 8 else accu-1
if accu > 0:
result = i+1
elif accu-1 in lookup:
# lookup[accu-1] is the leftmost idx with smaller accu,
# because for i from 1 to some positive k,
                # lookup[accu-i] is a strictly increasing sequence
result = max(result, i-lookup[accu-1])
lookup.setdefault(accu, i)
return result
|
Solution
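Worked example of the prefix-sum trick above, assuming the masked class name Solution: for hours = [9, 9, 6, 0, 6, 6, 9] the per-day deltas are +1 +1 -1 -1 -1 -1 +1, and the longest well-performing interval is [9, 9, 6].

print(Solution().longestWPI([9, 9, 6, 0, 6, 6, 9]))  # 3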
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/session_manager.py
|
{
"start": 5532,
"end": 13272
}
|
class ____(Protocol):
"""SessionManagers are responsible for encapsulating all session lifecycle behavior
that the Streamlit Runtime may care about.
A SessionManager must define the following required methods:
- __init__
- connect_session
- close_session
- get_session_info
- list_sessions
SessionManager implementations may also choose to define the notions of active and
inactive sessions. The precise definitions of active/inactive are left to the
concrete implementation. SessionManagers that wish to differentiate between active
and inactive sessions should have the required methods listed above operate on *all*
sessions. Additionally, they should define the following methods for working with
active sessions:
- disconnect_session
- get_active_session_info
- is_active_session
- list_active_sessions
    When active session-related methods are left undefined, their default
    implementations delegate to the naturally corresponding required methods.
The Runtime, unless there's a good reason to do otherwise, should generally work
with the active-session versions of a SessionManager's methods. There isn't currently
a need for us to be able to operate on inactive sessions stored in SessionStorage
outside of the SessionManager itself. However, it's highly likely that we'll
eventually have to do so, which is why the abstractions allow for this now.
Notes
-----
Threading: All SessionManager methods are *not* threadsafe -- they must be called
from the runtime's eventloop thread.
"""
@abstractmethod
def __init__(
self,
session_storage: SessionStorage,
uploaded_file_manager: UploadedFileManager,
script_cache: ScriptCache,
message_enqueued_callback: Callable[[], None] | None,
) -> None:
"""Initialize a SessionManager with the given SessionStorage.
Parameters
----------
session_storage
The SessionStorage instance backing this SessionManager.
uploaded_file_manager
Used to manage files uploaded by users via the Streamlit web client.
script_cache
ScriptCache instance. Caches user script bytecode.
message_enqueued_callback
A callback invoked after a message is enqueued to be sent to a web client.
"""
raise NotImplementedError
@abstractmethod
def connect_session(
self,
client: SessionClient,
script_data: ScriptData,
user_info: dict[str, str | bool | None],
existing_session_id: str | None = None,
session_id_override: str | None = None,
) -> str:
"""Create a new session or connect to an existing one.
Parameters
----------
client
A concrete SessionClient implementation for communicating with
the session's client.
script_data
Contains parameters related to running a script.
user_info
A dict that contains information about the session's user. For now,
it only (optionally) contains the user's email address.
{
"email": "example@example.com"
}
existing_session_id
The ID of an existing session to reconnect to. If one is not provided, a new
session is created. Note that whether a SessionManager supports reconnecting
to an existing session is left up to the concrete SessionManager
implementation. Those that do not support reconnection should simply ignore
this argument.
session_id_override
The ID to assign to a new session being created with this method. Setting
this can be useful when the service that a Streamlit Runtime is running in
wants to tie the lifecycle of a Streamlit session to some other session-like
object that it manages. Only one of existing_session_id and
session_id_override should be set.
Returns
-------
str
The session's unique string ID.
"""
raise NotImplementedError
@abstractmethod
def close_session(self, session_id: str) -> None:
"""Close and completely delete the session with the given id.
This function may be called multiple times for the same session,
which is not an error. (Subsequent calls just no-op.)
Parameters
----------
session_id
The session's unique ID.
"""
raise NotImplementedError
@abstractmethod
def get_session_info(self, session_id: str) -> SessionInfo | None:
"""Return the SessionInfo for the given id, or None if no such session
exists.
Parameters
----------
session_id
The session's unique ID.
Returns
-------
SessionInfo or None
"""
raise NotImplementedError
@abstractmethod
def list_sessions(self) -> list[SessionInfo]:
"""Return the SessionInfo for all sessions managed by this SessionManager.
Returns
-------
List[SessionInfo]
"""
raise NotImplementedError
def num_sessions(self) -> int:
"""Return the number of sessions tracked by this SessionManager.
Subclasses of SessionManager shouldn't provide their own implementation of this
method without a *very* good reason.
Returns
-------
int
"""
return len(self.list_sessions())
# NOTE: The following methods only need to be overwritten when a concrete
# SessionManager implementation has a notion of active vs inactive sessions.
# If left unimplemented in a subclass, the default implementations of these methods
# call corresponding SessionManager methods in a natural way.
def disconnect_session(self, session_id: str) -> None:
"""Disconnect the given session.
This method should be idempotent.
Parameters
----------
session_id
The session's unique ID.
"""
self.close_session(session_id)
def get_active_session_info(self, session_id: str) -> ActiveSessionInfo | None:
"""Return the ActiveSessionInfo for the given id, or None if either no such
session exists or the session is not active.
Parameters
----------
session_id
The active session's unique ID.
Returns
-------
ActiveSessionInfo or None
"""
session = self.get_session_info(session_id)
if session is None or not session.is_active():
return None
return session.to_active()
def is_active_session(self, session_id: str) -> bool:
"""Return True if the given session exists and is active, False otherwise.
Returns
-------
bool
"""
return self.get_active_session_info(session_id) is not None
def list_active_sessions(self) -> list[ActiveSessionInfo]:
"""Return the session info for all active sessions tracked by this SessionManager.
Returns
-------
List[ActiveSessionInfo]
"""
return [s.to_active() for s in self.list_sessions()]
def num_active_sessions(self) -> int:
"""Return the number of active sessions tracked by this SessionManager.
Subclasses of SessionManager shouldn't provide their own implementation of this
method without a *very* good reason.
Returns
-------
int
"""
return len(self.list_active_sessions())
|
SessionManager
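A compact sketch of the design the docstring describes, reduced to a toy protocol (all names below are illustrative, not Streamlit APIs): the optional methods carry default bodies that simply call the required ones, so an explicit subclass only has to implement the required surface.

from typing import Protocol


class Manager(Protocol):
    def close_session(self, session_id: str) -> None: ...  # required

    def disconnect_session(self, session_id: str) -> None:
        # Default for managers with no active/inactive distinction.
        self.close_session(session_id)


class DictManager(Manager):  # explicit subclass: inherits the default above
    def __init__(self) -> None:
        self.sessions: dict[str, object] = {}

    def close_session(self, session_id: str) -> None:
        self.sessions.pop(session_id, None)


m = DictManager()
m.sessions["abc"] = object()
m.disconnect_session("abc")  # falls back to close_session()
assert "abc" not in m.sessions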
|
python
|
fluentpython__example-code-2e
|
15-more-types/protocol/random/randompop_test.py
|
{
"start": 99,
"end": 612
}
|
class ____:
def __init__(self, items: Iterable) -> None:
self._items = list(items)
random.shuffle(self._items)
def pop_random(self) -> Any:
return self._items.pop()
def test_issubclass() -> None:
assert issubclass(SimplePopper, RandomPopper)
def test_isinstance() -> None:
popper: RandomPopper = SimplePopper([1])
if TYPE_CHECKING:
reveal_type(popper)
# Revealed type is 'randompop.RandomPopper'
assert isinstance(popper, RandomPopper)
|
SimplePopper
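The checks above only pass because `RandomPopper` (imported from a sibling module that is not part of this record) is a runtime-checkable protocol. A self-contained sketch of that arrangement, under that assumption:

import random
from typing import Any, Iterable, Protocol, runtime_checkable


@runtime_checkable
class RandomPopper(Protocol):
    def pop_random(self) -> Any: ...


class SimplePopper:
    def __init__(self, items: Iterable) -> None:
        self._items = list(items)
        random.shuffle(self._items)

    def pop_random(self) -> Any:
        return self._items.pop()


# Structural checks: no inheritance needed, only a matching method.
assert issubclass(SimplePopper, RandomPopper)
assert isinstance(SimplePopper([1, 2, 3]), RandomPopper)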
|
python
|
ray-project__ray
|
python/ray/tune/examples/mnist_ptl_mini.py
|
{
"start": 435,
"end": 1647
}
|
class ____(pl.LightningDataModule):
def __init__(self, batch_size: int, data_dir: str = PATH_DATASETS):
super().__init__()
self.data_dir = data_dir
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
)
self.batch_size = batch_size
self.dims = (1, 28, 28)
self.num_classes = 10
def prepare_data(self):
# download
with FileLock(os.path.expanduser("~/.data.lock")):
load_dataset("ylecun/mnist", cache_dir=self.data_dir)
def setup(self, stage=None):
dataset = load_dataset("ylecun/mnist", cache_dir=self.data_dir)
def transform_fn(sample):
return (self.transform(sample["image"]), sample["label"])
self.mnist_train = [transform_fn(sample) for sample in dataset["train"]]
self.mnist_val = [transform_fn(sample) for sample in dataset["test"]]
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=self.batch_size)
|
MNISTDataModule
|
python
|
coleifer__peewee
|
tests/db_tests.py
|
{
"start": 16922,
"end": 23878
}
|
class ____(ModelTestCase):
requires = [Category, User, UniqueModel, IndexedModel, Person]
def test_table_exists(self):
self.assertTrue(self.database.table_exists(User._meta.table_name))
self.assertFalse(self.database.table_exists('nuggies'))
self.assertTrue(self.database.table_exists(User))
class X(TestModel): pass
self.assertFalse(self.database.table_exists(X))
def test_get_tables(self):
tables = self.database.get_tables()
required = set(m._meta.table_name for m in self.requires)
self.assertTrue(required.issubset(set(tables)))
UniqueModel._schema.drop_all()
tables = self.database.get_tables()
self.assertFalse(UniqueModel._meta.table_name in tables)
def test_get_indexes(self):
indexes = self.database.get_indexes('unique_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('unique_model_pkey', 'PRIMARY')]
self.assertEqual(data, [
('unique_model_name', ['name'], True, 'unique_model')])
indexes = self.database.get_indexes('indexed_model')
data = [(index.name, index.columns, index.unique, index.table)
for index in indexes
if index.name not in ('indexed_model_pkey', 'PRIMARY')]
self.assertEqual(sorted(data), [
('indexed_model_first_last', ['first', 'last'], False,
'indexed_model'),
('indexed_model_first_last_dob', ['first', 'last', 'dob'], True,
'indexed_model')])
# Multi-column index where columns are in different order than declared
# on the table.
indexes = self.database.get_indexes('person')
data = [(index.name, index.columns, index.unique)
for index in indexes
if index.name not in ('person_pkey', 'PRIMARY')]
self.assertEqual(data, [
('person_last_first', ['last', 'first'], False)])
def test_get_columns(self):
columns = self.database.get_columns('indexed_model')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('id', False, True, 'indexed_model'),
('first', False, False, 'indexed_model'),
('last', False, False, 'indexed_model'),
('dob', False, False, 'indexed_model')])
columns = self.database.get_columns('category')
data = [(c.name, c.null, c.primary_key, c.table)
for c in columns]
self.assertEqual(data, [
('name', False, True, 'category'),
('parent_id', True, False, 'category')])
def test_get_primary_keys(self):
primary_keys = self.database.get_primary_keys('users')
self.assertEqual(primary_keys, ['id'])
primary_keys = self.database.get_primary_keys('category')
self.assertEqual(primary_keys, ['name'])
@requires_models(Note)
def test_get_views(self):
def normalize_view_meta(view_meta):
sql_ws_norm = re.sub(r'[\n\s]+', ' ', view_meta.sql.strip('; '))
return view_meta.name, (sql_ws_norm
.replace('`peewee_test`.', '')
.replace('`notes`.', '')
.replace('notes.', '')
.replace('`', ''))
def assertViews(expected):
# Create two sample views.
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC')
self.database.execute_sql('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')
try:
views = self.database.get_views()
normalized = sorted([normalize_view_meta(v) for v in views])
self.assertEqual(normalized, expected)
# Ensure that we can use get_columns to introspect views.
columns = self.database.get_columns('notes_deleted')
self.assertEqual([c.name for c in columns], ['content'])
columns = self.database.get_columns('notes_public')
self.assertEqual([c.name for c in columns], ['content', 'ts'])
finally:
self.database.execute_sql('DROP VIEW notes_public;')
self.database.execute_sql('DROP VIEW notes_deleted;')
# Unfortunately, all databases seem to represent VIEW definitions
# differently internally.
if IS_SQLITE:
assertViews([
('notes_deleted', ('CREATE VIEW notes_deleted AS '
'SELECT content FROM notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public', ('CREATE VIEW notes_public AS '
'SELECT content, ts FROM notes '
'WHERE status = 1 ORDER BY ts DESC'))])
elif IS_MYSQL:
assertViews([
('notes_deleted',
('select content AS content from notes '
'where status = 9 order by id desc')),
('notes_public',
('select content AS content,ts AS ts from notes '
'where status = 1 order by ts desc'))])
elif IS_POSTGRESQL:
assertViews([
('notes_deleted',
('SELECT content FROM notes '
'WHERE (status = 9) ORDER BY id DESC')),
('notes_public',
('SELECT content, ts FROM notes '
'WHERE (status = 1) ORDER BY ts DESC'))])
elif IS_CRDB:
assertViews([
('notes_deleted',
('SELECT content FROM peewee_test.public.notes '
'WHERE status = 9 ORDER BY id DESC')),
('notes_public',
('SELECT content, ts FROM peewee_test.public.notes '
'WHERE status = 1 ORDER BY ts DESC'))])
@requires_models(User, Tweet, Category)
def test_get_foreign_keys(self):
foreign_keys = self.database.get_foreign_keys('tweet')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('user_id', 'users', 'id', 'tweet')])
foreign_keys = self.database.get_foreign_keys('category')
data = [(fk.column, fk.dest_table, fk.dest_column, fk.table)
for fk in foreign_keys]
self.assertEqual(data, [
('parent_id', 'category', 'name', 'category')])
|
TestIntrospection
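A minimal standalone sketch of the same introspection calls against an in-memory SQLite database (the model below is illustrative and unrelated to the fixtures used by the test class):

from peewee import CharField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Widget(Model):
    name = CharField(index=True)

    class Meta:
        database = db


db.create_tables([Widget])
assert "widget" in db.get_tables()
assert [c.name for c in db.get_columns("widget")] == ["id", "name"]
assert db.get_primary_keys("widget") == ["id"]
assert any(idx.columns == ["name"] for idx in db.get_indexes("widget"))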
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/frames.py
|
{
"start": 153280,
"end": 169239
}
|
class ____(Request):
"""
Gets the count of frames matching the given dataview
:param dataview: Dataview specification
:type dataview: Dataview
"""
_service = "frames"
_action = "get_count_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"dataview": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/dv_augmentation"},
{"type": "null"},
],
},
"filters": {
"description": "List of FilterRule ('OR' relationship)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": ["object", "null"],
},
"mapping": {
"description": "Mapping parameters",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
"versions": {
"description": "View dataset versions",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/dv_augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"dataview": {
"$ref": "#/definitions/dataview",
"description": "Dataview specification",
}
},
"required": ["dataview"],
}
def __init__(self, dataview, **kwargs):
super(GetCountForDataviewRequest, self).__init__(**kwargs)
self.dataview = dataview
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
|
GetCountForDataviewRequest
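The `dataview` setter above accepts either a ready-made `Dataview` object or a plain dict that it converts via `from_dict`. A generic sketch of that coercion pattern with an ordinary property (the classes below are illustrative, not ClearML types):

class Point:
    def __init__(self, x: int = 0, y: int = 0) -> None:
        self.x, self.y = x, y

    @classmethod
    def from_dict(cls, data: dict) -> "Point":
        return cls(**data)


class Shape:
    @property
    def origin(self) -> Point:
        return self._origin

    @origin.setter
    def origin(self, value) -> None:
        if isinstance(value, dict):       # accept raw payloads ...
            value = Point.from_dict(value)
        elif not isinstance(value, Point):
            raise TypeError("origin must be a Point or a dict")
        self._origin = value              # ... but always store the typed object


s = Shape()
s.origin = {"x": 1, "y": 2}
assert isinstance(s.origin, Point) and (s.origin.x, s.origin.y) == (1, 2)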
|
python
|
django__django
|
tests/admin_inlines/admin.py
|
{
"start": 1607,
"end": 1692
}
|
class ____(admin.StackedInline):
model = EditablePKBook
|
EditablePKBookStackedInline
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/distributions/distribution.py
|
{
"start": 9682,
"end": 46517
}
|
class ____(_BaseDistribution, metaclass=_DistributionMeta):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Shapes
There are three important concepts associated with TensorFlow Distributions
shapes:
- Event shape describes the shape of a single draw from the distribution;
it may be dependent across dimensions. For scalar distributions, the event
shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is
`[5]`.
- Batch shape describes independent, not identically distributed draws, aka a
"collection" or "bunch" of distributions.
- Sample shape describes independent, identically distributed draws of batches
from the distribution family.
The event shape and the batch shape are properties of a Distribution object,
whereas the sample shape is associated with a specific call to `sample` or
`log_prob`.
For detailed usage examples of TensorFlow Distributions shapes, see
[this tutorial](
https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb)
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tf_type(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
if not name or name[-1] != "/": # `name` is not a name scope
non_unique_name = name or type(self).__name__
with ops.name_scope(non_unique_name) as name:
pass
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name
@property
def _parameters(self):
return self._parameter_dict
@_parameters.setter
def _parameters(self, value):
"""Intercept assignments to self._parameters to avoid reference cycles.
Parameters are often created using locals(), so we need to clean out any
references to `self` before assigning it to an attribute.
Args:
value: A dictionary of parameters to assign to the `_parameters` property.
"""
if "self" in value:
del value["self"]
self._parameter_dict = value
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used:
# `parameters = dict(locals())`.
return {k: v for k, v in self._parameters.items()
if not k.startswith("__") and k != "self"}
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
def _batch_shape_tensor(self):
raise NotImplementedError(
"batch_shape_tensor is not implemented: {}".format(type(self).__name__))
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._batch_shape())
def _event_shape_tensor(self):
raise NotImplementedError(
"event_shape_tensor is not implemented: {}".format(type(self).__name__))
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return tensor_shape.as_shape(self._event_shape())
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented: {}".format(
type(self).__name__))
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented: {}".format(
type(self).__name__))
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented: {}".format(
type(self).__name__))
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented: {}".format(
type(self).__name__))
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented: {}".format(
type(self).__name__))
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError(
"log_survival_function is not implemented: {}".format(
type(self).__name__))
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented: {}".format(
type(self).__name__))
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented: {}".format(
type(self).__name__))
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented: {}".format(
type(self).__name__))
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _quantile(self, value):
raise NotImplementedError("quantile is not implemented: {}".format(
type(self).__name__))
def _call_quantile(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = _convert_to_tensor(
value, name="value", preferred_dtype=self.dtype)
return self._quantile(value, **kwargs)
def quantile(self, value, name="quantile"):
"""Quantile function. Aka "inverse cdf" or "percent point function".
Given random variable `X` and `p in [0, 1]`, the `quantile` is:
```none
quantile(p) := x such that P[X <= x] == p
```
Args:
value: `float` or `double` `Tensor`.
name: Python `str` prepended to names of ops created by this function.
Returns:
quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_quantile(value, name)
def _variance(self):
raise NotImplementedError("variance is not implemented: {}".format(
type(self).__name__))
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented: {}".format(
type(self).__name__))
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented: {}".format(
type(self).__name__))
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: Python `str` prepended to names of ops created by this function.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
def _mode(self):
raise NotImplementedError("mode is not implemented: {}".format(
type(self).__name__))
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def _cross_entropy(self, other):
return kullback_leibler.cross_entropy(
self, other, allow_nan_stats=self.allow_nan_stats)
def cross_entropy(self, other, name="cross_entropy"):
"""Computes the (Shannon) cross entropy.
Denote this distribution (`self`) by `P` and the `other` distribution by
`Q`. Assuming `P, Q` are absolutely continuous with respect to
    one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)
cross entropy is defined as:
```none
H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)
```
where `F` denotes the support of the random variable `X ~ P`.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
        representing `n` different calculations of (Shannon) cross entropy.
"""
with self._name_scope(name):
return self._cross_entropy(other)
def _kl_divergence(self, other):
return kullback_leibler.kl_divergence(
self, other, allow_nan_stats=self.allow_nan_stats)
def kl_divergence(self, other, name="kl_divergence"):
"""Computes the Kullback--Leibler divergence.
Denote this distribution (`self`) by `p` and the `other` distribution by
`q`. Assuming `p, q` are absolutely continuous with respect to reference
measure `r`, the KL divergence is defined as:
```none
KL[p, q] = E_p[log(p(X)/q(X))]
= -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x)
= H[p, q] - H[p]
```
where `F` denotes the support of the random variable `X ~ p`, `H[., .]`
    denotes (Shannon) cross entropy, and `H[.]` denotes (Shannon) entropy.
Args:
other: `tfp.distributions.Distribution` instance.
name: Python `str` prepended to names of ops created by this function.
Returns:
kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`
representing `n` different calculations of the Kullback-Leibler
divergence.
"""
with self._name_scope(name):
return self._kl_divergence(other)
def __str__(self):
return ("tfp.distributions.{type_name}("
"\"{self_name}\""
"{maybe_batch_shape}"
"{maybe_event_shape}"
", dtype={dtype})".format(
type_name=type(self).__name__,
self_name=self.name,
maybe_batch_shape=(", batch_shape={}".format(self.batch_shape)
if self.batch_shape.ndims is not None
else ""),
maybe_event_shape=(", event_shape={}".format(self.event_shape)
if self.event_shape.ndims is not None
else ""),
dtype=self.dtype.name))
def __repr__(self):
return ("<tfp.distributions.{type_name} "
"'{self_name}'"
" batch_shape={batch_shape}"
" event_shape={event_shape}"
" dtype={dtype}>".format(
type_name=type(self).__name__,
self_name=self.name,
batch_shape=self.batch_shape,
event_shape=self.event_shape,
dtype=self.dtype.name))
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32), array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape().dims[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
|
Distribution
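Most public methods above follow the same template: call the leading-underscore hook, and if a subclass left it unimplemented, derive the result from a sibling hook (`prob` from `exp(log_prob)`, `variance` from `stddev**2`, and so on). A pure-Python reduction of that pattern (not TensorFlow code):

import math


class BaseDist:
    def _prob(self, x):
        raise NotImplementedError

    def _log_prob(self, x):
        raise NotImplementedError

    def prob(self, x):
        try:
            return self._prob(x)
        except NotImplementedError:
            # Fall back to the sibling hook, mirroring _call_prob above.
            return math.exp(self._log_prob(x))


class UnitExponential(BaseDist):
    # Rate-1 exponential on x >= 0 implements only the log-density hook.
    def _log_prob(self, x):
        return -x


assert abs(UnitExponential().prob(1.0) - math.exp(-1.0)) < 1e-12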
|
python
|
Textualize__textual
|
docs/examples/guide/reactivity/recompose02.py
|
{
"start": 149,
"end": 613
}
|
class ____(App):
CSS = """
Screen {align: center middle}
Digits {width: auto}
"""
time: reactive[datetime] = reactive(datetime.now, recompose=True)
def compose(self) -> ComposeResult:
yield Digits(f"{self.time:%X}")
def update_time(self) -> None:
self.time = datetime.now()
def on_mount(self) -> None:
self.set_interval(1, self.update_time)
if __name__ == "__main__":
app = Clock()
app.run()
|
Clock
|
python
|
tiangolo__fastapi
|
docs_src/extra_models/tutorial002.py
|
{
"start": 220,
"end": 264
}
|
class ____(UserBase):
password: str
|
UserIn
|
python
|
allegroai__clearml
|
clearml/backend_api/session/jsonmodels/validators.py
|
{
"start": 1338,
"end": 2520
}
|
class ____(object):
"""Validator for maximum value."""
def __init__(self, maximum_value: Any, exclusive: bool = False) -> None:
"""Init.
:param maximum_value: Maximum value for validator.
        :param bool exclusive: If `True`, the validated value must be strictly
            lower than the given threshold.
"""
self.maximum_value = maximum_value
self.exclusive = exclusive
def validate(self, value: Any) -> None:
"""Validate value."""
if self.exclusive:
if value >= self.maximum_value:
                tpl = "'{val}' is greater than or equal to the maximum ('{max}')."
raise ValidationError(tpl.format(val=value, max=self.maximum_value))
else:
            if value > self.maximum_value:
                raise ValidationError(
                    "'{value}' is greater than the maximum ('{max}').".format(value=value, max=self.maximum_value)
                )
def modify_schema(self, field_schema: dict) -> None:
"""Modify field schema."""
field_schema["maximum"] = self.maximum_value
if self.exclusive:
field_schema["exclusiveMaximum"] = True
|
Max
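A short usage sketch, assuming the class is exposed as `Max` and that `ValidationError` is the exception class imported alongside it in this module:

v = Max(10)
v.validate(10)                      # inclusive by default: 10 <= 10 passes

try:
    Max(10, exclusive=True).validate(10)
except ValidationError:
    pass                            # exclusive: 10 >= 10 is rejected
else:
    raise AssertionError("expected ValidationError")

schema = {}
Max(10, exclusive=True).modify_schema(schema)
assert schema == {"maximum": 10, "exclusiveMaximum": True}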
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/typinganndata/ann_module5.py
|
{
"start": 163,
"end": 202
}
|
class ____:
value: Final = 3000
|
MyClass
|
python
|
huggingface__transformers
|
src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
{
"start": 8043,
"end": 14352
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: RecurrentGemmaConfig):
super().__init__()
self.config = config
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = config.head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
self.partial_rotary_factor = config.partial_rotary_factor
self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=True)
self.rotary_emb = RecurrentGemmaRotaryEmbedding(config=config)
def forward(
self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
# Partial rotary embedding
query_rot, query_pass = torch.chunk(query_states, int(1 / self.partial_rotary_factor), dim=-1)
key_rot, key_pass = torch.chunk(key_states, int(1 / self.partial_rotary_factor), dim=-1)
query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
query_states = torch.cat((query_rot, query_pass), dim=-1)
key_states = torch.cat((key_rot, key_pass), dim=-1)
if use_cache and hasattr(self, "key_states"):
cache_kwargs = {"cache_position": cache_position}
key_states, value_states = self._update_cache(key_states, value_states, **cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states.contiguous(),
key_states.contiguous(),
value_states.contiguous(),
attn_mask=causal_mask, # pretty much a must for sliding window backend!
dropout_p=self.attention_dropout if self.training else 0.0,
scale=self.head_dim**-0.5,
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
return attn_output
def _setup_cache(self, batch_size, device, dtype=None):
if dtype is None and self.config.dtype is not None:
dtype = self.config.dtype
dtype = dtype if dtype is not None else torch.float32
cache_shape = (batch_size, self.num_key_value_heads, self.config.attention_window_size, self.head_dim)
self.value_states = torch.zeros(cache_shape, dtype=dtype, device=device)
self.key_states = torch.zeros(cache_shape, dtype=dtype, device=device)
@torch.no_grad()
def _update_cache(self, key_states, value_states, **cache_kwargs):
"""
torch.compile compatible sliding window.
Computes the `indices` based on `cache_position >= self.config.attention_window_size - 1`.
The `to_shift` is only true once we are above attention_window_size. Thus with `attention_window_size==64`:
indices = (slicing + to_shift[-1].int()-1) % self.config.attention_window_size
tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 0])
We overwrite the cache using these, then we always write at cache_position (clamped to `attention_window_size`)
"""
cache_position = cache_kwargs.get("cache_position")
if cache_position.shape[0] > self.config.attention_window_size:
# int indexing -> device sync? in compile, use tensor
k_out = key_states[:, :, -self.config.attention_window_size :, :]
v_out = value_states[:, :, -self.config.attention_window_size :, :]
else:
slicing = torch.ones(
self.config.attention_window_size, dtype=torch.long, device=value_states.device
).cumsum(0)
cache_position = cache_position.clamp(0, self.config.attention_window_size - 1)
to_shift = cache_position >= self.config.attention_window_size - 1
indices = (slicing + to_shift[-1].int() - 1) % self.config.attention_window_size
k_out, v_out = self.key_states.to(key_states.device), self.value_states.to(value_states.device)
k_out = k_out[:, :, indices]
v_out = v_out[:, :, indices]
k_out[:, :, cache_position] = key_states.to(k_out.dtype)
v_out[:, :, cache_position] = value_states.to(v_out.dtype)
self.key_states, self.value_states = k_out, v_out
return k_out, v_out
|
RecurrentGemmaSdpaAttention
|
python
|
doocs__leetcode
|
solution/0700-0799/0732.My Calendar III/Solution.py
|
{
"start": 205,
"end": 1666
}
|
class ____:
def __init__(self):
self.root = Node(1, int(1e9 + 1))
def modify(self, l: int, r: int, v: int, node: Node = None):
if l > r:
return
if node is None:
node = self.root
if node.l >= l and node.r <= r:
node.v += v
node.add += v
return
self.pushdown(node)
if l <= node.mid:
self.modify(l, r, v, node.left)
if r > node.mid:
self.modify(l, r, v, node.right)
self.pushup(node)
def query(self, l: int, r: int, node: Node = None) -> int:
if l > r:
return 0
if node is None:
node = self.root
if node.l >= l and node.r <= r:
return node.v
self.pushdown(node)
v = 0
if l <= node.mid:
v = max(v, self.query(l, r, node.left))
if r > node.mid:
v = max(v, self.query(l, r, node.right))
return v
def pushup(self, node: Node):
node.v = max(node.left.v, node.right.v)
def pushdown(self, node: Node):
if node.left is None:
node.left = Node(node.l, node.mid)
if node.right is None:
node.right = Node(node.mid + 1, node.r)
if node.add:
node.left.v += node.add
node.right.v += node.add
node.left.add += node.add
node.right.add += node.add
node.add = 0
|
SegmentTree
|
python
|
pytest-dev__pytest
|
src/_pytest/main.py
|
{
"start": 15784,
"end": 15879
}
|
class ____(Exception):
"""Signals a stop as failed test run."""
@dataclasses.dataclass
|
Failed
|
python
|
PrefectHQ__prefect
|
tests/cli/test_deploy.py
|
{
"start": 176990,
"end": 182029
}
|
class ____:
async def test_deploy_without_entrypoint(self, prefect_client: PrefectClient):
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy",
user_input=(
# Accept first flow
readchar.key.ENTER
+
# Accept default deployment name
readchar.key.ENTER
+
# accept first work pool
readchar.key.ENTER
+
# decline schedule
"n"
+ readchar.key.ENTER
+
# Decline remote storage
"n"
+ readchar.key.ENTER
+
# decline save user inputs
"n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"Select a flow to deploy",
"test",
"import-project/my_module/flow.py",
"test",
"import-project/my_module/flow.py",
"foobar",
"nested-project/implicit_relative.py",
"nested-project/explicit_relative.py",
"An important name",
"Second important name",
"flows/hello.py",
"successfully created",
],
)
async def test_deploy_without_entrypoint_manually_enter(
self, prefect_client: PrefectClient
):
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy",
user_input=(
# Decline selecting from list
"n"
+
# Enter entrypoint
"flows/hello.py:my_flow"
+ readchar.key.ENTER
+
# Accept default deployment name
readchar.key.ENTER
+
# accept first work pool
readchar.key.ENTER
+
# decline schedule
"n"
+ readchar.key.ENTER
+
# Decline remote storage
"n"
+ readchar.key.ENTER
+
# decline save user inputs
"n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"Select a flow to deploy",
"Flow entrypoint (expected format path/to/file.py:function_name)",
"Deployment 'An important name/default' successfully created",
],
)
deployment = await prefect_client.read_deployment_by_name(
name="An important name/default"
)
assert deployment.entrypoint == "flows/hello.py:my_flow"
async def test_deploy_validates_manually_entered_entrypoints(
self, prefect_client: PrefectClient
):
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy",
user_input=(
# Decline selecting from list
"n"
+
# Enter syntactically invalid entrypoint
"flows/hello.py"
+ readchar.key.ENTER
+
# Enter entrypoint with non-existent file
"flows/does_not_exist.py:my_flow"
+ readchar.key.ENTER
+
# Enter entrypoint with non-existent function
"flows/hello.py:does_not_exist"
+ readchar.key.ENTER
+
# Enter valid entrypoint
"flows/hello.py:my_flow"
+ readchar.key.ENTER
+
# Accept default deployment name
readchar.key.ENTER
+
# accept first work pool
readchar.key.ENTER
+
# decline schedule
"n"
+ readchar.key.ENTER
+
# Decline remote storage
"n"
+ readchar.key.ENTER
+
# decline save user inputs
"n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"Select a flow to deploy",
"Please enter a valid flow entrypoint.",
"Failed to load flow from entrypoint 'flows/does_not_exist.py:my_flow'",
"Failed to load flow from entrypoint 'flows/hello.py:does_not_exist'",
"Deployment 'An important name/default' successfully created",
],
)
deployment = await prefect_client.read_deployment_by_name(
name="An important name/default"
)
assert deployment.entrypoint == "flows/hello.py:my_flow"
|
TestDeployWithoutEntrypoint
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/embed/test_util__embed.py
|
{
"start": 24060,
"end": 24754
}
|
class ____:
def test_apply_None(self) -> None:
d = Document()
orig = d.theme
beu._set_temp_theme(d, None)
assert beu._themes[d] is orig
assert d.theme is orig
def test_apply_theme(self) -> None:
t = Theme(json={})
d = Document()
orig = d.theme
beu._set_temp_theme(d, t)
assert beu._themes[d] is orig
assert d.theme is t
def test_apply_from_curdoc(self) -> None:
t = Theme(json={})
curdoc().theme = t
d = Document()
orig = d.theme
beu._set_temp_theme(d, beu.FromCurdoc)
assert beu._themes[d] is orig
assert d.theme is t
|
Test__set_temp_theme
|
python
|
neetcode-gh__leetcode
|
python/0894-all-possible-full-binary-trees.py
|
{
"start": 192,
"end": 780
}
|
class ____:
def allPossibleFBT(self, n: int) -> List[Optional[TreeNode]]:
dp = { 0 : [], 1 : [ TreeNode() ] }
def backtrack(n):
if n in dp:
return dp[n]
res = []
for l in range(n):
r = n - 1 - l
leftTrees, rightTrees = backtrack(l), backtrack(r)
for t1 in leftTrees:
for t2 in rightTrees:
res.append(TreeNode(0, t1, t2))
dp[n] = res
return res
return backtrack(n)
|
Solution
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 262409,
"end": 262714
}
|
class ____(_PrintableStructure):
_fields_ = [
('unit', c_uint),
('location', c_uint),
('sublocation', c_uint),
('extlocation', c_uint),
('address', c_uint),
('isParity', c_uint),
('count', c_uint)
]
|
c_nvmlEccSramUniqueUncorrectedErrorEntry_v1_t
|
python
|
PyCQA__pylint
|
tests/checkers/base/unittest_base.py
|
{
"start": 288,
"end": 588
}
|
class ____(unittest.TestCase):
@unittest.skip("too many dependencies need six :(")
def test_no_six(self) -> None:
try:
has_six = True
except ImportError:
has_six = False
self.assertFalse(has_six, "pylint must be able to run without six")
|
TestNoSix
|
python
|
django__django
|
django/contrib/admin/sites.py
|
{
"start": 22879,
"end": 23403
}
|
class ____(LazyObject):
def _setup(self):
AdminSiteClass = import_string(apps.get_app_config("admin").default_site)
self._wrapped = AdminSiteClass()
def __repr__(self):
return repr(self._wrapped)
# This global object represents the default admin site, for the common case.
# You can provide your own AdminSite using the (Simple)AdminConfig.default_site
# attribute. You can also instantiate AdminSite in your own code to create a
# custom admin site.
site = DefaultAdminSite()
|
DefaultAdminSite
|