| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
gevent__gevent
|
src/greentest/3.14/test_threading_local.py
|
{
"start": 334,
"end": 474
}
|
class ____(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
|
Weak
|
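The first row above (from gevent__gevent) shows the record layout used throughout this table: the `source` column holds a Python snippet whose class name has been masked as `____`, and the `target` column holds the masked name. As a rough orientation only, here is a minimal sketch of that record's shape; the `Record` and `ClassSpan` names are illustrative, and it is an assumption (not stated by the table) that the `class_span` offsets are character positions in the original file.

```python
from typing import TypedDict


class ClassSpan(TypedDict):
    # Assumed: character offsets of the masked class within the original file.
    start: int
    end: int


class Record(TypedDict):
    language: str          # always "python" (stringclasses, 1 value)
    repo: str              # "owner__name" slug, e.g. "gevent__gevent"
    path: str              # file path inside the repository
    class_span: ClassSpan  # location of the masked class in the file
    source: str            # snippet with the class name replaced by "____"
    target: str            # the masked class name to recover


# The first row of the table expressed as a single record (source truncated).
example: Record = {
    "language": "python",
    "repo": "gevent__gevent",
    "path": "src/greentest/3.14/test_threading_local.py",
    "class_span": {"start": 334, "end": 474},
    "source": "class ____(object):\n    pass\n...",
    "target": "Weak",
}
```

The remaining rows follow the same layout.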
python
|
getsentry__sentry
|
src/sentry/preprod/utils.py
|
{
"start": 78,
"end": 1018
}
|
class ____(NamedTuple):
"""Parsed components of a release version string."""
app_id: str
build_version: str
def parse_release_version(release_version: str) -> ParsedReleaseVersion | None:
"""
Parse release version string into app_id and build_version components.
Expected formats:
1. "app_id@version+build_number" -> returns (app_id, version)
2. "app_id@version" -> returns (app_id, version)
Args:
release_version: The release version string to parse
Returns:
ParsedReleaseVersion with app_id and build_version, or None if parsing fails
"""
# Parse app_id and version, ignoring build_number if present
version_match = re.match(r"^([^@]+)@([^+]+)(?:\+.*)?$", release_version)
if version_match:
app_id, build_version = version_match.groups()
return ParsedReleaseVersion(app_id=app_id, build_version=build_version)
return None
|
ParsedReleaseVersion
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/completion.py
|
{
"start": 730,
"end": 978
}
|
class ____(enum.Enum):
"""The audit requirements of a container."""
NONE = 'none'
REQUIRED = 'required'
def __repr__(self) -> str:
return f'{self.__class__.__name__}.{self.name}'
@dataclasses.dataclass(frozen=True)
|
AuditMode
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constructor14.py
|
{
"start": 442,
"end": 537
}
|
class ____(Protocol[T_contra]):
def __call__(self, message: T_contra, /) -> Any: ...
|
Callback
|
python
|
wandb__wandb
|
wandb/vendor/pygments/filters/__init__.py
|
{
"start": 1827,
"end": 2893
}
|
class ____(Filter):
"""Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
|
CodeTagFilter
|
python
|
django__django
|
django/db/models/lookups.py
|
{
"start": 26654,
"end": 26767
}
|
class ____(YearLookup, GreaterThan):
def get_bound_params(self, start, finish):
return (finish,)
|
YearGt
|
python
|
google__pytype
|
pytype/rewrite/abstract/base_test.py
|
{
"start": 369,
"end": 527
}
|
class ____(test_utils.ContextfulTestBase):
def _const(self, const):
return base.PythonConstant(self.ctx, const, allow_direct_instantiation=True)
|
TestBase
|
python
|
walkccc__LeetCode
|
solutions/75. Sort Colors/75-2.py
|
{
"start": 0,
"end": 556
}
|
class ____:
def sortColors(self, nums: list[int]) -> None:
l = 0 # The next 0 should be placed in l.
r = len(nums) - 1 # The next 2 should be placed in r.
i = 0
while i <= r:
if nums[i] == 0:
nums[i], nums[l] = nums[l], nums[i]
i += 1
l += 1
elif nums[i] == 1:
i += 1
else:
# We may swap a 0 to index i, but we're still not sure whether this 0
# is placed in the correct index, so we can't move pointer i.
nums[i], nums[r] = nums[r], nums[i]
r -= 1
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/core/converter.py
|
{
"start": 8192,
"end": 8517
}
|
class ____(object):
"""ProgramContext keeps track of converting function hierarchies.
Attributes:
options: ConversionOptions
autograph_module: Deprecated. Do not use.
"""
def __init__(self, options, autograph_module=None):
self.options = options
self.autograph_module = autograph_module
|
ProgramContext
|
python
|
pandas-dev__pandas
|
pandas/core/col.py
|
{
"start": 6101,
"end": 9072
}
|
class ____:
def __init__(self, func: Expression, namespace: str) -> None:
self._func = func
self._namespace = namespace
def __call__(self, df: DataFrame) -> Any:
return self._func(df)
def __getattr__(self, attr: str) -> Any:
if isinstance(getattr(getattr(Series, self._namespace), attr), property):
repr_str = f"{self._func._repr_str}.{self._namespace}.{attr}"
return Expression(
lambda df: getattr(getattr(self(df), self._namespace), attr),
repr_str,
)
def func(df: DataFrame, *args: Any, **kwargs: Any) -> Any:
parsed_args = _parse_args(df, *args)
parsed_kwargs = _parse_kwargs(df, **kwargs)
return getattr(getattr(self(df), self._namespace), attr)(
*parsed_args, **parsed_kwargs
)
def wrapper(*args: Any, **kwargs: Any) -> Expression:
args_str = _pretty_print_args_kwargs(*args, **kwargs)
repr_str = f"{self._func._repr_str}.{self._namespace}.{attr}({args_str})"
return Expression(lambda df: func(df, *args, **kwargs), repr_str)
return wrapper
@set_module("pandas")
def col(col_name: Hashable) -> Expression:
"""
Generate deferred object representing a column of a DataFrame.
Any place which accepts ``lambda df: df[col_name]``, such as
:meth:`DataFrame.assign` or :meth:`DataFrame.loc`, can also accept
``pd.col(col_name)``.
Parameters
----------
col_name : Hashable
Column name.
Returns
-------
`pandas.api.typing.Expression`
A deferred object representing a column of a DataFrame.
See Also
--------
DataFrame.query : Query columns of a dataframe using string expressions.
Examples
--------
You can use `col` in `assign`.
>>> df = pd.DataFrame({"name": ["beluga", "narwhal"], "speed": [100, 110]})
>>> df.assign(name_titlecase=pd.col("name").str.title())
name speed name_titlecase
0 beluga 100 Beluga
1 narwhal 110 Narwhal
You can also use it for filtering.
>>> df.loc[pd.col("speed") > 105]
name speed
1 narwhal 110
"""
if not isinstance(col_name, Hashable):
msg = f"Expected Hashable, got: {type(col_name)}"
raise TypeError(msg)
def func(df: DataFrame) -> Series:
if col_name not in df.columns:
columns_str = str(df.columns.tolist())
max_len = 90
if len(columns_str) > max_len:
columns_str = columns_str[:max_len] + "...]"
msg = (
f"Column '{col_name}' not found in given DataFrame.\n\n"
f"Hint: did you mean one of {columns_str} instead?"
)
raise ValueError(msg)
return df[col_name]
return Expression(func, f"col({col_name!r})")
__all__ = ["Expression", "col"]
|
NamespaceExpression
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 243377,
"end": 246027
}
|
class ____(Response):
"""
Response of tasks.enqueue endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
:param queued: Number of tasks queued (0 or 1)
:type queued: int
"""
_service = "tasks"
_action = "enqueue"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"queued": {
"description": "Number of tasks queued (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self, updated: Optional[int] = None, fields: Optional[dict] = None, queued: Optional[int] = None, **kwargs: Any
) -> None:
super(EnqueueResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
self.queued = queued
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
@schema_property("queued")
def queued(self) -> Optional[int]:
return self._property_queued
@queued.setter
def queued(self, value: Optional[int]) -> None:
if value is None:
self._property_queued = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "queued", six.integer_types)
self._property_queued = value
|
EnqueueResponse
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/renderers.py
|
{
"start": 295,
"end": 1951
}
|
class ____(JSONRenderer):
"""
Renderer that sorts the keys from the JSON alphabetically.
See https://github.com/encode/django-rest-framework/pull/4202
"""
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Copied from ``rest_framework.renders.JSONRenderer``.
Changes:
- sort_keys=True on json.dumps
- use str instead of six.text_types
https://github.com/encode/django-rest-framework/blob/b7523f4/rest_framework/renderers.py#L84
"""
if data is None:
return bytes()
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data,
cls=self.encoder_class,
indent=indent,
ensure_ascii=self.ensure_ascii,
allow_nan=not self.strict,
separators=separators,
sort_keys=True,
)
if isinstance(ret, str):
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset. If bytes were returned
# by json.dumps() then we don't have these characters in any case.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace("\u2028", "\\u2028").replace("\u2029", "\\u2029")
return bytes(ret.encode("utf-8"))
return ret
|
AlphabeticalSortedJSONRenderer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/single-element-in-a-sorted-array.py
|
{
"start": 32,
"end": 557
}
|
class ____(object):
def singleNonDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left, right = 0, len(nums)-1
while left <= right:
mid = left + (right - left) / 2
if not (mid%2 == 0 and mid+1 < len(nums) and \
nums[mid] == nums[mid+1]) and \
not (mid%2 == 1 and nums[mid] == nums[mid-1]):
right = mid-1
else:
left = mid+1
return nums[left]
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/shared_integrations/exceptions/__init__.py
|
{
"start": 5057,
"end": 5187
}
|
class ____(IntegrationError):
"""
Error when an external API resource is not found.
"""
|
IntegrationResourceNotFoundError
|
python
|
falconry__falcon
|
tests/test_httpstatus.py
|
{
"start": 3717,
"end": 6397
}
|
class ____:
def test_raise_status_in_process_request(self, hook_test_client):
"""Make sure we can raise status from middleware process request"""
client = hook_test_client
class TestMiddleware:
def process_request(self, req, resp):
raise HTTPStatus(
falcon.HTTP_200, headers={'X-Failed': 'False'}, text='Pass'
)
# NOTE(kgriffs): Test the side-by-side support for dual WSGI and
# ASGI compatibility.
async def process_request_async(self, req, resp):
self.process_request(req, resp)
client.app.add_middleware(TestMiddleware())
response = client.simulate_request(path='/status', method='GET')
assert response.status_code == 200
assert response.headers['x-failed'] == 'False'
assert response.text == 'Pass'
def test_raise_status_in_process_resource(self, hook_test_client):
"""Make sure we can raise status from middleware process resource"""
client = hook_test_client
class TestMiddleware:
def process_resource(self, req, resp, resource, params):
raise HTTPStatus(
falcon.HTTP_200, headers={'X-Failed': 'False'}, text='Pass'
)
async def process_resource_async(self, *args):
self.process_resource(*args)
# NOTE(kgriffs): Pass a list to test that add_middleware can handle it
client.app.add_middleware([TestMiddleware()])
response = client.simulate_request(path='/status', method='GET')
assert response.status == falcon.HTTP_200
assert response.headers['x-failed'] == 'False'
assert response.text == 'Pass'
def test_raise_status_runs_process_response(self, hook_test_client):
"""Make sure process_response still runs"""
client = hook_test_client
class TestMiddleware:
def process_response(self, req, resp, resource, req_succeeded):
resp.status = falcon.HTTP_200
resp.set_header('X-Failed', 'False')
resp.text = 'Pass'
async def process_response_async(self, *args):
self.process_response(*args)
# NOTE(kgriffs): Pass a generic iterable to test that add_middleware
# can handle it.
client.app.add_middleware(iter([TestMiddleware()]))
response = client.simulate_request(path='/status', method='GET')
assert response.status == falcon.HTTP_200
assert response.headers['x-failed'] == 'False'
assert response.text == 'Pass'
|
TestHTTPStatusWithMiddleware
|
python
|
ray-project__ray
|
release/llm_tests/benchmark/load_test.py
|
{
"start": 4635,
"end": 7000
}
|
class ____:
lock = threading.Lock()
users = None
first_request_done = 0
logging_params = None
environment = None
tokenizer = None
@classmethod
def notify_init(cls, environment, logging_params):
with cls.lock:
if cls.environment is None:
cls.environment = environment
if cls.logging_params is None:
cls.logging_params = logging_params
else:
assert (
cls.logging_params == logging_params
), f"Inconsistent settings between workers: {cls.logging_params} != {logging_params}"
@classmethod
def notify_first_request(cls):
with cls.lock:
if (
cls.environment.parsed_options.qps is not None
and cls.first_request_done == 0
):
# if in QPS mode, reset after first successful request comes back
cls.reset_stats()
cls.first_request_done += 1
if (
cls.environment.parsed_options.qps is not None
and cls.first_request_done == 0
and cls.users == cls.first_request_done
):
# if in fixed load mode, reset after all users issued one request (we're in a steady state)
cls.reset_stats()
@classmethod
def notify_spawning_complete(cls, user_count):
with cls.lock:
cls.users = user_count
if cls.users == cls.first_request_done:
cls.reset_stats()
@classmethod
def reset_stats(cls):
assert cls.environment.runner, "only local mode is supported"
print("Resetting stats after traffic reach a steady state")
cls.environment.events.reset_stats.fire()
cls.environment.runner.stats.reset_all()
@classmethod
def load_tokenizer(cls, dir):
if not dir:
return None
with cls.lock:
if cls.tokenizer:
return cls.tokenizer
import transformers
cls.tokenizer = transformers.AutoTokenizer.from_pretrained(dir)
cls.tokenizer.add_bos_token = False
cls.tokenizer.add_eos_token = False
return cls.tokenizer
events.spawning_complete.add_listener(InitTracker.notify_spawning_complete)
@dataclass
|
InitTracker
|
python
|
run-llama__llama_index
|
llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/llama_index/memory/bedrock_agentcore/base.py
|
{
"start": 4578,
"end": 4893
}
|
class ____(BaseModel):
actor_id: str
memory_id: str
session_id: str
namespace: str = "/"
memory_strategy_id: Optional[str] = None
def get_context(self) -> Dict[str, Optional[str]]:
return {key: value for key, value in self.__dict__.items() if value is not None}
|
AgentCoreMemoryContext
|
python
|
fastai__fastai
|
fastai/vision/augment.py
|
{
"start": 46805,
"end": 47868
}
|
class ____():
def __init__(self, max_lighting=0.2, p=0.75, draw=None, batch=False): store_attr()
def _def_draw(self, x):
if not self.batch: res = x.new_empty(x.size(0)).uniform_(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
else: res = x.new_zeros(x.size(0)) + random.uniform(math.log(1-self.max_lighting), -math.log(1-self.max_lighting))
return torch.exp(res)
def before_call(self, x):
self.change = _draw_mask(x, self._def_draw, draw=self.draw, p=self.p, neutral=1., batch=self.batch)
def __call__(self, x):
#interpolate between grayscale and original in-place
gs = grayscale(x)
gs.mul_(1-self.change[:,None,None,None])
x.mul_(self.change[:,None,None,None])
return x.add_(gs)
# %% ../../nbs/09_vision.augment.ipynb 222
@patch
@delegates(_SaturationLogit.__init__)
def saturation(x: TensorImage, **kwargs):
func = _SaturationLogit(**kwargs)
func.before_call(x)
return x.lighting(func)
# %% ../../nbs/09_vision.augment.ipynb 223
|
_SaturationLogit
|
python
|
apache__airflow
|
providers/openlineage/src/airflow/providers/openlineage/plugins/facets.py
|
{
"start": 3601,
"end": 3722
}
|
class ____(RunFacet):
"""Composite Airflow DAG run facet."""
dag: dict
dagRun: dict
@define
|
AirflowDagRunFacet
|
python
|
apache__airflow
|
providers/exasol/src/airflow/providers/exasol/operators/exasol.py
|
{
"start": 1045,
"end": 2500
}
|
class ____(SQLExecuteQueryOperator):
"""
Executes sql code in a specific Exasol database.
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
template references are recognized by str ending in '.sql'
:param exasol_conn_id: reference to a specific Exasol database
:param autocommit: if True, each command is automatically committed.
(default value: False)
:param parameters: (optional) the parameters to render the SQL query with.
:param schema: (optional) name of the schema which overwrites the one defined in the connection
:param handler: (optional) handler to process the results of the query
"""
template_fields: Sequence[str] = ("sql", "exasol_conn_id")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers: ClassVar[dict] = {"sql": "sql"}
ui_color = "#ededed"
conn_id_field = "exasol_conn_id"
def __init__(
self,
*,
exasol_conn_id: str = "exasol_default",
schema: str | None = None,
handler=exasol_fetch_all_handler,
**kwargs,
) -> None:
self.exasol_conn_id = exasol_conn_id
if schema is not None:
hook_params = kwargs.pop("hook_params", {})
kwargs["hook_params"] = {"schema": schema, **hook_params}
super().__init__(conn_id=exasol_conn_id, handler=handler, **kwargs)
|
ExasolOperator
|
python
|
pypa__hatch
|
tests/backend/builders/test_config.py
|
{
"start": 6211,
"end": 8493
}
|
class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.require_runtime_dependencies is builder.config.require_runtime_dependencies is False
def test_target(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"require-runtime-dependencies": True}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.require_runtime_dependencies is True
def test_target_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"require-runtime-dependencies": 9000}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError,
match="Field `tool.hatch.build.targets.foo.require-runtime-dependencies` must be a boolean",
):
_ = builder.config.require_runtime_dependencies
def test_global(self, isolation):
config = {"tool": {"hatch": {"build": {"require-runtime-dependencies": True}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.require_runtime_dependencies is True
def test_global_not_boolean(self, isolation):
config = {"tool": {"hatch": {"build": {"require-runtime-dependencies": 9000}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(TypeError, match="Field `tool.hatch.build.require-runtime-dependencies` must be a boolean"):
_ = builder.config.require_runtime_dependencies
def test_target_overrides_global(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"require-runtime-dependencies": True,
"targets": {"foo": {"require-runtime-dependencies": False}},
}
}
}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert builder.config.require_runtime_dependencies is False
|
TestRequireRuntimeDependencies
|
python
|
numpy__numpy
|
numpy/_core/tests/test_unicode.py
|
{
"start": 12521,
"end": 12671
}
|
class ____(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
|
TestByteorder_2_UCS4
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/system/date_time.py
|
{
"start": 862,
"end": 3086
}
|
class ____(BaseFactCollector):
name = 'date_time'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
facts_dict = {}
date_time_facts = {}
# Store the timestamp once, then get local and UTC versions from that
epoch_ts = time.time()
now = datetime.datetime.fromtimestamp(epoch_ts)
utcnow = datetime.datetime.fromtimestamp(
epoch_ts,
tz=datetime.timezone.utc,
)
date_time_facts['year'] = now.strftime('%Y')
date_time_facts['month'] = now.strftime('%m')
date_time_facts['weekday'] = now.strftime('%A')
date_time_facts['weekday_number'] = now.strftime('%w')
date_time_facts['weeknumber'] = now.strftime('%W')
date_time_facts['day'] = now.strftime('%d')
date_time_facts['hour'] = now.strftime('%H')
date_time_facts['minute'] = now.strftime('%M')
date_time_facts['second'] = now.strftime('%S')
date_time_facts['epoch'] = now.strftime('%s')
# epoch returns float or string in some non-linux environments
if date_time_facts['epoch'] == '' or date_time_facts['epoch'][0] == '%':
date_time_facts['epoch'] = str(int(epoch_ts))
# epoch_int always returns integer format of epoch
date_time_facts['epoch_int'] = str(int(now.strftime('%s')))
if date_time_facts['epoch_int'] == '' or date_time_facts['epoch_int'][0] == '%':
date_time_facts['epoch_int'] = str(int(epoch_ts))
date_time_facts['date'] = now.strftime('%Y-%m-%d')
date_time_facts['time'] = now.strftime('%H:%M:%S')
date_time_facts['iso8601_micro'] = utcnow.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
date_time_facts['iso8601'] = utcnow.strftime("%Y-%m-%dT%H:%M:%SZ")
date_time_facts['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
date_time_facts['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
date_time_facts['tz'] = time.strftime("%Z")
date_time_facts['tz_dst'] = time.tzname[1]
date_time_facts['tz_offset'] = time.strftime("%z")
facts_dict['date_time'] = date_time_facts
return facts_dict
|
DateTimeFactCollector
|
python
|
huggingface__transformers
|
src/transformers/models/instructblipvideo/modular_instructblipvideo.py
|
{
"start": 6655,
"end": 6749
}
|
class ____(InstructBlipVisionModel):
input_modalities = "video"
|
InstructBlipVideoVisionModel
|
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/io/test_latex.py
|
{
"start": 321,
"end": 2788
}
|
class ____(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Write] with ``format="latex"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_latex_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_latex_failed_cls.tex"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_latex_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_latex_cls.tex"
write(fp, cls=tbl_cls)
def test_latex_columns(self, write, tmp_path):
fp = tmp_path / "test_rename_latex_columns.tex"
write(fp, latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
def test_write_latex_invalid_path(self, write):
"""Test passing an invalid path"""
invalid_fp = ""
with pytest.raises(FileNotFoundError, match="No such file or directory"):
write(invalid_fp, format="ascii.latex")
def test_write_latex_false_overwrite(self, write, tmp_path):
"""Test to write a LaTeX file without overwriting an existing file"""
# Test that passing an invalid path to write_latex() raises a IOError
fp = tmp_path / "test_write_latex_false_overwrite.tex"
write(fp)
with pytest.raises(OSError, match="overwrite=True"):
write(fp, overwrite=False)
def test_write_latex_unsupported_format(self, write, tmp_path):
"""Test for unsupported format"""
fp = tmp_path / "test_write_latex_unsupported_format.tex"
invalid_format = "unsupported"
with pytest.raises((ValueError, IORegistryError)) as exc_info:
pytest.raises(ValueError, match="format must be 'ascii.latex'")
pytest.raises(IORegistryError, match="No writer defined for format")
write(fp, format=invalid_format)
|
WriteLATEXTestMixin
|
python
|
django__django
|
tests/gis_tests/test_geoforms.py
|
{
"start": 16720,
"end": 18299
}
|
class ____(SimpleTestCase):
def setUp(self):
self.geometries = {
"point": GEOSGeometry("SRID=4326;POINT(9.052734375 42.451171875)"),
}
def test_osm_widget(self):
class PointForm(forms.Form):
p = forms.PointField(widget=forms.OSMWidget)
geom = self.geometries["point"]
form = PointForm(data={"p": geom})
rendered = form.as_p()
self.assertIn('"base_layer": "osm"', rendered)
self.assertIn('<textarea id="id_p"', rendered)
def test_default_lat_lon(self):
self.assertEqual(forms.OSMWidget.default_lon, 5)
self.assertEqual(forms.OSMWidget.default_lat, 47)
self.assertEqual(forms.OSMWidget.default_zoom, 12)
class PointForm(forms.Form):
p = forms.PointField(
widget=forms.OSMWidget(
attrs={
"default_lon": 20,
"default_lat": 30,
"default_zoom": 17,
}
),
)
form = PointForm()
rendered = form.as_p()
attrs = {
"base_layer": "osm",
"geom_type": "POINT",
"map_srid": 3857,
"display_raw": False,
"default_lon": 20,
"default_lat": 30,
"default_zoom": 17,
"required": True,
"id": "id_p",
"geom_name": "Point",
}
expected = json_script(attrs, "id_p_mapwidget_options")
self.assertInHTML(expected, rendered)
|
OSMWidgetTest
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py
|
{
"start": 929,
"end": 1055
}
|
class ____:
"""Represents the details of a configuration."""
section: str | None = None
@dataclass
|
ConfigurationDetails
|
python
|
redis__redis-py
|
redis/asyncio/client.py
|
{
"start": 50002,
"end": 50330
}
|
class ____(Protocol):
async def __call__(self, e: BaseException, pubsub: PubSub): ...
PSWorkerThreadExcHandlerT = Union[
PubsubWorkerExceptionHandler, AsyncPubsubWorkerExceptionHandler
]
CommandT = Tuple[Tuple[Union[str, bytes], ...], Mapping[str, Any]]
CommandStackT = List[CommandT]
|
AsyncPubsubWorkerExceptionHandler
|
python
|
numba__numba
|
numba/core/cgutils.py
|
{
"start": 2210,
"end": 7039
}
|
class ____(object):
"""
Creates a `Structure` like interface that is constructed with information
from DataModel instance. FE type must have a data model that is a
subclass of StructModel.
"""
# The following class members must be overridden by subclass
_fe_type = None
def __init__(self, context, builder, value=None, ref=None):
self._context = context
self._datamodel = self._context.data_model_manager[self._fe_type]
if not isinstance(self._datamodel, numba.core.datamodel.StructModel):
raise TypeError(
"Not a structure model: {0}".format(self._datamodel))
self._builder = builder
self._be_type = self._get_be_type(self._datamodel)
assert not is_pointer(self._be_type)
outer_ref, ref = self._make_refs(ref)
if ref.type.pointee != self._be_type:
raise AssertionError("bad ref type: expected %s, got %s"
% (self._be_type.as_pointer(), ref.type))
if value is not None:
if value.type != outer_ref.type.pointee:
raise AssertionError("bad value type: expected %s, got %s"
% (outer_ref.type.pointee, value.type))
self._builder.store(value, outer_ref)
self._value = ref
self._outer_ref = outer_ref
def _make_refs(self, ref):
"""
Return an (outer ref, value ref) pair. By default, these are
the same pointers, but a derived class may override this.
"""
if ref is None:
ref = alloca_once(self._builder, self._be_type, zfill=True)
return ref, ref
def _get_be_type(self, datamodel):
raise NotImplementedError
def _cast_member_to_value(self, index, val):
raise NotImplementedError
def _cast_member_from_value(self, index, val):
raise NotImplementedError
def _get_ptr_by_index(self, index):
return gep_inbounds(self._builder, self._value, 0, index)
def _get_ptr_by_name(self, attrname):
index = self._datamodel.get_field_position(attrname)
return self._get_ptr_by_index(index)
def __getattr__(self, field):
"""
Load the LLVM value of the named *field*.
"""
if not field.startswith('_'):
return self[self._datamodel.get_field_position(field)]
else:
raise AttributeError(field)
def __setattr__(self, field, value):
"""
Store the LLVM *value* into the named *field*.
"""
if field.startswith('_'):
return super(_StructProxy, self).__setattr__(field, value)
self[self._datamodel.get_field_position(field)] = value
def __getitem__(self, index):
"""
Load the LLVM value of the field at *index*.
"""
member_val = self._builder.load(self._get_ptr_by_index(index))
return self._cast_member_to_value(index, member_val)
def __setitem__(self, index, value):
"""
Store the LLVM *value* into the field at *index*.
"""
ptr = self._get_ptr_by_index(index)
value = self._cast_member_from_value(index, value)
if value.type != ptr.type.pointee:
if (is_pointer(value.type) and is_pointer(ptr.type.pointee)
and value.type.pointee == ptr.type.pointee.pointee):
# Differ by address-space only
# Auto coerce it
value = self._context.addrspacecast(self._builder,
value,
ptr.type.pointee.addrspace)
else:
raise TypeError("Invalid store of {value.type} to "
"{ptr.type.pointee} in "
"{self._datamodel} "
"(trying to write member #{index})"
.format(value=value, ptr=ptr, self=self,
index=index))
self._builder.store(value, ptr)
def __len__(self):
"""
Return the number of fields.
"""
return self._datamodel.field_count
def _getpointer(self):
"""
Return the LLVM pointer to the underlying structure.
"""
return self._outer_ref
def _getvalue(self):
"""
Load and return the value of the underlying LLVM structure.
"""
return self._builder.load(self._outer_ref)
def _setvalue(self, value):
"""
Store the value in this structure.
"""
assert not is_pointer(value.type)
assert value.type == self._be_type, (value.type, self._be_type)
self._builder.store(value, self._value)
|
_StructProxy
|
python
|
ray-project__ray
|
doc/source/serve/doc_code/monitoring/logging_config.py
|
{
"start": 805,
"end": 1171
}
|
class ____:
def __call__(self) -> int:
logger = logging.getLogger("ray.serve")
logger.debug("This debug message is from the router.")
return "hello world"
# __level_end__
serve.run(Model.bind())
resp = requests.get("http://localhost:8000/")
# __logs_dir_start__
@serve.deployment(logging_config=LoggingConfig(logs_dir="/my_dirs"))
|
Model
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-dashscope/llama_index/embeddings/dashscope/base.py
|
{
"start": 1078,
"end": 4843
}
|
class ____(str, Enum):
"""DashScope MultiModalEmbedding models."""
MULTIMODAL_EMBEDDING_ONE_PEACE_V1 = "multimodal-embedding-one-peace-v1"
def get_text_embedding(
model: str,
text: Union[str, List[str]],
api_key: Optional[str] = None,
**kwargs: Any,
) -> List[List[float]]:
"""
Call DashScope text embedding.
ref: https://help.aliyun.com/zh/dashscope/developer-reference/text-embedding-api-details.
Args:
model (str): The `DashScopeTextEmbeddingModels`
text (Union[str, List[str]]): text or list text to embedding.
Raises:
ImportError: need import dashscope
Returns:
List[List[float]]: The list of embedding result, if failed return empty list.
if some texts produce no output, the corresponding index of the output is None.
"""
try:
import dashscope
except ImportError:
raise ImportError("DashScope requires `pip install dashscope")
if isinstance(text, str):
text = [text]
response = dashscope.TextEmbedding.call(
model=model, input=text, api_key=api_key, kwargs=kwargs
)
embedding_results = [None] * len(text)
if response.status_code == HTTPStatus.OK:
for emb in response.output["embeddings"]:
embedding_results[emb["text_index"]] = emb["embedding"]
else:
logger.error("Calling TextEmbedding failed, details: %s" % response)
return embedding_results
def get_batch_text_embedding(
model: str, url: str, api_key: Optional[str] = None, **kwargs: Any
) -> Optional[str]:
"""
Call DashScope batch text embedding.
Args:
model (str): The `DashScopeMultiModalEmbeddingModels`
url (str): The url of the file to embedding which with lines of text to embedding.
Raises:
ImportError: Need install dashscope package.
Returns:
str: The url of the embedding result, format ref:
https://help.aliyun.com/zh/dashscope/developer-reference/text-embedding-async-api-details
"""
try:
import dashscope
except ImportError:
raise ImportError("DashScope requires `pip install dashscope")
response = dashscope.BatchTextEmbedding.call(
model=model, url=url, api_key=api_key, kwargs=kwargs
)
if response.status_code == HTTPStatus.OK:
return response.output["url"]
else:
logger.error("Calling BatchTextEmbedding failed, details: %s" % response)
return None
def get_multimodal_embedding(
model: str, input: list, api_key: Optional[str] = None, **kwargs: Any
) -> List[float]:
"""
Call DashScope multimodal embedding.
ref: https://help.aliyun.com/zh/dashscope/developer-reference/one-peace-multimodal-embedding-api-details.
Args:
model (str): The `DashScopeBatchTextEmbeddingModels`
input (str): The input of the embedding, eg:
[{'factor': 1, 'text': '你好'},
{'factor': 2, 'audio': 'https://dashscope.oss-cn-beijing.aliyuncs.com/audios/cow.flac'},
{'factor': 3, 'image': 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/256_1.png'}]
Raises:
ImportError: Need install dashscope package.
Returns:
List[float]: Embedding result, if failed return empty list.
"""
try:
import dashscope
except ImportError:
raise ImportError("DashScope requires `pip install dashscope")
response = dashscope.MultiModalEmbedding.call(
model=model, input=input, api_key=api_key, kwargs=kwargs
)
if response.status_code == HTTPStatus.OK:
return response.output["embedding"]
else:
logger.error("Calling MultiModalEmbedding failed, details: %s" % response)
return []
|
DashScopeMultiModalEmbeddingModels
|
python
|
networkx__networkx
|
networkx/algorithms/tests/test_link_prediction.py
|
{
"start": 1883,
"end": 3219
}
|
class ____:
@classmethod
def setup_class(cls):
cls.func = staticmethod(nx.jaccard_coefficient)
cls.test = staticmethod(partial(_test_func, predict_func=cls.func))
def test_K5(self):
G = nx.complete_graph(5)
self.test(G, [(0, 1)], [(0, 1, 0.6)])
def test_P4(self):
G = nx.path_graph(4)
self.test(G, [(0, 2)], [(0, 2, 0.5)])
@pytest.mark.parametrize("graph_type", (nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph))
def test_notimplemented(self, graph_type):
G = graph_type([(0, 1), (1, 2)])
with pytest.raises(nx.NetworkXNotImplemented):
self.func(G, [(0, 2)])
def test_node_not_found(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (0, 2), (2, 3)])
with pytest.raises(nx.NodeNotFound):
self.func(G, [(0, 4)])
def test_no_common_neighbor(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (2, 3)])
self.test(G, [(0, 2)], [(0, 2, 0)])
def test_isolated_nodes(self):
G = nx.Graph()
G.add_nodes_from([0, 1])
self.test(G, [(0, 1)], [(0, 1, 0)])
def test_all_nonexistent_edges(self):
G = nx.Graph()
G.add_edges_from([(0, 1), (0, 2), (2, 3)])
self.test(G, None, [(0, 3, 0.5), (1, 2, 0.5), (1, 3, 0)])
|
TestJaccardCoefficient
|
python
|
getsentry__sentry
|
src/sentry/lang/native/symbolicator.py
|
{
"start": 1586,
"end": 1966
}
|
class ____:
"""Bundles information about a symbolication task:
the platform and whether it's an existing event being reprocessed.
"""
platform: SymbolicatorPlatform
is_reprocessing: bool = False
def with_platform(self, platform: SymbolicatorPlatform) -> SymbolicatorTaskKind:
return dataclasses.replace(self, platform=platform)
|
SymbolicatorTaskKind
|
python
|
celery__celery
|
t/unit/security/test_certificate.py
|
{
"start": 2267,
"end": 3377
}
|
class ____(SecurityCase):
@patch('os.path.isdir')
@patch('glob.glob')
@patch('celery.security.certificate.Certificate')
def test_init(self, Certificate, glob, isdir):
cert = Certificate.return_value = Mock()
cert.has_expired.return_value = False
isdir.return_value = True
glob.return_value = ['foo.cert']
with conftest.open():
cert.get_id.return_value = 1
path = os.path.join('var', 'certs')
x = FSCertStore(path)
assert 1 in x._certs
glob.assert_called_with(os.path.join(path, '*'))
# they both end up with the same id
glob.return_value = ['foo.cert', 'bar.cert']
with pytest.raises(SecurityError):
x = FSCertStore(path)
glob.return_value = ['foo.cert']
cert.has_expired.return_value = True
with pytest.raises(SecurityError):
x = FSCertStore(path)
isdir.return_value = False
with pytest.raises(SecurityError):
x = FSCertStore(path)
|
test_FSCertStore
|
python
|
mlflow__mlflow
|
mlflow/server/graphql/autogenerated_graphql_schema.py
|
{
"start": 1101,
"end": 1429
}
|
class ____(graphene.ObjectType):
job_id = graphene.String()
run_id = graphene.String()
job_state = graphene.Field(MlflowDeploymentJobConnectionState)
run_state = graphene.Field(MlflowModelVersionDeploymentJobStateDeploymentJobRunState)
current_task_name = graphene.String()
|
MlflowModelVersionDeploymentJobState
|
python
|
scipy__scipy
|
scipy/stats/_sampling.py
|
{
"start": 12267,
"end": 38729
}
|
class ____:
"""
Fast sampling by numerical inversion of the CDF for a large class of
continuous distributions in `scipy.stats`.
Parameters
----------
dist : rv_frozen object
Frozen distribution object from `scipy.stats`. The list of supported
distributions can be found in the Notes section. The shape parameters,
`loc` and `scale` used to create the distributions must be scalars.
For example, for the Gamma distribution with shape parameter `p`,
`p` has to be a float, and for the beta distribution with shape
parameters (a, b), both a and b have to be floats.
domain : tuple of floats, optional
If one wishes to sample from a truncated/conditional distribution,
the domain has to be specified.
The default is None. In that case, the random variates are not
truncated, and the domain is inferred from the support of the
distribution.
ignore_shape_range : boolean, optional.
If False, shape parameters that are outside of the valid range
of values to ensure that the numerical accuracy (see Notes) is
high, raise a ValueError. If True, any shape parameters that are valid
for the distribution are accepted. This can be useful for testing.
The default is False.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
A NumPy random number generator or seed for the underlying NumPy
random number generator used to generate the stream of uniform
random numbers.
If `random_state` is None, it uses ``self.random_state``.
If `random_state` is an int,
``np.random.default_rng(random_state)`` is used.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Attributes
----------
loc : float
The location parameter.
random_state : {`numpy.random.Generator`, `numpy.random.RandomState`}
The random state used in relevant methods like `rvs` (unless
another `random_state` is passed as an argument to these methods).
scale : float
The scale parameter.
Methods
-------
cdf
evaluate_error
ppf
qrvs
rvs
support
Notes
-----
The class creates an object for continuous distributions specified
by `dist`. The method `rvs` uses a generator from
`scipy.stats.sampling` that is created when the object is instantiated.
In addition, the methods `qrvs` and `ppf` are added.
`qrvs` generate samples based on quasi-random numbers from
`scipy.stats.qmc`. `ppf` is the PPF based on the
numerical inversion method in [1]_ (`NumericalInversePolynomial`) that is
used to generate random variates.
Supported distributions (`distname`) are:
``alpha``, ``anglit``, ``argus``, ``beta``, ``betaprime``, ``bradford``,
``burr``, ``burr12``, ``cauchy``, ``chi``, ``chi2``, ``cosine``,
``crystalball``, ``expon``, ``gamma``, ``gennorm``, ``geninvgauss``,
``gumbel_l``, ``gumbel_r``, ``hypsecant``, ``invgamma``, ``invgauss``,
``invweibull``, ``laplace``, ``logistic``, ``maxwell``, ``moyal``,
``norm``, ``pareto``, ``powerlaw``, ``t``, ``rayleigh``, ``semicircular``,
``wald``, ``weibull_max``, ``weibull_min``.
`rvs` relies on the accuracy of the numerical inversion. If very extreme
shape parameters are used, the numerical inversion might not work. However,
for all implemented distributions, the admissible shape parameters have
been tested, and an error will be raised if the user supplies values
outside of the allowed range. The u-error should not exceed 1e-10 for all
valid parameters. Note that warnings might be raised even if parameters
are within the valid range when the object is instantiated.
To check numerical accuracy, the method `evaluate_error` can be used.
Note that all implemented distributions are also part of `scipy.stats`, and
the object created by `FastGeneratorInversion` relies on methods like
`ppf`, `cdf` and `pdf` from `rv_frozen`. The main benefit of using this
class can be summarized as follows: Once the generator to sample random
variates is created in the setup step, sampling and evaluation of
the PPF using `ppf` are very fast,
and performance is essentially independent of the distribution. Therefore,
a substantial speed-up can be achieved for many distributions if large
numbers of random variates are required. It is important to know that this
fast sampling is achieved by inversion of the CDF. Thus, one uniform
random variate is transformed into a non-uniform variate, which is an
advantage for several simulation methods, e.g., when
the variance reduction methods of common random variates or
antithetic variates are used ([2]_).
In addition, inversion makes it possible to
- to use a QMC generator from `scipy.stats.qmc` (method `qrvs`),
- to generate random variates truncated to an interval. For example, if
one aims to sample standard normal random variates from
the interval (2, 4), this can be easily achieved by using the parameter
`domain`.
The location and scale that are initially defined by `dist`
can be reset without having to rerun the setup
step to create the generator that is used for sampling. The relation
of the distribution `Y` with `loc` and `scale` to the standard
distribution `X` (i.e., ``loc=0`` and ``scale=1``) is given by
``Y = loc + scale * X``.
References
----------
.. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
"Random variate generation by numerical inversion when only the
density is known." ACM Transactions on Modeling and Computer
Simulation (TOMACS) 20.4 (2010): 1-25.
.. [2] Hörmann, Wolfgang, Josef Leydold and Gerhard Derflinger.
"Automatic nonuniform random number generation."
Springer, 2004.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats.sampling import FastGeneratorInversion
Let's start with a simple example to illustrate the main features:
>>> gamma_frozen = stats.gamma(1.5)
>>> gamma_dist = FastGeneratorInversion(gamma_frozen)
>>> r = gamma_dist.rvs(size=1000)
The mean should be approximately equal to the shape parameter 1.5:
>>> r.mean()
1.52423591130436 # may vary
Similarly, we can draw a sample based on quasi-random numbers:
>>> r = gamma_dist.qrvs(size=1000)
>>> r.mean()
1.4996639255942914 # may vary
Compare the PPF against approximation `ppf`.
>>> q = [0.001, 0.2, 0.5, 0.8, 0.999]
>>> np.max(np.abs(gamma_frozen.ppf(q) - gamma_dist.ppf(q)))
4.313394796895409e-08
To confirm that the numerical inversion is accurate, we evaluate the
approximation error (u-error), which should be below 1e-10 (for more
details, refer to the documentation of `evaluate_error`):
>>> gamma_dist.evaluate_error()
(7.446320551265581e-11, nan) # may vary
Note that the location and scale can be changed without instantiating a
new generator:
>>> gamma_dist.loc = 2
>>> gamma_dist.scale = 3
>>> r = gamma_dist.rvs(size=1000)
The mean should be approximately 2 + 3*1.5 = 6.5.
>>> r.mean()
6.399549295242894 # may vary
Let us also illustrate how truncation can be applied:
>>> trunc_norm = FastGeneratorInversion(stats.norm(), domain=(3, 4))
>>> r = trunc_norm.rvs(size=1000)
>>> 3 < r.min() < r.max() < 4
True
Check the mean:
>>> r.mean()
3.250433367078603 # may vary
>>> stats.norm.expect(lb=3, ub=4, conditional=True)
3.260454285589997
In this particular, case, `scipy.stats.truncnorm` could also be used to
generate truncated normal random variates.
"""
def __init__(
self,
dist,
*,
domain=None,
ignore_shape_range=False,
random_state=None,
):
if isinstance(dist, stats.distributions.rv_frozen):
distname = dist.dist.name
if distname not in PINV_CONFIG.keys():
raise ValueError(
f"Distribution '{distname}' is not supported."
f"It must be one of {list(PINV_CONFIG.keys())}"
)
else:
raise ValueError("`dist` must be a frozen distribution object")
loc = dist.kwds.get("loc", 0)
scale = dist.kwds.get("scale", 1)
args = dist.args
if not np.isscalar(loc):
raise ValueError("loc must be scalar.")
if not np.isscalar(scale):
raise ValueError("scale must be scalar.")
self._frozendist = getattr(stats, distname)(
*args,
loc=loc,
scale=scale,
)
self._distname = distname
nargs = np.broadcast_arrays(args)[0].size
nargs_expected = self._frozendist.dist.numargs
if nargs != nargs_expected:
raise ValueError(
f"Each of the {nargs_expected} shape parameters must be a "
f"scalar, but {nargs} values are provided."
)
self.random_state = random_state
if domain is None:
self._domain = self._frozendist.support()
self._p_lower = 0.0
self._p_domain = 1.0
else:
self._domain = domain
self._p_lower = self._frozendist.cdf(self._domain[0])
_p_domain = self._frozendist.cdf(self._domain[1]) - self._p_lower
self._p_domain = _p_domain
self._set_domain_adj()
self._ignore_shape_range = ignore_shape_range
# the domain to be passed to NumericalInversePolynomial
# define a separate variable since in case of a transformation,
# domain_pinv will not be the same as self._domain
self._domain_pinv = self._domain
# get information about the distribution from the config to set up
# the generator
dist = self._process_config(distname, args)
if self._rvs_transform_inv is not None:
d0 = self._rvs_transform_inv(self._domain[0], *args)
d1 = self._rvs_transform_inv(self._domain[1], *args)
if d0 > d1:
# swap values if transformation if decreasing
d0, d1 = d1, d0
# only update _domain_pinv and not _domain
# _domain refers to the original distribution, _domain_pinv
# to the transformed distribution
self._domain_pinv = d0, d1
# self._center has been set by the call self._process_config
# check if self._center is inside the transformed domain
# _domain_pinv, otherwise move it to the endpoint that is closer
if self._center is not None:
if self._center < self._domain_pinv[0]:
self._center = self._domain_pinv[0]
elif self._center > self._domain_pinv[1]:
self._center = self._domain_pinv[1]
self._rng = NumericalInversePolynomial(
dist,
random_state=self.random_state,
domain=self._domain_pinv,
center=self._center,
)
@property
def random_state(self):
return self._random_state
@random_state.setter
def random_state(self, random_state):
self._random_state = check_random_state_qmc(random_state)
@property
def loc(self):
return self._frozendist.kwds.get("loc", 0)
@loc.setter
def loc(self, loc):
if not np.isscalar(loc):
raise ValueError("loc must be scalar.")
self._frozendist.kwds["loc"] = loc
# update the adjusted domain that depends on loc and scale
self._set_domain_adj()
@property
def scale(self):
return self._frozendist.kwds.get("scale", 0)
@scale.setter
def scale(self, scale):
if not np.isscalar(scale):
raise ValueError("scale must be scalar.")
self._frozendist.kwds["scale"] = scale
# update the adjusted domain that depends on loc and scale
self._set_domain_adj()
def _set_domain_adj(self):
""" Adjust the domain based on loc and scale. """
loc = self.loc
scale = self.scale
lb = self._domain[0] * scale + loc
ub = self._domain[1] * scale + loc
self._domain_adj = (lb, ub)
def _process_config(self, distname, args):
cfg = PINV_CONFIG[distname]
if "check_pinv_params" in cfg:
if not self._ignore_shape_range:
if not cfg["check_pinv_params"](*args):
msg = ("No generator is defined for the shape parameters "
f"{args}. Use ignore_shape_range to proceed "
"with the selected values.")
raise ValueError(msg)
if "center" in cfg.keys():
if not np.isscalar(cfg["center"]):
self._center = cfg["center"](*args)
else:
self._center = cfg["center"]
else:
self._center = None
self._rvs_transform = cfg.get("rvs_transform", None)
self._rvs_transform_inv = cfg.get("rvs_transform_inv", None)
_mirror_uniform = cfg.get("mirror_uniform", None)
if _mirror_uniform is None:
self._mirror_uniform = False
else:
self._mirror_uniform = _mirror_uniform(*args)
return CustomDistPINV(cfg["pdf"], args)
def rvs(self, size=None):
"""
Sample from the distribution by inversion.
Parameters
----------
size : int or tuple, optional
The shape of samples. Default is ``None`` in which case a scalar
sample is returned.
Returns
-------
rvs : array_like
A NumPy array of random variates.
Notes
-----
Random variates are generated by numerical inversion of the CDF, i.e.,
`ppf` computed by `NumericalInversePolynomial` when the class
is instantiated. Note that the
default ``rvs`` method of the rv_continuous class is
overwritten. Hence, a different stream of random numbers is generated
even if the same seed is used.
"""
# note: we cannot use self._rng.rvs directly in case
# self._mirror_uniform is true
u = self.random_state.uniform(size=size)
if self._mirror_uniform:
u = 1 - u
r = self._rng.ppf(u)
if self._rvs_transform is not None:
r = self._rvs_transform(r, *self._frozendist.args)
return self.loc + self.scale * r
def ppf(self, q):
"""
Very fast PPF (inverse CDF) of the distribution which
is a very close approximation of the exact PPF values.
Parameters
----------
u : array_like
Array with probabilities.
Returns
-------
ppf : array_like
Quantiles corresponding to the values in `u`.
Notes
-----
The evaluation of the PPF is very fast but it may have a large
relative error in the far tails. The numerical precision of the PPF
is controlled by the u-error, that is,
``max |u - CDF(PPF(u))|`` where the max is taken over points in
the interval [0,1], see `evaluate_error`.
Note that this PPF is designed to generate random samples.
"""
q = np.asarray(q)
if self._mirror_uniform:
x = self._rng.ppf(1 - q)
else:
x = self._rng.ppf(q)
if self._rvs_transform is not None:
x = self._rvs_transform(x, *self._frozendist.args)
return self.scale * x + self.loc
def qrvs(self, size=None, d=None, qmc_engine=None):
"""
Quasi-random variates of the given distribution.
The `qmc_engine` is used to draw uniform quasi-random variates, and
these are converted to quasi-random variates of the given distribution
using inverse transform sampling.
Parameters
----------
size : int, tuple of ints, or None; optional
Defines shape of random variates array. Default is ``None``.
d : int or None, optional
Defines dimension of uniform quasi-random variates to be
transformed. Default is ``None``.
qmc_engine : scipy.stats.qmc.QMCEngine(d=1), optional
Defines the object to use for drawing
quasi-random variates. Default is ``None``, which uses
`scipy.stats.qmc.Halton(1)`.
Returns
-------
rvs : ndarray or scalar
Quasi-random variates. See Notes for shape information.
Notes
-----
The shape of the output array depends on `size`, `d`, and `qmc_engine`.
The intent is for the interface to be natural, but the detailed rules
to achieve this are complicated.
- If `qmc_engine` is ``None``, a `scipy.stats.qmc.Halton` instance is
created with dimension `d`. If `d` is not provided, ``d=1``.
- If `qmc_engine` is not ``None`` and `d` is ``None``, `d` is
determined from the dimension of the `qmc_engine`.
- If `qmc_engine` is not ``None`` and `d` is not ``None`` but the
dimensions are inconsistent, a ``ValueError`` is raised.
- After `d` is determined according to the rules above, the output
shape is ``tuple_shape + d_shape``, where:
- ``tuple_shape = tuple()`` if `size` is ``None``,
- ``tuple_shape = (size,)`` if `size` is an ``int``,
- ``tuple_shape = size`` if `size` is a sequence,
- ``d_shape = tuple()`` if `d` is ``None`` or `d` is 1, and
- ``d_shape = (d,)`` if `d` is greater than 1.
The elements of the returned array are part of a low-discrepancy
sequence. If `d` is 1, this means that none of the samples are truly
independent. If `d` > 1, each slice ``rvs[..., i]`` will be of a
quasi-independent sequence; see `scipy.stats.qmc.QMCEngine` for
details. Note that when `d` > 1, the samples returned are still those
of the provided univariate distribution, not a multivariate
generalization of that distribution.
"""
qmc_engine, d = _validate_qmc_input(qmc_engine, d, self.random_state)
# mainly copied from unuran_wrapper.pyx.templ
# `rvs` is flexible about whether `size` is an int or tuple, so this
# should be, too.
try:
if size is None:
tuple_size = (1,)
else:
tuple_size = tuple(size)
except TypeError:
tuple_size = (size,)
# we do not use rng.qrvs directly since we need to be
# able to apply the ppf to 1 - u
N = 1 if size is None else np.prod(size)
u = qmc_engine.random(N)
if self._mirror_uniform:
u = 1 - u
qrvs = self._ppf(u)
if self._rvs_transform is not None:
qrvs = self._rvs_transform(qrvs, *self._frozendist.args)
if size is None:
qrvs = qrvs.squeeze()[()]
else:
if d == 1:
qrvs = qrvs.reshape(tuple_size)
else:
qrvs = qrvs.reshape(tuple_size + (d,))
return self.loc + self.scale * qrvs
def evaluate_error(self, size=100000, random_state=None, x_error=False):
"""
Evaluate the numerical accuracy of the inversion (u- and x-error).
Parameters
----------
size : int, optional
The number of random points over which the error is estimated.
Default is ``100000``.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
A NumPy random number generator or seed for the underlying NumPy
random number generator used to generate the stream of uniform
random numbers.
If `random_state` is None, use ``self.random_state``.
If `random_state` is an int,
``np.random.default_rng(random_state)`` is used.
If `random_state` is already a ``Generator`` or ``RandomState``
instance then that instance is used.
Returns
-------
u_error, x_error : tuple of floats
A NumPy array of random variates.
Notes
-----
The numerical precision of the inverse CDF `ppf` is controlled by
the u-error. It is computed as follows:
``max |u - CDF(PPF(u))|`` where the max is taken over `size` random
points in the interval [0,1]. `random_state` determines the random
sample. Note that if `ppf` was exact, the u-error would be zero.
The x-error measures the direct distance between the exact PPF
and `ppf`. If ``x_error`` is set to ``True``, it is
computed as the maximum of the minimum of the relative and absolute
x-error:
``max(min(x_error_abs[i], x_error_rel[i]))`` where
``x_error_abs[i] = |PPF(u[i]) - PPF_fast(u[i])|``,
``x_error_rel[i] = max |(PPF(u[i]) - PPF_fast(u[i])) / PPF(u[i])|``.
Note that it is important to consider the relative x-error in the case
that ``PPF(u)`` is close to zero or very large.
By default, only the u-error is evaluated and the x-error is set to
``np.nan``. Note that the evaluation of the x-error will be very slow
if the implementation of the PPF is slow.
Further information about these error measures can be found in [1]_.
References
----------
.. [1] Derflinger, Gerhard, Wolfgang Hörmann, and Josef Leydold.
"Random variate generation by numerical inversion when only the
density is known." ACM Transactions on Modeling and Computer
Simulation (TOMACS) 20.4 (2010): 1-25.
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> from scipy.stats.sampling import FastGeneratorInversion
Create an object for the normal distribution:
>>> d_norm_frozen = stats.norm()
>>> d_norm = FastGeneratorInversion(d_norm_frozen)
To confirm that the numerical inversion is accurate, we evaluate the
approximation error (u-error and x-error).
>>> u_error, x_error = d_norm.evaluate_error(x_error=True)
The u-error should be below 1e-10:
>>> u_error
8.785783212061915e-11 # may vary
Compare the PPF against approximation `ppf`:
>>> q = [0.001, 0.2, 0.4, 0.6, 0.8, 0.999]
>>> diff = np.abs(d_norm_frozen.ppf(q) - d_norm.ppf(q))
>>> x_error_abs = np.max(diff)
>>> x_error_abs
1.2937954707581412e-08
This is the absolute x-error evaluated at the points q. The relative
error is given by
>>> x_error_rel = np.max(diff / np.abs(d_norm_frozen.ppf(q)))
>>> x_error_rel
4.186725600453555e-09
The x_error computed above is derived in a very similar way over a
much larger set of random values q. At each value q[i], the minimum
of the relative and absolute error is taken. The final value is then
derived as the maximum of these values. In our example, we get the
following value:
>>> x_error
4.507068014335139e-07 # may vary
"""
if not isinstance(size, numbers.Integral | np.integer):
raise ValueError("size must be an integer.")
# urng will be used to draw the samples for testing the error
# it must not interfere with self.random_state. therefore, do not
# call self.rvs, but draw uniform random numbers and apply
# self.ppf (note: like in rvs, consider self._mirror_uniform)
urng = check_random_state_qmc(random_state)
u = urng.uniform(size=size)
if self._mirror_uniform:
u = 1 - u
x = self.ppf(u)
uerr = np.max(np.abs(self._cdf(x) - u))
if not x_error:
return uerr, np.nan
ppf_u = self._ppf(u)
x_error_abs = np.abs(self.ppf(u)-ppf_u)
x_error_rel = x_error_abs / np.abs(ppf_u)
x_error_combined = np.array([x_error_abs, x_error_rel]).min(axis=0)
return uerr, np.max(x_error_combined)
def support(self):
"""Support of the distribution.
Returns
-------
a, b : float
end-points of the distribution's support.
Notes
-----
Note that the support of the distribution depends on `loc`,
`scale` and `domain`.
Examples
--------
>>> from scipy import stats
>>> from scipy.stats.sampling import FastGeneratorInversion
Define a truncated normal distribution:
>>> d_norm = FastGeneratorInversion(stats.norm(), domain=(0, 1))
>>> d_norm.support()
(0, 1)
Shift the distribution:
>>> d_norm.loc = 2.5
>>> d_norm.support()
(2.5, 3.5)
"""
return self._domain_adj
def _cdf(self, x):
"""Cumulative distribution function (CDF)
Parameters
----------
x : array_like
The values where the CDF is evaluated
Returns
-------
y : ndarray
CDF evaluated at x
"""
y = self._frozendist.cdf(x)
if self._p_domain == 1.0:
return y
return np.clip((y - self._p_lower) / self._p_domain, 0, 1)
def _ppf(self, q):
"""Percent point function (inverse of `cdf`)
Parameters
----------
q : array_like
lower tail probability
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
if self._p_domain == 1.0:
return self._frozendist.ppf(q)
x = self._frozendist.ppf(self._p_domain * np.array(q) + self._p_lower)
return np.clip(x, self._domain_adj[0], self._domain_adj[1])
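# A minimal standalone sketch of the u-error described in `evaluate_error` above,
# computed for scipy.stats.norm, whose exact ppf/cdf are available; the seed and
# sample size here are illustrative only.
import numpy as np
from scipy import stats
rng = np.random.default_rng(12345)
u = rng.uniform(size=1000)
# u-error: max |u - CDF(PPF(u))| over the sampled points; for an exact inverse
# this stays near machine precision.
u_error = np.max(np.abs(stats.norm.cdf(stats.norm.ppf(u)) - u))
print(u_error)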
|
FastGeneratorInversion
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_utils.py
|
{
"start": 890,
"end": 5724
}
|
class ____(fixtures.TestBase):
def test_column_element_no_visit(self):
class MyElement(ColumnElement):
_traverse_internals = []
eq_(sql_util.find_tables(MyElement(), check_columns=True), [])
def test_find_tables_selectable(self):
metadata = MetaData()
common = Table(
"common",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
Column("extra", String(45)),
)
subset_select = select(common.c.id, common.c.data).alias()
eq_(set(sql_util.find_tables(subset_select)), {common})
@testing.variation("has_cache_key", [True, False])
def test_get_embedded_bindparams(self, has_cache_key):
bp = bindparam("x")
if not has_cache_key:
class NotCacheable(TypeDecorator):
impl = String
cache_ok = False
stmt = select(column("q", NotCacheable())).where(column("y") == bp)
else:
stmt = select(column("q")).where(column("y") == bp)
eq_(stmt._get_embedded_bindparams(), [bp])
if not has_cache_key:
is_(stmt._generate_cache_key(), None)
else:
is_not_none(stmt._generate_cache_key())
def test_find_tables_aliases(self):
metadata = MetaData()
common = Table(
"common",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
Column("extra", String(45)),
)
calias = common.alias()
subset_select = select(common.c.id, calias.c.data).subquery()
eq_(
set(sql_util.find_tables(subset_select, include_aliases=True)),
{common, calias, subset_select},
)
def test_incompatible_options_add_clslevel(self):
class opt1(sql_base.CacheableOptions):
_cache_key_traversal = []
foo = "bar"
with expect_raises_message(
TypeError,
"dictionary contains attributes not covered by "
"Options class .*opt1.* .*'bar'.*",
):
o1 = opt1
o1 += {"foo": "f", "bar": "b"}
def test_incompatible_options_add_instancelevel(self):
class opt1(sql_base.CacheableOptions):
_cache_key_traversal = []
foo = "bar"
o1 = opt1(foo="bat")
with expect_raises_message(
TypeError,
"dictionary contains attributes not covered by "
"Options class .*opt1.* .*'bar'.*",
):
o1 += {"foo": "f", "bar": "b"}
def test_options_merge(self):
class opt1(sql_base.CacheableOptions):
_cache_key_traversal = []
class opt2(sql_base.CacheableOptions):
_cache_key_traversal = []
foo = "bar"
class opt3(sql_base.CacheableOptions):
_cache_key_traversal = []
foo = "bar"
bat = "hi"
o2 = opt2.safe_merge(opt1)
eq_(o2.__dict__, {})
eq_(o2.foo, "bar")
assert_raises_message(
TypeError,
r"other element .*opt2.* is not empty, is not of type .*opt1.*, "
r"and contains attributes not covered here .*'foo'.*",
opt1.safe_merge,
opt2,
)
o2 = opt2 + {"foo": "bat"}
o3 = opt2.safe_merge(o2)
eq_(o3.foo, "bat")
o4 = opt3.safe_merge(o2)
eq_(o4.foo, "bat")
eq_(o4.bat, "hi")
assert_raises(TypeError, opt2.safe_merge, o4)
@testing.combinations(
(column("q"), [column("q")]),
(column("q").desc(), [column("q")]),
(column("q").desc().label(None), [column("q")]),
(column("q").label(None).desc(), [column("q")]),
(column("q").label(None).desc().label(None), [column("q")]),
("foo", []), # textual label reference
(
select(column("q")).scalar_subquery().label(None),
[select(column("q")).scalar_subquery().label(None)],
),
(
select(column("q")).scalar_subquery().label(None).desc(),
[select(column("q")).scalar_subquery().label(None)],
),
)
def test_unwrap_order_by(self, expr, expected):
expr = coercions.expect(roles.OrderByRole, expr)
unwrapped = sql_util.unwrap_order_by(expr)
for a, b in zip_longest(unwrapped, expected):
assert a is not None and a.compare(b)
def test_column_collection_get(self):
col_id = column("id", Integer)
col_alt = column("alt", Integer)
table1 = table("mytable", col_id)
is_(table1.columns.get("id"), col_id)
is_(table1.columns.get("alt"), None)
is_(table1.columns.get("alt", col_alt), col_alt)
|
MiscTest
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_trace_item_stats.py
|
{
"start": 179,
"end": 2744
}
|
class ____(
APITransactionTestCase,
SnubaTestCase,
SpanTestCase,
):
view = "sentry-api-0-organization-trace-item-stats"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.ten_mins_ago = before_now(minutes=10)
self.ten_mins_ago_iso = self.ten_mins_ago.replace(microsecond=0).isoformat()
def do_request(self, query=None, features=None, **kwargs):
if query:
query.setdefault("sampling", "HIGHEST_ACCURACY")
response = self.client.get(
reverse(
self.view,
kwargs={"organization_id_or_slug": self.organization.slug},
),
query,
format="json",
**kwargs,
)
return response
def _store_span(self, description=None, tags=None, duration=None):
if tags is None:
tags = {"foo": "bar"}
self.store_span(
self.create_span(
{"description": description or "foo", "sentry_tags": tags},
start_ts=self.ten_mins_ago,
duration=duration or 1000,
),
is_eap=True,
)
def test_no_project(self) -> None:
response = self.do_request()
assert response.status_code == 200, response.data
assert response.data == {"data": []}
def test_distribution_values(self) -> None:
tags = [
({"browser": "chrome", "device": "desktop"}, 500),
({"browser": "chrome", "device": "mobile"}, 100),
({"browser": "chrome", "device": "mobile"}, 100),
({"browser": "chrome", "device": "desktop"}, 100),
({"browser": "safari", "device": "mobile"}, 100),
({"browser": "chrome", "device": "desktop"}, 500),
({"browser": "edge", "device": "desktop"}, 500),
]
for tag, duration in tags:
self._store_span(tags=tag, duration=duration)
response = self.do_request(
query={"query": "span.duration:<=100", "statsType": ["attributeDistributions"]}
)
assert response.status_code == 200, response.data
assert len(response.data["data"]) == 1
attribute_distribution = response.data["data"][0]["attribute_distributions"]["data"]
device_data = attribute_distribution["sentry.device"]
assert {"label": "mobile", "value": 3.0} in device_data
assert {"label": "desktop", "value": 1.0} in device_data
assert response.data
|
OrganizationTraceItemsStatsEndpointTest
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 122968,
"end": 124330
}
|
class ____(ASTBase):
def __init__(self, nestedName: ASTNestedName, initializer: ASTInitializer) -> None:
self.nestedName = nestedName
self.initializer = initializer
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTConcept):
return NotImplemented
return (
self.nestedName == other.nestedName
and self.initializer == other.initializer
)
def __hash__(self) -> int:
return hash((self.nestedName, self.initializer))
@property
def name(self) -> ASTNestedName:
return self.nestedName
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
if version == 1:
raise NoOldIdError
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res = transform(self.nestedName)
if self.initializer:
res += transform(self.initializer)
return res
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.nestedName.describe_signature(signode, mode, env, symbol)
if self.initializer:
self.initializer.describe_signature(signode, mode, env, symbol)
|
ASTConcept
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3/modeling_qwen3.py
|
{
"start": 2297,
"end": 3067
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
Qwen3RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
Qwen3RMSNorm
|
python
|
openai__openai-python
|
src/openai/resources/fine_tuning/fine_tuning.py
|
{
"start": 3640,
"end": 4216
}
|
class ____:
def __init__(self, fine_tuning: AsyncFineTuning) -> None:
self._fine_tuning = fine_tuning
@cached_property
def jobs(self) -> AsyncJobsWithRawResponse:
return AsyncJobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
return AsyncCheckpointsWithRawResponse(self._fine_tuning.checkpoints)
@cached_property
def alpha(self) -> AsyncAlphaWithRawResponse:
return AsyncAlphaWithRawResponse(self._fine_tuning.alpha)
|
AsyncFineTuningWithRawResponse
|
python
|
huggingface__transformers
|
src/transformers/utils/quantization_config.py
|
{
"start": 93552,
"end": 94871
}
|
class ____(QuantizationConfigMixin):
"""
This is a wrapper class for all the attributes and features that you can play with when a model has been
loaded using mxfp4 quantization.
Args:
modules_to_not_convert (`list`, *optional*, defaults to `None`):
The list of modules to not quantize, useful for quantizing models that explicitly require some
modules to be left in their original precision.
dequantize (`bool`, *optional*, defaults to `False`):
Whether to dequantize the model to bf16 precision or not.
"""
def __init__(
self,
modules_to_not_convert: list | None = None,
dequantize: bool = False,
**kwargs,
):
self.quant_method = QuantizationMethod.MXFP4
self.modules_to_not_convert = modules_to_not_convert
self.dequantize = dequantize
def get_loading_attributes(self):
return {"dequantize": self.dequantize}
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
return {"quant_method": self.quant_method, "modules_to_not_convert": self.modules_to_not_convert}
|
Mxfp4Config
|
python
|
django__django
|
django/db/models/fields/tuple_lookups.py
|
{
"start": 491,
"end": 1193
}
|
class ____(Func):
allows_composite_expressions = True
function = ""
output_field = models.Field()
def __len__(self):
return len(self.source_expressions)
def __iter__(self):
return iter(self.source_expressions)
def as_sqlite(self, compiler, connection):
if connection.get_database_version() < (3, 37) and isinstance(
first_expr := self.source_expressions[0], Tuple
):
first_expr = first_expr.copy()
first_expr.function = "VALUES"
return Tuple(first_expr, *self.source_expressions[1:]).as_sql(
compiler, connection
)
return self.as_sql(compiler, connection)
|
Tuple
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_laguerre.py
|
{
"start": 9699,
"end": 11012
}
|
class ____:
def test_lagder(self):
# check exceptions
assert_raises(TypeError, lag.lagder, [0], .5)
assert_raises(ValueError, lag.lagder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0] * i + [1]
res = lag.lagder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = lag.lagder(lag.lagint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_lagder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T
res = lag.lagder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagder(c) for c in c2d])
res = lag.lagder(c2d, axis=1)
assert_almost_equal(res, tgt)
|
TestDerivative
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/tables.py
|
{
"start": 16118,
"end": 17556
}
|
class ____(CellFormatter):
''' HTML formatter using a template.
This uses Underscore's `template` method and syntax. http://underscorejs.org/#template
The formatter has access to other items in the row via the `dataContext` object passed to the formatter.
So, for example, if another column in the datasource was named `url`, the template could access it as:
.. code-block:: jinja
<a href="<%= url %>"><%= value %></a>
To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate`,
or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template
Example: Simple HTML template to format the column value as code.
.. code-block:: python
HTMLTemplateFormatter(template='<code><%= value %></code>')
Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.
.. code-block:: python
HTMLTemplateFormatter(template=
'<a href="https:/www.google.com/search?q=<%= manufacturer %>+<%= model %>" target="_blank"><%= value %></a>'
)
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
template = String('<%= value %>', help="""
Template string to be used by Underscore's template method.
""")
|
HTMLTemplateFormatter
|
python
|
getsentry__sentry
|
src/sentry/tsdb/redissnuba.py
|
{
"start": 3263,
"end": 3483
}
|
class ____(type):
def __new__(cls, name, bases, attrs):
for key in method_specifications.keys():
attrs[key] = make_method(key)
return type.__new__(cls, name, bases, attrs)
|
RedisSnubaTSDBMeta
|
python
|
PyCQA__pylint
|
pylint/config/argument.py
|
{
"start": 6990,
"end": 8307
}
|
class ____(_BaseStoreArgument):
"""Class representing a store argument to be parsed by an argparse.ArgumentsParser.
This is based on the parameters passed to argparse.ArgumentsParser.add_message.
See:
https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument
"""
# pylint: disable-next=too-many-arguments
def __init__(
self,
*,
flags: list[str],
action: str,
default: _ArgumentTypes,
arg_type: str,
choices: list[str] | None,
arg_help: str,
metavar: str,
hide_help: bool,
section: str | None,
) -> None:
super().__init__(
flags=flags,
action=action,
default=default,
arg_help=arg_help,
hide_help=hide_help,
section=section,
)
self.type = _TYPE_TRANSFORMERS[arg_type]
"""A transformer function that returns a transformed type of the argument."""
self.choices = choices
"""A list of possible choices for the argument.
None if there are no restrictions.
"""
self.metavar = metavar
"""The metavar of the argument.
See:
https://docs.python.org/3/library/argparse.html#metavar
"""
|
_StoreArgument
|
python
|
scipy__scipy
|
scipy/ndimage/tests/test_interpolation.py
|
{
"start": 3737,
"end": 5330
}
|
class ____:
def test_spline01(self, dtype, order, xp):
dtype = getattr(xp, dtype)
data = xp.ones([], dtype=dtype)
out = ndimage.spline_filter(data, order=order)
assert out == xp.asarray(1, dtype=out.dtype)
def test_spline02(self, dtype, order, xp):
dtype = getattr(xp, dtype)
data = xp.asarray([1], dtype=dtype)
out = ndimage.spline_filter(data, order=order)
assert_array_almost_equal(out, xp.asarray([1]))
@skip_xp_backends(np_only=True, exceptions=["cupy"],
reason='output=dtype is numpy-specific')
def test_spline03(self, dtype, order, xp):
dtype = getattr(xp, dtype)
data = xp.ones([], dtype=dtype)
out = ndimage.spline_filter(data, order, output=dtype)
assert out == xp.asarray(1, dtype=out.dtype)
def test_spline04(self, dtype, order, xp):
dtype = getattr(xp, dtype)
data = xp.ones([4], dtype=dtype)
out = ndimage.spline_filter(data, order)
assert_array_almost_equal(out, xp.asarray([1, 1, 1, 1]))
def test_spline05(self, dtype, order, xp):
dtype = getattr(xp, dtype)
data = xp.ones([4, 4], dtype=dtype)
out = ndimage.spline_filter(data, order=order)
expected = xp.asarray([[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]])
assert_array_almost_equal(out, expected)
@make_xp_test_case(ndimage.geometric_transform)
@pytest.mark.parametrize('order', range(0, 6))
|
TestSpline
|
python
|
realpython__materials
|
python-double-underscore/shapes.py
|
{
"start": 13,
"end": 178
}
|
class ____:
def __init__(self, radius):
self.radius = _validate(radius)
def calculate_area(self):
return round(_PI * self.radius**2, 2)
|
Circle
|
python
|
cython__cython
|
Cython/Compiler/ExprNodes.py
|
{
"start": 513879,
"end": 515101
}
|
class ____(IntBinopNode):
# '|' operator.
def analyse_pytyping_modifiers(self, env):
if self.operand1.is_none or self.operand2.is_none:
return ['typing.Optional']
def _analyse_bitwise_or_none(self, env, operand_node):
"""Analyse annotations in form `[...] | None` and `None | [...]`"""
with env.new_c_type_context(False):
ttype = operand_node.analyse_as_type(env)
if not ttype:
return None
if not ttype.can_be_optional():
# If ttype cannot be optional we need to return an equivalent Python type allowing None.
# If it cannot be mapped to a Python type, we must error out.
if ttype.equivalent_type and not operand_node.as_cython_attribute():
return ttype.equivalent_type
else:
error(operand_node.pos, f"'[...] | None' cannot be applied to type {ttype}")
return ttype
def analyse_as_type(self, env):
if self.operand1.is_none:
return self._analyse_bitwise_or_none(env, self.operand2)
elif self.operand2.is_none:
return self._analyse_bitwise_or_none(env, self.operand1)
return None
|
BitwiseOrNode
|
python
|
Delgan__loguru
|
loguru/_recattrs.py
|
{
"start": 2481,
"end": 3542
}
|
class ____:
"""A class representing a thread record with ID and name.
Attributes
----------
id : int
The thread ID
name : str
The thread name
"""
__slots__ = ("id", "name")
def __init__(self, id_, name):
"""Initialize a RecordThread instance.
Parameters
----------
id_ : int
The thread ID
name : str
The thread name
"""
self.id = id_
self.name = name
def __repr__(self):
"""Return string representation of RecordThread.
Returns
-------
str
Formatted string with id and name
"""
return "(id=%r, name=%r)" % (self.id, self.name)
def __format__(self, spec):
"""Format the RecordThread instance.
Parameters
----------
spec : str
Format specification
Returns
-------
str
Formatted ID according to specification
"""
return self.id.__format__(spec)
|
RecordThread
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 79075,
"end": 79452
}
|
class ____:
xlBlanks = 4 # from enum XlPTSelectionMode
xlButton = 15 # from enum XlPTSelectionMode
xlDataAndLabel = 0 # from enum XlPTSelectionMode
xlDataOnly = 2 # from enum XlPTSelectionMode
xlFirstRow = 256 # from enum XlPTSelectionMode
xlLabelOnly = 1 # from enum XlPTSelectionMode
xlOrigin = 3 # from enum XlPTSelectionMode
|
PTSelectionMode
|
python
|
scikit-learn__scikit-learn
|
sklearn/calibration.py
|
{
"start": 39182,
"end": 48230
}
|
class ____(RegressorMixin, BaseEstimator):
"""Temperature scaling model.
Attributes
----------
beta_ : float
The optimized inverse temperature.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : ndarray of shape (n_samples,) or (n_samples, n_classes)
Training data.
This should be the output of `decision_function` or `predict_proba`.
If the input appears to be probabilities (i.e., values between 0 and 1
that sum to 1 across classes), it will be converted to logits using
`np.log(p + eps)`.
Binary decision function outputs (1D) will be converted to two-class
logits of the form (-x, x). For shapes of the form (n_samples, 1), the
same process applies.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
xp, _, xp_device = get_namespace_and_device(X, y)
X, y = indexable(X, y)
check_consistent_length(X, y)
logits = _convert_to_logits(X) # guarantees xp.float64 or xp.float32
dtype_ = logits.dtype
labels = column_or_1d(y, dtype=dtype_)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, labels, dtype=dtype_)
if _is_numpy_namespace(xp):
multinomial_loss = HalfMultinomialLoss(n_classes=logits.shape[1])
else:
multinomial_loss = partial(_half_multinomial_loss, xp=xp)
def log_loss(log_beta=0.0):
"""Compute the log loss as a parameter of the inverse temperature
(beta).
Parameters
----------
log_beta : float
The current logarithm of the inverse temperature value during
optimisation.
Returns
-------
negative_log_likelihood_loss : float
The negative log likelihood loss.
"""
# TODO: numpy 2.0
# Ensure raw_prediction has the same dtype as labels using .astype().
# Without this, dtype promotion rules differ across NumPy versions:
#
# beta = np.float64(0)
# logits = np.array([1, 2], dtype=np.float32)
#
# result = beta * logits
# - NumPy < 2: result.dtype is float32
# - NumPy 2+: result.dtype is float64
#
# This can cause dtype mismatch errors downstream (e.g., buffer dtype).
log_beta = xp.asarray(log_beta, dtype=dtype_, device=xp_device)
raw_prediction = xp.exp(log_beta) * logits
return multinomial_loss(labels, raw_prediction, sample_weight)
xatol = 64 * xp.finfo(dtype_).eps
log_beta_minimizer = minimize_scalar(
log_loss,
bounds=(-10.0, 10.0),
options={
"xatol": xatol,
},
)
if not log_beta_minimizer.success: # pragma: no cover
raise RuntimeError(
"Temperature scaling fails to optimize during calibration. "
"Reason from `scipy.optimize.minimize_scalar`: "
f"{log_beta_minimizer.message}"
)
self.beta_ = xp.exp(
xp.asarray(log_beta_minimizer.x, dtype=dtype_, device=xp_device)
)
return self
def predict(self, X):
"""Predict new data by linear interpolation.
Parameters
----------
X : ndarray of shape (n_samples,) or (n_samples, n_classes)
Data to predict from.
This should be the output of `decision_function` or `predict_proba`.
If the input appears to be probabilities (i.e., values between 0 and 1
that sum to 1 across classes), it will be converted to logits using
`np.log(p + eps)`.
Binary decision function outputs (1D) will be converted to two-class
logits of the form (-x, x). For shapes of the form (n_samples, 1), the
same process applies.
Returns
-------
X_ : ndarray of shape (n_samples, n_classes)
The calibrated class probabilities.
"""
logits = _convert_to_logits(X)
return softmax(self.beta_ * logits)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.one_d_array = True
tags.input_tags.two_d_array = False
return tags
@validate_params(
{
"y_true": ["array-like"],
"y_prob": ["array-like"],
"pos_label": [Real, str, "boolean", None],
"n_bins": [Interval(Integral, 1, None, closed="left")],
"strategy": [StrOptions({"uniform", "quantile"})],
},
prefer_skip_nested_validation=True,
)
def calibration_curve(
y_true,
y_prob,
*,
pos_label=None,
n_bins=5,
strategy="uniform",
):
"""Compute true and predicted probabilities for a calibration curve.
The method assumes the inputs come from a binary classifier, and
discretizes the [0, 1] interval into bins.
Calibration curves may also be referred to as reliability diagrams.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True targets.
y_prob : array-like of shape (n_samples,)
Probabilities of the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
.. versionadded:: 1.1
n_bins : int, default=5
Number of bins to discretize the [0, 1] interval. A bigger number
requires more data. Bins with no samples (i.e. without
corresponding values in `y_prob`) will not be returned, thus the
returned arrays may have less than `n_bins` values.
strategy : {'uniform', 'quantile'}, default='uniform'
Strategy used to define the widths of the bins.
uniform
The bins have identical widths.
quantile
The bins have the same number of samples and depend on `y_prob`.
Returns
-------
prob_true : ndarray of shape (n_bins,) or smaller
The proportion of samples whose class is the positive class, in each
bin (fraction of positives).
prob_pred : ndarray of shape (n_bins,) or smaller
The mean predicted probability in each bin.
See Also
--------
CalibrationDisplay.from_predictions : Plot calibration curve using true
and predicted labels.
CalibrationDisplay.from_estimator : Plot calibration curve using an
estimator and data.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
Examples
--------
>>> import numpy as np
>>> from sklearn.calibration import calibration_curve
>>> y_true = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1])
>>> y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.])
>>> prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=3)
>>> prob_true
array([0. , 0.5, 1. ])
>>> prob_pred
array([0.2 , 0.525, 0.85 ])
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
check_consistent_length(y_true, y_prob)
pos_label = _check_pos_label_consistency(pos_label, y_true)
if y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1].")
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError(
f"Only binary classification is supported. Provided labels {labels}."
)
y_true = y_true == pos_label
if strategy == "quantile": # Determine bin edges by distribution of data
quantiles = np.linspace(0, 1, n_bins + 1)
bins = np.percentile(y_prob, quantiles * 100)
elif strategy == "uniform":
bins = np.linspace(0.0, 1.0, n_bins + 1)
else:
raise ValueError(
"Invalid entry to 'strategy' input. Strategy "
"must be either 'quantile' or 'uniform'."
)
binids = np.searchsorted(bins[1:-1], y_prob)
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = bin_true[nonzero] / bin_total[nonzero]
prob_pred = bin_sums[nonzero] / bin_total[nonzero]
return prob_true, prob_pred
|
_TemperatureScaling
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/packages.py
|
{
"start": 2154,
"end": 2304
}
|
class ____(Enum):
Operators = "Operators"
Transfers = "Transfers"
Sensors = "Sensors"
Hooks = "Hooks"
Secrets = "Secrets"
|
EntityType
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0119_alter_addonsconfig_flyout_sorting_custom_pattern_and_more.py
|
{
"start": 150,
"end": 1229
}
|
class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0118_addons_flyout_sorting"),
]
operations = [
migrations.AlterField(
model_name="addonsconfig",
name="flyout_sorting_custom_pattern",
field=models.CharField(
blank=True,
default=None,
help_text='Sorting pattern supported by BumpVer (<a href="https://github.com/mbarkhau/bumpver#pattern-examples">See examples</a>)',
max_length=32,
null=True,
),
),
migrations.AlterField(
model_name="historicaladdonsconfig",
name="flyout_sorting_custom_pattern",
field=models.CharField(
blank=True,
default=None,
help_text='Sorting pattern supported by BumpVer (<a href="https://github.com/mbarkhau/bumpver#pattern-examples">See examples</a>)',
max_length=32,
null=True,
),
),
]
|
Migration
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-sqlalchemy/tests/test_database.py
|
{
"start": 1360,
"end": 1569
}
|
class ____:
def dispose(self):
return True
@contextmanager
def connect(self):
try:
yield SQLAlchemyConnectionMock()
finally:
pass
|
SQLAlchemyEngineMock
|
python
|
numba__numba
|
numba/core/types/functions.py
|
{
"start": 26002,
"end": 27068
}
|
class ____(Opaque):
"""
Recursive call to a Dispatcher.
"""
_overloads = None
def __init__(self, dispatcher_type):
assert isinstance(dispatcher_type, Dispatcher)
self.dispatcher_type = dispatcher_type
name = "recursive(%s)" % (dispatcher_type,)
super(RecursiveCall, self).__init__(name)
# Initializing for the first time
if self._overloads is None:
self._overloads = {}
def add_overloads(self, args, qualname, uid):
"""Add an overload of the function.
Parameters
----------
args :
argument types
qualname :
function qualifying name
uid :
unique id
"""
self._overloads[args] = _RecursiveCallOverloads(qualname, uid)
def get_overloads(self, args):
"""Get the qualifying name and unique id for the overload given the
argument types.
"""
return self._overloads[args]
@property
def key(self):
return self.dispatcher_type
|
RecursiveCall
|
python
|
cookiecutter__cookiecutter
|
cookiecutter/environment.py
|
{
"start": 2003,
"end": 2474
}
|
class ____(ExtensionLoaderMixin, Environment):
"""Create strict Jinja2 environment.
Jinja2 environment will raise an error on undefined variables in template-
rendering context.
"""
def __init__(self, **kwargs: Any) -> None:
"""Set the standard Cookiecutter StrictEnvironment.
Also loading extensions defined in cookiecutter.json's _extensions key.
"""
super().__init__(undefined=StrictUndefined, **kwargs)
|
StrictEnvironment
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 471963,
"end": 472478
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of ApproveDeployments"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "deployments")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
deployments = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null("Deployment")), graphql_name="deployments")
"""The affected deployments."""
|
ApproveDeploymentsPayload
|
python
|
fabric__fabric
|
fabric/tasks.py
|
{
"start": 52,
"end": 2876
}
|
class ____(invoke.Task):
"""
Extends `invoke.tasks.Task` with knowledge of target hosts and similar.
As `invoke.tasks.Task` relegates documentation responsibility to its `@task
<invoke.tasks.task>` expression, so we relegate most details to our version
of `@task <fabric.tasks.task>` - please see its docs for details.
.. versionadded:: 2.1
"""
def __init__(self, *args, **kwargs):
# Pull out our own kwargs before hitting super, which will TypeError on
# anything it doesn't know about.
self.hosts = kwargs.pop("hosts", None)
super().__init__(*args, **kwargs)
def task(*args, **kwargs):
"""
Wraps/extends Invoke's `@task <invoke.tasks.task>` with extra kwargs.
See `the Invoke-level API docs <invoke.tasks.task>` for most details; this
Fabric-specific implementation adds the following additional keyword
arguments:
:param hosts:
An iterable of host-connection specifiers appropriate for eventually
instantiating a `.Connection`. The existence of this argument will
trigger automatic parameterization of the task when invoked from the
CLI, similar to the behavior of :option:`--hosts`.
.. note::
This parameterization is "lower-level" than that driven by
:option:`--hosts`: if a task decorated with this parameter is
executed in a session where :option:`--hosts` was given, the
CLI-driven value will win out.
List members may be one of:
- A string appropriate for being the first positional argument to
`.Connection` - see its docs for details, but these are typically
shorthand-only convenience strings like ``hostname.example.com`` or
``user@host:port``.
- A dictionary appropriate for use as keyword arguments when
instantiating a `.Connection`. Useful for values that don't mesh well
with simple strings (e.g. statically defined IPv6 addresses) or to
bake in more complex info (eg ``connect_timeout``, ``connect_kwargs``
params like auth info, etc).
These two value types *may* be mixed together in the same list, though
we recommend that you keep things homogeneous when possible, to avoid
confusion when debugging.
.. note::
No automatic deduplication of values is performed; if you pass in
multiple references to the same effective target host, the wrapped
task will execute on that host multiple times (including making
separate connections).
.. versionadded:: 2.1
"""
# Override klass to be our own Task, not Invoke's, unless somebody gave it
# explicitly.
kwargs.setdefault("klass", Task)
return invoke.task(*args, **kwargs)
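# A minimal fabfile sketch using the extra `hosts` kwarg documented above; the
# host strings and the command are placeholders.
from fabric import task
@task(hosts=["user@web1.example.com", {"host": "web2.example.com", "user": "deploy"}])
def uptime(c):
    c.run("uptime")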
|
Task
|
python
|
ray-project__ray
|
python/ray/data/tests/test_map.py
|
{
"start": 33840,
"end": 44535
}
|
class ____:
@pytest.fixture
def mock_actor_async_ctx(self):
_map_actor_ctx = _MapActorContext(Mock(), Mock(), is_async=True)
loop: AbstractEventLoop = _map_actor_ctx.udf_map_asyncio_loop
assert loop is not None
with patch("ray.data._map_actor_context", _map_actor_ctx):
yield _map_actor_ctx
loop.call_soon_threadsafe(loop.stop)
_map_actor_ctx.udf_map_asyncio_thread.join()
def test_non_coroutine_function_assertion(
self, target_max_block_size_infinite_or_default
):
"""Test that non-coroutine function raises assertion error."""
def sync_fn(x):
return x
validate_fn = Mock()
with pytest.raises(ValueError, match="Expected a coroutine function"):
_generate_transform_fn_for_async_map(
sync_fn, validate_fn, max_concurrency=1
)
def test_zero_max_concurrent_batches_assertion(
self, target_max_block_size_infinite_or_default
):
"""Test that zero max_concurrent_batches raises assertion error."""
async def async_fn(x):
yield x
validate_fn = Mock()
with pytest.raises(AssertionError):
_generate_transform_fn_for_async_map(
async_fn, validate_fn, max_concurrency=0
)
def test_empty_input(
self, mock_actor_async_ctx, target_max_block_size_infinite_or_default
):
"""Test with empty input iterator."""
async def async_fn(x):
yield x
validate_fn = Mock()
transform_fn = _generate_transform_fn_for_async_map(
async_fn, validate_fn, max_concurrency=2
)
task_context = Mock()
assert list(transform_fn([], task_context)) == []
validate_fn.assert_not_called()
@pytest.mark.parametrize("udf_kind", ["coroutine", "async_gen"])
def test_basic_async_processing(
self, udf_kind, mock_actor_async_ctx, target_max_block_size_infinite_or_default
):
"""Test basic async processing with order preservation."""
if udf_kind == "async_gen":
async def async_fn(x):
# Randomly slow-down UDFs (capped by 5ms)
delay = random.randint(0, 5) / 1000
await asyncio.sleep(delay)
yield x
elif udf_kind == "coroutine":
async def async_fn(x):
# Randomly slow-down UDFs (capped by 5ms)
delay = random.randint(0, 5) / 1000
await asyncio.sleep(delay)
return x
else:
pytest.fail(f"Unrecognized udf_kind ({udf_kind})")
validate_fn = Mock()
transform_fn = _generate_transform_fn_for_async_map(
async_fn, validate_fn, max_concurrency=100
)
N = 10_000
task_context = Mock()
result = list(transform_fn(range(N), task_context))
assert result == list(range(N))
assert validate_fn.call_count == N
@pytest.mark.parametrize("result_len", [0, 5])
def test_basic_async_processing_with_iterator(
self,
result_len: int,
mock_actor_async_ctx,
target_max_block_size_infinite_or_default,
):
"""Test UDF that yields multiple items per input."""
async def multi_yield_fn(x):
for i in range(result_len):
yield f"processed_{x}_{i}"
validate_fn = Mock()
transform_fn = _generate_transform_fn_for_async_map(
multi_yield_fn, validate_fn, max_concurrency=2
)
task_context = Mock()
input_seq = [1, 2]
# NOTE: Outputs are expected to match input sequence ordering
expected = [f"processed_{x}_{i}" for x in input_seq for i in range(result_len)]
assert list(transform_fn(input_seq, task_context)) == expected
def test_concurrency_limiting(
self,
mock_actor_async_ctx,
restore_data_context,
target_max_block_size_infinite_or_default,
):
"""Test that concurrency is properly limited."""
max_concurrency = 10
concurrent_task_counter = 0
async def async_fn(x):
# NOTE: This is safe, since event-loop is single-threaded
nonlocal concurrent_task_counter
concurrent_task_counter += 1
assert concurrent_task_counter <= max_concurrency
yield x
# NOTE: We're doing sleep here to interrupt the task and yield
# event loop to the next one (otherwise tasks will simply be
# completed sequentially)
await asyncio.sleep(0.001)
concurrent_task_counter -= 1
validate_fn = Mock()
transform_fn = _generate_transform_fn_for_async_map(
async_fn, validate_fn, max_concurrency=max_concurrency
)
task_context = Mock()
result = list(transform_fn(range(10_000), task_context))
assert len(result) == 10_000
@pytest.mark.parametrize("failure_kind", ["udf", "validation"])
def test_exception_in_udf(
self,
failure_kind: str,
mock_actor_async_ctx,
target_max_block_size_infinite_or_default,
):
"""Test exception handling in UDF."""
udf_failure_msg = "UDF failure"
validation_failure_msg = "Validation failure"
async def failing_async_fn(x):
if failure_kind == "udf" and x == 2:
raise ValueError(udf_failure_msg)
yield x
def validate_fn(x):
if failure_kind == "validation" and x == 2:
raise ValueError(validation_failure_msg)
transform_fn = _generate_transform_fn_for_async_map(
failing_async_fn, validate_fn, max_concurrency=2
)
task_context = Mock()
if failure_kind == "udf":
expected_exception_msg = udf_failure_msg
elif failure_kind == "validation":
expected_exception_msg = validation_failure_msg
else:
pytest.fail(f"Unexpected failure type ({failure_kind})")
with pytest.raises(ValueError, match=expected_exception_msg):
list(transform_fn([1, 2, 3], task_context))
@pytest.mark.parametrize("fn_type", ["func", "class"])
def test_map_operator_warns_on_few_inputs(
fn_type: Literal["func", "class"],
shutdown_only,
target_max_block_size_infinite_or_default,
):
if fn_type == "func":
def fn(row):
return row
else:
class fn:
def __call__(self, row):
return row
with pytest.warns(UserWarning, match="can launch at most 1 task"):
# The user specified `concurrency=2` for the map operator, but the pipeline
# can only launch one task because there's only one input block. So, Ray Data
# should emit a warning instructing the user to increase the number of input
# blocks.
ray.data.range(2, override_num_blocks=1).map(fn, concurrency=2).materialize()
def test_map_op_backpressure_configured_properly(
target_max_block_size_infinite_or_default,
):
"""This test asserts that configuration of the MapOperator generator's back-pressure is
propagated appropriately to Ray Core.
"""
total = 5
def _map_raising(r):
if isinstance(r["item"], Exception):
raise r["item"]
return r
# Reset this to make sure test is invariant of default value changes
DataContext.get_current()._max_num_blocks_in_streaming_gen_buffer = 2
# To simulate incremental iteration we are
# - Aggressively applying back-pressure (allowing no more than a single block
# to be in the queue)
# - Restrict Map Operator concurrency to run no more than 1 task at a time
#
# At the end of the pipeline we fetch only first 4 elements (instead of 5) to prevent the last 1
# from executing (1 is going to be a buffered block)
df = ray.data.from_items(
list(range(5)) + [ValueError("failed!")], override_num_blocks=6
)
# NOTE: Default back-pressure configuration allows 2 blocks in the
# generator's buffer, hence default execution will fail as we'd
# try map all 6 elements
with pytest.raises(RayTaskError) as exc_info:
df.map(_map_raising).materialize()
assert str(ValueError("failed")) in str(exc_info.value)
# Reducing number of blocks in the generator buffer, will prevent this pipeline
# from throwing
vals = (
df.map(
_map_raising,
concurrency=1,
ray_remote_args_fn=lambda: {
"_generator_backpressure_num_objects": 2, # 1 for block, 1 for metadata
},
)
.limit(total - 1)
.take_batch()["item"]
.tolist()
)
assert list(range(5))[:-1] == vals
@pytest.mark.skipif(
get_pyarrow_version() < MIN_PYARROW_VERSION_TYPE_PROMOTION,
reason="Requires pyarrow>=14 for unify_schemas in OneHotEncoder",
)
def test_map_names(target_max_block_size_infinite_or_default):
"""To test different UDF format such that the operator
has the correct representation.
The actual name is handled by
AbstractUDFMap._get_operator_name()
"""
ds = ray.data.range(5)
r = ds.map(lambda x: {"id": str(x["id"])}).__repr__()
assert r.startswith("Map(<lambda>)"), r
class C:
def __call__(self, x):
return x
r = ds.map(C, concurrency=4).__repr__()
assert r.startswith("Map(C)"), r
# Simple and partial functions
def func(x, y):
return x
r = ds.map(func, fn_args=[0]).__repr__()
assert r.startswith("Map(func)")
from functools import partial
r = ds.map(partial(func, y=1)).__repr__()
assert r.startswith("Map(func)"), r
# Preprocessor
from ray.data.preprocessors import OneHotEncoder
ds = ray.data.from_items(["a", "b", "c", "a", "b", "c"])
enc = OneHotEncoder(columns=["item"])
r = enc.fit_transform(ds).__repr__()
assert "OneHotEncoder" in r, r
def test_map_with_max_calls():
ds = ray.data.range(10)
# OK to set 'max_calls' as static option
ds = ds.map(lambda x: x, max_calls=1)
assert ds.count() == 10
ds = ray.data.range(10)
# Not OK to set 'max_calls' as dynamic option
with pytest.raises(ValueError):
ds = ds.map(
lambda x: x,
ray_remote_args_fn=lambda: {"max_calls": 1},
)
ds.take_all()
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
TestGenerateTransformFnForAsyncMap
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/workers.py
|
{
"start": 75896,
"end": 76135
}
|
class ____(Response):
"""
Response of workers.register endpoint.
"""
_service = "workers"
_action = "register"
_version = "2.20"
_schema = {"definitions": {}, "properties": {}, "type": "object"}
|
RegisterResponse
|
python
|
Textualize__textual
|
src/textual/color.py
|
{
"start": 20771,
"end": 27370
}
|
class ____:
"""Defines a color gradient."""
def __init__(self, *stops: tuple[float, Color | str], quality: int = 50) -> None:
"""Create a color gradient that blends colors to form a spectrum.
A gradient is defined by a sequence of "stops" consisting of a tuple containing a float and a color.
The stop indicates the color at that point on a spectrum between 0 and 1.
Colors may be given as a [Color][textual.color.Color] instance, or a string that
can be parsed into a Color (with [Color.parse][textual.color.Color.parse]).
The `quality` argument defines the number of _steps_ in the gradient. Intermediate colors are
interpolated from the two nearest colors. Increasing `quality` can generate a smoother looking gradient,
at the expense of a little extra work to pre-calculate the colors.
Args:
stops: Color stops.
quality: The number of steps in the gradient.
Raises:
ValueError: If any stops are missing (must be at least a stop for 0 and 1).
"""
parse = Color.parse
self._stops = sorted(
[
(
(position, parse(color))
if isinstance(color, str)
else (position, color)
)
for position, color in stops
]
)
if len(stops) < 2:
raise ValueError("At least 2 stops required.")
if self._stops[0][0] != 0.0:
raise ValueError("First stop must be 0.")
if self._stops[-1][0] != 1.0:
raise ValueError("Last stop must be 1.")
self._quality = quality
self._colors: list[Color] | None = None
self._rich_colors: list[RichColor] | None = None
@classmethod
def from_colors(cls, *colors: Color | str, quality: int = 50) -> Gradient:
"""Construct a gradient form a sequence of colors, where the stops are evenly spaced.
Args:
*colors: Positional arguments may be Color instances or strings to parse into a color.
quality: The number of steps in the gradient.
Returns:
A new Gradient instance.
"""
if len(colors) < 2:
raise ValueError("Two or more colors required.")
stops = [(i / (len(colors) - 1), Color.parse(c)) for i, c in enumerate(colors)]
return cls(*stops, quality=quality)
@property
def colors(self) -> list[Color]:
"""A list of colors in the gradient."""
position = 0
quality = self._quality
if self._colors is None:
colors: list[Color] = []
add_color = colors.append
(stop1, color1), (stop2, color2) = self._stops[0:2]
for step_position in range(quality):
step = step_position / (quality - 1)
while step > stop2:
position += 1
(stop1, color1), (stop2, color2) = self._stops[
position : position + 2
]
add_color(color1.blend(color2, (step - stop1) / (stop2 - stop1)))
self._colors = colors
assert len(self._colors) == self._quality
return self._colors
def get_color(self, position: float) -> Color:
"""Get a color from the gradient at a position between 0 and 1.
Positions that are between stops will return a blended color.
Args:
position: A number between 0 and 1, where 0 is the first stop, and 1 is the last.
Returns:
A Textual color.
"""
if position <= 0:
return self.colors[0]
if position >= 1:
return self.colors[-1]
color_position = position * (self._quality - 1)
color_index = int(color_position)
color1, color2 = self.colors[color_index : color_index + 2]
return color1.blend(color2, color_position % 1)
def get_rich_color(self, position: float) -> RichColor:
"""Get a (Rich) color from the gradient at a position between 0 and 1.
Positions that are between stops will return a blended color.
Args:
position: A number between 0 and 1, where 0 is the first stop, and 1 is the last.
Returns:
A (Rich) color.
"""
return self.get_color(position).rich_color
# Color constants
WHITE: Final = Color(255, 255, 255)
"""A constant for pure white."""
BLACK: Final = Color(0, 0, 0)
"""A constant for pure black."""
TRANSPARENT: Final = Color.parse("transparent")
"""A constant for transparent."""
def rgb_to_lab(rgb: Color) -> Lab:
"""Convert an RGB color to the CIE-L*ab format.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php.
"""
r, g, b = rgb.r / 255, rgb.g / 255, rgb.b / 255
r = pow((r + 0.055) / 1.055, 2.4) if r > 0.04045 else r / 12.92
g = pow((g + 0.055) / 1.055, 2.4) if g > 0.04045 else g / 12.92
b = pow((b + 0.055) / 1.055, 2.4) if b > 0.04045 else b / 12.92
x = (r * 41.24 + g * 35.76 + b * 18.05) / 95.047
y = (r * 21.26 + g * 71.52 + b * 7.22) / 100
z = (r * 1.93 + g * 11.92 + b * 95.05) / 108.883
off = 16 / 116
x = pow(x, 1 / 3) if x > 0.008856 else 7.787 * x + off
y = pow(y, 1 / 3) if y > 0.008856 else 7.787 * y + off
z = pow(z, 1 / 3) if z > 0.008856 else 7.787 * z + off
return Lab(116 * y - 16, 500 * (x - y), 200 * (y - z))
def lab_to_rgb(lab: Lab, alpha: float = 1.0) -> Color:
"""Convert a CIE-L*ab color to RGB.
Uses the standard RGB color space with a D65/2⁰ standard illuminant.
Conversion passes through the XYZ color space.
Cf. http://www.easyrgb.com/en/math.php.
"""
y = (lab.L + 16) / 116
x = lab.a / 500 + y
z = y - lab.b / 200
off = 16 / 116
y = pow(y, 3) if y > 0.2068930344 else (y - off) / 7.787
x = 0.95047 * pow(x, 3) if x > 0.2068930344 else 0.122059 * (x - off)
z = 1.08883 * pow(z, 3) if z > 0.2068930344 else 0.139827 * (z - off)
r = x * 3.2406 + y * -1.5372 + z * -0.4986
g = x * -0.9689 + y * 1.8758 + z * 0.0415
b = x * 0.0557 + y * -0.2040 + z * 1.0570
r = 1.055 * pow(r, 1 / 2.4) - 0.055 if r > 0.0031308 else 12.92 * r
g = 1.055 * pow(g, 1 / 2.4) - 0.055 if g > 0.0031308 else 12.92 * g
b = 1.055 * pow(b, 1 / 2.4) - 0.055 if b > 0.0031308 else 12.92 * b
return Color(int(r * 255), int(g * 255), int(b * 255), alpha)
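# A minimal usage sketch of the Gradient class defined above, imported from this
# file's public module path; the colors and positions are illustrative only.
from textual.color import Gradient
gradient = Gradient.from_colors("#ff0000", "#0000ff", quality=50)
print(gradient.get_color(0.0))   # pure red
print(gradient.get_color(0.5))   # a red/blue blend
print(gradient.get_color(1.0))   # pure blue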
|
Gradient
|
python
|
getsentry__sentry
|
src/sentry/integrations/aws_lambda/integration.py
|
{
"start": 10748,
"end": 13483
}
|
class ____:
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
curr_step = 0 if pipeline.fetch_state("skipped_project_select") else 1
def render_response(error=None):
assert pipeline.organization is not None
serialized_organization = organization_service.serialize_organization(
id=pipeline.organization.id,
as_user=(
serialize_rpc_user(request.user) if isinstance(request.user, User) else None
),
)
template_url = options.get("aws-lambda.cloudformation-url")
context = {
"baseCloudformationUrl": "https://console.aws.amazon.com/cloudformation/home#/stacks/create/review",
"templateUrl": template_url,
"stackName": "Sentry-Monitoring-Stack",
"regionList": ALL_AWS_REGIONS,
"accountNumber": pipeline.fetch_state("account_number"),
"region": pipeline.fetch_state("region"),
"error": error,
"initialStepNumber": curr_step,
"organization": serialized_organization,
"awsExternalId": pipeline.fetch_state("aws_external_id"),
}
return render_react_view(request, "awsLambdaCloudformation", context)
# form submit adds accountNumber to GET parameters
if "accountNumber" in request.GET:
data = request.GET
# load parameters post request
account_number = data["accountNumber"]
region = data["region"]
aws_external_id = data["awsExternalId"]
pipeline.bind_state("account_number", account_number)
pipeline.bind_state("region", region)
pipeline.bind_state("aws_external_id", aws_external_id)
# now validate the arn works
try:
gen_aws_client(account_number, region, aws_external_id)
except ClientError:
return render_response(
_("Please validate the Cloudformation stack was created successfully")
)
except ConfigurationError:
# if we have a configuration error, we should blow up the pipeline
raise
except Exception as e:
logger.exception(
"AwsLambdaCloudFormationPipelineView.unexpected_error",
extra={"error": str(e)},
)
return render_response(_("Unknown error"))
# if no error, continue
return pipeline.next_step()
return render_response()
|
AwsLambdaCloudFormationPipelineView
|
python
|
encode__django-rest-framework
|
tests/test_validators.py
|
{
"start": 32232,
"end": 32372
}
|
class ____(serializers.ModelSerializer):
class Meta:
model = UniqueForDateModel
fields = '__all__'
|
UniqueForDateSerializer
|
python
|
scrapy__scrapy
|
tests/test_dupefilters.py
|
{
"start": 954,
"end": 999
}
|
class ____:
method = "n/a"
|
DirectDupeFilter
|
python
|
django__django
|
tests/fixtures_regress/models.py
|
{
"start": 75,
"end": 402
}
|
class ____(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __str__(self):
return self.name
|
Animal
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/exceptions.py
|
{
"start": 16146,
"end": 17774
}
|
class ____(HashError):
"""A hash was needed for a requirement but is absent."""
order = 2
head = (
"Hashes are required in --require-hashes mode, but they are "
"missing from some requirements. Here is a list of those "
"requirements along with the hashes their downloaded archives "
"actually had. Add lines like these to your requirements files to "
"prevent tampering. (If you did not enable --require-hashes "
"manually, note that it turns on automatically when any package "
"has a hash.)"
)
def __init__(self, gotten_hash: str) -> None:
"""
:param gotten_hash: The hash of the (possibly malicious) archive we
just downloaded
"""
self.gotten_hash = gotten_hash
def body(self) -> str:
# Dodge circular import.
from pipenv.patched.pip._internal.utils.hashes import FAVORITE_HASH
package = None
if self.req:
# In the case of URL-based requirements, display the original URL
# seen in the requirements file rather than the package name,
# so the output can be directly copied into the requirements file.
package = (
self.req.original_link
if self.req.is_direct
# In case someone feeds something downright stupid
# to InstallRequirement's constructor.
else getattr(self.req, "req", None)
)
return " {} --hash={}:{}".format(
package or "unknown package", FAVORITE_HASH, self.gotten_hash
)
|
HashMissing
|
python
|
apache__airflow
|
airflow-core/src/airflow/utils/file.py
|
{
"start": 1771,
"end": 2843
}
|
class ____(NamedTuple):
"""Typed namedtuple with utility functions for regexp ignore rules."""
pattern: Pattern
base_dir: Path
@staticmethod
def compile(pattern: str, base_dir: Path, definition_file: Path) -> _IgnoreRule | None:
"""Build an ignore rule from the supplied regexp pattern and log a useful warning if it is invalid."""
try:
return _RegexpIgnoreRule(re.compile(pattern), base_dir)
except re.error as e:
log.warning("Ignoring invalid regex '%s' from %s: %s", pattern, definition_file, e)
return None
@staticmethod
def match(path: Path, rules: list[_IgnoreRule]) -> bool:
"""Match a list of ignore rules against the supplied path."""
for rule in rules:
if not isinstance(rule, _RegexpIgnoreRule):
raise ValueError(f"_RegexpIgnoreRule cannot match rules of type: {type(rule)}")
if rule.pattern.search(str(path.relative_to(rule.base_dir))) is not None:
return True
return False
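# A minimal usage sketch of the compile/match helpers above; the paths and pattern
# are placeholders, and the private import path follows this file's location.
from pathlib import Path
from airflow.utils.file import _RegexpIgnoreRule
rules = []
rule = _RegexpIgnoreRule.compile(r".*_backup\.py$", Path("/dags"), Path("/dags/.airflowignore"))
if rule is not None:
    rules.append(rule)
print(_RegexpIgnoreRule.match(Path("/dags/old_backup.py"), rules))  # True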
|
_RegexpIgnoreRule
|
python
|
pytorch__pytorch
|
test/test_spectral_ops.py
|
{
"start": 66459,
"end": 67144
}
|
class ____(TestCase):
pass
def generate_doc_test(doc_test):
def test(self, device):
self.assertEqual(device, 'cpu')
runner = doctest.DocTestRunner()
runner.run(doc_test)
if runner.failures != 0:
runner.summarize()
self.fail('Doctest failed')
setattr(TestFFTDocExamples, 'test_' + doc_test.name, skipCPUIfNoFFT(test))
for doc_test in FFTDocTestFinder().find(torch.fft, globs=dict(torch=torch)):
generate_doc_test(doc_test)
instantiate_device_type_tests(TestFFT, globals())
instantiate_device_type_tests(TestFFTDocExamples, globals(), only_for='cpu')
if __name__ == '__main__':
run_tests()
|
TestFFTDocExamples
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/combinations.py
|
{
"start": 10884,
"end": 16212
}
|
class ____(object):
"""Wraps a `tf.distribute.Strategy` and adds a name for test titles."""
def __init__(self,
name,
distribution_fn,
required_gpus=None,
required_physical_gpus=0,
required_tpu=False,
use_cloud_tpu=False,
has_chief=False,
num_workers=1,
num_ps=0,
share_gpu=True,
pool_runner_fn=None,
no_xla=False):
"""Initialize NamedDistribution.
Args:
name: Name that will be a part of the name of the test case.
distribution_fn: A callable that creates a `tf.distribute.Strategy`.
required_gpus: The number of GPUs that the strategy requires. Only one of
`required_gpus` and `required_physical_gpus` should be set.
required_physical_gpus: Number of physical GPUs required. Only one of
`required_gpus` and `required_physical_gpus` should be set.
required_tpu: Whether the strategy requires TPU.
use_cloud_tpu: Whether the strategy requires cloud TPU.
has_chief: Whether the strategy requires a chief worker.
num_workers: The number of workers that the strategy requires.
num_ps: The number of parameter servers.
share_gpu: Whether to share GPUs among workers.
pool_runner_fn: An optional callable that returns a MultiProcessPoolRunner
to run the test.
no_xla: Whether to skip in XLA tests.
"""
object.__init__(self)
self._name = name
self._distribution_fn = distribution_fn
self.required_gpus = required_gpus
self.required_physical_gpus = required_physical_gpus
self.required_tpu = required_tpu
self.use_cloud_tpu = use_cloud_tpu
self.has_chief = has_chief
self.num_workers = num_workers
self.num_ps = num_ps
self.share_gpu = share_gpu
self._pool_runner_fn = pool_runner_fn
self.no_xla = no_xla
@property
def runner(self):
if self._pool_runner_fn is not None:
return self._pool_runner_fn()
return None
@property
def strategy(self):
return self._distribution_fn()
def __repr__(self):
return self._name
# This is to allow adding combinations that runs a function both as a
# tf.function and eagerly.
#
# @combinations.generate(
# combinations.combine(
# tf_function = [combinations.tf_function, combinations.no_tf_function]
# )
# )
# def testXXX(tf_function):
# @tf_function
# def foo():
# tf.add(1., 1.)
#
# foo()
tf_function = combinations_lib.NamedObject("TfFunction", def_function.function)
no_tf_function = combinations_lib.NamedObject("NoTfFunction", lambda f: f)
def concat(*combined):
"""Concats combinations."""
result = []
for one in combined:
result += one
return result
@tf_export("__internal__.distribute.combinations.generate", v1=[])
def generate(combinations, test_combinations=()):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""Distributed adapter of `tf.__internal__.test.combinations.generate`.
All tests with distributed strategy should use this one instead of
`tf.__internal__.test.combinations.generate`. This function has support of
strategy combinations, GPU/TPU and multi worker support.
See `tf.__internal__.test.combinations.generate` for usage.
"""
# pylint: enable=g-doc-args,g-doc-return-or-yield
default_combinations = (
framework_combinations.EagerGraphCombination(),
framework_combinations.TFVersionCombination(),
ClusterCombination(),
DistributionCombination(),
GPUCombination(),
TPUCombination(),
)
# We apply our own decoration to handle multi worker tests before applying
# framework.test_combinations.generate. The order is important since we need
# framework.test_combinations.generate to apply all parameter modifiers first.
combination_decorator = combinations_lib.generate(
combinations, test_combinations=default_combinations + test_combinations)
def decorator(test_method_or_class):
if isinstance(test_method_or_class, type):
# If it's a test class.
class_object = test_method_or_class
# Decorate each test method with _multi_worker_test.
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
setattr(class_object, name, _multi_worker_test(test_method))
return combination_decorator(class_object)
else:
return combination_decorator(_multi_worker_test(test_method_or_class))
return decorator
combine = combinations_lib.combine
times = combinations_lib.times
NamedObject = combinations_lib.NamedObject
# Identifies whether we're in the main process or worker processes.
# `_multi_worker_test` decoration behaves differently in the main processes and
# the worker processes. See the documentation of _multi_worker_test for detail.
_running_in_worker = False
@tf_export("__internal__.distribute.combinations.in_main_process", v1=[])
def in_main_process():
"""Whether it's in the main test process.
This is normally used to prepare the test environment which should only happen
in the main process.
Returns:
A boolean.
"""
return not _running_in_worker
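# Illustrative sketch (not part of the original module): a hypothetical NamedDistribution
# used with the decorator pattern documented above; the strategy and test names are
# assumptions.
#
# one_device_cpu = NamedDistribution(
#     "OneDeviceCPU", lambda: tf.distribute.OneDeviceStrategy("/cpu:0"))
#
# @generate(combine(distribution=[one_device_cpu], mode=["eager"]))
# def testAdd(self, distribution):
#   ...  # `distribution` is resolved to the underlying tf.distribute.Strategy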
|
NamedDistribution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/generic1.py
|
{
"start": 327,
"end": 399
}
|
class ____(Generic[int]):
...
# This should generate two errors.
|
Class3
|
python
|
getsentry__sentry
|
src/sentry/replays/usecases/query/conditions/aggregate.py
|
{
"start": 2487,
"end": 3807
}
|
class ____(GenericBase):
@staticmethod
def visit_eq(expression: Expression, value: str | None) -> Condition:
if value is None:
return does_not_contain(_nonnull_ipv4(expression))
return contains(IPv4Scalar.visit_eq(expression, value))
@staticmethod
def visit_neq(expression: Expression, value: str | None) -> Condition:
if value is None:
return contains(_nonnull_ipv4(expression))
return does_not_contain(IPv4Scalar.visit_eq(expression, value))
@staticmethod
def visit_in(expression: Expression, value_list: list[str | None]) -> Condition:
nonempty_case = contains(
IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None])
)
if None in value_list:
return Or(conditions=[SumOfIPv4Scalar.visit_eq(expression, None), nonempty_case])
return nonempty_case
@staticmethod
def visit_not_in(expression: Expression, value_list: list[str | None]) -> Condition:
nonempty_case = does_not_contain(
IPv4Scalar.visit_in(expression, [v for v in value_list if v is not None])
)
if None in value_list:
return And(conditions=[SumOfIPv4Scalar.visit_neq(expression, None), nonempty_case])
return nonempty_case
|
SumOfIPv4Scalar
|
python
|
huggingface__transformers
|
src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py
|
{
"start": 1095,
"end": 13561
}
|
class ____(SequenceFeatureExtractor):
r"""
Constructs a SeamlessM4T feature extractor.
This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 80):
Number of Mel-frequency bins.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding vectors.
stride (`int`, *optional*, defaults to 2):
Stride used to reshape audios from shape (batch_size,num_frames,num_mel_bins) to
(batch_size,num_frames//stride,num_mel_bins*stride).
"""
model_input_names = ["input_features", "attention_mask"]
def __init__(
self,
feature_size=80,
sampling_rate=16000,
num_mel_bins=80,
padding_value=0.0,
stride=2,
**kwargs,
):
self.num_mel_bins = num_mel_bins
self.return_attention_mask = True
self.stride = stride
mel_filters = mel_filter_bank(
num_frequency_bins=257,
num_mel_filters=self.num_mel_bins,
min_frequency=20,
max_frequency=sampling_rate // 2,
sampling_rate=sampling_rate,
norm=None,
mel_scale="kaldi",
triangularize_in_mel_space=True,
)
self.mel_filters = mel_filters
self.window = window_function(400, "povey", periodic=False)
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def zero_mean_unit_var_norm(
input_values: list[np.ndarray], attention_mask: list[np.ndarray], padding_value: float = 0.0
) -> list[np.ndarray]:
"""
Every array in the list is normalized to have zero mean and unit variance
"""
if attention_mask is not None:
attention_mask = np.array(attention_mask, np.int32)
normed_input_values = []
for vector, length in zip(input_values, attention_mask.sum(-1)):
normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(normed_slice)
else:
normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def _extract_fbank_features(
self,
waveform: np.ndarray,
) -> np.ndarray:
"""
Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
and hence the waveform should not be normalized before feature extraction.
"""
# by default, it extracts the left channel if stereo
if len(waveform.shape) == 2:
waveform = waveform[0]
waveform = np.squeeze(waveform) * (2**15) # Kaldi compliance: 16-bit signed integers
features = spectrogram(
waveform,
self.window,
frame_length=400,
hop_length=160,
fft_length=512,
power=2.0,
center=False,
preemphasis=0.97,
mel_filters=self.mel_filters,
log_mel="log",
mel_floor=1.192092955078125e-07,
remove_dc_offset=True,
).T
return features
def __call__(
self,
raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
padding: Union[bool, str, PaddingStrategy] = True,
pad_to_multiple_of: Optional[int] = 2,
max_length: Optional[int] = None,
truncation: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
sampling_rate: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
do_normalize_per_mel_bins: Optional[bool] = True,
**kwargs,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `torch.Tensor`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`,
`list[list[float]]`, `list[list[list[float]]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array,
a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,
a list of list of float values or a list of a list of list of float values.
If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `list[float]`, `raw_speech` is
considered a single-channel, single-sample sound. In all other cases, the first dimension of
`raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `list[...]`,
corresponds to the number of samples in the batch, and the number of channels
(i.e. mono or stereo character) is derived from the other dimensions
                (1D -> single-channel waveform batches; 2D -> stereo-channel waveform batches).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*, defaults to 2):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean unit-variance normalize the input per mel-channel.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature
extractor.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
return_attention_mask = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 3:
raise ValueError(f"Only mono-channel or stereo-channel audio is supported for input to {self}")
acceptable_types = (
(torch.Tensor, np.ndarray, tuple, list) if is_torch_available() else (np.ndarray, tuple, list)
)
is_batched = is_batched_numpy or (
isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], acceptable_types))
)
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
raw_speech = [raw_speech]
# extract fbank features
features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
if do_normalize_per_mel_bins:
# torch defaults to ddof=1, and numpy defaults to ddof=0
features = [
(x - np.expand_dims(x.mean(0), 0)) / np.sqrt(np.expand_dims(x.var(0, ddof=1), 0) + 1e-7)
for x in features
]
# convert into correct format for padding
encoded_inputs = BatchFeature({"input_features": features})
padded_inputs = self.pad(
encoded_inputs,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=True,
return_tensors="np",
)
# SeamlessM4T needs to process extracted features
input_features = padded_inputs.get("input_features")
attention_mask = padded_inputs.pop("attention_mask")
batch_size, num_frames, num_channels = input_features.shape
remainder = num_frames % self.stride
if remainder != 0:
input_features = input_features[:, : num_frames - remainder, :]
attention_mask = attention_mask[:, : num_frames - remainder]
input_features = np.reshape(
input_features, (batch_size, num_frames // self.stride, num_channels * self.stride)
)
indices = np.arange(0, num_frames - remainder)
attention_mask = attention_mask[:, indices % self.stride == 1]
padded_inputs["input_features"] = input_features
if return_attention_mask:
padded_inputs["attention_mask"] = attention_mask
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
__all__ = ["SeamlessM4TFeatureExtractor"]
|
SeamlessM4TFeatureExtractor
|
python
|
google__flatbuffers
|
tests/monster_test_generated.py
|
{
"start": 3170,
"end": 4116
}
|
class ____(object):
# InParentNamespaceT
def __init__(
self,
):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
inParentNamespace = InParentNamespace()
inParentNamespace.Init(buf, pos)
return cls.InitFromObj(inParentNamespace)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, inParentNamespace):
x = InParentNamespaceT()
x._UnPack(inParentNamespace)
return x
# InParentNamespaceT
def _UnPack(self, inParentNamespace):
if inParentNamespace is None:
return
# InParentNamespaceT
def Pack(self, builder):
InParentNamespaceStart(builder)
inParentNamespace = InParentNamespaceEnd(builder)
return inParentNamespace
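# Illustrative round-trip sketch (not part of the generated file): packing and
# re-reading the object-API type above; assumes the flatbuffers runtime and the
# generated InParentNamespace table are importable.
#
# builder = flatbuffers.Builder(0)
# builder.Finish(InParentNamespaceT().Pack(builder))
# obj = InParentNamespaceT.InitFromPackedBuf(builder.Output(), 0)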
|
InParentNamespaceT
|
python
|
scikit-learn__scikit-learn
|
sklearn/neural_network/_multilayer_perceptron.py
|
{
"start": 31498,
"end": 50619
}
|
class ____(ClassifierMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic
gradient descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : array-like of shape(n_layers - 2,), default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
by Kingma, Diederik, and Jimmy Ba
For a comparison between Adam optimizer and SGD, see
:ref:`sphx_glr_auto_examples_neural_networks_plot_mlp_training_curves.py`.
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
Strength of the L2 regularization term. The L2 regularization term
is divided by the sample size when added to the loss.
For an example usage and visualization of varying regularization, see
:ref:`sphx_glr_auto_examples_neural_networks_plot_mlp_alpha.py`.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`.
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate at each
time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when ``solver='sgd'``.
learning_rate_init : float, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : float, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set
aside ``validation_fraction`` of training data as validation and
terminate training when validation score is not improving by at least
``tol`` for ``n_iter_no_change`` consecutive epochs. The split is
stratified, except in a multilabel setting.
If early stopping is False, then the training stops when the training
loss does not improve by more than ``tol`` for ``n_iter_no_change``
consecutive passes over the training set.
Only effective when solver='sgd' or 'adam'.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'.
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'.
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of loss function calls.
The solver iterates until convergence (determined by 'tol'), number
of iterations reaches max_iter, or this number of loss function calls.
Note that number of loss function calls will be greater than or equal
to the number of iterations for the `MLPClassifier`.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray or list of ndarray of shape (n_classes,)
Class labels for each output.
loss_ : float
The current loss computed with the loss function.
best_loss_ : float or None
The minimum loss reached by the solver throughout fitting.
If `early_stopping=True`, this attribute is set to `None`. Refer to
the `best_validation_score_` fitted attribute instead.
loss_curve_ : list of shape (`n_iter_`,)
The ith element in the list represents the loss at the ith iteration.
validation_scores_ : list of shape (`n_iter_`,) or None
The score at each iteration on a held-out validation set. The score
reported is the accuracy score. Only available if `early_stopping=True`,
otherwise the attribute is set to `None`.
best_validation_score_ : float or None
The best validation score (i.e. accuracy score) that triggered the
early stopping. Only available if `early_stopping=True`, otherwise the
attribute is set to `None`.
t_ : int
The number of training samples seen by the solver during fitting.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
See Also
--------
MLPRegressor : Multi-layer Perceptron regressor.
BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E. "Connectionist learning procedures."
Artificial intelligence 40.1 (1989): 185-234.
Glorot, Xavier, and Yoshua Bengio.
"Understanding the difficulty of training deep feedforward neural networks."
International Conference on Artificial Intelligence and Statistics. 2010.
:arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers:
Surpassing human-level performance on imagenet classification." <1502.01852>`
:arxiv:`Kingma, Diederik, and Jimmy Ba (2014)
"Adam: A method for stochastic optimization." <1412.6980>`
Examples
--------
>>> from sklearn.neural_network import MLPClassifier
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(n_samples=100, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
... random_state=1)
>>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
>>> clf.predict_proba(X_test[:1])
array([[0.0383, 0.961]])
>>> clf.predict(X_test[:5, :])
array([1, 0, 1, 0, 1])
>>> clf.score(X_test, y_test)
0.8...
"""
def __init__(
self,
hidden_layer_sizes=(100,),
activation="relu",
*,
solver="adam",
alpha=0.0001,
batch_size="auto",
learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5,
max_iter=200,
shuffle=True,
random_state=None,
tol=1e-4,
verbose=False,
warm_start=False,
momentum=0.9,
nesterovs_momentum=True,
early_stopping=False,
validation_fraction=0.1,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
n_iter_no_change=10,
max_fun=15000,
):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
loss="log_loss",
shuffle=shuffle,
random_state=random_state,
tol=tol,
verbose=verbose,
warm_start=warm_start,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
def _validate_input(self, X, y, incremental, reset):
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
multi_output=True,
dtype=(np.float64, np.float32),
reset=reset,
)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
# Matrix of actions to be taken under the possible combinations:
# The case that incremental == True and classes_ not defined is
# already checked by _check_partial_fit_first_call that is called
# in _partial_fit below.
# The cases are already grouped into the respective if blocks below.
#
# incremental warm_start classes_ def action
# 0 0 0 define classes_
# 0 1 0 define classes_
# 0 0 1 redefine classes_
#
# 0 1 1 check compat warm_start
# 1 1 1 check compat warm_start
#
# 1 0 1 check compat last fit
#
# Note the reliance on short-circuiting here, so that the second
# or part implies that classes_ is defined.
if (not hasattr(self, "classes_")) or (not self.warm_start and not incremental):
self._label_binarizer = LabelBinarizer()
self._label_binarizer.fit(y)
self.classes_ = self._label_binarizer.classes_
else:
classes = unique_labels(y)
if self.warm_start:
if set(classes) != set(self.classes_):
raise ValueError(
"warm_start can only be used where `y` has the same "
"classes as in the previous call to fit. Previously "
f"got {self.classes_}, `y` has {classes}"
)
elif len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
raise ValueError(
"`y` has classes not in `self.classes_`. "
f"`self.classes_` has {self.classes_}. 'y' has {classes}."
)
# This downcast to bool is to prevent upcasting when working with
# float32 data
y = self._label_binarizer.transform(y).astype(bool)
return X, y
def predict(self, X):
"""Predict using the multi-layer perceptron classifier.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self)
return self._predict(X)
def _predict(self, X, check_input=True):
"""Private predict method with optional input validation"""
y_pred = self._forward_pass_fast(X, check_input=check_input)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
return self._label_binarizer.inverse_transform(y_pred)
def _score(self, X, y, sample_weight=None):
return super()._score_with_function(
X, y, sample_weight=sample_weight, score_function=accuracy_score
)
@available_if(lambda est: est._check_solver())
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, sample_weight=None, classes=None):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : array-like of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 1.7
classes : array of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Trained MLP model.
"""
if _check_partial_fit_first_call(self, classes):
self._label_binarizer = LabelBinarizer()
if type_of_target(y).startswith("multilabel"):
self._label_binarizer.fit(y)
else:
self._label_binarizer.fit(classes)
return self._fit(X, y, sample_weight=sample_weight, incremental=True)
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : ndarray of shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to `log(predict_proba(X))`.
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.classifier_tags.multi_label = True
return tags
|
MLPClassifier
|
python
|
Textualize__textual
|
docs/examples/guide/widgets/fizzbuzz01.py
|
{
"start": 479,
"end": 669
}
|
class ____(App):
CSS_PATH = "fizzbuzz01.tcss"
def compose(self) -> ComposeResult:
yield FizzBuzz()
if __name__ == "__main__":
app = FizzBuzzApp()
app.run()
|
FizzBuzzApp
|
python
|
scipy__scipy
|
scipy/fft/tests/test_helper.py
|
{
"start": 18682,
"end": 19522
}
|
class ____:
def test_definition(self, xp):
x = xp.asarray([0, 1, 2, 3, 4], dtype=xp.float64)
x2 = xp.asarray([0, 1, 2, 3, 4, 5], dtype=xp.float64)
# default dtype varies across backends
y = 9 * fft.rfftfreq(9, xp=xp)
xp_assert_close(y, x, check_dtype=False, check_namespace=True)
y = 9 * xp.pi * fft.rfftfreq(9, xp.pi, xp=xp)
xp_assert_close(y, x, check_dtype=False)
y = 10 * fft.rfftfreq(10, xp=xp)
xp_assert_close(y, x2, check_dtype=False)
y = 10 * xp.pi * fft.rfftfreq(10, xp.pi, xp=xp)
xp_assert_close(y, x2, check_dtype=False)
def test_device(self, xp, devices):
for d in devices:
y = fft.rfftfreq(9, xp=xp, device=d)
x = xp.empty(0, device=d)
assert xp_device(y) == xp_device(x)
|
TestRFFTFreq
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/demos/transformer.py
|
{
"start": 4175,
"end": 5517
}
|
class ____(Dataset):
"""Mini version of WikiText2."""
def __init__(self, data_dir: Path = Path("./data"), block_size: int = 35, download: bool = True) -> None:
super().__init__()
self.path = data_dir / "wikitext-2.txt"
if download:
self.download(self.path)
self.data, self.dictionary = tokenize(self.path)
self.block_size = block_size
@property
def vocab_size(self) -> int:
return len(self.dictionary)
def __len__(self) -> int:
return len(self.data) // self.block_size - 1
def __getitem__(self, index: int) -> tuple[Tensor, Tensor]:
start = index * self.block_size
end = start + self.block_size
inputs = self.data[start:end]
target = self.data[(start + 1) : (end + 1)]
return inputs, target
@staticmethod
def download(destination: Path) -> None:
if not _REQUESTS_AVAILABLE:
raise ModuleNotFoundError(str(_REQUESTS_AVAILABLE))
import requests
os.makedirs(destination.parent, exist_ok=True)
url = "https://raw.githubusercontent.com/pytorch/examples/main/word_language_model/data/wikitext-2/train.txt"
if os.path.exists(destination):
return
with open(destination, "w") as f:
f.write(requests.get(url).text)
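# Illustrative sketch (not from the original file): one way to consume the dataset
# above; the local data directory and the availability of the download URL are
# assumptions.
#
# dataset = WikiText2(block_size=35)
# inputs, target = dataset[0]  # two 1-D tensors of length 35, offset by one token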
|
WikiText2
|
python
|
FactoryBoy__factory_boy
|
tests/djapp/models.py
|
{
"start": 2083,
"end": 2417
}
|
class ____(models.Model):
foo = models.CharField(max_length=20)
def __init__(self, post_save_signal_receiver=None):
super().__init__()
if post_save_signal_receiver:
signals.post_save.connect(
post_save_signal_receiver,
sender=self.__class__,
)
|
WithSignals
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI020.py
|
{
"start": 636,
"end": 1265
}
|
class ____(list["int"]): # Y020 Quoted annotations should never be used in stubs
"""Documented and guaranteed useful.""" # Y021 Docstrings should not be included in stubs
if sys.platform == "linux":
f: "int" # Y020 Quoted annotations should never be used in stubs
elif sys.platform == "win32":
f: "str" # Y020 Quoted annotations should never be used in stubs
else:
f: "bytes" # Y020 Quoted annotations should never be used in stubs
# These two shouldn't trigger Y020 -- empty strings can't be "quoted annotations"
k = "" # Y052 Need type annotation for "k"
el = r"" # Y052 Need type annotation for "el"
|
Child
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-longest-semi-repetitive-substring.py
|
{
"start": 44,
"end": 418
}
|
class ____(object):
def longestSemiRepetitiveSubstring(self, s):
"""
:type s: str
:rtype: int
"""
result = left = prev = 0
for right in xrange(len(s)):
if right-1 >= 0 and s[right-1] == s[right]:
left, prev = prev, right
result = max(result, right-left+1)
return result
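# Illustrative check (not part of the original solution), assuming Python 2 since the
# loop uses xrange: "52233" has "5223" as its longest semi-repetitive substring.
#
# print Solution().longestSemiRepetitiveSubstring("52233")  # -> 4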
|
Solution
|
python
|
pydantic__pydantic
|
pydantic/type_adapter.py
|
{
"start": 1900,
"end": 35653
}
|
class ____(Generic[T]):
"""!!! abstract "Usage Documentation"
[`TypeAdapter`](../concepts/type_adapter.md)
Type adapters provide a flexible way to perform validation and serialization based on a Python type.
A `TypeAdapter` instance exposes some of the functionality from `BaseModel` instance methods
for types that do not have such methods (such as dataclasses, primitive types, and more).
**Note:** `TypeAdapter` instances are not types, and cannot be used as type annotations for fields.
Args:
type: The type associated with the `TypeAdapter`.
config: Configuration for the `TypeAdapter`, should be a dictionary conforming to
[`ConfigDict`][pydantic.config.ConfigDict].
!!! note
You cannot provide a configuration when instantiating a `TypeAdapter` if the type you're using
has its own config that cannot be overridden (ex: `BaseModel`, `TypedDict`, and `dataclass`). A
[`type-adapter-config-unused`](../errors/usage_errors.md#type-adapter-config-unused) error will
be raised in this case.
_parent_depth: Depth at which to search for the [parent frame][frame-objects]. This frame is used when
resolving forward annotations during schema building, by looking for the globals and locals of this
frame. Defaults to 2, which will result in the frame where the `TypeAdapter` was instantiated.
!!! note
This parameter is named with an underscore to suggest its private nature and discourage use.
It may be deprecated in a minor version, so we only recommend using it if you're comfortable
                with potential change in behavior/support. Its default value is 2 because internally,
the `TypeAdapter` class makes another call to fetch the frame.
module: The module that passes to plugin if provided.
Attributes:
core_schema: The core schema for the type.
validator: The schema validator for the type.
serializer: The schema serializer for the type.
pydantic_complete: Whether the core schema for the type is successfully built.
??? tip "Compatibility with `mypy`"
Depending on the type used, `mypy` might raise an error when instantiating a `TypeAdapter`. As a workaround, you can explicitly
annotate your variable:
```py
from typing import Union
from pydantic import TypeAdapter
ta: TypeAdapter[Union[str, int]] = TypeAdapter(Union[str, int]) # type: ignore[arg-type]
```
??? info "Namespace management nuances and implementation details"
Here, we collect some notes on namespace management, and subtle differences from `BaseModel`:
`BaseModel` uses its own `__module__` to find out where it was defined
and then looks for symbols to resolve forward references in those globals.
On the other hand, `TypeAdapter` can be initialized with arbitrary objects,
which may not be types and thus do not have a `__module__` available.
So instead we look at the globals in our parent stack frame.
It is expected that the `ns_resolver` passed to this function will have the correct
namespace for the type we're adapting. See the source code for `TypeAdapter.__init__`
and `TypeAdapter.rebuild` for various ways to construct this namespace.
This works for the case where this function is called in a module that
has the target of forward references in its scope, but
does not always work for more complex cases.
For example, take the following:
```python {title="a.py"}
IntList = list[int]
OuterDict = dict[str, 'IntList']
```
```python {test="skip" title="b.py"}
from a import OuterDict
from pydantic import TypeAdapter
IntList = int # replaces the symbol the forward reference is looking for
v = TypeAdapter(OuterDict)
v({'x': 1}) # should fail but doesn't
```
If `OuterDict` were a `BaseModel`, this would work because it would resolve
the forward reference within the `a.py` namespace.
But `TypeAdapter(OuterDict)` can't determine what module `OuterDict` came from.
In other words, the assumption that _all_ forward references exist in the
module we are being called from is not technically always true.
Although most of the time it is and it works fine for recursive models and such,
`BaseModel`'s behavior isn't perfect either and _can_ break in similar ways,
so there is no right or wrong between the two.
But at the very least this behavior is _subtly_ different from `BaseModel`'s.
"""
core_schema: CoreSchema
validator: SchemaValidator | PluggableSchemaValidator
serializer: SchemaSerializer
pydantic_complete: bool
@overload
def __init__(
self,
type: type[T],
*,
config: ConfigDict | None = ...,
_parent_depth: int = ...,
module: str | None = ...,
) -> None: ...
# This second overload is for unsupported special forms (such as Annotated, Union, etc.)
# Currently there is no way to type this correctly
# See https://github.com/python/typing/pull/1618
@overload
def __init__(
self,
type: Any,
*,
config: ConfigDict | None = ...,
_parent_depth: int = ...,
module: str | None = ...,
) -> None: ...
def __init__(
self,
type: Any,
*,
config: ConfigDict | None = None,
_parent_depth: int = 2,
module: str | None = None,
) -> None:
if _type_has_config(type) and config is not None:
raise PydanticUserError(
'Cannot use `config` when the type is a BaseModel, dataclass or TypedDict.'
' These types can have their own config and setting the config via the `config`'
' parameter to TypeAdapter will not override it, thus the `config` you passed to'
' TypeAdapter becomes meaningless, which is probably not what you want.',
code='type-adapter-config-unused',
)
self._type = type
self._config = config
self._parent_depth = _parent_depth
self.pydantic_complete = False
parent_frame = self._fetch_parent_frame()
if isinstance(type, types.FunctionType):
# Special case functions, which are *not* pushed to the `NsResolver` stack and without this special case
# would only have access to the parent namespace where the `TypeAdapter` was instantiated (if the function is defined
# in another module, we need to look at that module's globals).
if parent_frame is not None:
# `f_locals` is the namespace where the type adapter was instantiated (~ to `f_globals` if at the module level):
parent_ns = parent_frame.f_locals
else: # pragma: no cover
parent_ns = None
globalns, localns = _namespace_utils.ns_for_function(
type,
parent_namespace=parent_ns,
)
parent_namespace = None
else:
if parent_frame is not None:
globalns = parent_frame.f_globals
# Do not provide a local ns if the type adapter happens to be instantiated at the module level:
localns = parent_frame.f_locals if parent_frame.f_locals is not globalns else {}
else: # pragma: no cover
globalns = {}
localns = {}
parent_namespace = localns
self._module_name = module or cast(str, globalns.get('__name__', ''))
self._init_core_attrs(
ns_resolver=_namespace_utils.NsResolver(
namespaces_tuple=_namespace_utils.NamespacesTuple(locals=localns, globals=globalns),
parent_namespace=parent_namespace,
),
force=False,
)
def _fetch_parent_frame(self) -> FrameType | None:
frame = sys._getframe(self._parent_depth)
if frame.f_globals.get('__name__') == 'typing':
# Because `TypeAdapter` is generic, explicitly parametrizing the class results
# in a `typing._GenericAlias` instance, which proxies instantiation calls to the
# "real" `TypeAdapter` class and thus adding an extra frame to the call. To avoid
# pulling anything from the `typing` module, use the correct frame (the one before):
return frame.f_back
return frame
def _init_core_attrs(
self, ns_resolver: _namespace_utils.NsResolver, force: bool, raise_errors: bool = False
) -> bool:
"""Initialize the core schema, validator, and serializer for the type.
Args:
ns_resolver: The namespace resolver to use when building the core schema for the adapted type.
force: Whether to force the construction of the core schema, validator, and serializer.
If `force` is set to `False` and `_defer_build` is `True`, the core schema, validator, and serializer will be set to mocks.
raise_errors: Whether to raise errors if initializing any of the core attrs fails.
Returns:
`True` if the core schema, validator, and serializer were successfully initialized, otherwise `False`.
Raises:
            PydanticUndefinedAnnotation: If `PydanticUndefinedAnnotation` occurs in `__get_pydantic_core_schema__`
and `raise_errors=True`.
"""
if not force and self._defer_build:
_mock_val_ser.set_type_adapter_mocks(self)
self.pydantic_complete = False
return False
try:
self.core_schema = _getattr_no_parents(self._type, '__pydantic_core_schema__')
self.validator = _getattr_no_parents(self._type, '__pydantic_validator__')
self.serializer = _getattr_no_parents(self._type, '__pydantic_serializer__')
# TODO: we don't go through the rebuild logic here directly because we don't want
# to repeat all of the namespace fetching logic that we've already done
# so we simply skip to the block below that does the actual schema generation
if (
isinstance(self.core_schema, _mock_val_ser.MockCoreSchema)
or isinstance(self.validator, _mock_val_ser.MockValSer)
or isinstance(self.serializer, _mock_val_ser.MockValSer)
):
raise AttributeError()
except AttributeError:
config_wrapper = _config.ConfigWrapper(self._config)
schema_generator = _generate_schema.GenerateSchema(config_wrapper, ns_resolver=ns_resolver)
try:
core_schema = schema_generator.generate_schema(self._type)
except PydanticUndefinedAnnotation:
if raise_errors:
raise
_mock_val_ser.set_type_adapter_mocks(self)
return False
try:
self.core_schema = schema_generator.clean_schema(core_schema)
except _generate_schema.InvalidSchemaError:
_mock_val_ser.set_type_adapter_mocks(self)
return False
core_config = config_wrapper.core_config(None)
self.validator = create_schema_validator(
schema=self.core_schema,
schema_type=self._type,
schema_type_module=self._module_name,
schema_type_name=str(self._type),
schema_kind='TypeAdapter',
config=core_config,
plugin_settings=config_wrapper.plugin_settings,
)
self.serializer = SchemaSerializer(self.core_schema, core_config)
self.pydantic_complete = True
return True
@property
def _defer_build(self) -> bool:
config = self._config if self._config is not None else self._model_config
if config:
return config.get('defer_build') is True
return False
@property
def _model_config(self) -> ConfigDict | None:
type_: Any = _typing_extra.annotated_type(self._type) or self._type # Eg FastAPI heavily uses Annotated
if _utils.lenient_issubclass(type_, BaseModel):
return type_.model_config
return getattr(type_, '__pydantic_config__', None)
def __repr__(self) -> str:
return f'TypeAdapter({_repr.display_as_type(self._type)})'
def rebuild(
self,
*,
force: bool = False,
raise_errors: bool = True,
_parent_namespace_depth: int = 2,
_types_namespace: _namespace_utils.MappingNamespace | None = None,
) -> bool | None:
"""Try to rebuild the pydantic-core schema for the adapter's type.
This may be necessary when one of the annotations is a ForwardRef which could not be resolved during
the initial attempt to build the schema, and automatic rebuilding fails.
Args:
force: Whether to force the rebuilding of the type adapter's schema, defaults to `False`.
raise_errors: Whether to raise errors, defaults to `True`.
_parent_namespace_depth: Depth at which to search for the [parent frame][frame-objects]. This
frame is used when resolving forward annotations during schema rebuilding, by looking for
the locals of this frame. Defaults to 2, which will result in the frame where the method
was called.
_types_namespace: An explicit types namespace to use, instead of using the local namespace
from the parent frame. Defaults to `None`.
Returns:
Returns `None` if the schema is already "complete" and rebuilding was not required.
If rebuilding _was_ required, returns `True` if rebuilding was successful, otherwise `False`.
"""
if not force and self.pydantic_complete:
return None
if _types_namespace is not None:
rebuild_ns = _types_namespace
elif _parent_namespace_depth > 0:
rebuild_ns = _typing_extra.parent_frame_namespace(parent_depth=_parent_namespace_depth, force=True) or {}
else:
rebuild_ns = {}
# we have to manually fetch globals here because there's no type on the stack of the NsResolver
# and so we skip the globalns = get_module_ns_of(typ) call that would normally happen
globalns = sys._getframe(max(_parent_namespace_depth - 1, 1)).f_globals
ns_resolver = _namespace_utils.NsResolver(
namespaces_tuple=_namespace_utils.NamespacesTuple(locals=rebuild_ns, globals=globalns),
parent_namespace=rebuild_ns,
)
return self._init_core_attrs(ns_resolver=ns_resolver, force=True, raise_errors=raise_errors)
def validate_python(
self,
object: Any,
/,
*,
strict: bool | None = None,
extra: ExtraValues | None = None,
from_attributes: bool | None = None,
context: Any | None = None,
experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
by_alias: bool | None = None,
by_name: bool | None = None,
) -> T:
"""Validate a Python object against the model.
Args:
object: The Python object to validate against the model.
strict: Whether to strictly check types.
extra: Whether to ignore, allow, or forbid extra data during model validation.
See the [`extra` configuration value][pydantic.ConfigDict.extra] for details.
from_attributes: Whether to extract data from object attributes.
context: Additional context to pass to the validator.
experimental_allow_partial: **Experimental** whether to enable
[partial validation](../concepts/experimental.md#partial-validation), e.g. to process streams.
* False / 'off': Default behavior, no partial validation.
* True / 'on': Enable partial validation.
* 'trailing-strings': Enable partial validation and allow trailing strings in the input.
by_alias: Whether to use the field's alias when validating against the provided input data.
by_name: Whether to use the field's name when validating against the provided input data.
!!! note
When using `TypeAdapter` with a Pydantic `dataclass`, the use of the `from_attributes`
argument is not supported.
Returns:
The validated object.
"""
if by_alias is False and by_name is not True:
raise PydanticUserError(
'At least one of `by_alias` or `by_name` must be set to True.',
code='validate-by-alias-and-name-false',
)
return self.validator.validate_python(
object,
strict=strict,
extra=extra,
from_attributes=from_attributes,
context=context,
allow_partial=experimental_allow_partial,
by_alias=by_alias,
by_name=by_name,
)
def validate_json(
self,
data: str | bytes | bytearray,
/,
*,
strict: bool | None = None,
extra: ExtraValues | None = None,
context: Any | None = None,
experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
by_alias: bool | None = None,
by_name: bool | None = None,
) -> T:
"""!!! abstract "Usage Documentation"
[JSON Parsing](../concepts/json.md#json-parsing)
Validate a JSON string or bytes against the model.
Args:
data: The JSON data to validate against the model.
strict: Whether to strictly check types.
extra: Whether to ignore, allow, or forbid extra data during model validation.
See the [`extra` configuration value][pydantic.ConfigDict.extra] for details.
context: Additional context to use during validation.
experimental_allow_partial: **Experimental** whether to enable
[partial validation](../concepts/experimental.md#partial-validation), e.g. to process streams.
* False / 'off': Default behavior, no partial validation.
* True / 'on': Enable partial validation.
* 'trailing-strings': Enable partial validation and allow trailing strings in the input.
by_alias: Whether to use the field's alias when validating against the provided input data.
by_name: Whether to use the field's name when validating against the provided input data.
Returns:
The validated object.
"""
if by_alias is False and by_name is not True:
raise PydanticUserError(
'At least one of `by_alias` or `by_name` must be set to True.',
code='validate-by-alias-and-name-false',
)
return self.validator.validate_json(
data,
strict=strict,
extra=extra,
context=context,
allow_partial=experimental_allow_partial,
by_alias=by_alias,
by_name=by_name,
)
def validate_strings(
self,
obj: Any,
/,
*,
strict: bool | None = None,
extra: ExtraValues | None = None,
context: Any | None = None,
experimental_allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
by_alias: bool | None = None,
by_name: bool | None = None,
) -> T:
"""Validate object contains string data against the model.
Args:
            obj: The object containing string data to validate.
strict: Whether to strictly check types.
extra: Whether to ignore, allow, or forbid extra data during model validation.
See the [`extra` configuration value][pydantic.ConfigDict.extra] for details.
context: Additional context to use during validation.
experimental_allow_partial: **Experimental** whether to enable
[partial validation](../concepts/experimental.md#partial-validation), e.g. to process streams.
* False / 'off': Default behavior, no partial validation.
* True / 'on': Enable partial validation.
* 'trailing-strings': Enable partial validation and allow trailing strings in the input.
by_alias: Whether to use the field's alias when validating against the provided input data.
by_name: Whether to use the field's name when validating against the provided input data.
Returns:
The validated object.
"""
if by_alias is False and by_name is not True:
raise PydanticUserError(
'At least one of `by_alias` or `by_name` must be set to True.',
code='validate-by-alias-and-name-false',
)
return self.validator.validate_strings(
obj,
strict=strict,
extra=extra,
context=context,
allow_partial=experimental_allow_partial,
by_alias=by_alias,
by_name=by_name,
)
def get_default_value(self, *, strict: bool | None = None, context: Any | None = None) -> Some[T] | None:
"""Get the default value for the wrapped type.
Args:
strict: Whether to strictly check types.
context: Additional context to pass to the validator.
Returns:
The default value wrapped in a `Some` if there is one or None if not.
"""
return self.validator.get_default_value(strict=strict, context=context)
def dump_python(
self,
instance: T,
/,
*,
mode: Literal['json', 'python'] = 'python',
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal['none', 'warn', 'error'] = True,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
context: Any | None = None,
) -> Any:
"""Dump an instance of the adapted type to a Python object.
Args:
instance: The Python object to serialize.
mode: The output format.
include: Fields to include in the output.
exclude: Fields to exclude from the output.
by_alias: Whether to use alias names for field names.
exclude_unset: Whether to exclude unset fields.
exclude_defaults: Whether to exclude fields with default values.
exclude_none: Whether to exclude fields with None values.
exclude_computed_fields: Whether to exclude computed fields.
While this can be useful for round-tripping, it is usually recommended to use the dedicated
`round_trip` parameter instead.
round_trip: Whether to output the serialized data in a way that is compatible with deserialization.
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
fallback: A function to call when an unknown value is encountered. If not provided,
a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
context: Additional context to pass to the serializer.
Returns:
The serialized object.
"""
return self.serializer.to_python(
instance,
mode=mode,
by_alias=by_alias,
include=include,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
exclude_computed_fields=exclude_computed_fields,
round_trip=round_trip,
warnings=warnings,
fallback=fallback,
serialize_as_any=serialize_as_any,
context=context,
)
def dump_json(
self,
instance: T,
/,
*,
indent: int | None = None,
ensure_ascii: bool = False,
include: IncEx | None = None,
exclude: IncEx | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal['none', 'warn', 'error'] = True,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
context: Any | None = None,
) -> bytes:
"""!!! abstract "Usage Documentation"
[JSON Serialization](../concepts/json.md#json-serialization)
Serialize an instance of the adapted type to JSON.
Args:
instance: The instance to be serialized.
indent: Number of spaces for JSON indentation.
ensure_ascii: If `True`, the output is guaranteed to have all incoming non-ASCII characters escaped.
If `False` (the default), these characters will be output as-is.
include: Fields to include.
exclude: Fields to exclude.
by_alias: Whether to use alias names for field names.
exclude_unset: Whether to exclude unset fields.
exclude_defaults: Whether to exclude fields with default values.
exclude_none: Whether to exclude fields with a value of `None`.
exclude_computed_fields: Whether to exclude computed fields.
While this can be useful for round-tripping, it is usually recommended to use the dedicated
`round_trip` parameter instead.
round_trip: Whether to serialize and deserialize the instance to ensure round-tripping.
warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
"error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
fallback: A function to call when an unknown value is encountered. If not provided,
a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
context: Additional context to pass to the serializer.
Returns:
The JSON representation of the given instance as bytes.
"""
return self.serializer.to_json(
instance,
indent=indent,
ensure_ascii=ensure_ascii,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
exclude_computed_fields=exclude_computed_fields,
round_trip=round_trip,
warnings=warnings,
fallback=fallback,
serialize_as_any=serialize_as_any,
context=context,
)
def json_schema(
self,
*,
by_alias: bool = True,
ref_template: str = DEFAULT_REF_TEMPLATE,
union_format: Literal['any_of', 'primitive_type_array'] = 'any_of',
schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
mode: JsonSchemaMode = 'validation',
) -> dict[str, Any]:
"""Generate a JSON schema for the adapted type.
Args:
by_alias: Whether to use alias names for field names.
ref_template: The format string used for generating $ref strings.
union_format: The format to use when combining schemas from unions together. Can be one of:
- `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf)
keyword to combine schemas (the default).
- `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type)
keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive
type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to
`any_of`.
schema_generator: To override the logic used to generate the JSON schema, as a subclass of
`GenerateJsonSchema` with your desired modifications
mode: The mode in which to generate the schema.
Returns:
The JSON schema for the model as a dictionary.
"""
schema_generator_instance = schema_generator(
by_alias=by_alias, ref_template=ref_template, union_format=union_format
)
if isinstance(self.core_schema, _mock_val_ser.MockCoreSchema):
self.core_schema.rebuild()
assert not isinstance(self.core_schema, _mock_val_ser.MockCoreSchema), 'this is a bug! please report it'
return schema_generator_instance.generate(self.core_schema, mode=mode)
@staticmethod
def json_schemas(
inputs: Iterable[tuple[JsonSchemaKeyT, JsonSchemaMode, TypeAdapter[Any]]],
/,
*,
by_alias: bool = True,
title: str | None = None,
description: str | None = None,
ref_template: str = DEFAULT_REF_TEMPLATE,
union_format: Literal['any_of', 'primitive_type_array'] = 'any_of',
schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], JsonSchemaValue]:
"""Generate a JSON schema including definitions from multiple type adapters.
Args:
inputs: Inputs to schema generation. The first two items will form the keys of the (first)
output mapping; the type adapters will provide the core schemas that get converted into
definitions in the output JSON schema.
by_alias: Whether to use alias names.
title: The title for the schema.
description: The description for the schema.
ref_template: The format string used for generating $ref strings.
union_format: The format to use when combining schemas from unions together. Can be one of:
- `'any_of'`: Use the [`anyOf`](https://json-schema.org/understanding-json-schema/reference/combining#anyOf)
keyword to combine schemas (the default).
- `'primitive_type_array'`: Use the [`type`](https://json-schema.org/understanding-json-schema/reference/type)
keyword as an array of strings, containing each type of the combination. If any of the schemas is not a primitive
type (`string`, `boolean`, `null`, `integer` or `number`) or contains constraints/metadata, falls back to
`any_of`.
schema_generator: The generator class used for creating the schema.
Returns:
A tuple where:
- The first element is a dictionary whose keys are tuples of JSON schema key type and JSON mode, and
whose values are the JSON schema corresponding to that pair of inputs. (These schemas may have
JsonRef references to definitions that are defined in the second returned element.)
- The second element is a JSON schema containing all definitions referenced in the first returned
element, along with the optional title and description keys.
"""
schema_generator_instance = schema_generator(
by_alias=by_alias, ref_template=ref_template, union_format=union_format
)
inputs_ = []
for key, mode, adapter in inputs:
# This is the same pattern we follow for model json schemas - we attempt a core schema rebuild if we detect a mock
if isinstance(adapter.core_schema, _mock_val_ser.MockCoreSchema):
adapter.core_schema.rebuild()
assert not isinstance(adapter.core_schema, _mock_val_ser.MockCoreSchema), (
'this is a bug! please report it'
)
inputs_.append((key, mode, adapter.core_schema))
json_schemas_map, definitions = schema_generator_instance.generate_definitions(inputs_)
json_schema: dict[str, Any] = {}
if definitions:
json_schema['$defs'] = definitions
if title:
json_schema['title'] = title
if description:
json_schema['description'] = description
return json_schemas_map, json_schema
|
TypeAdapter
|
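The TypeAdapter record above documents `dump_python`, `dump_json`, and `json_schema` but never shows a call site. The following is a minimal, hedged usage sketch, assuming pydantic v2 on Python 3.10+; the `User` model and its fields are invented for illustration.

```python
# Hedged usage sketch for the TypeAdapter methods documented above.
# Assumes pydantic v2 and Python 3.10+; the User model is made up.
from pydantic import BaseModel, TypeAdapter

class User(BaseModel):
    id: int
    name: str
    nickname: str | None = None

adapter = TypeAdapter(list[User])
users = adapter.validate_python([{"id": 1, "name": "Ada"}])

# dump_json returns bytes; exclude_none drops the unset nickname field.
payload = adapter.dump_json(users, exclude_none=True, indent=2)

# json_schema returns a plain dict describing list[User].
schema = adapter.json_schema(mode="serialization")
print(payload.decode())
print(schema["type"])  # "array"
```

The static `json_schemas` method shown above serves the multi-adapter case, where several adapters contribute to a single shared `$defs` section.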
python
|
google__jax
|
tests/pmap_test.py
|
{
"start": 127610,
"end": 128202
}
|
class ____:
def setUp(self):
super().setUp()
if config.pmap_shmap_merge.value:
# NOTE(dsuo): Most tests do pass `pmap_shmap_merge=True` and
# `disable_jit=True` but are they still meaningful? They can also be much
# slower.
raise SkipTest('Not testing disable_jit when `pmap_shmap_merge=True`.')
self.enter_context(jtu.thread_local_config_context(jax_disable_jit=True))
self.enter_context(jtu.ignore_warning(
message="Some donated buffers were not usable", category=UserWarning))
@jtu.pytest_mark_if_available('multiaccelerator')
|
EagerPmapMixin
|
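The mixin above only enters config and warning contexts in `setUp`. A generic sketch of that pattern, with the JAX-internal helpers (`jtu.thread_local_config_context`, `jtu.ignore_warning`) replaced by a plain warnings filter, might look like this:

```python
# Generic sketch of a "contexts entered in setUp" test mixin; JAX internals are
# replaced with a standard-library warnings filter for illustration only.
import unittest
import warnings

class IgnoreWarningsMixin(unittest.TestCase):
    """Enters a warnings-filter context in setUp and undoes it via addCleanup."""

    def setUp(self):
        super().setUp()
        ctx = warnings.catch_warnings()
        ctx.__enter__()
        self.addCleanup(ctx.__exit__, None, None, None)
        warnings.simplefilter("ignore", category=UserWarning)

class ExampleTest(IgnoreWarningsMixin):
    def test_is_quiet(self):
        warnings.warn("noisy", UserWarning)  # silenced by the mixin's filter
        self.assertTrue(True)

if __name__ == "__main__":
    unittest.main()
```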
python
|
astropy__astropy
|
astropy/table/pprint.py
|
{
"start": 6106,
"end": 30878
}
|
class ____:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
# Declare to keep static type checker happy.
lines = None
width = None
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
width, lines = get_terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
def _pformat_col(
self,
col,
max_lines=None,
show_name=True,
show_unit=None,
show_dtype=False,
show_length=None,
html=False,
align=None,
):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs,
)
# Replace tab and newline with text representations so they display nicely.
# Newline in particular is a problem in a multicolumn table.
col_strs = [
val.replace("\t", "\\t").replace("\n", "\\n") for val in col_strs_iter
]
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from astropy.utils.xml.writer import xml_escape
n_header = outs["n_header"]
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
val = f"<{td}>{xml_escape(col_str.strip())}</{td}>"
row = "<tr>" + val + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, "<table>")
col_strs.append("</table>")
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs["i_centers"]:
col_strs[i] = col_strs[i].center(col_width)
if outs["i_dashes"] is not None:
col_strs[outs["i_dashes"]] = "-" * col_width
# Format columns according to alignment. `align` arg has precedence, otherwise
# use `col.format` if it starts as a legal alignment string. If neither applies
# then right justify.
re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])")
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError(
"column align must be one of '<', '^', '>', or '='"
)
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group("fill")
align_char = match.group("align")
if align_char == "=":
if fill_char != "0":
raise ValueError("fill character must be '0' for '=' align")
# str.zfill gets used which does not take fill char arg
fill_char = ""
else:
fill_char = ""
align_char = ">"
justify_methods = {"<": "ljust", "^": "center", ">": "rjust", "=": "zfill"}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs["show_length"]:
col_strs.append(f"Length = {len(col)} rows")
return col_strs, outs
def _name_and_structure(self, name, dtype, sep=" "):
"""Format a column name, including a possible structure.
Normally, just returns the name, but if it has a structured dtype,
will add the parts in between square brackets. E.g.,
"name [f0, f1]" or "name [f0[sf0, sf1], f1]".
"""
if dtype is None or dtype.names is None:
return name
structure = ", ".join(
[
self._name_and_structure(name, dt, sep="")
for name, (dt, _) in dtype.fields.items()
]
)
return f"{name}{sep}[{structure}]"
def _pformat_col_iter(
self,
col,
max_lines,
show_name,
show_unit,
outs,
show_dtype=False,
show_length=None,
):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
dtype = getattr(col, "dtype", None)
multidims = getattr(col, "shape", [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
multidims_all_ones = np.prod(multidims) == 1
multidims_has_zero = 0 in multidims
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
n_header += 1
yield self._name_and_structure(col_name, dtype)
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or "")
if show_dtype:
i_centers.append(n_header)
n_header += 1
if dtype is not None:
# For zero-length strings, np.dtype((dtype, ())) does not work;
# see https://github.com/numpy/numpy/issues/27301
# As a work-around, just omit the shape if there is none.
col_dtype = dtype_info_name((dtype, multidims) if multidims else dtype)
else:
col_dtype = col.__class__.__qualname__ or "object"
yield col_dtype
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield "---"
max_lines -= n_header
n_print2 = max_lines // 2
try:
n_rows = len(col)
except TypeError:
is_scalar = True
n_rows = 1
else:
is_scalar = False
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, "default_format", None)
pssf = (
getattr(col.info, "possible_string_format_functions", None)
or _possible_string_format_functions
)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if n_rows > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate([np.arange(0, i0 + 1), np.arange(i1 + 1, n_rows)])
else:
i0 = -1
indices = np.arange(n_rows)
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if multidims_all_ones:
return format_func(col_format, col[(idx,) + multidim0])
elif multidims_has_zero:
# Any zero dimension means there is no data to print
return ""
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return f"{left} .. {right}"
elif is_scalar:
return format_func(col_format, col)
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield "..."
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
f'Unable to parse format string "{col_format}" for '
f'entry "{col[idx]}" in column "{col.info.name}" '
f'with datatype "{col.info.dtype}".\n'
"See https://docs.astropy.org/en/stable/table/construct_table.html#format-specifier "
"for possible format specifications."
)
outs["show_length"] = show_length
outs["n_header"] = n_header
outs["i_centers"] = i_centers
outs["i_dashes"] = i_dashes
def _pformat_table(
self,
table,
max_lines=-1,
max_width=-1,
show_name=True,
show_unit=None,
show_dtype=False,
html=False,
tableid=None,
tableclass=None,
align=None,
):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
-1 (default) implies no limit, ``None`` implies using the
height of the current terminal.
max_width : int or None
Maximum character width of output
-1 (default) implies no limit, ``None`` implies using the
width of the current terminal.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or None
CSS classes for the table; only used if html is set. Default is
none
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError(
f"got {len(align)} alignment values instead of "
f"the number of columns ({n_cols})"
)
else:
raise TypeError(
f"align keyword must be str or list or tuple (got {type(align)})"
)
# Process column visibility from table pprint_include_names and
# pprint_exclude_names attributes and get the set of columns to show.
pprint_include_names = _get_pprint_include_names(table)
cols = []
outs = None # Initialize so static type checker is happy
for align_, col in zip(align, table.columns.values()):
if col.info.name not in pprint_include_names:
continue
lines, outs = self._pformat_col(
col,
max_lines,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
align=align_,
)
if outs["show_length"]:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ["<No columns>"], {"show_length": False}
# Use the values for the last column since they are all the same
n_header = outs["n_header"]
n_rows = len(cols[0])
def outwidth(cols):
return sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ["..."] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from astropy.utils.xml.writer import xml_escape
if tableid is None:
tableid = f"table{id(table)}"
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = " ".join(tableclass)
rows.append(f'<table id="{tableid}" class="{tableclass}">')
else:
rows.append(f'<table id="{tableid}">')
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = "th" if i < n_header else "td"
vals = (f"<{td}>{xml_escape(col[i].strip())}</{td}>" for col in cols)
row = "<tr>" + "".join(vals) + "</tr>"
if i < n_header:
row = "<thead>" + row + "</thead>"
rows.append(row)
rows.append("</table>")
else:
for i in range(n_rows):
row = " ".join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(
self,
tabcol,
max_lines=None,
max_width=None,
show_name=True,
show_unit=None,
show_dtype=False,
):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = "f br<>qhpn"
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(
max_lines=-1,
show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
)
if hasattr(tabcol, "columns"): # tabcol is a table
kwargs["max_width"] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system("cls" if os.name == "nt" else "clear")
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = (
"red" if i < n_header else "default" for i in range(len(lines))
)
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=" ")
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error(
"Console does not support getting a character"
" as required by more(). Use pprint() instead."
)
return
if key in allowed_keys:
break
print(key)
if key.lower() == "q":
break
if key == " " or key == "f":
i0 += delta_lines
elif key == "b":
i0 = i0 - delta_lines
elif key == "r":
pass
elif key == "<":
i0 = 0
elif key == ">":
i0 = len(tabcol)
elif key == "p":
i0 -= 1
elif key == "n":
i0 += 1
elif key == "h":
showlines = False
print(
"""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""",
end=" ",
)
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
|
TableFormatter
|
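Most of the alignment handling in `_pformat_col` above comes down to one regex, `(?P<fill>.?)(?P<align>[<^>=])`, plus a dispatch to `ljust`/`center`/`rjust`/`zfill`. The standalone sketch below reimplements just that piece so it can be tried outside astropy; `justify` is a made-up helper, not an astropy API.

```python
# Standalone sketch of the fill/align parsing used by _pformat_col above.
# Not the astropy implementation, just the same regex-driven idea.
import re

re_fill_align = re.compile(r"(?P<fill>.?)(?P<align>[<^>=])")

def justify(value: str, spec: str, width: int) -> str:
    match = re_fill_align.match(spec)
    if not match:
        return value.rjust(width)  # default: right-justify
    fill_char, align_char = match.group("fill"), match.group("align")
    if align_char == "=":
        if fill_char != "0":
            raise ValueError("fill character must be '0' for '=' align")
        return value.zfill(width)  # str.zfill takes no fill-char argument
    method = {"<": "ljust", "^": "center", ">": "rjust"}[align_char]
    args = (width, fill_char) if fill_char else (width,)
    return getattr(value, method)(*args)

print(justify("3.14", "0=", 8))  # '00003.14'
print(justify("abc", "*^", 7))   # '**abc**'
```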
python
|
pytorch__pytorch
|
torch/distributed/pipelining/stage.py
|
{
"start": 3807,
"end": 41057
}
|
class ____(ABC):
"""
Base class for pipeline stages.
Defines or implements common methods used by the `_PipelineStage` used by
the tracing frontend and `PipelineStage` used by manual frontend.
"""
def __init__(
self,
submodule: torch.nn.Module,
stage_index: int,
num_stages: int,
device: torch.device,
group: dist.ProcessGroup | None = None,
dw_builder: Callable[[], Callable[..., None]] | None = None,
):
"""
Args:
submodule (torch.nn.Module): The module to be executed in this stage.
stage_index (int): The index of this stage.
num_stages (int): The total number of stages in this pipeline.
device (torch.device): The device to run this stage on.
group (Optional[dist.ProcessGroup]): The process group to use for communication.
If `None`, the default process group will be used.
Default: `None`.
dw_builder (Optional[Callable[[], Callable[..., None]]]): If provided, dw_builder is a builder function
that will build a new dw_runner function that will run parts of module backward that were intentionally
skipped during the module's actual backward pass. The builder must be invoked by the stage after the stage runs
the model backwards, and the stage should save the latest dw_runner to run during the weight pass (W).
If not provided, a dw_runner will be generated automatically by traversing the autograd graph.
When used with schedules that only have F and B steps, the fresh dw_runner function will be called as
part of I (input backwards). When used with F,I,W schedules, the dw_runner function implements 'W'.
"""
super().__init__()
if stage_index >= num_stages:
raise ValueError(
f"Stage index {stage_index} is out of range of {num_stages}"
)
self.submod = submodule
self.stage_index = stage_index
self.num_stages = num_stages
# pyrefly: ignore [read-only]
self.device = device
self.group = group
self.dw_builder = dw_builder
# backward state
self.backward_state: dict[int, tuple[Any, ...]] = {}
# store dw_runner per microbatch_id
self.dw_runner: dict[int, Callable[..., None]] = {}
# `group_rank` is rank in process group `group`.
self.group_rank = dist.get_rank(self.group)
self.group_size = dist.get_world_size(self.group)
if self.group_size > self.num_stages:
raise RuntimeError(
f"Pipeline group size {self.group_size} cannot be larger than number of stages {self.num_stages}"
)
# Run time states
self._outputs_meta: tuple[torch.Tensor, ...] | None = None
# map microbatch ID to list of forward tensor args
self.fwd_cache: dict[int, tuple[Any, list[torch.Tensor]]] = {}
# map microbatch ID to list of backward grad tensor args
self.bwd_cache: dict[int, tuple[torch.Tensor | None, ...]] = {}
# Caching chunk outputs for final output merge or reduction
self.output_chunks: list[Any] = []
# Initialize has_backward to false; this will be set to true if loss
# function is passed to pipeline schedule
self.has_backward = False
# Log prefix
self.log_prefix = f"[Stage {self.stage_index}]"
# Forward infra
self.args_recv_info: dict[int, tuple[InputInfo, ...]] = {}
self.act_send_info: dict[int, list] = {}
# Backward infra will be created lazily
self.grad_recv_info: dict = {}
self.grad_send_info: list | None = None
# To be populated later by the Schedule
self.chunks: int | None = None
self.stage_index_to_group_rank: dict[int, int] = {
i: i % self.group_size for i in range(self.num_stages)
}
@property
def has_backward(self) -> bool:
"""
Returns true if this stage has a backward pass.
"""
return self._has_backward
@has_backward.setter
def has_backward(self, has_backward: bool):
self._has_backward = has_backward
@property
def is_first(self):
"""
Returns true if this stage is the first stage in the pipeline.
"""
return self.stage_index == 0
@property
def is_last(self):
"""
Returns true if this stage is the last stage in the pipeline.
"""
return self.stage_index == self.num_stages - 1
def _check_chunk_id(self, chunk_id: int):
if self.chunks is None:
raise RuntimeError(
"Attempted to access chunk_id before chunks have been configured."
)
if chunk_id >= self.chunks:
raise RuntimeError(
f"Chunk id {chunk_id} is out of range [0, {self.chunks})"
)
def _configure_outputs_meta(self, outputs_meta: tuple[torch.Tensor, ...]):
"""
Track the output shapes/dtype of this stage since they determine the send operation(s) which must match
recv operations of the next stage. The next stage _will_ be freezing its recv buffers based on its initial
configuration, so it's important to also freeze/validate the output side to avoid any send/recv mismatches
which could show up as hangs, silent corruption, or other errors.
"""
assert self._outputs_meta is None, (
"Attempting to reconfigure output_meta, which is not supported"
)
self._outputs_meta = tuple(outputs_meta) # type: ignore[assignment]
def get_outputs_meta(self) -> tuple[torch.Tensor, ...]:
"""Get the output metadata (meta tensors) representing the outputs of this stage"""
assert self._outputs_meta is not None, (
"Attempted to get_outputs_meta() without configuring output meta"
)
return self._outputs_meta
def _create_grad_send_info(
self,
args_recv_info: tuple,
) -> list[int | None]:
"""
Create a list of stage indices to send gradients to.
"""
grad_send_info: list[int | None] = []
def map_recv_to_send(a):
# Note: we send gradients back to previous stage as long as in
# forward it is a received input, regardless of whether it requires
# grad. It is up to the previous stage to discard this gradient.
if isinstance(a, _RecvInfo):
grad_send_info.append(a.source)
return a.source
else:
grad_send_info.append(None)
return None
map_aggregate(args_recv_info, map_recv_to_send)
logger.debug("%s Grad send info: %s", self.log_prefix, grad_send_info)
return grad_send_info
@abstractmethod
def _prepare_forward_infra(
self,
num_microbatches: int,
args: tuple[Any, ...],
kwargs: dict[str, Any] | None = None,
) -> tuple[Any, ...]:
raise NotImplementedError
def _prepare_backward_infra(self, num_microbatches: int):
# TODO: this is needed for backward_maybe_with_nosync
self.chunks = num_microbatches
for mb_index in range(num_microbatches):
# `grad_recv_info` is a mirror of `act_send_info`
self.grad_recv_info[mb_index] = self._create_grad_recv_info(
self.act_send_info
)
@abstractmethod
def _create_grad_recv_info(
self,
act_send_info: dict,
) -> tuple[_RecvInfo, ...]:
raise NotImplementedError
def _get_recv_ops(
self,
recv_infos: tuple[InputInfo, ...],
) -> list[dist.P2POp]:
"""
Helper function shared by `get_fwd_recv_ops` and `get_bwd_recv_ops`.
Returns a list of ops that correspond to the recv infos.
"""
ops: list[dist.P2POp] = []
for info in recv_infos:
if not isinstance(info, _RecvInfo):
continue
peer_rank = self.stage_index_to_group_rank[info.source]
peer_global_rank = (
peer_rank
if self.group is None
else dist.get_global_rank(self.group, peer_rank)
)
ops.append(
dist.P2POp(dist.irecv, info.buffer, peer_global_rank, self.group)
)
return ops
"""[Note: V-schedule special case]
V-Schedules have a special case where 2 stages with adjacent stage_id are on the same rank.
ex: 2 ranks, 4 stages forms a simple V:
rank0: stage 0 stage 3
rank1: stage 1 stage 2
stage 0,1 and 2,3 communicate activations using send/recv as usual, but stage 1,2 do not need to
use communication ops. Instead, they should pass tensor data directly via function call.
set_local_fwd_input and (get_local_bwd_output + set_local_bwd_input) facilitate this optimization, and
should be called at the appropriate time during the pipeline schedule (after forward or backward execution).
"""
def set_local_fwd_input(self, prev_stage_outputs: Any, mb_index: int) -> None:
"""
Moves 'prev_stage_outputs' from another stage on the same rank into place as inputs for this stage. Avoids
copying tensor data or using send/recv op. Detaches original tensor and sets requires_grad so the
tensor can serve as a leaf for autograd and gradients can be collected from it during backward.
"""
recv_infos: tuple[InputInfo, ...] = self.args_recv_info[mb_index]
# See [Note: pipeline model output type]
prev_stage_outputs = _normalize_model_output_as_tuple(prev_stage_outputs)
for info, tensor in zip(recv_infos, prev_stage_outputs):
assert isinstance(tensor, torch.Tensor), (
f"expected tensor values as outputs from prev stage, got {type(tensor)}"
)
assert isinstance(info, _RecvInfo), (
"set_local_Fwd_input should only be called on non-first stage, which should always have RecvInfo"
)
# We don't need to do a data copy here, since we can directly pass the activation tensor reference from
# one stage to the next. However, we do need to mark the activation as a leaf tensor since it will serve
# as the input tensor for a fresh autograd graph, not part of the previous stage's autograd graph.
# TODO: confirm, do we use this activation as the root of the backward call for the previous stage? does
# detach have any effect on that?
info.buffer = tensor.detach().requires_grad_(True)
def get_local_bwd_output(self, mb_index):
"""
Returns the input grad tensors for this stage, which correspond to the stage inputs during forward.
"""
assert self.has_backward, (
"can't steal_bwd_input if this stage doesn't have backward"
)
assert not self.is_first, "can't get bwd output if this stage is first"
self._check_chunk_id(mb_index)
return self.bwd_cache.pop(mb_index)
def set_local_bwd_input(
self, next_stage_bwd_outputs: tuple[torch.Tensor | None, ...], mb_index: int
) -> None:
"""
Moves 'grad input' tensors from the next stage to 'grad_output' on this stage, avoiding a copy or send/recv.
Does not detach or set '_requires_grad'.
"""
assert isinstance(next_stage_bwd_outputs, tuple), (
f"Expected tuple, got {type(next_stage_bwd_outputs)}"
)
assert self.has_backward, (
"can't set bwd input if this stage doesn't have backward"
)
assert not self.is_last, "can't set bwd input if this stage is last"
recv_infos = self.grad_recv_info[mb_index]
for info, tensor in zip(recv_infos, next_stage_bwd_outputs):
assert isinstance(tensor, torch.Tensor), (
f"expected tensor values as outputs from prev stage, got {type(tensor)}"
)
assert isinstance(info, _RecvInfo), (
f"Expected a recv info, got {type(info)}"
)
info.buffer = tensor
def get_fwd_recv_ops(self, fwd_chunk_id: int) -> list[dist.P2POp]:
"""
Returns a list of ops that are needed to receive the input arguments
for this stage.
"""
recv_infos: tuple[InputInfo, ...] = self.args_recv_info[fwd_chunk_id]
return self._get_recv_ops(recv_infos)
def get_bwd_recv_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:
"""
Returns a list of ops that are needed to receive the gradients
for this stage.
"""
if not self.has_backward or self.is_last:
return []
recv_infos = self.grad_recv_info[bwd_chunk_id]
return self._get_recv_ops(recv_infos)
def get_fwd_send_ops(self, fwd_chunk_id: int) -> list[dist.P2POp]:
"""
Get the activation send ops for current stage's forward.
"""
output_tuple, _ = self.fwd_cache[fwd_chunk_id]
ops: list[dist.P2POp] = []
for idx, out in enumerate(output_tuple):
dst_stages = self.act_send_info[idx]
for dst in dst_stages:
if dst is None:
continue
logger.debug(
"%s Sending tensor to Stage %s: %s",
self.log_prefix,
dst,
out.size(),
)
peer_rank = self.stage_index_to_group_rank[dst]
peer_global_rank = (
peer_rank
if self.group is None
else dist.get_global_rank(self.group, peer_rank)
)
ops.append(dist.P2POp(dist.isend, out, peer_global_rank, self.group))
return ops
def get_bwd_send_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]:
"""
Get the gradient send ops for current stage's backward.
"""
if not self.has_backward or self.is_first:
return []
self._check_chunk_id(bwd_chunk_id)
# Create bwd send infra lazily
if self.grad_send_info is None:
# Send info for input grads during backward:
# List of destinations corresponding to input grads
# Can be None if an input has no grad
# `grad_send_info` is a mirror of `args_recv_info`
self.grad_send_info = self._create_grad_send_info(self.args_recv_info[0])
ops: list[dist.P2POp] = []
grads_input = self.bwd_cache.pop(bwd_chunk_id)
for grad, grad_recv_stage in zip(grads_input, self.grad_send_info):
if isinstance(grad, torch.Tensor) and grad_recv_stage is not None:
logger.debug(
"%s Sending gradient to Stage %s: %s",
self.log_prefix,
grad_recv_stage,
grad.size(),
)
peer_rank = self.stage_index_to_group_rank[grad_recv_stage]
peer_global_rank = (
peer_rank
if self.group is None
else dist.get_global_rank(self.group, peer_rank)
)
ops.append(dist.P2POp(dist.isend, grad, peer_global_rank, self.group))
else:
if not (grad is None and grad_recv_stage is None):
raise RuntimeError(
f"[{self.stage_index}] for chunk {bwd_chunk_id} has gradients {grad} "
f"and is expecting to send gradients to stage {grad_recv_stage}"
)
return ops
def clear_runtime_states(self) -> None:
"""
Clear runtime states of the stage.
"""
# map microbatch ID to list of forward tensor args
self.fwd_cache.clear()
# Caching chunk outputs for final output merge or reduction
self.output_chunks.clear()
# Clear grad of input buffers in between schedule steps. This is because
# `torch.autograd.backward()` will accumulate gradients into leaf
# tensors by default. For gradients to pass back to previous stages, we
# don't want such accumulation.
for recv_tuple in self.args_recv_info.values(): # iterate over all chunks
for a in recv_tuple: # iterate over all input args
if isinstance(a, _RecvInfo):
# Setting to None is the newer and recommended way to clear grads, compared to `zero_()`.
# See https://github.com/pytorch/pytorch/pull/92731
a.buffer.grad = None
def _map_tensor_from_recv_info(
self,
recv_infos: tuple[InputInfo, ...],
):
"""
Map tensors from recv infos to a list.
"""
def get_recv_tensor(info):
if isinstance(info, _RecvInfo):
return info.buffer
else:
raise AssertionError(f"Expected _RecvInfo but got {type(info)}")
return map_aggregate(cast(Argument, recv_infos), get_recv_tensor)
def _retrieve_recv_activations(self, fwd_chunk_id: int):
"""
Retrieve the activations received for the current stage during forward.
"""
recv_infos = self.args_recv_info[fwd_chunk_id]
activations = self._map_tensor_from_recv_info(recv_infos)
return activations
def _retrieve_recv_grads(
self,
bwd_chunk_id: int,
):
"""
Retrieve the gradients received for the current stage during backward.
"""
recv_infos = self.grad_recv_info[bwd_chunk_id]
grads = self._map_tensor_from_recv_info(recv_infos)
return grads
def forward_maybe_with_nosync(self, *args, **kwargs):
# If submod is wrapped with DDP, we use the `no_sync` context manager to
# avoid gradient all-reduce per microbatch
if isinstance(self.submod, DistributedDataParallel):
with self.submod.no_sync(): # type: ignore[operator]
out_val = self.submod(*args, **kwargs)
else:
out_val = self.submod(*args, **kwargs)
return out_val
def scale_grads(self, grad_scale_factor: int) -> None:
"""Scale gradients model gradients by `grad_scale_factor`, which should be specified in coordination with the
loss function used with pipelining. For loss functions which perform 'mean' loss reduction, `grad_scale_factor`
should be set to num_microbatches. For loss functions that use `sum` reduction, `grad_scale_factor` should
be set to 1.
Should only be called once per pipeline schedule step, after all backwards passes have completed.
"""
# PP scales only for its own contribution (microbatches), but relies on DP to scale further
# for DP degree.
if grad_scale_factor != 1:
for p in self.submod.parameters():
if p.grad is not None:
p.grad.div_(grad_scale_factor)
def backward_maybe_with_nosync(
self,
backward_type,
bwd_kwargs: dict,
last_backward: bool = False,
) -> tuple[tuple[torch.Tensor | None, ...], list[dict[str, Any]] | None]:
"""
Whether using PP with FSDP, DDP, or replicate, there are some runtime differences between the last backward step and the
other steps. Namely, we need to accumulate gradients on previous steps and reduce them on the last step, but
there are additional state-variables and performance considerations depending on the data parallelism used.
This helper should adapt any pipeline parallel schedule to work with common/supported data parallel libraries.
"""
def perform_backward(
backward_type,
) -> Callable[
[],
tuple[tuple[torch.Tensor | None, ...], list[dict[str, Any]] | None],
]:
if backward_type == "full":
return lambda: (
stage_backward(
bwd_kwargs["stage_output"],
bwd_kwargs["output_grads"],
bwd_kwargs["input_values"],
),
None,
)
elif backward_type == "input":
return lambda: stage_backward_input(
bwd_kwargs["stage_output"],
bwd_kwargs["output_grads"],
bwd_kwargs["input_values"],
self.submod.parameters(),
)
elif backward_type == "weight":
return lambda: (
stage_backward_weight(
self.submod.parameters(), bwd_kwargs["param_groups"]
),
None,
)
else:
raise RuntimeError(f"Unknown backward type: {backward_type}")
# If submod is wrapped by DDP
if isinstance(self.submod, DistributedDataParallel):
if last_backward:
# Last chunk, prepare for gradient reduction
# HACK: reaching into DDP implementation details here. Is there a better way?
self.submod.reducer.prepare_for_backward( # type: ignore[union-attr, operator]
list(
torch.nn.parallel.distributed._find_tensors( # type: ignore[attr-defined]
bwd_kwargs["stage_output"]
)
)
)
result = perform_backward(backward_type)()
else:
with self.submod.no_sync(): # type: ignore[operator]
result = perform_backward(backward_type)()
# If submod is a FSDP or replicate module
elif isinstance(self.submod, FSDPModule):
self.submod.set_is_last_backward(False)
self.submod.set_reshard_after_backward(False)
self.submod.set_requires_gradient_sync(False)
result = perform_backward(backward_type)()
else:
# Non-DP submodule, regular backward
result = perform_backward(backward_type)()
grads, param_groups = result
return grads, param_groups
def forward_one_chunk(
self,
fwd_chunk_id: int,
args: tuple[Any, ...],
kwargs: dict[str, Any] | None = None,
save_forward_output: bool = True,
):
"""
Perform forward pass on the stage with one microbatch.
`args` and `kwargs` are the inputs from *external* to this stage.
As of Sept 2024:
- `args` applies to the first stage only, other stages receives args
through activation transmission.
- `kwargs` can be passed to all stages via respective `step` calls.
"""
if self.is_first:
# First stage doesn't need to receive anything
composite_args = args
else:
# Receive activations for this chunk
# Activations only come in args form
composite_args = self._retrieve_recv_activations(fwd_chunk_id)
composite_kwargs = kwargs or {}
self._validate_fwd_input(args, kwargs)
# Compute forward
try:
output = self.forward_maybe_with_nosync(*composite_args, **composite_kwargs)
except Exception as e:
exc_msg = f"""
{self.log_prefix} failed to run forward:
args: {map_debug_info(composite_args)}
kwargs: {map_debug_info(composite_kwargs)}
"""
raise RuntimeError(exc_msg) from e
# See [Note: pipeline model output type]
output_tuple = _normalize_model_output_as_tuple(output)
# Prepare for final output merge or reduction
# Output chunks is only used for the last stage since we only merge the output of the last stage
if self.is_last and save_forward_output:
self.output_chunks.append(output)
# Save activations and inputs for backward
flat_args = flatten_args(composite_args)
flat_kwargs = flatten_args(composite_kwargs)
flatten_input_tensors = flat_args + flat_kwargs
self.fwd_cache[fwd_chunk_id] = (
output_tuple, # stage_output
flatten_input_tensors, # input_values
)
logger.debug(
"%s Forwarded chunk %s, outputs: %s",
self.log_prefix,
fwd_chunk_id,
map_debug_info(output),
)
self._validate_fwd_outputs(output_tuple)
# We return the original user-provided output, not normalized to tuple.
# See [Note: pipeline model output type]
return output
def backward_one_chunk(
self,
bwd_chunk_id: int,
loss=None,
full_backward: bool = True,
last_backward=False,
):
"""
Perform backward pass on the module.
This should only be called once per microbatch.
If full_backward is True (the default), the full backward pass including weight and input gradients will be run,
and it is an error to call `backward_weight_one_chunk` for this bwd_chunk_id.
If full_backward is False, it is optional that `dw_runner` was provided to the PipelineStage at __init__ time,
and a subsequent call to `backward_weight_one_chunk` is required to invoke dw_runner and complete the backward.
last_backward is controlled by the schedule and signals synchronization of gradients across DP groups
after the last backward.
"""
# skip backward computation if backward is not enabled
if not self.has_backward:
return
self._check_chunk_id(bwd_chunk_id)
(
stage_output,
input_values,
) = self.fwd_cache.pop(bwd_chunk_id)
# Compute backward
if self.is_last:
# Last stage computes gradients from loss and has no gradients from
# next stage
bwd_kwargs = {
"stage_output": loss,
"output_grads": None,
"input_values": input_values,
}
else:
# Otherwise, receive gradients from next stage
grads_output = self._retrieve_recv_grads(bwd_chunk_id)
# If an input to the pipeline requires gradient,
# `torch.autograd.backward` will accumulate the gradient into the
# `.grad` field of such input
bwd_kwargs = {
"stage_output": stage_output,
"output_grads": grads_output,
"input_values": input_values,
}
grads_input: tuple[torch.Tensor | None, ...] = ()
# Custom backward function
if self.dw_builder:
# TODO: We may want to change our semantics so we are allowed to ignore
# the 'dw_builder' and call full_backward directly when it is a full_backward op.
grads_input, _ = self.backward_maybe_with_nosync(
"full",
bwd_kwargs,
last_backward=last_backward,
)
if full_backward:
self.dw_builder()()
else:
self.dw_runner[bwd_chunk_id] = self.dw_builder()
else:
if full_backward:
grads_input, _ = self.backward_maybe_with_nosync(
"full", bwd_kwargs, last_backward=last_backward
)
else:
param_groups: list[dict[str, Any]] | None = None
# Skip the backward for the first stage since we will perform the weight update with
# autograd.backward in backward_weight_one_chunk
if not self.is_first:
if isinstance(bwd_kwargs["stage_output"], torch.Tensor):
bwd_kwargs["stage_output"] = (bwd_kwargs["stage_output"],)
# perform the partial backwards for the inputs with a custom backward function
# when the "stage_ouput" is a loss, then it is a tensor, otherwise it is a tuple of tensors
grads_input, param_groups = self.backward_maybe_with_nosync(
"input", bwd_kwargs, last_backward=last_backward
)
# TODO: we don't need to save this, add to dw_runner?
self.backward_state[bwd_chunk_id] = (
bwd_kwargs["input_values"],
param_groups,
bwd_kwargs["stage_output"],
bwd_kwargs["output_grads"],
)
# Save a placeholder for the dw_runner
self.dw_runner[bwd_chunk_id] = lambda: None
self.bwd_cache[bwd_chunk_id] = grads_input
if self.is_last and not self.is_first:
# Autograd dependencies:
# rest_of_autograd_graph -> stage_output -> loss
# stage_output is no longer used in the last stage for backward and only needed
# to return to the user in merge_output_chunks, therefore
# this should be detached to release autograd graph context and free memory earlier
for t in stage_output:
if not t._is_view(): # views are not detachable in-place
t.detach_()
logger.debug("%s Backwarded chunk %s", self.log_prefix, bwd_chunk_id)
def backward_weight_one_chunk(self, bwd_chunk_id: int, last_backward=False):
# skip backward computation if backward is not enabled
if not self.has_backward:
return
assert bwd_chunk_id in self.dw_runner, (
f"{self.log_prefix} Attempted to run backward_weight_one_chunk for chunk {bwd_chunk_id}"
" without first calling `backward_one_chunk(full_backward=False)`"
)
if self.dw_builder is not None:
self.dw_runner.pop(bwd_chunk_id)()
else:
(
input_values,
param_groups,
stage_output,
output_grads,
) = self.backward_state.pop(bwd_chunk_id)
if self.stage_index != 0:
bwd_kwargs = {
"stage_output": stage_output,
"param_groups": param_groups,
}
self.backward_maybe_with_nosync(
"weight", bwd_kwargs, last_backward=last_backward
)
else:
# TODO: figure out a better way to do this:
# if inputs does not require gradient,
# then the parameter group will not be fully captured during stage_backward_input
# in this case, we need call grad directly on the parameters
# To solve: make input fn do the intersect compute and then finish it off during W
bwd_kwargs = {
"stage_output": stage_output,
"output_grads": output_grads,
"input_values": input_values,
}
self.backward_maybe_with_nosync(
"full", bwd_kwargs, last_backward=last_backward
)
def _validate_fwd_input(self, args, kwargs):
"""Raises a RuntimeError if shapes of input args/kwargs do not match the shapes configured for this stage."""
if self.is_first:
# TODO why is there a separate recv_info for each pipeline chunk?
# kwen2501: to avoid passing a `fwd_chunk_id` to this function, we
# check all chunks against args_recv_info[0]
expected_args = self.args_recv_info[0]
else:
# We don't check inputs for non-0 stages assuming they don't accept
# user inputs in canonical pipeline scenarios
return
if len(kwargs):
# TODO- need a mapping of kwarg to position in self.args_recv_info
# Without it, we are not 100% sure how to match the args and
# expected_args.
return
# TODO- need a mapping of kwarg to position in self.args_recv_info
# maybe it's impossible to tell whether the len mismatches because
# (a) the user passed an extra arg or missed an arg
# (b) the user did not pass a kwarg, which has a default value baked into expected_args
expected_tensors_meta = [
e.meta if isinstance(e, _RootArgPlaceholder) else e.buffer
for e in expected_args
]
validate_tensors_metadata(
f"Stage {self.stage_index} forward inputs", expected_tensors_meta, args
)
def _validate_fwd_outputs(self, outputs: tuple[torch.Tensor, ...]):
"""Raises a RuntimeError if this stage produces an output of unexpected shape/dtype.
Most likely, this could be caused either by incorrect user specification of output shapes, or because
shape inference was done on the original model but then at runtime the model is wrapped with something like
mixed precision which changes output dtype.
"""
expected_tensors_meta = self.get_outputs_meta()
validate_tensors_metadata(
f"Stage {self.stage_index} forward outputs", expected_tensors_meta, outputs
)
def _get_init_p2p_neighbors_ops(self) -> list[dist.P2POp]:
"""
Get the operations to initialize the p2p communicators between previous and next stages.
This is done so by creating a dummy tensor and sending it to the next stage and receiving
from the previous stage.
"""
ops: list[dist.P2POp] = []
next_stage_peer_rank = self.stage_index_to_group_rank.get(self.stage_index + 1)
prev_stage_peer_rank = self.stage_index_to_group_rank.get(self.stage_index - 1)
recv_tensor = torch.zeros(1, device=self.device, dtype=torch.float32)
send_tensor = torch.tensor(
self.stage_index, device=self.device, dtype=torch.float32
)
# forward
if not self.is_first:
ops.append(
dist.P2POp(
dist.irecv,
recv_tensor,
group_peer=prev_stage_peer_rank,
group=self.group,
)
)
if not self.is_last:
ops.append(
dist.P2POp(
dist.isend,
send_tensor,
group_peer=next_stage_peer_rank,
group=self.group,
)
)
# backward
if not self.is_first:
ops.append(
dist.P2POp(
dist.isend,
send_tensor,
group_peer=prev_stage_peer_rank,
group=self.group,
)
)
if not self.is_last:
ops.append(
dist.P2POp(
dist.irecv,
recv_tensor,
group_peer=next_stage_peer_rank,
group=self.group,
)
)
return ops
def perform_reduce_grad(self, grad_scale_factor: int):
"""
Called as a part of schedule IR.
REDUCE_GRAD action is scheduled after all microbatches W, B actions.
Currently contains "post_backward" functionality for FSDP.
We can try to extract post_backward in a separate IR action in future.
"""
# Manually call post backward for FSDP
if isinstance(self.submod, FSDPModule):
fsdp_module = self.submod
fsdp_module.set_is_last_backward(True)
fsdp_module.set_reshard_after_backward(True)
fsdp_module.set_requires_gradient_sync(True)
if isinstance(fsdp_module, ReplicateModule):
distributed_state = replicate.state(fsdp_module) # type: ignore[arg-type]
else:
distributed_state = fully_shard.state(fsdp_module) # type: ignore[attr-defined]
for state in distributed_state._state_ctx.all_states:
if state._fsdp_param_group:
state._fsdp_param_group.post_backward()
# It would be much better if pipelining backward invoked .backward so autograd hooks
# worked and modules like DDP/FSDP behaved as expected. Working around this for the time being,
# we need to call this too to ensure FSDP syncs its grad reduction ops back to the default stream.
distributed_state._root_post_backward_final_callback()
# Call gradient scaling at the end of the backward pass
# NOTE: this must happen after FSDP post_backward if FSDP is enabled
if grad_scale_factor != 1:
self.scale_grads(grad_scale_factor)
|
_PipelineStageBase
|
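Two details of the stage base class above are easy to show in isolation: the default round-robin `stage_index_to_group_rank` mapping and the V-schedule layout from the "[Note: V-schedule special case]" comment. The sketch below is plain Python rather than the torch.distributed API, and `v_schedule_ranks` is a hypothetical helper.

```python
# Illustration only: reproduces the rank mappings discussed above without
# touching torch.distributed. v_schedule_ranks is hypothetical.

def round_robin_ranks(num_stages: int, group_size: int) -> dict[int, int]:
    # Default mapping used in __init__: stage i runs on rank i % group_size.
    return {i: i % group_size for i in range(num_stages)}

def v_schedule_ranks(num_stages: int, group_size: int) -> dict[int, int]:
    # "V" layout: ranks 0..n-1 going down, then n-1..0 coming back, so the two
    # middle stages land on the same rank and can skip send/recv entirely.
    assert num_stages == 2 * group_size
    return {i: (i if i < group_size else num_stages - 1 - i) for i in range(num_stages)}

print(round_robin_ranks(4, 2))  # {0: 0, 1: 1, 2: 0, 3: 1}
print(v_schedule_ranks(4, 2))   # {0: 0, 1: 1, 2: 1, 3: 0} -> stages 1 and 2 share rank 1
```

Stages that end up on the same rank exchange tensors through `set_local_fwd_input`/`set_local_bwd_input` instead of the P2P send/recv ops.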
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/resolved/scopes.py
|
{
"start": 3462,
"end": 4222
}
|
class ____(WrappedObjectScope):
"""Provides access to component loading utilities within templates.
Available via `{{ context.* }}` in component YAML files.
Examples:
{{ context.project_root }}/data/input.csv
{{ context.load_component("other_component") }}
{{ context.build_defs("submodule") }}
"""
def __init__(self, context):
accessible_attributes = {
"load_component",
"build_defs",
"project_root",
}
super().__init__(context, accessible_attributes)
@property
def project_root(self) -> str:
# override here so we can resolve the project_root to a string
return str(self._wrapped_object.project_root.resolve())
|
LoadContextScope
|
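LoadContextScope above is essentially an allowlist over a wrapped context object. Since `WrappedObjectScope` is dagster-internal, the sketch below uses a generic `AllowlistedScope` stand-in to show the same idea: only names on the allowlist reach the wrapped object.

```python
# Generic sketch of the "wrapped object with an attribute allowlist" pattern;
# AllowlistedScope and FakeContext are made up for illustration.
from typing import Any

class AllowlistedScope:
    def __init__(self, wrapped: Any, allowed: set[str]) -> None:
        self._wrapped = wrapped
        self._allowed = allowed

    def __getattr__(self, name: str) -> Any:
        if name not in self._allowed:
            raise AttributeError(f"'{name}' is not exposed to templates")
        return getattr(self._wrapped, name)

class FakeContext:
    project_root = "/tmp/project"
    secret_token = "do-not-leak"

scope = AllowlistedScope(FakeContext(), {"project_root"})
print(scope.project_root)   # ok
# scope.secret_token        # would raise AttributeError
```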
python
|
psf__black
|
scripts/release_tests.py
|
{
"start": 600,
"end": 2333
}
|
class ____(unittest.TestCase):
def setUp(self) -> None:
# We only test on >= 3.12
self.tempdir = TemporaryDirectory(delete=False) # type: ignore
self.tempdir_path = Path(self.tempdir.name)
self.sf = SourceFiles(self.tempdir_path)
def tearDown(self) -> None:
rmtree(self.tempdir.name)
return super().tearDown()
@patch("release.get_git_tags")
def test_get_current_version(self, mocked_git_tags: Mock) -> None:
mocked_git_tags.return_value = ["1.1.0", "69.1.0", "69.1.1", "2.2.0"]
self.assertEqual("69.1.1", self.sf.get_current_version())
@patch("release.get_git_tags")
@patch("release.datetime", FakeDateTime)
def test_get_next_version(self, mocked_git_tags: Mock) -> None:
# test we handle no args
mocked_git_tags.return_value = []
self.assertEqual(
"69.1.0",
self.sf.get_next_version(),
"Unable to get correct next version with no git tags",
)
# test we handle previous versions released this month
mocked_git_tags.return_value = ["1.1.0", "69.1.0", "69.1.1", "2.2.0"]
self.assertEqual(
"69.1.2",
self.sf.get_next_version(),
"Unable to get correct version with 2 previous versions released this"
" month",
)
def test_tuple_calver(self) -> None:
first_month_release = tuple_calver("69.1.0")
second_month_release = tuple_calver("69.1.1")
self.assertEqual((69, 1, 0), first_month_release)
self.assertEqual((0, 0, 0), tuple_calver("69.1.1a0")) # Hack for alphas/betas
self.assertTrue(first_month_release < second_month_release)
if __name__ == "__main__":
unittest.main()
|
TestRelease
|
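The tests above pin down black's CalVer helpers without showing them. Assuming versions are "YY.M.N" strings, a rough sketch consistent with the asserted values could look like the following; the real `tuple_calver` and `SourceFiles.get_next_version` live in black's release.py and may differ.

```python
# Rough sketch mirroring the behaviour asserted by the tests above; not the
# real black release.py implementation.
from datetime import datetime

def tuple_calver(version: str) -> tuple[int, ...]:
    try:
        return tuple(int(part) for part in version.split("."))
    except ValueError:
        return (0, 0, 0)  # pre-releases like "69.1.1a0" collapse to the minimum

def next_version(tags: list[str], now: datetime) -> str:
    this_month = [
        t for t in tags if tuple_calver(t)[:2] == (now.year % 100, now.month)
    ]
    if not this_month:
        return f"{now.year % 100}.{now.month}.0"
    latest = max(tuple_calver(t) for t in this_month)
    return f"{latest[0]}.{latest[1]}.{latest[2] + 1}"

# Matches the test expectation of "69.1.2" for two releases in the faked month.
print(next_version(["1.1.0", "69.1.0", "69.1.1", "2.2.0"], datetime(2069, 1, 15)))
```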
python
|
getsentry__sentry
|
tests/sentry/preprod/api/endpoints/test_preprod_artifact_rerun_analysis.py
|
{
"start": 250,
"end": 3478
}
|
class ____(APITestCase):
"""Base class with shared test logic for rerun analysis endpoints"""
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.organization)
self.login_as(user=self.user)
def create_artifact_with_metrics(self):
"""Creates an artifact with size metrics and comparisons"""
main_file = File.objects.create(name="test_artifact.zip", type="application/zip")
artifact = PreprodArtifact.objects.create(
project=self.project,
file_id=main_file.id,
app_name="test_artifact",
app_id="com.test.app",
build_version="1.0.0",
build_number=1,
state=PreprodArtifact.ArtifactState.PROCESSED,
)
analysis_file_1 = File.objects.create(name="analysis1.json", type="application/json")
size_metric_1 = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
analysis_file_id=analysis_file_1.id,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
)
analysis_file_2 = File.objects.create(name="analysis2.json", type="application/json")
size_metric_2 = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
analysis_file_id=analysis_file_2.id,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
identifier="watch_app",
)
comparison_file = File.objects.create(name="comparison.json", type="application/json")
comparison = PreprodArtifactSizeComparison.objects.create(
head_size_analysis=size_metric_1,
base_size_analysis=size_metric_2,
organization_id=self.organization.id,
file_id=comparison_file.id,
)
return artifact, main_file, [analysis_file_1, analysis_file_2, comparison_file], comparison
def assert_metrics_cleaned_up(self, artifact, analysis_files, comparison):
"""Asserts that old metrics/comparisons are deleted and new PENDING metric created"""
assert PreprodArtifactSizeMetrics.objects.filter(preprod_artifact=artifact).count() == 1
new_metric = PreprodArtifactSizeMetrics.objects.get(preprod_artifact=artifact)
assert new_metric.state == PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING
assert (
new_metric.metrics_artifact_type
== PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT
)
assert new_metric.analysis_file_id is None
assert not PreprodArtifactSizeComparison.objects.filter(id=comparison.id).exists()
for analysis_file in analysis_files:
assert not File.objects.filter(id=analysis_file.id).exists()
def assert_artifact_reset(self, artifact):
"""Asserts that artifact state is reset"""
artifact.refresh_from_db()
assert artifact.state == PreprodArtifact.ArtifactState.UPLOADED
assert artifact.error_code is None
assert artifact.error_message is None
|
BaseRerunAnalysisTest
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/ai.py
|
{
"start": 2869,
"end": 3244
}
|
class ____(InputType):
"""Passes along user input as-is."""
@classmethod
def matches(cls, user_input: str) -> bool:
return True
@classmethod
def get_context(cls, user_input: str) -> str:
return f"The user's stated goal is: {user_input}."
@classmethod
def additional_allowed_tools(cls) -> list[str]:
return []
|
TextInputType
|
python
|
gevent__gevent
|
src/greentest/3.10/test_smtpd.py
|
{
"start": 995,
"end": 1100
}
|
class ____(DummyServer):
def listen(self, num):
raise DummyDispatcherBroken()
|
BrokenDummyServer
|
python
|
django__django
|
tests/custom_lookups/tests.py
|
{
"start": 7210,
"end": 7271
}
|
class ____(StartsWith):
lookup_name = "sw"
|
CustomStartsWith
|
python
|
oauthlib__oauthlib
|
oauthlib/oauth2/rfc6749/errors.py
|
{
"start": 7761,
"end": 8013
}
|
class ____(OAuth2Error):
"""
The requested scope is invalid, unknown, or malformed, or
exceeds the scope granted by the resource owner.
https://tools.ietf.org/html/rfc6749#section-5.2
"""
error = 'invalid_scope'
|
InvalidScopeError
|
python
|
huggingface__transformers
|
src/transformers/models/metaclip_2/modular_metaclip_2.py
|
{
"start": 18030,
"end": 20619
}
|
class ____(CLIPTextModelWithProjection):
"""
MetaClip2 text model with a projection layer on top (a linear layer on top of the pooled output).
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Args:
config ([`MetaClip2TextConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
Examples:
```python
>>> from transformers import AutoTokenizer, MetaClip2TextModelWithProjection
>>> model = MetaClip2TextModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> text_embeds = outputs.text_embeds
```"""
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
):
r"""
Examples:
```python
>>> from transformers import AutoTokenizer, MetaClip2TextModelWithProjection
>>> model = MetaClip2TextModelWithProjection.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> text_embeds = outputs.text_embeds
```"""
return super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
**kwargs,
)
|
MetaClip2TextModelWithProjection
|
python
|
pytorch__pytorch
|
test/functorch/test_control_flow.py
|
{
"start": 269712,
"end": 271874
}
|
class ____(torch.nn.Module):
def forward(self, primals_1: "f32[3, 4]", primals_2: "f32[3, 4]", gt: "b8[]", tangents_1: "f32[3, 4]"):
true_graph_1 = self.true_graph_1
false_graph_1 = self.false_graph_1
cond_1 = torch.ops.higher_order.cond(gt, true_graph_1, false_graph_1, (primals_1, primals_2, tangents_1)); gt = true_graph_1 = false_graph_1 = primals_1 = primals_2 = tangents_1 = None
getitem_1: "f32[3, 4]" = cond_1[0]
getitem_2: "f32[3, 4]" = cond_1[1]; cond_1 = None
return (getitem_1, getitem_2)
class true_graph_1(torch.nn.Module):
def forward(self, arg0_1: "f32[3, 4]", arg1_1: "f32[3, 4]", arg2_1: "f32[3, 4]"):
clone: "f32[3, 4]" = torch.ops.aten.clone.default(arg2_1)
clone_1: "f32[3, 4]" = torch.ops.aten.clone.default(arg2_1); arg2_1 = None
return [clone, clone_1]
class false_graph_1(torch.nn.Module):
def forward(self, arg0_1: "f32[3, 4]", arg1_1: "f32[3, 4]", arg2_1: "f32[3, 4]"):
clone: "f32[3, 4]" = torch.ops.aten.clone.default(arg2_1)
clone_1: "f32[3, 4]" = torch.ops.aten.clone.default(arg2_1); arg2_1 = None
return [clone, clone_1]
""", # noqa: B950
)
self.assertEqual(res, res_compiled)
@skipIfTorchDynamo("Skip because we're testing export")
def test_cond_autograd_backward_out_out_aliasing(self):
from torch._dynamo.testing import AotEagerAndRecordGraphs
backend = AotEagerAndRecordGraphs()
def fn(x, y):
return (x + y).sin()
def f(x, y):
return control_flow.cond(x.sum() > 4, fn, fn, (x, y))
example_inputs = (
torch.ones(3, 4, requires_grad=True),
torch.ones(3, 4, requires_grad=True),
)
res = f(*example_inputs)
res.sum().backward()
res_compiled = torch.compile(f, backend=backend)(*example_inputs)
res_compiled.sum().backward()
if not TEST_WITH_CROSSREF:
self.assertExpectedInline(
normalize_gm(backend.bw_graphs[0].print_readable(print_output=False)),
"""\
|
GraphModule
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/internal/message_test.py
|
{
"start": 2065,
"end": 65592
}
|
class ____(unittest.TestCase):
def testBadUtf8String(self, message_module):
if api_implementation.Type() != 'python':
self.skipTest('Skipping testBadUtf8String, currently only the python '
'api implementation raises UnicodeDecodeError when a '
'string field contains bad utf-8.')
bad_utf8_data = test_util.GoldenFileData('bad_utf8_string')
with self.assertRaises(UnicodeDecodeError) as context:
message_module.TestAllTypes.FromString(bad_utf8_data)
self.assertIn('TestAllTypes.optional_string', str(context.exception))
def testParseErrors(self, message_module):
msg = message_module.TestAllTypes()
self.assertRaises(TypeError, msg.FromString, 0)
self.assertRaises(Exception, msg.FromString, '0')
# Unexpected end group tag.
end_tag = encoder.TagBytes(1, 4)
with self.assertRaises(message.DecodeError) as context:
msg.FromString(end_tag)
if api_implementation.Type() != 'upb':
# upb raises a less specific exception.
self.assertRegex(str(context.exception), 'Unexpected end-group tag.*')
# Unmatched start group tag.
start_tag = encoder.TagBytes(2, 3)
with self.assertRaises(message.DecodeError):
msg.FromString(start_tag)
# Mismatched end group tag.
with self.assertRaises(message.DecodeError):
msg.FromString(start_tag + end_tag)
# Field number 0 is illegal.
self.assertRaises(message.DecodeError, msg.FromString, b'\3\4')
def testDeterminismParameters(self, message_module):
# This message is always deterministically serialized, even if determinism
# is disabled, so we can use it to verify that all the determinism
# parameters work correctly.
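# (repeated fields always serialize in insertion order; the deterministic flag mainly affects map key ordering, so the bytes here are identical either way)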
golden_data = (b'\xe2\x02\nOne string'
b'\xe2\x02\nTwo string'
b'\xe2\x02\nRed string'
b'\xe2\x02\x0bBlue string')
golden_message = message_module.TestAllTypes()
golden_message.repeated_string.extend([
'One string',
'Two string',
'Red string',
'Blue string',
])
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=None))
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=False))
self.assertEqual(golden_data,
golden_message.SerializeToString(deterministic=True))
class BadArgError(Exception):
pass
class BadArg(object):
def __nonzero__(self):
raise BadArgError()
def __bool__(self):
raise BadArgError()
with self.assertRaises(BadArgError):
golden_message.SerializeToString(deterministic=BadArg())
def testPickleSupport(self, message_module):
golden_message = message_module.TestAllTypes()
test_util.SetAllFields(golden_message)
golden_data = golden_message.SerializeToString()
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEqual(unpickled_message, golden_message)
def testPickleNestedMessage(self, message_module):
golden_message = message_module.TestPickleNestedMessage.NestedMessage(bb=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEqual(unpickled_message, golden_message)
def testPickleNestedNestedMessage(self, message_module):
cls = message_module.TestPickleNestedMessage.NestedMessage
golden_message = cls.NestedNestedMessage(cc=1)
pickled_message = pickle.dumps(golden_message)
unpickled_message = pickle.loads(pickled_message)
self.assertEqual(unpickled_message, golden_message)
def testPositiveInfinity(self, message_module):
if message_module is unittest_pb2:
golden_data = (b'\x5D\x00\x00\x80\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
b'\xCD\x02\x00\x00\x80\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\x7F')
else:
golden_data = (b'\x5D\x00\x00\x80\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\x7F'
b'\xCA\x02\x04\x00\x00\x80\x7F'
b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertEqual(golden_message.optional_float, math.inf)
self.assertEqual(golden_message.optional_double, math.inf)
self.assertEqual(golden_message.repeated_float[0], math.inf)
self.assertEqual(golden_message.repeated_double[0], math.inf)
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinity(self, message_module):
if message_module is unittest_pb2:
golden_data = (b'\x5D\x00\x00\x80\xFF'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
b'\xCD\x02\x00\x00\x80\xFF'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF0\xFF')
else:
golden_data = (b'\x5D\x00\x00\x80\xFF'
b'\x61\x00\x00\x00\x00\x00\x00\xF0\xFF'
b'\xCA\x02\x04\x00\x00\x80\xFF'
b'\xD2\x02\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertEqual(golden_message.optional_float, -math.inf)
self.assertEqual(golden_message.optional_double, -math.inf)
self.assertEqual(golden_message.repeated_float[0], -math.inf)
self.assertEqual(golden_message.repeated_double[0], -math.inf)
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumber(self, message_module):
golden_data = (b'\x5D\x00\x00\xC0\x7F'
b'\x61\x00\x00\x00\x00\x00\x00\xF8\x7F'
b'\xCD\x02\x00\x00\xC0\x7F'
b'\xD1\x02\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = message_module.TestAllTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(math.isnan(golden_message.optional_float))
self.assertTrue(math.isnan(golden_message.optional_double))
self.assertTrue(math.isnan(golden_message.repeated_float[0]))
self.assertTrue(math.isnan(golden_message.repeated_double[0]))
# The protocol buffer may serialize to any one of multiple different
# representations of a NaN. Rather than verify a specific representation,
# verify the serialized string can be converted into a correctly
# behaving protocol buffer.
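# (NaN never compares equal to itself, so math.isnan is used below instead of assertEqual)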
serialized = golden_message.SerializeToString()
message = message_module.TestAllTypes()
message.ParseFromString(serialized)
self.assertTrue(math.isnan(message.optional_float))
self.assertTrue(math.isnan(message.optional_double))
self.assertTrue(math.isnan(message.repeated_float[0]))
self.assertTrue(math.isnan(message.repeated_double[0]))
def testPositiveInfinityPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\x7F')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertEqual(golden_message.packed_float[0], math.inf)
self.assertEqual(golden_message.packed_double[0], math.inf)
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNegativeInfinityPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\x80\xFF'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF0\xFF')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertEqual(golden_message.packed_float[0], -math.inf)
self.assertEqual(golden_message.packed_double[0], -math.inf)
self.assertEqual(golden_data, golden_message.SerializeToString())
def testNotANumberPacked(self, message_module):
golden_data = (b'\xA2\x06\x04\x00\x00\xC0\x7F'
b'\xAA\x06\x08\x00\x00\x00\x00\x00\x00\xF8\x7F')
golden_message = message_module.TestPackedTypes()
golden_message.ParseFromString(golden_data)
self.assertTrue(math.isnan(golden_message.packed_float[0]))
self.assertTrue(math.isnan(golden_message.packed_double[0]))
serialized = golden_message.SerializeToString()
message = message_module.TestPackedTypes()
message.ParseFromString(serialized)
self.assertTrue(math.isnan(message.packed_float[0]))
self.assertTrue(math.isnan(message.packed_double[0]))
def testExtremeFloatValues(self, message_module):
message = message_module.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 127)
message.optional_float = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 127)
message.optional_float = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_float = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentNoSigBits)
message.optional_float = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -127)
message.optional_float = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -127)
message.optional_float = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_float = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentNoSigBits)
message.optional_float = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_float == -kMostNegExponentOneSigBit)
# Max 4 bytes float value
max_float = float.fromhex('0x1.fffffep+127')
message.optional_float = max_float
self.assertAlmostEqual(message.optional_float, max_float)
serialized_data = message.SerializeToString()
message.ParseFromString(serialized_data)
self.assertAlmostEqual(message.optional_float, max_float)
# Test set double to float field.
message.optional_float = 3.4028235e+39
self.assertEqual(message.optional_float, float('inf'))
serialized_data = message.SerializeToString()
message.ParseFromString(serialized_data)
self.assertEqual(message.optional_float, float('inf'))
message.optional_float = -3.4028235e+39
self.assertEqual(message.optional_float, float('-inf'))
message.optional_float = 1.4028235e-39
self.assertAlmostEqual(message.optional_float, 1.4028235e-39)
def testExtremeDoubleValues(self, message_module):
message = message_module.TestAllTypes()
# Most positive exponent, no significand bits set.
kMostPosExponentNoSigBits = math.pow(2, 1023)
message.optional_double = kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentNoSigBits)
# Most positive exponent, one significand bit set.
kMostPosExponentOneSigBit = 1.5 * math.pow(2, 1023)
message.optional_double = kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostPosExponentOneSigBit)
# Repeat last two cases with values of same magnitude, but negative.
message.optional_double = -kMostPosExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentNoSigBits)
message.optional_double = -kMostPosExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostPosExponentOneSigBit)
# Most negative exponent, no significand bits set.
kMostNegExponentNoSigBits = math.pow(2, -1023)
message.optional_double = kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentNoSigBits)
# Most negative exponent, one significand bit set.
kMostNegExponentOneSigBit = 1.5 * math.pow(2, -1023)
message.optional_double = kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == kMostNegExponentOneSigBit)
# Repeat last two cases with values of the same magnitude, but negative.
message.optional_double = -kMostNegExponentNoSigBits
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentNoSigBits)
message.optional_double = -kMostNegExponentOneSigBit
message.ParseFromString(message.SerializeToString())
self.assertTrue(message.optional_double == -kMostNegExponentOneSigBit)
def testFloatPrinting(self, message_module):
message = message_module.TestAllTypes()
message.optional_float = 2.0
# Python/C++ customizes the C++ TextFormat to always print trailing ".0" for
# floats. upb doesn't do this, it matches C++ TextFormat.
if api_implementation.Type() == 'upb':
self.assertEqual(str(message), 'optional_float: 2\n')
else:
self.assertEqual(str(message), 'optional_float: 2.0\n')
def testFloatNanPrinting(self, message_module):
message = message_module.TestAllTypes()
message.optional_float = float('nan')
self.assertEqual(str(message), 'optional_float: nan\n')
def testHighPrecisionFloatPrinting(self, message_module):
msg = message_module.TestAllTypes()
msg.optional_float = 0.12345678912345678
old_float = msg.optional_float
msg.ParseFromString(msg.SerializeToString())
self.assertEqual(old_float, msg.optional_float)
def testDoubleNanPrinting(self, message_module):
message = message_module.TestAllTypes()
message.optional_double = float('nan')
self.assertEqual(str(message), 'optional_double: nan\n')
def testHighPrecisionDoublePrinting(self, message_module):
msg = message_module.TestAllTypes()
msg.optional_double = 0.12345678912345678
self.assertEqual(str(msg), 'optional_double: 0.12345678912345678\n')
def testUnknownFieldPrinting(self, message_module):
populated = message_module.TestAllTypes()
test_util.SetAllNonLazyFields(populated)
empty = message_module.TestEmptyMessage()
empty.ParseFromString(populated.SerializeToString())
self.assertEqual(str(empty), '')
def testCopyFromEmpty(self, message_module):
msg = message_module.NestedTestAllTypes()
test_msg = message_module.NestedTestAllTypes()
test_util.SetAllFields(test_msg.payload)
self.assertTrue(test_msg.HasField('payload'))
# Copy from empty message
test_msg.CopyFrom(msg)
self.assertEqual(0, len(test_msg.ListFields()))
test_util.SetAllFields(test_msg.payload)
self.assertTrue(test_msg.HasField('payload'))
# Copy from a non exist message
test_msg.CopyFrom(msg.child)
self.assertFalse(test_msg.HasField('payload'))
self.assertEqual(0, len(test_msg.ListFields()))
def testAppendRepeatedCompositeField(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_nested_message.append(
message_module.TestAllTypes.NestedMessage(bb=1))
nested = message_module.TestAllTypes.NestedMessage(bb=2)
msg.repeated_nested_message.append(nested)
try:
msg.repeated_nested_message.append(1)
except TypeError:
pass
self.assertEqual(2, len(msg.repeated_nested_message))
self.assertEqual([1, 2], [m.bb for m in msg.repeated_nested_message])
def testInsertRepeatedCompositeField(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_nested_message.insert(
-1, message_module.TestAllTypes.NestedMessage(bb=1))
sub_msg = msg.repeated_nested_message[0]
msg.repeated_nested_message.insert(
0, message_module.TestAllTypes.NestedMessage(bb=2))
msg.repeated_nested_message.insert(
99, message_module.TestAllTypes.NestedMessage(bb=3))
msg.repeated_nested_message.insert(
-2, message_module.TestAllTypes.NestedMessage(bb=-1))
msg.repeated_nested_message.insert(
-1000, message_module.TestAllTypes.NestedMessage(bb=-1000))
try:
msg.repeated_nested_message.insert(1, 999)
except TypeError:
pass
self.assertEqual(5, len(msg.repeated_nested_message))
self.assertEqual([-1000, 2, -1, 1, 3],
[m.bb for m in msg.repeated_nested_message])
self.assertEqual(
str(msg), 'repeated_nested_message {\n'
' bb: -1000\n'
'}\n'
'repeated_nested_message {\n'
' bb: 2\n'
'}\n'
'repeated_nested_message {\n'
' bb: -1\n'
'}\n'
'repeated_nested_message {\n'
' bb: 1\n'
'}\n'
'repeated_nested_message {\n'
' bb: 3\n'
'}\n')
self.assertEqual(sub_msg.bb, 1)
def testAssignRepeatedField(self, message_module):
msg = message_module.NestedTestAllTypes()
msg.payload.repeated_int32[:] = [1, 2, 3, 4]
self.assertEqual(4, len(msg.payload.repeated_int32))
self.assertEqual([1, 2, 3, 4], msg.payload.repeated_int32)
def testMergeFromRepeatedField(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.append(1)
msg.repeated_int32.append(3)
msg.repeated_nested_message.add(bb=1)
msg.repeated_nested_message.add(bb=2)
other_msg = message_module.TestAllTypes()
other_msg.repeated_nested_message.add(bb=3)
other_msg.repeated_nested_message.add(bb=4)
other_msg.repeated_int32.append(5)
other_msg.repeated_int32.append(7)
msg.repeated_int32.MergeFrom(other_msg.repeated_int32)
self.assertEqual(4, len(msg.repeated_int32))
msg.repeated_nested_message.MergeFrom(other_msg.repeated_nested_message)
self.assertEqual([1, 2, 3, 4], [m.bb for m in msg.repeated_nested_message])
def testInternalMergeWithMissingRequiredField(self, message_module):
req = more_messages_pb2.RequiredField()
more_messages_pb2.RequiredWrapper(request=req)
def testMergeFromMissingRequiredField(self, message_module):
msg = more_messages_pb2.RequiredField()
message = more_messages_pb2.RequiredField()
message.MergeFrom(msg)
self.assertEqual(msg, message)
def testScalarRepeatedClear(self, message_module):
msg = message_module.TestAllTypes()
empty_size = msg.ByteSize()
msg.repeated_int32.append(1)
msg.repeated_int32.append(3)
repeated_int = msg.repeated_int32
self.assertEqual(2, len(msg.repeated_int32))
self.assertGreater(msg.ByteSize(), empty_size)
msg.repeated_int32.clear()
self.assertEqual(0, len(msg.repeated_int32))
self.assertEqual(0, len(repeated_int))
self.assertEqual(empty_size, msg.ByteSize())
def testCompositeRepeatedClear(self, message_module):
msg = message_module.TestAllTypes()
empty_size = msg.ByteSize()
msg.repeated_nested_message.add(bb=123)
msg.repeated_nested_message.add(bb=2)
repeated_nested_message = msg.repeated_nested_message
self.assertEqual(2, len(msg.repeated_nested_message))
self.assertGreater(msg.ByteSize(), empty_size)
msg.repeated_nested_message.clear()
self.assertEqual(0, len(msg.repeated_nested_message))
self.assertEqual(0, len(repeated_nested_message))
self.assertEqual(empty_size, msg.ByteSize())
def testCompositeRepeatedClearRelease(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_nested_message.add(bb=123)
# sub msg reference should still work after clear()
sub_msg = msg.repeated_nested_message[0]
msg.repeated_nested_message.clear()
self.assertEqual(123, sub_msg.bb)
def testAddWrongRepeatedNestedField(self, message_module):
msg = message_module.TestAllTypes()
try:
msg.repeated_nested_message.add('wrong')
except TypeError:
pass
try:
msg.repeated_nested_message.add(value_field='wrong')
except ValueError:
pass
self.assertEqual(len(msg.repeated_nested_message), 0)
def testRepeatedContains(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.extend([1, 2, 3])
self.assertIn(2, msg.repeated_int32)
self.assertNotIn(0, msg.repeated_int32)
msg.repeated_nested_message.add(bb=1)
sub_msg1 = msg.repeated_nested_message[0]
sub_msg2 = message_module.TestAllTypes.NestedMessage(bb=2)
sub_msg3 = message_module.TestAllTypes.NestedMessage(bb=3)
msg.repeated_nested_message.append(sub_msg2)
msg.repeated_nested_message.insert(0, sub_msg3)
self.assertIn(sub_msg1, msg.repeated_nested_message)
self.assertIn(sub_msg2, msg.repeated_nested_message)
self.assertIn(sub_msg3, msg.repeated_nested_message)
def testRepeatedScalarIterable(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.extend([1, 2, 3])
add = 0
for item in msg.repeated_int32:
add += item
self.assertEqual(add, 6)
def testRepeatedNestedFieldIteration(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_nested_message.add(bb=1)
msg.repeated_nested_message.add(bb=2)
msg.repeated_nested_message.add(bb=3)
msg.repeated_nested_message.add(bb=4)
self.assertEqual([1, 2, 3, 4], [m.bb for m in msg.repeated_nested_message])
self.assertEqual([4, 3, 2, 1],
[m.bb for m in reversed(msg.repeated_nested_message)])
self.assertEqual([4, 3, 2, 1],
[m.bb for m in msg.repeated_nested_message[::-1]])
def testSortEmptyRepeated(self, message_module):
message = message_module.NestedTestAllTypes()
self.assertFalse(message.HasField('child'))
self.assertFalse(message.HasField('payload'))
message.child.repeated_child.sort()
message.payload.repeated_int32.sort()
self.assertFalse(message.HasField('child'))
self.assertFalse(message.HasField('payload'))
def testSortingRepeatedScalarFieldsDefaultComparator(self, message_module):
"""Check some different types with the default comparator."""
message = message_module.TestAllTypes()
# TODO: would testing more scalar types strengthen the test?
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_int32.append(2)
message.repeated_int32.sort()
self.assertEqual(message.repeated_int32[0], 1)
self.assertEqual(message.repeated_int32[1], 2)
self.assertEqual(message.repeated_int32[2], 3)
self.assertEqual(str(message.repeated_int32), str([1, 2, 3]))
message.repeated_float.append(1.1)
message.repeated_float.append(1.3)
message.repeated_float.append(1.2)
message.repeated_float.sort()
self.assertAlmostEqual(message.repeated_float[0], 1.1)
self.assertAlmostEqual(message.repeated_float[1], 1.2)
self.assertAlmostEqual(message.repeated_float[2], 1.3)
message.repeated_string.append('a')
message.repeated_string.append('c')
message.repeated_string.append('b')
message.repeated_string.sort()
self.assertEqual(message.repeated_string[0], 'a')
self.assertEqual(message.repeated_string[1], 'b')
self.assertEqual(message.repeated_string[2], 'c')
self.assertEqual(str(message.repeated_string), str([u'a', u'b', u'c']))
message.repeated_bytes.append(b'a')
message.repeated_bytes.append(b'c')
message.repeated_bytes.append(b'b')
message.repeated_bytes.sort()
self.assertEqual(message.repeated_bytes[0], b'a')
self.assertEqual(message.repeated_bytes[1], b'b')
self.assertEqual(message.repeated_bytes[2], b'c')
self.assertEqual(str(message.repeated_bytes), str([b'a', b'b', b'c']))
def testSortingRepeatedScalarFieldsCustomComparator(self, message_module):
"""Check some different types with custom comparator."""
message = message_module.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(message.repeated_int32[0], -1)
self.assertEqual(message.repeated_int32[1], -2)
self.assertEqual(message.repeated_int32[2], -3)
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(message.repeated_string[0], 'c')
self.assertEqual(message.repeated_string[1], 'bb')
self.assertEqual(message.repeated_string[2], 'aaa')
def testSortingRepeatedCompositeFieldsCustomComparator(self, message_module):
"""Check passing a custom comparator to sort a repeated composite field."""
message = message_module.TestAllTypes()
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=operator.attrgetter('bb'))
self.assertEqual(message.repeated_nested_message[0].bb, 1)
self.assertEqual(message.repeated_nested_message[1].bb, 2)
self.assertEqual(message.repeated_nested_message[2].bb, 3)
self.assertEqual(message.repeated_nested_message[3].bb, 4)
self.assertEqual(message.repeated_nested_message[4].bb, 5)
self.assertEqual(message.repeated_nested_message[5].bb, 6)
self.assertEqual(
str(message.repeated_nested_message),
'[bb: 1\n, bb: 2\n, bb: 3\n, bb: 4\n, bb: 5\n, bb: 6\n]')
def testSortingRepeatedCompositeFieldsStable(self, message_module):
"""Check passing a custom comparator to sort a repeated composite field."""
message = message_module.TestAllTypes()
message.repeated_nested_message.add().bb = 21
message.repeated_nested_message.add().bb = 20
message.repeated_nested_message.add().bb = 13
message.repeated_nested_message.add().bb = 33
message.repeated_nested_message.add().bb = 11
message.repeated_nested_message.add().bb = 24
message.repeated_nested_message.add().bb = 10
message.repeated_nested_message.sort(key=lambda z: z.bb // 10)
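# bb // 10 produces many equal keys; a stable sort keeps equal-key items in insertion order, giving 13, 11, 10, 21, 20, 24, 33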
self.assertEqual([13, 11, 10, 21, 20, 24, 33],
[n.bb for n in message.repeated_nested_message])
# Make sure that for the C++ implementation, the underlying fields
# are actually reordered.
pb = message.SerializeToString()
message.Clear()
message.MergeFromString(pb)
self.assertEqual([13, 11, 10, 21, 20, 24, 33],
[n.bb for n in message.repeated_nested_message])
def testRepeatedCompositeFieldSortArguments(self, message_module):
"""Check sorting a repeated composite field using list.sort() arguments."""
message = message_module.TestAllTypes()
get_bb = operator.attrgetter('bb')
message.repeated_nested_message.add().bb = 1
message.repeated_nested_message.add().bb = 3
message.repeated_nested_message.add().bb = 2
message.repeated_nested_message.add().bb = 6
message.repeated_nested_message.add().bb = 5
message.repeated_nested_message.add().bb = 4
message.repeated_nested_message.sort(key=get_bb)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[1, 2, 3, 4, 5, 6])
message.repeated_nested_message.sort(key=get_bb, reverse=True)
self.assertEqual([k.bb for k in message.repeated_nested_message],
[6, 5, 4, 3, 2, 1])
def testRepeatedScalarFieldSortArguments(self, message_module):
"""Check sorting a scalar field using list.sort() arguments."""
message = message_module.TestAllTypes()
message.repeated_int32.append(-3)
message.repeated_int32.append(-2)
message.repeated_int32.append(-1)
message.repeated_int32.sort(key=abs)
self.assertEqual(list(message.repeated_int32), [-1, -2, -3])
message.repeated_int32.sort(key=abs, reverse=True)
self.assertEqual(list(message.repeated_int32), [-3, -2, -1])
message.repeated_string.append('aaa')
message.repeated_string.append('bb')
message.repeated_string.append('c')
message.repeated_string.sort(key=len)
self.assertEqual(list(message.repeated_string), ['c', 'bb', 'aaa'])
message.repeated_string.sort(key=len, reverse=True)
self.assertEqual(list(message.repeated_string), ['aaa', 'bb', 'c'])
def testRepeatedFieldsComparable(self, message_module):
m1 = message_module.TestAllTypes()
m2 = message_module.TestAllTypes()
m1.repeated_int32.append(0)
m1.repeated_int32.append(1)
m1.repeated_int32.append(2)
m2.repeated_int32.append(0)
m2.repeated_int32.append(1)
m2.repeated_int32.append(2)
m1.repeated_nested_message.add().bb = 1
m1.repeated_nested_message.add().bb = 2
m1.repeated_nested_message.add().bb = 3
m2.repeated_nested_message.add().bb = 1
m2.repeated_nested_message.add().bb = 2
m2.repeated_nested_message.add().bb = 3
def testRepeatedFieldsAreSequences(self, message_module):
m = message_module.TestAllTypes()
self.assertIsInstance(m.repeated_int32, collections.abc.MutableSequence)
self.assertIsInstance(m.repeated_nested_message,
collections.abc.MutableSequence)
def testRepeatedFieldsNotHashable(self, message_module):
m = message_module.TestAllTypes()
with self.assertRaises(TypeError):
hash(m.repeated_int32)
with self.assertRaises(TypeError):
hash(m.repeated_nested_message)
def testRepeatedFieldInsideNestedMessage(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.repeated_int32.extend([])
self.assertTrue(m.HasField('payload'))
def testMergeFrom(self, message_module):
m1 = message_module.TestAllTypes()
m2 = message_module.TestAllTypes()
# Cpp extension will lazily create a sub message which is immutable.
nested = m1.optional_nested_message
self.assertEqual(0, nested.bb)
m2.optional_nested_message.bb = 1
# Make sure cmessage points to a mutable message after the merge instead of
# the lazily created one.
m1.MergeFrom(m2)
self.assertEqual(1, nested.bb)
# Test more nested sub message.
msg1 = message_module.NestedTestAllTypes()
msg2 = message_module.NestedTestAllTypes()
nested = msg1.child.payload.optional_nested_message
self.assertEqual(0, nested.bb)
msg2.child.payload.optional_nested_message.bb = 1
msg1.MergeFrom(msg2)
self.assertEqual(1, nested.bb)
# Test repeated field.
self.assertEqual(msg1.payload.repeated_nested_message,
msg1.payload.repeated_nested_message)
nested = msg2.payload.repeated_nested_message.add()
nested.bb = 1
msg1.MergeFrom(msg2)
self.assertEqual(1, len(msg1.payload.repeated_nested_message))
self.assertEqual(1, nested.bb)
def testMergeFromString(self, message_module):
m1 = message_module.TestAllTypes()
m2 = message_module.TestAllTypes()
# Cpp extension will lazily create a sub message which is immutable.
self.assertEqual(0, m1.optional_nested_message.bb)
m2.optional_nested_message.bb = 1
# Make sure cmessage points to a mutable message after the merge instead of
# the lazily created one.
m1.MergeFromString(m2.SerializeToString())
self.assertEqual(1, m1.optional_nested_message.bb)
def testMergeFromStringUsingMemoryView(self, message_module):
m2 = message_module.TestAllTypes()
m2.optional_string = 'scalar string'
m2.repeated_string.append('repeated string')
m2.optional_bytes = b'scalar bytes'
m2.repeated_bytes.append(b'repeated bytes')
serialized = m2.SerializeToString()
memview = memoryview(serialized)
m1 = message_module.TestAllTypes.FromString(memview)
self.assertEqual(m1.optional_bytes, b'scalar bytes')
self.assertEqual(m1.repeated_bytes, [b'repeated bytes'])
self.assertEqual(m1.optional_string, 'scalar string')
self.assertEqual(m1.repeated_string, ['repeated string'])
# Make sure that the memoryview was correctly converted to bytes, and
# that a sub-sliced memoryview is not being used.
self.assertIsInstance(m1.optional_bytes, bytes)
self.assertIsInstance(m1.repeated_bytes[0], bytes)
self.assertIsInstance(m1.optional_string, str)
self.assertIsInstance(m1.repeated_string[0], str)
def testMergeFromEmpty(self, message_module):
m1 = message_module.TestAllTypes()
# Cpp extension will lazily create a sub message which is immutable.
self.assertEqual(0, m1.optional_nested_message.bb)
self.assertFalse(m1.HasField('optional_nested_message'))
# Make sure the sub message is still immutable after merge from empty.
m1.MergeFromString(b'') # field state should not change
self.assertFalse(m1.HasField('optional_nested_message'))
def ensureNestedMessageExists(self, msg, attribute):
"""Make sure that a nested message object exists.
As soon as a nested message attribute is accessed, it will be present in the
_fields dict, without being marked as actually being set.
"""
getattr(msg, attribute)
self.assertFalse(msg.HasField(attribute))
def testOneofGetCaseNonexistingField(self, message_module):
m = message_module.TestAllTypes()
self.assertRaises(ValueError, m.WhichOneof, 'no_such_oneof_field')
self.assertRaises(Exception, m.WhichOneof, 0)
def testOneofDefaultValues(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
# Oneof is set even when setting it to a default value.
m.oneof_uint32 = 0
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
self.assertFalse(m.HasField('oneof_string'))
m.oneof_string = ''
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_uint32'))
def testOneofSemantics(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
m.oneof_uint32 = 11
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
m.oneof_string = u'foo'
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertTrue(m.HasField('oneof_string'))
# Read nested message accessor without accessing submessage.
m.oneof_nested_message
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_nested_message'))
# Read accessor of nested message without accessing submessage.
m.oneof_nested_message.bb
self.assertEqual('oneof_string', m.WhichOneof('oneof_field'))
self.assertTrue(m.HasField('oneof_string'))
self.assertFalse(m.HasField('oneof_nested_message'))
m.oneof_nested_message.bb = 11
self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_string'))
self.assertTrue(m.HasField('oneof_nested_message'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
self.assertFalse(m.HasField('oneof_nested_message'))
self.assertTrue(m.HasField('oneof_bytes'))
def testOneofCompositeFieldReadAccess(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
self.assertEqual(11, m.oneof_uint32)
def testOneofWhichOneof(self, message_module):
m = message_module.TestAllTypes()
self.assertIs(None, m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
m.oneof_uint32 = 11
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertTrue(m.HasField('oneof_field'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
m.ClearField('oneof_bytes')
self.assertIs(None, m.WhichOneof('oneof_field'))
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
def testOneofClearField(self, message_module):
m = message_module.TestAllTypes()
m.ClearField('oneof_field')
m.oneof_uint32 = 11
m.ClearField('oneof_field')
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearSetField(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m.ClearField('oneof_uint32')
if message_module is unittest_pb2:
self.assertFalse(m.HasField('oneof_field'))
self.assertFalse(m.HasField('oneof_uint32'))
self.assertIs(None, m.WhichOneof('oneof_field'))
def testOneofClearUnsetField(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
self.ensureNestedMessageExists(m, 'oneof_nested_message')
m.ClearField('oneof_nested_message')
self.assertEqual(11, m.oneof_uint32)
if message_module is unittest_pb2:
self.assertTrue(m.HasField('oneof_field'))
self.assertTrue(m.HasField('oneof_uint32'))
self.assertEqual('oneof_uint32', m.WhichOneof('oneof_field'))
def testOneofDeserialize(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
m2.ParseFromString(m.SerializeToString())
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testOneofCopyFrom(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
m2.CopyFrom(m)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testOneofNestedMergeFrom(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.oneof_uint32 = 11
m2 = message_module.NestedTestAllTypes()
m2.payload.oneof_bytes = b'bb'
m2.child.payload.oneof_bytes = b'bb'
m2.MergeFrom(m)
self.assertEqual('oneof_uint32', m2.payload.WhichOneof('oneof_field'))
self.assertEqual('oneof_bytes', m2.child.payload.WhichOneof('oneof_field'))
def testOneofMessageMergeFrom(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.oneof_nested_message.bb = 11
m.child.payload.oneof_nested_message.bb = 12
m2 = message_module.NestedTestAllTypes()
m2.payload.oneof_uint32 = 13
m2.MergeFrom(m)
self.assertEqual('oneof_nested_message',
m2.payload.WhichOneof('oneof_field'))
self.assertEqual('oneof_nested_message',
m2.child.payload.WhichOneof('oneof_field'))
def testOneofReleaseMergeFrom(self, message_module):
m = unittest_pb2.TestOneof2()
m.foo_message.moo_int = 123
reference = m.foo_message
self.assertEqual(m.foo_message.moo_int, 123)
m2 = unittest_pb2.TestOneof2()
m2.foo_lazy_message.moo_int = 456
m.MergeFrom(m2)
self.assertEqual(reference.moo_int, 123)
self.assertEqual(m.foo_message.moo_int, 0)
m.foo_message.CopyFrom(reference)
self.assertEqual(m.foo_message.moo_int, 123)
def testNestedOneofReleaseMergeFrom(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.oneof_nested_message.bb = 1
m.child.payload.oneof_nested_message.bb = 2
ref1 = m.payload.oneof_nested_message
ref2 = m.child.payload.oneof_nested_message
other = message_module.NestedTestAllTypes()
other.payload.oneof_uint32 = 22
other.child.payload.oneof_string = 'hi'
self.assertEqual(ref1.bb, 1)
self.assertEqual(ref2.bb, 2)
m.MergeFrom(other)
# oneof messages are released
self.assertEqual(ref1.bb, 1)
self.assertEqual(ref2.bb, 2)
self.assertEqual(m.payload.oneof_nested_message.bb, 0)
self.assertEqual(m.child.payload.oneof_nested_message.bb, 0)
self.assertEqual(m.payload.oneof_uint32, 22)
self.assertEqual(m.child.payload.oneof_string, 'hi')
def testOneofNotReleaseMergeFrom(self, message_module):
m = message_module.NestedTestAllTypes()
m.payload.oneof_nested_message.bb = 1
ref = m.payload.oneof_nested_message
other = message_module.NestedTestAllTypes()
other.payload.oneof_nested_message.bb = 2
self.assertEqual(ref.bb, 1)
m.MergeFrom(other)
# oneof message is not released
self.assertEqual(ref.bb, 2)
def testOneofNestedMessageInit(self, message_module):
m = message_module.TestAllTypes(
oneof_nested_message=message_module.TestAllTypes.NestedMessage())
self.assertEqual('oneof_nested_message', m.WhichOneof('oneof_field'))
def testOneofClear(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m.Clear()
self.assertIsNone(m.WhichOneof('oneof_field'))
m.oneof_bytes = b'bb'
self.assertEqual('oneof_bytes', m.WhichOneof('oneof_field'))
def testAssignByteStringToUnicodeField(self, message_module):
"""Assigning a byte string to a string field should result
in the value being converted to a Unicode string.
"""
m = message_module.TestAllTypes()
m.optional_string = str('')
self.assertIsInstance(m.optional_string, str)
def testLongValuedSlice(self, message_module):
"""It should be possible to use int-valued indices in slices.
This previously did not work in the v2 C++ implementation.
"""
m = message_module.TestAllTypes()
# Repeated scalar
m.repeated_int32.append(1)
sl = m.repeated_int32[int(0):int(len(m.repeated_int32))]
self.assertEqual(len(m.repeated_int32), len(sl))
# Repeated composite
m.repeated_nested_message.add().bb = 3
sl = m.repeated_nested_message[int(0):int(len(m.repeated_nested_message))]
self.assertEqual(len(m.repeated_nested_message), len(sl))
def testExtendShouldNotSwallowExceptions(self, message_module):
"""This didn't use to work in the v2 C++ implementation."""
m = message_module.TestAllTypes()
with self.assertRaises(NameError) as _:
m.repeated_int32.extend(a for i in range(10)) # pylint: disable=undefined-variable
with self.assertRaises(NameError) as _:
m.repeated_nested_enum.extend(a for i in range(10)) # pylint: disable=undefined-variable
FALSY_VALUES = [None, False, 0, 0.0]
EMPTY_VALUES = [b'', u'', bytearray(), [], {}, set()]
def testExtendInt32WithNothing(self, message_module):
"""Test no-ops extending repeated int32 fields."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_int32)
for falsy_value in MessageTest.FALSY_VALUES:
with self.assertRaises(TypeError) as context:
m.repeated_int32.extend(falsy_value)
self.assertIn('iterable', str(context.exception))
self.assertSequenceEqual([], m.repeated_int32)
for empty_value in MessageTest.EMPTY_VALUES:
m.repeated_int32.extend(empty_value)
self.assertSequenceEqual([], m.repeated_int32)
def testExtendFloatWithNothing(self, message_module):
"""Test no-ops extending repeated float fields."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_float)
for falsy_value in MessageTest.FALSY_VALUES:
with self.assertRaises(TypeError) as context:
m.repeated_float.extend(falsy_value)
self.assertIn('iterable', str(context.exception))
self.assertSequenceEqual([], m.repeated_float)
for empty_value in MessageTest.EMPTY_VALUES:
m.repeated_float.extend(empty_value)
self.assertSequenceEqual([], m.repeated_float)
def testExtendStringWithNothing(self, message_module):
"""Test no-ops extending repeated string fields."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_string)
for falsy_value in MessageTest.FALSY_VALUES:
with self.assertRaises(TypeError) as context:
m.repeated_string.extend(falsy_value)
self.assertIn('iterable', str(context.exception))
self.assertSequenceEqual([], m.repeated_string)
for empty_value in MessageTest.EMPTY_VALUES:
m.repeated_string.extend(empty_value)
self.assertSequenceEqual([], m.repeated_string)
def testExtendWithNoLen(self, message_module):
""" Test extending repeated fields with iterables but no len"""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_int32)
m.repeated_int32.extend(i for i in range(2))
self.assertSequenceEqual([0, 1], m.repeated_int32)
def testExtendInt32WithPythonList(self, message_module):
"""Test extending repeated int32 fields with python lists."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_int32)
m.repeated_int32.extend([0])
self.assertSequenceEqual([0], m.repeated_int32)
m.repeated_int32.extend([1, 2])
self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
m.repeated_int32.extend([3, 4])
self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
with self.assertRaises(TypeError):
m.repeated_int32.extend([5, 6, 'hi', 7])
def testExtendFloatWithPythonList(self, message_module):
"""Test extending repeated float fields with python lists."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_float)
m.repeated_float.extend([0.0])
self.assertSequenceEqual([0.0], m.repeated_float)
m.repeated_float.extend([1.0, 2.0])
self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
m.repeated_float.extend([3.0, 4.0])
self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithPythonList(self, message_module):
"""Test extending repeated string fields with python lists."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_string)
m.repeated_string.extend([''])
self.assertSequenceEqual([''], m.repeated_string)
m.repeated_string.extend(['11', '22'])
self.assertSequenceEqual(['', '11', '22'], m.repeated_string)
m.repeated_string.extend(['33', '44'])
self.assertSequenceEqual(['', '11', '22', '33', '44'], m.repeated_string)
def testExtendStringWithString(self, message_module):
"""Test extending repeated string fields with characters from a string."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_string)
m.repeated_string.extend('abc')
self.assertSequenceEqual(['a', 'b', 'c'], m.repeated_string)
class TestIterable(object):
"""This iterable object mimics the behavior of numpy.array.
__nonzero__ fails for length > 1, and returns bool(item[0]) for length == 1.
"""
def __init__(self, values=None):
self._list = values or []
def __nonzero__(self):
size = len(self._list)
if size == 0:
return False
if size == 1:
return bool(self._list[0])
raise ValueError('Truth value is ambiguous.')
def __len__(self):
return len(self._list)
def __iter__(self):
return self._list.__iter__()
def testExtendInt32WithIterable(self, message_module):
"""Test extending repeated int32 fields with iterable."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_int32)
m.repeated_int32.extend(MessageTest.TestIterable([]))
self.assertSequenceEqual([], m.repeated_int32)
m.repeated_int32.extend(MessageTest.TestIterable([0]))
self.assertSequenceEqual([0], m.repeated_int32)
m.repeated_int32.extend(MessageTest.TestIterable([1, 2]))
self.assertSequenceEqual([0, 1, 2], m.repeated_int32)
m.repeated_int32.extend(MessageTest.TestIterable([3, 4]))
self.assertSequenceEqual([0, 1, 2, 3, 4], m.repeated_int32)
def testExtendFloatWithIterable(self, message_module):
"""Test extending repeated float fields with iterable."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_float)
m.repeated_float.extend(MessageTest.TestIterable([]))
self.assertSequenceEqual([], m.repeated_float)
m.repeated_float.extend(MessageTest.TestIterable([0.0]))
self.assertSequenceEqual([0.0], m.repeated_float)
m.repeated_float.extend(MessageTest.TestIterable([1.0, 2.0]))
self.assertSequenceEqual([0.0, 1.0, 2.0], m.repeated_float)
m.repeated_float.extend(MessageTest.TestIterable([3.0, 4.0]))
self.assertSequenceEqual([0.0, 1.0, 2.0, 3.0, 4.0], m.repeated_float)
def testExtendStringWithIterable(self, message_module):
"""Test extending repeated string fields with iterable."""
m = message_module.TestAllTypes()
self.assertSequenceEqual([], m.repeated_string)
m.repeated_string.extend(MessageTest.TestIterable([]))
self.assertSequenceEqual([], m.repeated_string)
m.repeated_string.extend(MessageTest.TestIterable(['']))
self.assertSequenceEqual([''], m.repeated_string)
m.repeated_string.extend(MessageTest.TestIterable(['1', '2']))
self.assertSequenceEqual(['', '1', '2'], m.repeated_string)
m.repeated_string.extend(MessageTest.TestIterable(['3', '4']))
self.assertSequenceEqual(['', '1', '2', '3', '4'], m.repeated_string)
class TestIndex(object):
"""This index object mimics the behavior of numpy.int64 and other types."""
def __init__(self, value=None):
self.value = value
def __index__(self):
return self.value
def testRepeatedIndexingWithIntIndex(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.extend([1, 2, 3])
self.assertEqual(1, msg.repeated_int32[MessageTest.TestIndex(0)])
def testRepeatedIndexingWithNegative1IntIndex(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.extend([1, 2, 3])
self.assertEqual(3, msg.repeated_int32[MessageTest.TestIndex(-1)])
def testRepeatedIndexingWithNegative1Int(self, message_module):
msg = message_module.TestAllTypes()
msg.repeated_int32.extend([1, 2, 3])
self.assertEqual(3, msg.repeated_int32[-1])
def testPickleRepeatedScalarContainer(self, message_module):
# Pickling a repeated scalar container is not supported.
m = message_module.TestAllTypes()
with self.assertRaises(pickle.PickleError) as _:
pickle.dumps(m.repeated_int32, pickle.HIGHEST_PROTOCOL)
def testSortEmptyRepeatedCompositeContainer(self, message_module):
"""Exercise a scenario that has led to segfaults in the past."""
m = message_module.TestAllTypes()
m.repeated_nested_message.sort()
def testHasFieldOnRepeatedField(self, message_module):
"""Using HasField on a repeated field should raise an exception."""
m = message_module.TestAllTypes()
with self.assertRaises(ValueError) as _:
m.HasField('repeated_int32')
def testRepeatedScalarFieldPop(self, message_module):
m = message_module.TestAllTypes()
with self.assertRaises(IndexError) as _:
m.repeated_int32.pop()
m.repeated_int32.extend(range(5))
self.assertEqual(4, m.repeated_int32.pop())
self.assertEqual(0, m.repeated_int32.pop(0))
self.assertEqual(2, m.repeated_int32.pop(1))
self.assertEqual([1, 3], m.repeated_int32)
def testRepeatedCompositeFieldPop(self, message_module):
m = message_module.TestAllTypes()
with self.assertRaises(IndexError) as _:
m.repeated_nested_message.pop()
with self.assertRaises(TypeError) as _:
m.repeated_nested_message.pop('0')
for i in range(5):
n = m.repeated_nested_message.add()
n.bb = i
self.assertEqual(4, m.repeated_nested_message.pop().bb)
self.assertEqual(0, m.repeated_nested_message.pop(0).bb)
self.assertEqual(2, m.repeated_nested_message.pop(1).bb)
self.assertEqual([1, 3], [n.bb for n in m.repeated_nested_message])
def testRepeatedCompareWithSelf(self, message_module):
m = message_module.TestAllTypes()
for i in range(5):
m.repeated_int32.insert(i, i)
n = m.repeated_nested_message.add()
n.bb = i
self.assertSequenceEqual(m.repeated_int32, m.repeated_int32)
self.assertEqual(m.repeated_nested_message, m.repeated_nested_message)
def testReleasedNestedMessages(self, message_module):
"""A case that lead to a segfault when a message detached from its parent
container has itself a child container.
"""
m = message_module.NestedTestAllTypes()
m = m.repeated_child.add()
m = m.child
m = m.repeated_child.add()
self.assertEqual(m.payload.optional_int32, 0)
def testSetRepeatedComposite(self, message_module):
m = message_module.TestAllTypes()
with self.assertRaises(AttributeError):
m.repeated_int32 = []
m.repeated_int32.append(1)
with self.assertRaises(AttributeError):
m.repeated_int32 = []
def testReturningType(self, message_module):
m = message_module.TestAllTypes()
self.assertEqual(float, type(m.optional_float))
self.assertEqual(float, type(m.optional_double))
self.assertEqual(bool, type(m.optional_bool))
m.optional_float = 1
m.optional_double = 1
m.optional_bool = 1
m.repeated_float.append(1)
m.repeated_double.append(1)
m.repeated_bool.append(1)
m.ParseFromString(m.SerializeToString())
self.assertEqual(float, type(m.optional_float))
self.assertEqual(float, type(m.optional_double))
self.assertEqual('1.0', str(m.optional_double))
self.assertEqual(bool, type(m.optional_bool))
self.assertEqual(float, type(m.repeated_float[0]))
self.assertEqual(float, type(m.repeated_double[0]))
self.assertEqual(bool, type(m.repeated_bool[0]))
self.assertEqual(True, m.repeated_bool[0])
def testDir(self, message_module):
m = message_module.TestAllTypes()
attributes = dir(m)
self.assertGreaterEqual(len(attributes), 124)
attribute_set = set(attributes)
self.assertIn('DESCRIPTOR', attributes)
self.assertIn('oneof_string', attribute_set)
self.assertIn('optional_double', attribute_set)
self.assertIn('repeated_float', attribute_set)
class_attributes = dir(type(m))
for attr in class_attributes:
if attr != 'Extensions':
self.assertIn(attr, attribute_set)
def testAllAttributeFromDirAccessible(self, message_module):
m = message_module.TestAllTypes()
attributes = dir(m)
for attribute in attributes:
try:
getattr(m, attribute)
except AttributeError:
self.fail(f'Attribute {attribute} is not accessible.')
def testEquality(self, message_module):
m = message_module.TestAllTypes()
m2 = message_module.TestAllTypes()
self.assertEqual(m, m)
self.assertEqual(m, m2)
self.assertEqual(m2, m)
different_m = message_module.TestAllTypes()
different_m.repeated_float.append(1)
self.assertNotEqual(m, different_m)
self.assertNotEqual(different_m, m)
self.assertIsNotNone(m)
self.assertIsNotNone(m)
self.assertNotEqual(42, m)
self.assertNotEqual(m, 42)
self.assertNotEqual('foo', m)
self.assertNotEqual(m, 'foo')
self.assertEqual(mock.ANY, m)
self.assertEqual(m, mock.ANY)
class ComparesWithFoo(object):
def __eq__(self, other):
if getattr(other, 'optional_string', 'not_foo') == 'foo':
return True
return NotImplemented
m.optional_string = 'foo'
self.assertEqual(m, ComparesWithFoo())
self.assertEqual(ComparesWithFoo(), m)
m.optional_string = 'bar'
self.assertNotEqual(m, ComparesWithFoo())
self.assertNotEqual(ComparesWithFoo(), m)
def testTypeUnion(self, message_module):
# Below python 3.10 you cannot create union types with the | operator, so we
# skip testing for unions with old versions.
if sys.version_info < (3, 10):
return
enum_type = enum_type_wrapper.EnumTypeWrapper(
message_module.TestAllTypes.NestedEnum.DESCRIPTOR
)
union_type = enum_type | int
self.assertIsInstance(union_type, types.UnionType)
def get_union() -> union_type:
return enum_type
union = get_union()
self.assertIsInstance(union, enum_type_wrapper.EnumTypeWrapper)
self.assertEqual(
union.DESCRIPTOR, message_module.TestAllTypes.NestedEnum.DESCRIPTOR
)
def testIn(self, message_module):
m = message_module.TestAllTypes()
self.assertNotIn('optional_nested_message', m)
self.assertNotIn('oneof_bytes', m)
self.assertNotIn('oneof_string', m)
with self.assertRaises(ValueError) as e:
'repeated_int32' in m
with self.assertRaises(ValueError) as e:
'repeated_nested_message' in m
with self.assertRaises(ValueError) as e:
1 in m
with self.assertRaises(ValueError) as e:
'not_a_field' in m
test_util.SetAllFields(m)
self.assertIn('optional_nested_message', m)
self.assertIn('oneof_bytes', m)
self.assertNotIn('oneof_string', m)
def testMessageClassName(self, message_module):
m = message_module.TestAllTypes()
self.assertEqual('TestAllTypes', type(m).__name__)
self.assertEqual('TestAllTypes', m.__class__.__qualname__)
nested = message_module.TestAllTypes.NestedMessage()
self.assertEqual('NestedMessage', type(nested).__name__)
self.assertEqual('NestedMessage', nested.__class__.__name__)
self.assertEqual(
'TestAllTypes.NestedMessage', nested.__class__.__qualname__
)
def testAssignBoolToEnum(self, message_module):
# TODO: change warning into error in 2026 Q1
# with self.assertRaises(TypeError):
with warnings.catch_warnings(record=True) as w:
m = message_module.TestAllTypes(optional_nested_enum=True)
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.optional_nested_enum, 1)
m = message_module.TestAllTypes(optional_nested_enum=2)
with warnings.catch_warnings(record=True) as w:
m.optional_nested_enum = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.optional_nested_enum, 1)
with warnings.catch_warnings(record=True) as w:
m.optional_nested_enum = 2
self.assertFalse(w)
self.assertEqual(m.optional_nested_enum, 2)
def testBoolToRepeatedEnum(self, message_module):
with warnings.catch_warnings(record=True) as w:
m = message_module.TestAllTypes(repeated_nested_enum=[True])
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.repeated_nested_enum, [1])
m = message_module.TestAllTypes()
with warnings.catch_warnings(record=True) as w:
m.repeated_nested_enum.append(True)
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.repeated_nested_enum, [1])
def testBoolToOneofEnum(self, message_module):
m = unittest_pb2.TestOneof2()
with warnings.catch_warnings(record=True) as w:
m.foo_enum = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.foo_enum, 1)
def testBoolToMapEnum(self, message_module):
m = map_unittest_pb2.TestMap()
with warnings.catch_warnings(record=True) as w:
m.map_int32_enum[10] = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.map_int32_enum[10], 1)
def testBoolToExtensionEnum(self, message_module):
m = unittest_pb2.TestAllExtensions()
with warnings.catch_warnings(record=True) as w:
m.Extensions[unittest_pb2.optional_nested_enum_extension] = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(
m.Extensions[unittest_pb2.optional_nested_enum_extension], 1
)
def testClosedEnumExtension(self, message_module):
m = unittest_pb2.TestAllExtensions()
m.ParseFromString(b'\xa8\x01\x7f')
unknown = unknown_fields.UnknownFieldSet(m)
# The data is present in unknown fields.
self.assertEqual(unknown[0].field_number, 21)
self.assertEqual(unknown[0].wire_type, wire_format.WIRETYPE_VARINT)
self.assertEqual(unknown[0].data, 0x7f)
# There is no extension present.
self.assertFalse(
m.HasExtension(unittest_pb2.optional_nested_enum_extension)
)
def testAssignBoolToInt(self, message_module):
with warnings.catch_warnings(record=True) as w:
m = message_module.TestAllTypes(optional_int32=True)
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.optional_int32, 1)
m = message_module.TestAllTypes(optional_uint32=123)
with warnings.catch_warnings(record=True) as w:
m.optional_uint32 = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.optional_uint32, 1)
with warnings.catch_warnings(record=True) as w:
m.optional_uint32 = 321
self.assertFalse(w)
self.assertEqual(m.optional_uint32, 321)
def testAssignBoolToRepeatedInt(self, message_module):
with warnings.catch_warnings(record=True) as w:
m = message_module.TestAllTypes(repeated_int64=[True])
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.repeated_int64, [1])
m = message_module.TestAllTypes()
with warnings.catch_warnings(record=True) as w:
m.repeated_int64.append(True)
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.repeated_int64, [1])
def testAssignBoolToOneofInt(self, message_module):
m = unittest_pb2.TestOneof2()
with warnings.catch_warnings(record=True) as w:
m.foo_int = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.foo_int, 1)
def testAssignBoolToMapInt(self, message_module):
m = map_unittest_pb2.TestMap()
with warnings.catch_warnings(record=True) as w:
m.map_int32_int32[10] = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.map_int32_int32[10], 1)
with warnings.catch_warnings(record=True) as w:
m.map_int32_int32[True] = 1
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.map_int32_int32[1], 1)
def testAssignBoolToExtensionInt(self, message_module):
m = unittest_pb2.TestAllExtensions()
with warnings.catch_warnings(record=True) as w:
m.Extensions[unittest_pb2.optional_int32_extension] = True
self.assertIn('bool', str(w[0].message))
self.assertEqual(m.Extensions[unittest_pb2.optional_int32_extension], 1)
@testing_refleaks.TestCase
|
MessageTest
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/newType2.py
|
{
"start": 206,
"end": 749
}
|
class ____(X2, A): ...
# This should generate two errors (one for `__new__` and one for `__init__`)
# because the first arg is not a string.
X3 = type(34, (object,))
# This should generate two errors (one for `__new__` and one for `__init__`)
# because the second arg is not a tuple of class types.
X4 = type("X4", 34)
# This should generate two errors (one for `__new__` and one for `__init__`)
# because the second arg is not a tuple of class types.
X5 = type("X5", (3,))
X6 = type("", tuple({str}), {})
X7 = type("", (float, str), {})
|
B
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_pruning.py
|
{
"start": 7112,
"end": 8389
}
|
class ____(nn.Module):
r"""Model with only Conv2d layers, all with bias and some with padding > 0,
some in a Sequential and some following. Activation function modules in between each layer.
Used to test that bias is propagated correctly in the special case of
pruned Conv2d-Bias-(Activation)Conv2d fusion, when the second Conv2d layer has padding > 0."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(32, 32, 3, 1, bias=False),
nn.ReLU(),
nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(32, 32, 3, 1, padding=1, bias=True),
nn.ReLU(),
nn.Conv2d(32, 64, 3, 1, bias=True),
nn.Tanh(),
)
self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True)
self.act1 = nn.ReLU()
self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True)
self.act2 = nn.Tanh()
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = self.act1(x)
x = self.conv2d2(x)
x = self.act2(x)
return x
|
Conv2dPadBias
|
python
|
tiangolo__fastapi
|
docs_src/cookie_param_models/tutorial002_py310.py
|
{
"start": 86,
"end": 343
}
|
class ____(BaseModel):
model_config = {"extra": "forbid"}
session_id: str
fatebook_tracker: str | None = None
googall_tracker: str | None = None
@app.get("/items/")
async def read_items(cookies: Cookies = Cookie()):
return cookies
|
Cookies
|
python
|
bottlepy__bottle
|
test/test_securecookies.py
|
{
"start": 1304,
"end": 1745
}
|
class ____(TestSignedCookies):
def setUp(self):
super(TestSignedCookiesWithPickle, self).setUp()
self.data = dict(a=5, b=touni('υηι¢σ∂є'), c=[1,2,3,4,tob('bytestring')])
@api("0.9", "0.13")
def testValid(self):
super(TestSignedCookiesWithPickle, self).testValid()
@api("0.9", "0.13")
def testWrongKey(self):
super(TestSignedCookiesWithPickle, self).testWrongKey()
|
TestSignedCookiesWithPickle
|