| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
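Each row pairs a Python class definition whose name has been blanked out as `____` (the `source` column, taken from the file at the character offsets recorded in `class_span`) with the original class name (the `target` column). As a minimal sketch of how such a record might be consumed, assuming the Hugging Face `datasets` loader and a hypothetical dataset identifier:

```python
from datasets import load_dataset

# Hypothetical dataset identifier -- substitute the real repository name.
ds = load_dataset("org/masked-class-names", split="train")

example = ds[0]

# `class_span` gives the character offsets of the class definition
# within the original source file.
start, end = example["class_span"]["start"], example["class_span"]["end"]

# `source` is the class definition with its name replaced by `____`;
# `target` holds the original name, so the definition can be restored.
restored = example["source"].replace("____", example["target"], 1)

print(example["repo"], example["path"], f"({end - start} chars)")
print(restored.splitlines()[0])  # e.g. "class TestSponsorLogoStorage:"
```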
python
|
pypa__warehouse
|
tests/unit/admin/test_services.py
|
{
"start": 263,
"end": 1335
}
|
class ____:
def test_verify_service(self):
assert verifyClass(ISponsorLogoStorage, LocalSponsorLogoStorage)
def test_basic_init(self):
storage = LocalSponsorLogoStorage("/foo/bar/")
assert storage.base == "/foo/bar/"
def test_create_service(self):
request = pretend.stub(
registry=pretend.stub(settings={"sponsorlogos.path": "/the/one/two/"})
)
storage = LocalSponsorLogoStorage.create_service(None, request)
assert storage.base == "/the/one/two/"
def test_stores_file(self, tmpdir):
filename = str(tmpdir.join("testfile.txt"))
with open(filename, "wb") as fp:
fp.write(b"Test File!")
storage_dir = str(tmpdir.join("storage"))
storage = LocalSponsorLogoStorage(storage_dir)
result = storage.store("foo/bar.txt", filename)
assert result == "http://files:9001/sponsorlogos/foo/bar.txt"
with open(os.path.join(storage_dir, "foo/bar.txt"), "rb") as fp:
assert fp.read() == b"Test File!"
|
TestSponsorLogoStorage
|
python
|
django__django
|
tests/check_framework/test_database.py
|
{
"start": 188,
"end": 2150
}
|
class ____(TestCase):
databases = {"default", "other"}
@mock.patch("django.db.backends.base.validation.BaseDatabaseValidation.check")
def test_database_checks_called(self, mocked_check):
check_database_backends()
self.assertFalse(mocked_check.called)
check_database_backends(databases=self.databases)
self.assertTrue(mocked_check.called)
@unittest.skipUnless(connection.vendor == "mysql", "Test only for MySQL")
def test_mysql_strict_mode(self):
def _clean_sql_mode():
for alias in self.databases:
if hasattr(connections[alias], "sql_mode"):
del connections[alias].sql_mode
_clean_sql_mode()
good_sql_modes = [
"STRICT_TRANS_TABLES,STRICT_ALL_TABLES",
"STRICT_TRANS_TABLES",
"STRICT_ALL_TABLES",
]
for sql_mode in good_sql_modes:
with mock.patch.object(
connection,
"mysql_server_data",
{"sql_mode": sql_mode},
):
self.assertEqual(check_database_backends(databases=self.databases), [])
_clean_sql_mode()
bad_sql_modes = ["", "WHATEVER"]
for sql_mode in bad_sql_modes:
mocker_default = mock.patch.object(
connection,
"mysql_server_data",
{"sql_mode": sql_mode},
)
mocker_other = mock.patch.object(
connections["other"],
"mysql_server_data",
{"sql_mode": sql_mode},
)
with mocker_default, mocker_other:
# One warning for each database alias
result = check_database_backends(databases=self.databases)
self.assertEqual(len(result), 2)
self.assertEqual([r.id for r in result], ["mysql.W002", "mysql.W002"])
_clean_sql_mode()
|
DatabaseCheckTests
|
python
|
doocs__leetcode
|
solution/1800-1899/1800.Maximum Ascending Subarray Sum/Solution.py
|
{
"start": 0,
"end": 286
}
|
class ____:
def maxAscendingSum(self, nums: List[int]) -> int:
ans = t = 0
for i, v in enumerate(nums):
if i == 0 or v > nums[i - 1]:
t += v
ans = max(ans, t)
else:
t = v
return ans
|
Solution
|
python
|
milvus-io__pymilvus
|
pymilvus/exceptions.py
|
{
"start": 2299,
"end": 2408
}
|
class ____(MilvusException):
"""Raise when cannot trasfer dataframe to schema"""
|
CannotInferSchemaException
|
python
|
scipy__scipy
|
scipy/integrate/_ode.py
|
{
"start": 345,
"end": 792
}
|
class ____
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
|
ode
|
python
|
huggingface__transformers
|
src/transformers/models/mgp_str/tokenization_mgp_str.py
|
{
"start": 878,
"end": 3837
}
|
class ____(PreTrainedTokenizer):
"""
Construct a MGP-STR char tokenizer.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
unk_token (`str`, *optional*, defaults to `"[GO]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"[GO]"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"[s]"`):
The end of sequence token.
pad_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"[GO]"`):
A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
attention mechanisms or loss computation.
"""
vocab_files_names = VOCAB_FILES_NAMES
def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.vocab = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.vocab.items()}
super().__init__(
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
special_tokens_pattern="none",
**kwargs,
)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
vocab = dict(self.vocab).copy()
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
"""Tokenize a string."""
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
return (vocab_file,)
__all__ = ["MgpstrTokenizer"]
|
MgpstrTokenizer
|
python
|
pytest-dev__pytest-xdist
|
src/xdist/workermanage.py
|
{
"start": 9702,
"end": 18850
}
|
class ____:
# Set when the worker is ready.
workerinfo: WorkerInfo
class RemoteHook:
@pytest.hookimpl(trylast=True)
def pytest_xdist_getremotemodule(self) -> Any:
return xdist.remote
def __init__(
self,
nodemanager: NodeManager,
gateway: execnet.Gateway,
config: pytest.Config,
putevent: Callable[[tuple[str, dict[str, Any]]], None],
) -> None:
config.pluginmanager.register(self.RemoteHook())
self.nodemanager = nodemanager
self.putevent = putevent
self.gateway = gateway
self.config = config
self.workerinput = {
"workerid": gateway.id,
"workercount": len(nodemanager.specs),
"testrunuid": nodemanager.testrunuid,
"mainargv": sys.argv,
}
self._down = False
self._shutdown_sent = False
self.log = Producer(f"workerctl-{gateway.id}", enabled=config.option.debug)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.gateway.id}>"
@property
def shutting_down(self) -> bool:
return self._down or self._shutdown_sent
def setup(self) -> None:
self.log("setting up worker session")
# Cache rinfo for backward compatibility, since pytest-cov
# accesses rinfo while the main thread is busy executing our
# remote_exec call, which triggers a deadlock error for the
# main_thread_only execmodel if the rinfo has not been cached.
self.gateway._rinfo()
spec = self.gateway.spec
args = [str(x) for x in self.config.invocation_params.args or ()]
option_dict = {}
if not spec.popen or spec.chdir:
args = make_reltoroot(self.nodemanager.roots, args)
if spec.popen:
name = "popen-%s" % self.gateway.id
if hasattr(self.config, "_tmp_path_factory"):
basetemp = self.config._tmp_path_factory.getbasetemp()
option_dict["basetemp"] = str(basetemp / name)
self.config.hook.pytest_configure_node(node=self)
remote_module = self.config.hook.pytest_xdist_getremotemodule()
self.channel = self.gateway.remote_exec(remote_module)
# change sys.path only for remote workers
# restore sys.path from a frozen copy for local workers
change_sys_path = _sys_path if self.gateway.spec.popen else None
self.channel.send((self.workerinput, args, option_dict, change_sys_path))
# putevent is only None in a test.
if self.putevent: # type: ignore[truthy-function]
self.channel.setcallback(self.process_from_remote, endmarker=Marker.END)
def ensure_teardown(self) -> None:
if hasattr(self, "channel"):
if not self.channel.isclosed():
self.log("closing", self.channel)
self.channel.close()
# del self.channel
if hasattr(self, "gateway"):
self.log("exiting", self.gateway)
self.gateway.exit()
# del self.gateway
def send_runtest_some(self, indices: Sequence[int]) -> None:
self.sendcommand("runtests", indices=indices)
def send_runtest_all(self) -> None:
self.sendcommand("runtests_all")
def send_steal(self, indices: Sequence[int]) -> None:
self.sendcommand("steal", indices=indices)
def shutdown(self) -> None:
if not self._down:
try:
self.sendcommand("shutdown")
except OSError:
pass
self._shutdown_sent = True
def sendcommand(self, name: str, **kwargs: object) -> None:
"""Send a named parametrized command to the other side."""
self.log(f"sending command {name}(**{kwargs})")
self.channel.send((name, kwargs))
def notify_inproc(self, eventname: str, **kwargs: object) -> None:
self.log(f"queuing {eventname}(**{kwargs})")
self.putevent((eventname, kwargs))
def process_from_remote(
self, eventcall: tuple[str, dict[str, Any]] | Literal[Marker.END]
) -> None:
"""This gets called for each object we receive from
the other side and if the channel closes.
Note that channel callbacks run in the receiver
thread of execnet gateways - we need to
avoid raising exceptions or doing heavy work.
"""
try:
if eventcall is Marker.END:
err: object | None = self.channel._getremoteerror() # type: ignore[no-untyped-call]
if not self._down:
if not err or isinstance(err, EOFError):
err = "Not properly terminated" # lost connection?
self.notify_inproc("errordown", node=self, error=err)
self._down = True
return
eventname, kwargs = eventcall
if eventname in ("collectionstart",):
self.log(f"ignoring {eventname}({kwargs})")
elif eventname == "workerready":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "internal_error":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "workerfinished":
self._down = True
self.workeroutput = kwargs["workeroutput"]
self.notify_inproc("workerfinished", node=self)
elif eventname in ("logstart", "logfinish"):
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname in ("testreport", "collectreport", "teardownreport"):
item_index = kwargs.pop("item_index", None)
rep = self.config.hook.pytest_report_from_serializable(
config=self.config, data=kwargs["data"]
)
if item_index is not None:
rep.item_index = item_index
self.notify_inproc(eventname, node=self, rep=rep)
elif eventname == "collectionfinish":
self.notify_inproc(eventname, node=self, ids=kwargs["ids"])
elif eventname == "runtest_protocol_complete":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "unscheduled":
self.notify_inproc(eventname, node=self, **kwargs)
elif eventname == "logwarning":
self.notify_inproc(
eventname,
message=kwargs["message"],
code=kwargs["code"],
nodeid=kwargs["nodeid"],
fslocation=kwargs["nodeid"],
)
elif eventname == "warning_recorded":
warning_message = unserialize_warning_message(
kwargs["warning_message_data"]
)
self.notify_inproc(
eventname,
warning_message=warning_message,
when=kwargs["when"],
nodeid=kwargs["nodeid"],
location=kwargs["location"],
)
else:
raise ValueError(f"unknown event: {eventname}")
except KeyboardInterrupt:
# should not land in receiver-thread
raise
except BaseException:
excinfo = pytest.ExceptionInfo.from_current()
print("!" * 20, excinfo)
self.config.notify_exception(excinfo)
self.shutdown()
self.notify_inproc("errordown", node=self, error=excinfo)
def unserialize_warning_message(data: dict[str, Any]) -> warnings.WarningMessage:
import importlib
if data["message_module"]:
mod = importlib.import_module(data["message_module"])
cls = getattr(mod, data["message_class_name"])
message = None
if data["message_args"] is not None:
try:
message = cls(*data["message_args"])
except TypeError:
pass
if message is None:
# could not recreate the original warning instance;
# create a generic Warning instance with the original
# message at least
message_text = "{mod}.{cls}: {msg}".format(
mod=data["message_module"],
cls=data["message_class_name"],
msg=data["message_str"],
)
message = Warning(message_text)
else:
message = data["message_str"]
if data["category_module"]:
mod = importlib.import_module(data["category_module"])
category = getattr(mod, data["category_class_name"])
else:
category = None
kwargs = {"message": message, "category": category}
# access private _WARNING_DETAILS because the attributes vary between Python versions
for attr_name in warnings.WarningMessage._WARNING_DETAILS: # type: ignore[attr-defined]
if attr_name in ("message", "category"):
continue
kwargs[attr_name] = data[attr_name]
return warnings.WarningMessage(**kwargs)
|
WorkerController
|
python
|
great-expectations__great_expectations
|
great_expectations/core/profiler_types_mapping.py
|
{
"start": 37,
"end": 3034
}
|
class ____:
"""Useful backend type mapping for building profilers."""
INT_TYPE_NAMES = [
"BIGINT",
"BYTEINT",
"ByteType()",
"INT",
"INT64",
"INTEGER",
"Int16Dtype",
"Int32Dtype",
"Int64Dtype",
"Int8Dtype",
"IntegerType",
"IntegerType()",
"LongType",
"LongType()",
"SMALLINT",
"ShortType()",
"TINYINT",
"UInt16Dtype",
"UInt32Dtype",
"UInt64Dtype",
"UInt8Dtype",
"int",
"int16",
"int32",
"int64",
"int8",
"int_",
"integer",
"uint16",
"uint32",
"uint64",
"uint8",
"Uint8",
"Uint16",
"Uint32",
"Uint64",
"Uint128",
"Uint256",
"Int8",
"Int16",
"Int32",
"Int64",
"Int128",
"Int256",
]
FLOAT_TYPE_NAMES = [
"DECIMAL",
"DOUBLE",
"DOUBLE_PRECISION",
"DecimalType()",
"DoubleType",
"DoubleType()",
"FLOAT",
"FLOAT4",
"FLOAT64",
"FLOAT8",
"FloatType",
"FloatType()",
"NUMERIC",
"REAL",
"float",
"float16",
"float32",
"float64",
"float_",
"number",
"Float32",
"Float64",
]
STRING_TYPE_NAMES = [
"CHAR",
"NCHAR",
"NTEXT",
"NVARCHAR",
"STRING",
"StringType",
"StringType()",
"TEXT",
"VARCHAR",
"dtype('O')",
"object",
"str",
"string",
"FixedString",
]
BOOLEAN_TYPE_NAMES = [
"BIT",
"BOOL",
"BOOLEAN",
"BooleanType",
"BooleanType()",
"TINYINT",
"bool",
"boolean",
"Bool",
]
DATETIME_TYPE_NAMES = [
"DATE",
"TIME",
"DATETIME",
"DATETIME2",
"DATETIME64",
"SMALLDATETIME",
"DATETIMEOFFSET",
"TIMESTAMP",
"Timestamp",
"TimestampType",
"TimestampType()",
"DateType",
"DateType()",
"datetime64",
"datetime64[ns]",
"timedelta[ns]",
"<M8[ns]",
"Date",
"Date32",
"DateTime",
"DateTime64",
]
BINARY_TYPE_NAMES = [
"BINARY",
"BinaryType()",
"IMAGE",
"VARBINARY",
"binary",
"image",
"varbinary",
]
CURRENCY_TYPE_NAMES = [
"MONEY",
"SMALLMONEY",
"money",
"smallmoney",
]
IDENTIFIER_TYPE_NAMES = ["UNIQUEIDENTIFIER", "uniqueidentifier", "UUID"]
MISCELLANEOUS_TYPE_NAMES = [
"SQL_VARIANT",
"sql_variant",
]
RECORD_TYPE_NAMES = [
"JSON",
"json",
"JSON",
]
OBJECT_TYPE_NAMES = [
"OBJECT",
"object",
]
|
ProfilerTypeMapping
|
python
|
apache__airflow
|
airflow-core/src/airflow/executors/base_executor.py
|
{
"start": 2495,
"end": 3580
}
|
class ____:
"""
For keeping track of attempts to queue again when task still apparently running.
We don't want to slow down the loop, so we don't block, but we allow it to be
re-checked for at least MIN_SECONDS seconds.
"""
MIN_SECONDS = 10
total_tries: int = field(default=0, init=False)
tries_after_min: int = field(default=0, init=False)
first_attempt_time: datetime = field(default_factory=lambda: pendulum.now("UTC"), init=False)
@property
def elapsed(self):
"""Seconds since first attempt."""
return (pendulum.now("UTC") - self.first_attempt_time).total_seconds()
def can_try_again(self):
"""Return False if there has been at least one try greater than MIN_SECONDS, otherwise return True."""
if self.tries_after_min > 0:
return False
self.total_tries += 1
elapsed = self.elapsed
if elapsed > self.MIN_SECONDS:
self.tries_after_min += 1
log.debug("elapsed=%s tries=%s", elapsed, self.total_tries)
return True
|
RunningRetryAttemptType
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/building/makespec.py
|
{
"start": 5770,
"end": 6376
}
|
class ____:
def __init__(self, *parts):
self.path = os.path.join(*parts)
self.variable_prefix = self.filename_suffix = None
def __repr__(self):
if self.filename_suffix is None:
self.variable_prefix, self.filename_suffix = make_variable_path(self.path)
if self.variable_prefix is None:
return repr(self.path)
return "os.path.join(" + self.variable_prefix + "," + repr(self.filename_suffix) + ")"
# An object used to construct extra preamble for the spec file, in order to accommodate extra collect_*() calls from the
# command-line
|
Path
|
python
|
huggingface__transformers
|
tests/models/emu3/test_modeling_emu3.py
|
{
"start": 1546,
"end": 3981
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=False,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
intermediate_size=37,
max_position_embeddings=512,
initializer_range=0.02,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = input_ids.ne(self.pad_token_id).to(torch_device)
config = self.get_config()
return config, input_ids, attention_mask
def get_config(self):
return Emu3TextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
max_position_embeddings=self.max_position_embeddings,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
|
Emu3Text2TextModelTester
|
python
|
ionelmc__pytest-benchmark
|
src/pytest_benchmark/storage/elasticsearch.py
|
{
"start": 933,
"end": 9427
}
|
class ____:
def __init__(self, hosts, index, doctype, project_name, logger, default_machine_id=None):
self._es_hosts = hosts
self._es_index = index
self._es_doctype = doctype
self._es = elasticsearch.Elasticsearch(self._es_hosts, serializer=BenchmarkJSONSerializer())
self._project_name = project_name
self.default_machine_id = default_machine_id
self.logger = logger
self._cache = {}
self._create_index()
def __str__(self):
return str(self._es_hosts)
@property
def location(self):
return str(self._es_hosts)
def query(self):
"""
Returns sorted records names (ids) that corresponds with project.
"""
body = {'size': 0, 'aggs': {'benchmark_ids': {'terms': {'field': 'benchmark_id'}}}}
result = self._es.search(index=self._es_index, doc_type=self._es_doctype, body=body)
return sorted([record['key'] for record in result['aggregations']['benchmark_ids']['buckets']])
def load(self, id_prefix=None):
"""
Yield key and content of records that corresponds with project name.
"""
r = self._search(self._project_name, id_prefix)
groupped_data = self._group_by_commit_and_time(r['hits']['hits'])
result = list(groupped_data.items())
result.sort(key=lambda x: datetime.strptime(x[1]['datetime'], '%Y-%m-%dT%H:%M:%S.%f')) # noqa: DTZ007
for key, data in result:
for bench in data['benchmarks']:
normalize_stats(bench['stats'])
yield key, data
def _search(self, project, id_prefix=None):
body = {
'size': 1000,
'sort': [{'datetime': {'order': 'desc'}}],
'query': {'bool': {'filter': {'term': {'commit_info.project': project}}}},
}
if id_prefix:
body['query']['bool']['must'] = {'prefix': {'_id': id_prefix}}
return self._es.search(index=self._es_index, doc_type=self._es_doctype, body=body)
@staticmethod
def _benchmark_from_es_record(source_es_record):
result = {}
for benchmark_key in ('group', 'stats', 'options', 'param', 'name', 'params', 'fullname', 'benchmark_id'):
result[benchmark_key] = source_es_record[benchmark_key]
return result
@staticmethod
def _run_info_from_es_record(source_es_record):
result = {}
for run_key in ('machine_info', 'commit_info', 'datetime', 'version'):
result[run_key] = source_es_record[run_key]
return result
def _group_by_commit_and_time(self, hits):
result = {}
for hit in hits:
source_hit = hit['_source']
key = '{}_{}'.format(source_hit['commit_info']['id'], source_hit['datetime'])
benchmark = self._benchmark_from_es_record(source_hit)
if key in result:
result[key]['benchmarks'].append(benchmark)
else:
run_info = self._run_info_from_es_record(source_hit)
run_info['benchmarks'] = [benchmark]
result[key] = run_info
return result
def load_benchmarks(self, *args):
"""
Yield benchmarks that corresponds with project. Put path and
source (uncommon part of path) to benchmark dict.
"""
id_prefix = args[0] if args else None
r = self._search(self._project_name, id_prefix)
for hit in r['hits']['hits']:
bench = self._benchmark_from_es_record(hit['_source'])
bench.update(bench.pop('stats'))
bench['source'] = bench['benchmark_id']
yield bench
def save(self, output_json, save):
output_benchmarks = output_json.pop('benchmarks')
for bench in output_benchmarks:
# add top level info from output_json dict to each record
bench.update(output_json)
benchmark_id = save
if self.default_machine_id:
benchmark_id = self.default_machine_id + '_' + benchmark_id
doc_id = benchmark_id + '_' + bench['fullname']
bench['benchmark_id'] = benchmark_id
self._es.index(
index=self._es_index,
doc_type=self._es_doctype,
body=bench,
id=doc_id,
)
# hide user's credentials before logging
masked_hosts = _mask_hosts(self._es_hosts)
self.logger.info(f'Saved benchmark data to {masked_hosts} to index {self._es_index} as doctype {self._es_doctype}')
def _create_index(self):
mapping = {
'mappings': {
'benchmark': {
'properties': {
'commit_info': {
'properties': {
'dirty': {'type': 'boolean'},
'id': {'type': 'string', 'index': 'not_analyzed'},
'project': {'type': 'string', 'index': 'not_analyzed'},
}
},
'datetime': {'type': 'date', 'format': 'strict_date_optional_time||epoch_millis'},
'name': {'type': 'string', 'index': 'not_analyzed'},
'fullname': {'type': 'string', 'index': 'not_analyzed'},
'version': {'type': 'string', 'index': 'not_analyzed'},
'benchmark_id': {
'type': 'string',
'index': 'not_analyzed',
},
'machine_info': {
'properties': {
'machine': {'type': 'string', 'index': 'not_analyzed'},
'node': {'type': 'string', 'index': 'not_analyzed'},
'processor': {'type': 'string', 'index': 'not_analyzed'},
'python_build': {'type': 'string', 'index': 'not_analyzed'},
'python_compiler': {'type': 'string', 'index': 'not_analyzed'},
'python_implementation': {'type': 'string', 'index': 'not_analyzed'},
'python_implementation_version': {'type': 'string', 'index': 'not_analyzed'},
'python_version': {'type': 'string', 'index': 'not_analyzed'},
'release': {'type': 'string', 'index': 'not_analyzed'},
'system': {'type': 'string', 'index': 'not_analyzed'},
}
},
'options': {
'properties': {
'disable_gc': {'type': 'boolean'},
'max_time': {'type': 'double'},
'min_rounds': {'type': 'long'},
'min_time': {'type': 'double'},
'timer': {'type': 'string'},
'warmup': {'type': 'boolean'},
}
},
'stats': {
'properties': {
'hd15iqr': {'type': 'double'},
'iqr': {'type': 'double'},
'iqr_outliers': {'type': 'long'},
'iterations': {'type': 'long'},
'ld15iqr': {'type': 'double'},
'max': {'type': 'double'},
'mean': {'type': 'double'},
'median': {'type': 'double'},
'min': {'type': 'double'},
'outliers': {'type': 'string'},
'q1': {'type': 'double'},
'q3': {'type': 'double'},
'rounds': {'type': 'long'},
'stddev': {'type': 'double'},
'stddev_outliers': {'type': 'long'},
'ops': {'type': 'double'},
}
},
}
}
}
}
self._es.indices.create(index=self._es_index, ignore=400, body=mapping)
|
ElasticsearchStorage
|
python
|
great-expectations__great_expectations
|
great_expectations/core/expectation_validation_result.py
|
{
"start": 30176,
"end": 31804
}
|
class ____(Schema):
success = fields.Bool()
results = fields.List(fields.Nested(ExpectationValidationResultSchema))
suite_name = fields.String(required=True, allow_none=False)
suite_parameters = fields.Dict()
statistics = fields.Dict()
meta = fields.Dict(allow_none=True)
id = fields.UUID(required=False, allow_none=True)
# noinspection PyUnusedLocal
@pre_dump
def prepare_dump(self, data, **kwargs):
data = deepcopy(data)
if isinstance(data, ExpectationSuiteValidationResult):
data.meta = convert_to_json_serializable(data=data.meta)
data.statistics = convert_to_json_serializable(data=data.statistics)
elif isinstance(data, dict):
data["meta"] = convert_to_json_serializable(data=data.get("meta"))
data["statistics"] = convert_to_json_serializable(data=data.get("statistics"))
return data
def _convert_uuids_to_str(self, data):
"""
Utilize UUID for data validation but convert to string before usage in business logic
"""
attr = "id"
uuid_val = data.get(attr)
if uuid_val:
data[attr] = str(uuid_val)
return data
# noinspection PyUnusedLocal
@post_load
def make_expectation_suite_validation_result(self, data, **kwargs):
data = self._convert_uuids_to_str(data=data)
return ExpectationSuiteValidationResult(**data)
expectationSuiteValidationResultSchema = ExpectationSuiteValidationResultSchema()
expectationValidationResultSchema = ExpectationValidationResultSchema()
|
ExpectationSuiteValidationResultSchema
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_cloud_storage_transfer_service.py
|
{
"start": 31007,
"end": 33275
}
|
class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_cancel(self, mock_hook):
op = CloudDataTransferServiceCancelOperationOperator(
operation_name=OPERATION_NAME,
task_id=TASK_ID,
google_impersonation_chain=IMPERSONATION_CHAIN,
)
result = op.execute(None)
mock_hook.assert_called_once_with(
api_version="v1",
gcp_conn_id="google_cloud_default",
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.cancel_transfer_operation.assert_called_once_with(
operation_name=OPERATION_NAME
)
assert result is None
# Setting all the operator's input parameters as templated dag_ids
# (could be anything else) just to test if the templating works for all
# fields
@pytest.mark.db_test
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_cancel_with_templates(self, _, create_task_instance_of_operator, session):
dag_id = "test_operation_cancel_with_templates"
ti = create_task_instance_of_operator(
CloudDataTransferServiceCancelOperationOperator,
dag_id=dag_id,
operation_name="{{ dag.dag_id }}",
gcp_conn_id="{{ dag.dag_id }}",
api_version="{{ dag.dag_id }}",
task_id=TASK_ID,
)
session.add(ti)
session.commit()
ti.render_templates()
assert dag_id == ti.task.operation_name
assert dag_id == ti.task.gcp_conn_id
assert dag_id == ti.task.api_version
@mock.patch(
"airflow.providers.google.cloud.operators.cloud_storage_transfer_service.CloudDataTransferServiceHook"
)
def test_operation_cancel_should_throw_ex_when_name_none(self, mock_hook):
with pytest.raises(
AirflowException, match="The required parameter 'operation_name' is empty or None"
):
CloudDataTransferServiceCancelOperationOperator(operation_name="", task_id=TASK_ID)
|
TestGcpStorageTransferOperationsCancelOperator
|
python
|
django__django
|
tests/fixtures_regress/models.py
|
{
"start": 6251,
"end": 6340
}
|
class ____(models.Model):
parent = models.ManyToManyField("self", blank=True)
|
M2MToSelf
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pep8_naming/N815.py
|
{
"start": 427,
"end": 542
}
|
class ____(TypedDict):
lower: int
CONSTANT: str
mixedCase: bool
_mixedCase: list
mixed_Case: set
|
D
|
python
|
tensorflow__tensorflow
|
tensorflow/core/function/polymorphism/function_cache_test.py
|
{
"start": 2302,
"end": 3353
}
|
class ____(trace.TraceType):
def __init__(self, *shape: Optional[int]):
self.shape = shape
def is_subtype_of(self, other: "MockShape") -> bool:
if len(self.shape) != len(other.shape):
return False
if any(o is not None and s != o for s, o in zip(self.shape, other.shape)):
return False
return True
def most_specific_common_supertype(self, _):
raise NotImplementedError
def __str__(self):
return str(self.shape)
def __repr__(self):
return str(self)
def __hash__(self) -> int:
return hash(self.shape)
def __eq__(self, other: "MockShape") -> bool:
return self.shape == other.shape
def placeholder_value(self, placeholder_context):
raise NotImplementedError
def make_single_param_type(type_constraint):
return function_type.FunctionType(
[
function_type.Parameter(
"x",
function_type.Parameter.POSITIONAL_ONLY,
False,
type_constraint,
)
]
)
@dataclasses.dataclass(frozen=True)
|
MockShape
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/arithmetic.py
|
{
"start": 1182,
"end": 1575
}
|
class ____:
def setup(self):
# GH#31300
arr = np.arange(10**6)
df = DataFrame({"A": arr})
ser = df["A"]
self.df = df
self.ser = ser
def time_frame_op_with_fill_value_no_nas(self):
self.df.add(self.df, fill_value=4)
def time_series_op_with_fill_value_no_nas(self):
self.ser.add(self.ser, fill_value=4)
|
OpWithFillValue
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/axis_artist.py
|
{
"start": 9167,
"end": 13259
}
|
class ____(AttributeCopier, LabelBase):
"""
Axis label. Derived from `.Text`. The position of the text is updated
in the fly, so changing text position has no effect. Otherwise, the
properties can be changed as a normal `.Text`.
To change the pad between tick labels and axis label, use `set_pad`.
"""
def __init__(self, *args, axis_direction="bottom", axis=None, **kwargs):
self._axis = axis
self._pad = 5
self._external_pad = 0 # in pixels
LabelBase.__init__(self, *args, **kwargs)
self.set_axis_direction(axis_direction)
def set_pad(self, pad):
"""
Set the internal pad in points.
The actual pad will be the sum of the internal pad and the
external pad (the latter is set automatically by the `.AxisArtist`).
Parameters
----------
pad : float
The internal pad in points.
"""
self._pad = pad
def get_pad(self):
"""
Return the internal pad in points.
See `.set_pad` for more details.
"""
return self._pad
def get_ref_artist(self):
# docstring inherited
return self._axis.label
def get_text(self):
# docstring inherited
t = super().get_text()
if t == "__from_axes__":
return self._axis.label.get_text()
return self._text
_default_alignments = dict(left=("bottom", "center"),
right=("top", "center"),
bottom=("top", "center"),
top=("bottom", "center"))
def set_default_alignment(self, d):
"""
Set the default alignment. See `set_axis_direction` for details.
Parameters
----------
d : {"left", "bottom", "right", "top"}
"""
va, ha = _api.check_getitem(self._default_alignments, d=d)
self.set_va(va)
self.set_ha(ha)
_default_angles = dict(left=180,
right=0,
bottom=0,
top=180)
def set_default_angle(self, d):
"""
Set the default angle. See `set_axis_direction` for details.
Parameters
----------
d : {"left", "bottom", "right", "top"}
"""
self.set_rotation(_api.check_getitem(self._default_angles, d=d))
def set_axis_direction(self, d):
"""
Adjust the text angle and text alignment of axis label
according to the matplotlib convention.
===================== ========== ========= ========== ==========
Property left bottom right top
===================== ========== ========= ========== ==========
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the text angles are actually relative to (90 + angle
of the direction to the ticklabel), which gives 0 for bottom
axis.
Parameters
----------
d : {"left", "bottom", "right", "top"}
"""
self.set_default_alignment(d)
self.set_default_angle(d)
def get_color(self):
return self.get_attribute_from_ref_artist("color")
def draw(self, renderer):
if not self.get_visible():
return
self._offset_radius = \
self._external_pad + renderer.points_to_pixels(self.get_pad())
super().draw(renderer)
def get_window_extent(self, renderer=None):
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
if not self.get_visible():
return
r = self._external_pad + renderer.points_to_pixels(self.get_pad())
self._offset_radius = r
bb = super().get_window_extent(renderer)
return bb
|
AxisLabel
|
python
|
numba__llvmlite
|
llvmlite/tests/test_ir.py
|
{
"start": 109433,
"end": 125244
}
|
class ____(TestBase):
def test_integers(self):
c = ir.Constant(int32, 42)
self.assertEqual(str(c), 'i32 42')
c = ir.Constant(int1, 1)
self.assertEqual(str(c), 'i1 1')
c = ir.Constant(int1, 0)
self.assertEqual(str(c), 'i1 0')
c = ir.Constant(int1, True)
self.assertEqual(str(c), 'i1 true')
c = ir.Constant(int1, False)
self.assertEqual(str(c), 'i1 false')
c = ir.Constant(int1, ir.Undefined)
self.assertEqual(str(c), 'i1 undef')
c = ir.Constant(int1, None)
self.assertEqual(str(c), 'i1 0')
def test_reals(self):
# XXX Test NaNs and infs
c = ir.Constant(flt, 1.5)
self.assertEqual(str(c), 'float 0x3ff8000000000000')
c = ir.Constant(flt, -1.5)
self.assertEqual(str(c), 'float 0xbff8000000000000')
c = ir.Constant(dbl, 1.5)
self.assertEqual(str(c), 'double 0x3ff8000000000000')
c = ir.Constant(dbl, -1.5)
self.assertEqual(str(c), 'double 0xbff8000000000000')
c = ir.Constant(dbl, ir.Undefined)
self.assertEqual(str(c), 'double undef')
c = ir.Constant(dbl, None)
self.assertEqual(str(c), 'double 0.0')
def test_arrays(self):
c = ir.Constant(ir.ArrayType(int32, 3), (c32(5), c32(6), c32(4)))
self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 4]')
c = ir.Constant(ir.ArrayType(int32, 2), (c32(5), c32(ir.Undefined)))
self.assertEqual(str(c), '[2 x i32] [i32 5, i32 undef]')
c = ir.Constant.literal_array((c32(5), c32(6), c32(ir.Undefined)))
self.assertEqual(str(c), '[3 x i32] [i32 5, i32 6, i32 undef]')
with self.assertRaises(TypeError) as raises:
ir.Constant.literal_array((c32(5), ir.Constant(flt, 1.5)))
self.assertEqual(str(raises.exception),
"all elements must have the same type")
c = ir.Constant(ir.ArrayType(int32, 2), ir.Undefined)
self.assertEqual(str(c), '[2 x i32] undef')
c = ir.Constant(ir.ArrayType(int32, 2), None)
self.assertEqual(str(c), '[2 x i32] zeroinitializer')
# Raw array syntax
c = ir.Constant(ir.ArrayType(int8, 11), bytearray(b"foobar_123\x80"))
self.assertEqual(str(c), r'[11 x i8] c"foobar_123\80"')
c = ir.Constant(ir.ArrayType(int8, 4), bytearray(b"\x00\x01\x04\xff"))
self.assertEqual(str(c), r'[4 x i8] c"\00\01\04\ff"')
# Recursive instantiation of inner constants
c = ir.Constant(ir.ArrayType(int32, 3), (5, ir.Undefined, 6))
self.assertEqual(str(c), '[3 x i32] [i32 5, i32 undef, i32 6]')
# Invalid number of args
with self.assertRaises(ValueError):
ir.Constant(ir.ArrayType(int32, 3), (5, 6))
def test_vector(self):
vecty = ir.VectorType(ir.IntType(32), 8)
vals = [1, 2, 4, 3, 8, 6, 9, 7]
vec = ir.Constant(vecty, vals)
vec_repr = "<8 x i32> <{}>".format(
', '.join(map('i32 {}'.format, vals)))
self.assertEqual(str(vec), vec_repr)
def test_non_nullable_int(self):
constant = ir.Constant(ir.IntType(32), None).constant
self.assertEqual(constant, 0)
def test_structs(self):
st1 = ir.LiteralStructType((flt, int1))
st2 = ir.LiteralStructType((int32, st1))
c = ir.Constant(st1, (ir.Constant(ir.FloatType(), 1.5),
ir.Constant(int1, True)))
self.assertEqual(str(c),
'{float, i1} {float 0x3ff8000000000000, i1 true}')
c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
ir.Constant(int1, True)))
self.assertEqual(c.type, st1)
self.assertEqual(str(c),
'{float, i1} {float 0x3ff8000000000000, i1 true}')
c = ir.Constant.literal_struct((ir.Constant(ir.FloatType(), 1.5),
ir.Constant(int1, ir.Undefined)))
self.assertEqual(c.type, st1)
self.assertEqual(str(c),
'{float, i1} {float 0x3ff8000000000000, i1 undef}')
c = ir.Constant(st1, ir.Undefined)
self.assertEqual(str(c), '{float, i1} undef')
c = ir.Constant(st1, None)
self.assertEqual(str(c), '{float, i1} zeroinitializer')
# Recursive instantiation of inner constants
c1 = ir.Constant(st1, (1.5, True))
self.assertEqual(str(c1),
'{float, i1} {float 0x3ff8000000000000, i1 true}')
c2 = ir.Constant(st2, (42, c1))
self.assertEqual(str(c2), ('{i32, {float, i1}} {i32 42, {float, i1} '
'{float 0x3ff8000000000000, i1 true}}'))
c3 = ir.Constant(st2, (42, (1.5, True)))
self.assertEqual(str(c3), str(c2))
# Invalid number of args
with self.assertRaises(ValueError):
ir.Constant(st2, (4, 5, 6))
def test_undefined_literal_struct_pickling(self):
i8 = ir.IntType(8)
st = ir.Constant(ir.LiteralStructType([i8, i8]), ir.Undefined)
self.assert_pickle_correctly(st)
def test_type_instantiaton(self):
"""
Instantiating a type should create a constant.
"""
c = int8(42)
self.assertIsInstance(c, ir.Constant)
self.assertEqual(str(c), 'i8 42')
c = int1(True)
self.assertIsInstance(c, ir.Constant)
self.assertEqual(str(c), 'i1 true')
# Arrays
at = ir.ArrayType(int32, 3)
c = at([c32(4), c32(5), c32(6)])
self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
c = at([4, 5, 6])
self.assertEqual(str(c), '[3 x i32] [i32 4, i32 5, i32 6]')
c = at(None)
self.assertEqual(str(c), '[3 x i32] zeroinitializer')
with self.assertRaises(ValueError):
at([4, 5, 6, 7])
# Structs
st1 = ir.LiteralStructType((flt, int1))
st2 = ir.LiteralStructType((int32, st1))
c = st1((1.5, True))
self.assertEqual(str(c), ('{float, i1} {float 0x3ff8000000000000, i1 '
'true}'))
c = st2((42, (1.5, True)))
self.assertEqual(str(c), ('{i32, {float, i1}} {i32 42, {float, i1} '
'{float 0x3ff8000000000000, i1 true}}'))
def test_repr(self):
"""
Constants should have a useful repr().
"""
c = int32(42)
self.assertEqual(repr(c), "<ir.Constant type='i32' value=42>")
def test_encoding_problem(self):
c = ir.Constant(ir.ArrayType(ir.IntType(8), 256),
bytearray(range(256)))
m = self.module()
gv = ir.GlobalVariable(m, c.type, "myconstant")
gv.global_constant = True
gv.initializer = c
# With utf-8, the following will cause:
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe0 in position
# 136: invalid continuation byte
parsed = llvm.parse_assembly(str(m))
# Make sure the encoding does not modify the IR
reparsed = llvm.parse_assembly(str(parsed))
self.assertEqual(str(parsed), str(reparsed))
def test_gep(self):
m = self.module()
tp = ir.LiteralStructType((flt, int1))
gv = ir.GlobalVariable(m, tp, "myconstant")
c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c),
'getelementptr ({float, i1}, ptr @"myconstant", i32 0, i32 1)') # noqa E501
else:
self.assertEqual(str(c),
'getelementptr ({float, i1}, {float, i1}* @"myconstant", i32 0, i32 1)') # noqa E501
self.assertEqual(c.type, ir.PointerType(int1))
const = ir.Constant(tp, None)
with self.assertRaises(TypeError):
const.gep([ir.Constant(int32, 0)])
const_ptr = ir.Constant(tp.as_pointer(), None)
c2 = const_ptr.gep([ir.Constant(int32, 0)])
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c2),
'getelementptr ({float, i1}, ptr null, i32 0)') # noqa E501
else:
self.assertEqual(str(c2),
'getelementptr ({float, i1}, {float, i1}* null, i32 0)') # noqa E501
self.assertEqual(c.type, ir.PointerType(int1))
def test_gep_addrspace_globalvar(self):
m = self.module()
tp = ir.LiteralStructType((flt, int1))
addrspace = 4
gv = ir.GlobalVariable(m, tp, "myconstant", addrspace=addrspace)
self.assertEqual(gv.addrspace, addrspace)
c = gv.gep([ir.Constant(int32, x) for x in (0, 1)])
self.assertEqual(c.type.addrspace, addrspace)
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c),
('getelementptr ({float, i1}, ptr '
'addrspace(4) @"myconstant", i32 0, i32 1)'))
else:
self.assertEqual(str(c),
('getelementptr ({float, i1}, {float, i1} '
'addrspace(4)* @"myconstant", i32 0, i32 1)'))
self.assertEqual(c.type, ir.PointerType(int1, addrspace=addrspace))
def test_trunc(self):
c = ir.Constant(int64, 1).trunc(int32)
self.assertEqual(str(c), 'trunc (i64 1 to i32)')
def test_zext(self):
c = ir.Constant(int32, 1).zext(int64)
self.assertEqual(str(c), 'zext (i32 1 to i64)')
def test_sext(self):
c = ir.Constant(int32, -1).sext(int64)
self.assertEqual(str(c), 'sext (i32 -1 to i64)')
def test_fptrunc(self):
c = ir.Constant(flt, 1).fptrunc(hlf)
self.assertEqual(str(c), 'fptrunc (float 0x3ff0000000000000 to half)')
def test_fpext(self):
c = ir.Constant(flt, 1).fpext(dbl)
self.assertEqual(str(c), 'fpext (float 0x3ff0000000000000 to double)')
def test_bitcast(self):
m = self.module()
gv = ir.GlobalVariable(m, int32, "myconstant")
c = gv.bitcast(int64.as_pointer())
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c), 'bitcast (ptr @"myconstant" to ptr)')
else:
self.assertEqual(str(c), 'bitcast (i32* @"myconstant" to i64*)')
def test_fptoui(self):
c = ir.Constant(flt, 1).fptoui(int32)
self.assertEqual(str(c), 'fptoui (float 0x3ff0000000000000 to i32)')
def test_uitofp(self):
c = ir.Constant(int32, 1).uitofp(flt)
self.assertEqual(str(c), 'uitofp (i32 1 to float)')
def test_fptosi(self):
c = ir.Constant(flt, 1).fptosi(int32)
self.assertEqual(str(c), 'fptosi (float 0x3ff0000000000000 to i32)')
def test_sitofp(self):
c = ir.Constant(int32, 1).sitofp(flt)
self.assertEqual(str(c), 'sitofp (i32 1 to float)')
def test_ptrtoint_1(self):
ptr = ir.Constant(int64.as_pointer(), None)
one = ir.Constant(int32, 1)
c = ptr.ptrtoint(int32)
self.assertRaises(TypeError, one.ptrtoint, int64)
self.assertRaises(TypeError, ptr.ptrtoint, flt)
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c), 'ptrtoint (ptr null to i32)')
else:
self.assertEqual(str(c), 'ptrtoint (i64* null to i32)')
def test_ptrtoint_2(self):
m = self.module()
gv = ir.GlobalVariable(m, int32, "myconstant")
c = gv.ptrtoint(int64)
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c), 'ptrtoint (ptr @"myconstant" to i64)')
self.assertRaisesRegex(
TypeError,
r"can only ptrtoint\(\) to integer type, not 'ptr'",
gv.ptrtoint,
int64.as_pointer())
else:
self.assertEqual(str(c), 'ptrtoint (i32* @"myconstant" to i64)')
self.assertRaisesRegex(
TypeError,
r"can only ptrtoint\(\) to integer type, not 'i64\*'",
gv.ptrtoint,
int64.as_pointer())
c2 = ir.Constant(int32, 0)
self.assertRaisesRegex(
TypeError,
r"can only call ptrtoint\(\) on pointer type, not 'i32'",
c2.ptrtoint,
int64)
def test_inttoptr(self):
one = ir.Constant(int32, 1)
pi = ir.Constant(flt, 3.14)
c = one.inttoptr(int64.as_pointer())
self.assertRaises(TypeError, one.inttoptr, int64)
self.assertRaises(TypeError, pi.inttoptr, int64.as_pointer())
if not ir_layer_typed_pointers_enabled:
self.assertEqual(str(c), 'inttoptr (i32 1 to ptr)')
else:
self.assertEqual(str(c), 'inttoptr (i32 1 to i64*)')
def test_neg(self):
one = ir.Constant(int32, 1)
self.assertEqual(str(one.neg()), 'sub (i32 0, i32 1)')
def test_not(self):
one = ir.Constant(int32, 1)
self.assertEqual(str(one.not_()), 'xor (i32 1, i32 -1)')
def test_fneg(self):
one = ir.Constant(flt, 1)
self.assertEqual(str(one.fneg()), 'fneg (float 0x3ff0000000000000)')
def test_int_binops(self):
one = ir.Constant(int32, 1)
two = ir.Constant(int32, 2)
oracle = {one.shl: 'shl', one.lshr: 'lshr', one.ashr: 'ashr',
one.add: 'add', one.sub: 'sub', one.mul: 'mul',
one.udiv: 'udiv', one.sdiv: 'sdiv', one.urem: 'urem',
one.srem: 'srem', one.or_: 'or', one.and_: 'and',
one.xor: 'xor'}
for fn, irop in oracle.items():
self.assertEqual(str(fn(two)), irop + ' (i32 1, i32 2)')
# unsigned integer compare
oracle = {'==': 'eq', '!=': 'ne', '>':
'ugt', '>=': 'uge', '<': 'ult', '<=': 'ule'}
for cop, cond in oracle.items():
actual = str(one.icmp_unsigned(cop, two))
expected = 'icmp ' + cond + ' (i32 1, i32 2)'
self.assertEqual(actual, expected)
# signed integer compare
oracle = {'==': 'eq', '!=': 'ne',
'>': 'sgt', '>=': 'sge', '<': 'slt', '<=': 'sle'}
for cop, cond in oracle.items():
actual = str(one.icmp_signed(cop, two))
expected = 'icmp ' + cond + ' (i32 1, i32 2)'
self.assertEqual(actual, expected)
def test_flt_binops(self):
one = ir.Constant(flt, 1)
two = ir.Constant(flt, 2)
oracle = {one.fadd: 'fadd', one.fsub: 'fsub', one.fmul: 'fmul',
one.fdiv: 'fdiv', one.frem: 'frem'}
for fn, irop in oracle.items():
actual = str(fn(two))
expected = irop + (' (float 0x3ff0000000000000,'
' float 0x4000000000000000)')
self.assertEqual(actual, expected)
# ordered float compare
oracle = {'==': 'oeq', '!=': 'one', '>': 'ogt', '>=': 'oge',
'<': 'olt', '<=': 'ole'}
for cop, cond in oracle.items():
actual = str(one.fcmp_ordered(cop, two))
expected = 'fcmp ' + cond + (' (float 0x3ff0000000000000,'
' float 0x4000000000000000)')
self.assertEqual(actual, expected)
# unordered float compare
oracle = {'==': 'ueq', '!=': 'une', '>': 'ugt', '>=': 'uge',
'<': 'ult', '<=': 'ule'}
for cop, cond in oracle.items():
actual = str(one.fcmp_unordered(cop, two))
expected = 'fcmp ' + cond + (' (float 0x3ff0000000000000,'
' float 0x4000000000000000)')
self.assertEqual(actual, expected)
|
TestConstant
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/auto_materialize_policy.py
|
{
"start": 469,
"end": 1087
}
|
class ____(graphene.ObjectType):
description = graphene.NonNull(graphene.String)
decisionType = graphene.NonNull(GrapheneAutoMaterializeDecisionType)
className = graphene.NonNull(graphene.String)
class Meta:
name = "AutoMaterializeRule"
def __init__(self, description: str, decision_type: AutoMaterializeDecisionType):
super().__init__(
decisionType=decision_type,
description=description,
# the class name just needs to be distinct for each rule, so we use the description
className=description,
)
|
GrapheneAutoMaterializeRule
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/solver24.py
|
{
"start": 789,
"end": 1173
}
|
class ____(Iterator[ClassC[AnyStr]], Protocol): ...
GenericPath: TypeAlias = AnyStr | PathLike[AnyStr]
def func2(iter: Iterable[object]) -> bool: ...
def func3(path: GenericPath[AnyStr]) -> ClassD[AnyStr]: ...
def func4(val: str):
func2(func3(val))
def func5(a: dict[T, U], b: list[T | U]):
pass
def func6(a: dict[str, int], b: list[str | int]):
func5(a, b)
|
ClassD
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/softplus_op_test.py
|
{
"start": 1159,
"end": 5421
}
|
class ____(test.TestCase):
def _npSoftplus(self, np_features):
np_features = np.asarray(np_features)
zero = np.asarray(0).astype(np_features.dtype)
return np.logaddexp(zero, np_features)
def _testSoftplus(self, np_features, use_gpu=False):
np_softplus = self._npSoftplus(np_features)
with self.cached_session(use_gpu=use_gpu):
softplus = nn_ops.softplus(np_features)
tf_softplus = self.evaluate(softplus)
self.assertAllCloseAccordingToType(
np_softplus, tf_softplus, half_rtol=5e-3, half_atol=5e-3,
bfloat16_rtol=5e-2, bfloat16_atol=5e-2
)
self.assertTrue(np.all(tf_softplus > 0))
self.assertShapeEqual(np_softplus, softplus)
def testNumbers(self):
for t in [
np.float16,
np.float32,
np.float64,
dtypes.bfloat16.as_numpy_dtype,
]:
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=False)
self._testSoftplus(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t),
use_gpu=True)
if t == dtypes.bfloat16.as_numpy_dtype:
# bfloat16 dtype doesn't have finfo.
# Calculate epsilon using machine_epsilon = base ^ (-(precision - 1))
log_eps = np.log(2 ** (-(8 - 1)))
else:
log_eps = np.log(np.finfo(t).eps)
one = t(1)
ten = t(10)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=False)
self._testSoftplus(
[
log_eps, log_eps - one, log_eps + one, log_eps - ten,
log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,
-log_eps - ten, -log_eps + ten
],
use_gpu=True)
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], y, [2, 5], x_init_value=x_init)
print("softplus (float) gradient err = ", err)
self.assertLess(err, 1e-4)
@test_util.run_deprecated_v1
def testGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad, [2, 5], x_init_value=x_init)
print("softplus (float) gradient of gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testGradGradGrad(self):
with self.cached_session():
x = constant_op.constant(
[-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
shape=[2, 5],
name="x")
y = nn_ops.softplus(x, name="softplus")
(grad,) = gradients_impl.gradients(y, x)
(grad_grad,) = gradients_impl.gradients(grad, x)
x_init = np.asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
dtype=np.float32,
order="F")
err = gradient_checker.compute_gradient_error(
x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
print("softplus (float) third-order gradient err = ", err)
self.assertLess(err, 5e-5)
@test_util.run_deprecated_v1
def testNoInts(self):
with self.cached_session():
with self.assertRaisesRegex(
TypeError,
"'features' has DataType int32 not in list of allowed values"):
nn_ops.softplus(constant_op.constant(42)).eval()
if __name__ == "__main__":
test.main()
|
SoftplusTest
|
python
|
pydata__xarray
|
xarray/core/datatree_render.py
|
{
"start": 361,
"end": 438
}
|
class ____(NamedTuple):
pre: str
fill: str
node: DataTree | str
|
Row
|
python
|
python-openxml__python-docx
|
src/docx/oxml/table.py
|
{
"start": 1366,
"end": 4603
}
|
class ____(BaseOxmlElement):
"""``<w:tr>`` element."""
add_tc: Callable[[], CT_Tc]
get_or_add_trPr: Callable[[], CT_TrPr]
_add_trPr: Callable[[], CT_TrPr]
tc_lst: list[CT_Tc]
# -- custom inserter below --
tblPrEx: CT_TblPrEx | None = ZeroOrOne("w:tblPrEx") # pyright: ignore[reportAssignmentType]
# -- custom inserter below --
trPr: CT_TrPr | None = ZeroOrOne("w:trPr") # pyright: ignore[reportAssignmentType]
tc = ZeroOrMore("w:tc")
@property
def grid_after(self) -> int:
"""The number of unpopulated layout-grid cells at the end of this row."""
trPr = self.trPr
if trPr is None:
return 0
return trPr.grid_after
@property
def grid_before(self) -> int:
"""The number of unpopulated layout-grid cells at the start of this row."""
trPr = self.trPr
if trPr is None:
return 0
return trPr.grid_before
def tc_at_grid_offset(self, grid_offset: int) -> CT_Tc:
"""The `tc` element in this tr at exact `grid offset`.
Raises ValueError when this `w:tr` contains no `w:tc` with exact starting `grid_offset`.
"""
# -- account for omitted cells at the start of the row --
remaining_offset = grid_offset - self.grid_before
for tc in self.tc_lst:
# -- We've gone past grid_offset without finding a tc, no sense searching further. --
if remaining_offset < 0:
break
# -- We've arrived at grid_offset, this is the `w:tc` we're looking for. --
if remaining_offset == 0:
return tc
# -- We're not there yet, skip forward the number of layout-grid cells this cell
# -- occupies.
remaining_offset -= tc.grid_span
raise ValueError(f"no `tc` element at grid_offset={grid_offset}")
@property
def tr_idx(self) -> int:
"""Index of this `w:tr` element within its parent `w:tbl` element."""
tbl = cast(CT_Tbl, self.getparent())
return tbl.tr_lst.index(self)
@property
def trHeight_hRule(self) -> WD_ROW_HEIGHT_RULE | None:
"""The value of `./w:trPr/w:trHeight/@w:hRule`, or |None| if not present."""
trPr = self.trPr
if trPr is None:
return None
return trPr.trHeight_hRule
@trHeight_hRule.setter
def trHeight_hRule(self, value: WD_ROW_HEIGHT_RULE | None):
trPr = self.get_or_add_trPr()
trPr.trHeight_hRule = value
@property
def trHeight_val(self):
"""Return the value of `w:trPr/w:trHeight@w:val`, or |None| if not present."""
trPr = self.trPr
if trPr is None:
return None
return trPr.trHeight_val
@trHeight_val.setter
def trHeight_val(self, value: Length | None):
trPr = self.get_or_add_trPr()
trPr.trHeight_val = value
def _insert_tblPrEx(self, tblPrEx: CT_TblPrEx):
self.insert(0, tblPrEx)
def _insert_trPr(self, trPr: CT_TrPr):
tblPrEx = self.tblPrEx
if tblPrEx is not None:
tblPrEx.addnext(trPr)
else:
self.insert(0, trPr)
def _new_tc(self):
return CT_Tc.new()
|
CT_Row
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/gridspec.py
|
{
"start": 20856,
"end": 27851
}
|
class ____:
"""
The location of a subplot in a `GridSpec`.
.. note::
Likely, you will never instantiate a `SubplotSpec` yourself. Instead,
you will typically obtain one from a `GridSpec` using item-access.
Parameters
----------
gridspec : `~matplotlib.gridspec.GridSpec`
The GridSpec, which the subplot is referencing.
num1, num2 : int
The subplot will occupy the *num1*-th cell of the given
*gridspec*. If *num2* is provided, the subplot will span between
*num1*-th cell and *num2*-th cell **inclusive**.
The index starts from 0.
"""
def __init__(self, gridspec, num1, num2=None):
self._gridspec = gridspec
self.num1 = num1
self.num2 = num2
def __repr__(self):
return (f"{self.get_gridspec()}["
f"{self.rowspan.start}:{self.rowspan.stop}, "
f"{self.colspan.start}:{self.colspan.stop}]")
@staticmethod
def _from_subplot_args(figure, args):
"""
Construct a `.SubplotSpec` from a parent `.Figure` and either
- a `.SubplotSpec` -- returned as is;
- one or three numbers -- a MATLAB-style subplot specifier.
"""
if len(args) == 1:
arg, = args
if isinstance(arg, SubplotSpec):
return arg
elif not isinstance(arg, Integral):
raise ValueError(
f"Single argument to subplot must be a three-digit "
f"integer, not {arg!r}")
try:
rows, cols, num = map(int, str(arg))
except ValueError:
raise ValueError(
f"Single argument to subplot must be a three-digit "
f"integer, not {arg!r}") from None
elif len(args) == 3:
rows, cols, num = args
else:
raise _api.nargs_error("subplot", takes="1 or 3", given=len(args))
gs = GridSpec._check_gridspec_exists(figure, rows, cols)
if gs is None:
gs = GridSpec(rows, cols, figure=figure)
if isinstance(num, tuple) and len(num) == 2:
if not all(isinstance(n, Integral) for n in num):
raise ValueError(
f"Subplot specifier tuple must contain integers, not {num}"
)
i, j = num
else:
if not isinstance(num, Integral) or num < 1 or num > rows*cols:
raise ValueError(
f"num must be an integer with 1 <= num <= {rows*cols}, "
f"not {num!r}"
)
i = j = num
return gs[i-1:j]
# num2 is a property only to handle the case where it is None and someone
# mutates num1.
@property
def num2(self):
return self.num1 if self._num2 is None else self._num2
@num2.setter
def num2(self, value):
self._num2 = value
def get_gridspec(self):
return self._gridspec
def get_geometry(self):
"""
Return the subplot geometry as tuple ``(n_rows, n_cols, start, stop)``.
The indices *start* and *stop* define the range of the subplot within
the `GridSpec`. *stop* is inclusive (i.e. for a single cell
``start == stop``).
"""
rows, cols = self.get_gridspec().get_geometry()
return rows, cols, self.num1, self.num2
@property
def rowspan(self):
"""The rows spanned by this subplot, as a `range` object."""
ncols = self.get_gridspec().ncols
return range(self.num1 // ncols, self.num2 // ncols + 1)
@property
def colspan(self):
"""The columns spanned by this subplot, as a `range` object."""
ncols = self.get_gridspec().ncols
# We explicitly support num2 referring to a column on num1's *left*, so
# we must sort the column indices here so that the range makes sense.
c1, c2 = sorted([self.num1 % ncols, self.num2 % ncols])
return range(c1, c2 + 1)
def is_first_row(self):
return self.rowspan.start == 0
def is_last_row(self):
return self.rowspan.stop == self.get_gridspec().nrows
def is_first_col(self):
return self.colspan.start == 0
def is_last_col(self):
return self.colspan.stop == self.get_gridspec().ncols
def get_position(self, figure):
"""
Update the subplot position from ``figure.subplotpars``.
"""
gridspec = self.get_gridspec()
nrows, ncols = gridspec.get_geometry()
rows, cols = np.unravel_index([self.num1, self.num2], (nrows, ncols))
fig_bottoms, fig_tops, fig_lefts, fig_rights = \
gridspec.get_grid_positions(figure)
fig_bottom = fig_bottoms[rows].min()
fig_top = fig_tops[rows].max()
fig_left = fig_lefts[cols].min()
fig_right = fig_rights[cols].max()
return Bbox.from_extents(fig_left, fig_bottom, fig_right, fig_top)
def get_topmost_subplotspec(self):
"""
Return the topmost `SubplotSpec` instance associated with the subplot.
"""
gridspec = self.get_gridspec()
if hasattr(gridspec, "get_topmost_subplotspec"):
return gridspec.get_topmost_subplotspec()
else:
return self
def __eq__(self, other):
"""
Two SubplotSpecs are considered equal if they refer to the same
position(s) in the same `GridSpec`.
"""
# other may not even have the attributes we are checking.
return ((self._gridspec, self.num1, self.num2)
== (getattr(other, "_gridspec", object()),
getattr(other, "num1", object()),
getattr(other, "num2", object())))
def __hash__(self):
return hash((self._gridspec, self.num1, self.num2))
def subgridspec(self, nrows, ncols, **kwargs):
"""
Create a GridSpec within this subplot.
The created `.GridSpecFromSubplotSpec` will have this `SubplotSpec` as
a parent.
Parameters
----------
nrows : int
Number of rows in grid.
ncols : int
Number of columns in grid.
Returns
-------
`.GridSpecFromSubplotSpec`
Other Parameters
----------------
**kwargs
All other parameters are passed to `.GridSpecFromSubplotSpec`.
See Also
--------
matplotlib.pyplot.subplots
Examples
--------
Adding three subplots in the space occupied by a single subplot::
fig = plt.figure()
gs0 = fig.add_gridspec(3, 1)
ax1 = fig.add_subplot(gs0[0])
ax2 = fig.add_subplot(gs0[1])
gssub = gs0[2].subgridspec(1, 3)
for i in range(3):
fig.add_subplot(gssub[0, i])
"""
return GridSpecFromSubplotSpec(nrows, ncols, self, **kwargs)
|
SubplotSpec
|
python
|
ray-project__ray
|
python/ray/experimental/channel/conftest.py
|
{
"start": 5688,
"end": 6060
}
|
class ____(ray_channel.shared_memory_channel.Channel):
    """
    Patched Channel that records all write ops for testing.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.ops = []

    def write(self, *args, **kwargs):
        self.ops.append((args, kwargs))
        return super().write(*args, **kwargs)
|
TracedChannel
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/losses_test.py
|
{
"start": 38601,
"end": 41109
}
|
class ____(test.TestCase):
def testIncompatibleShapes(self):
with self.cached_session():
predictions = constant_op.constant([[-1.0], [2.1]])
labels = constant_op.constant([0.0, 1.0])
with self.assertRaises(ValueError):
_ = losses.huber_loss(labels, predictions).eval()
@test_util.run_deprecated_v1
def testAllQuadratic(self):
with self.cached_session():
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([1.0, -1.0, 0.0, 0.5])
loss = losses.huber_loss(labels, predictions)
self.assertAllClose(
loss, 0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4., atol=1e-5)
@test_util.run_deprecated_v1
def testAllLinear(self):
with self.cached_session():
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([0.0, 1.0, 0.0, 1.5])
loss = losses.huber_loss(labels, predictions)
self.assertAllClose(loss, (1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5, atol=1e-5)
@test_util.run_deprecated_v1
def testMixedQuadraticLinear(self):
with self.cached_session():
predictions = constant_op.constant([[1.5, -1.4, -1.0, 0.0],
[1.5, -1.4, -1.0, 0.0]])
labels = constant_op.constant([[1.0, -1.0, 0.0, 0.5],
[0.0, 1.0, 0.0, 1.5]])
loss = losses.huber_loss(labels, predictions)
quadratic = 0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4.
linear = (1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5
expected_loss = (quadratic + linear) / 2.
self.assertAllClose(loss, expected_loss, atol=1e-5)
def testAllQuadraticDelta(self):
with self.cached_session():
delta = 0.5
predictions = constant_op.constant([1.5, -1.4, -0.5, 0.0])
labels = constant_op.constant([1.0, -1.0, 0.0, 0.5])
expected = 0.5 * np.array([0.5**2, 0.4**2, 0.5**2, 0.5**2]).mean()
loss = losses.huber_loss(labels, predictions, delta=delta)
self.assertAllClose(expected, self.evaluate(loss), atol=1e-5)
def testAllLinearDelta(self):
delta = 0.5
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([0.0, 1.0, 0.0, 1.5])
expected = delta * np.array([1.5, 2.4, 1.0, 1.5]).mean()
expected -= 0.5 * delta**2
loss = losses.huber_loss(labels, predictions, delta=delta)
with self.cached_session():
self.assertAllClose(expected, self.evaluate(loss), atol=1e-5)
@test_util.run_deprecated_v1
|
HuberLossTest
|
python
|
getsentry__sentry
|
tests/sentry/deletions/test_monitor.py
|
{
"start": 427,
"end": 1823
}
|
class ____(APITestCase, TransactionTestCase, HybridCloudTestMixin):
def test_simple(self) -> None:
project = self.create_project(name="test")
env = Environment.objects.create(organization_id=project.organization_id, name="foo")
monitor = Monitor.objects.create(
organization_id=project.organization.id,
project_id=project.id,
config={"schedule": "* * * * *", "schedule_type": ScheduleType.CRONTAB},
)
monitor_env = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=env.id,
)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_env,
project_id=project.id,
date_added=monitor.date_added,
status=CheckInStatus.OK,
)
self.ScheduledDeletion.schedule(instance=monitor, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Monitor.objects.filter(id=monitor.id).exists()
assert not MonitorEnvironment.objects.filter(id=monitor_env.id).exists()
assert not MonitorCheckIn.objects.filter(id=checkin.id).exists()
# Shared objects should continue to exist.
assert Environment.objects.filter(id=env.id).exists()
assert Project.objects.filter(id=project.id).exists()
|
DeleteMonitorTest
|
python
|
bokeh__bokeh
|
examples/server/app/server_auth/auth.py
|
{
"start": 795,
"end": 2170
}
|
class ____(RequestHandler):
def get(self):
try:
errormessage = self.get_argument("error")
except Exception:
errormessage = ""
self.render("login.html", errormessage=errormessage)
def check_permission(self, username, password):
# !!!
# !!! This code below is a toy demonstration of the API, and not
# !!! intended for "real" use. A real app should use these APIs
# !!! to connect Oauth or some other established auth workflow.
# !!!
if username == "bokeh" and password == "bokeh":
return True
return False
def post(self):
username = self.get_argument("username", "")
password = self.get_argument("password", "")
auth = self.check_permission(username, password)
if auth:
self.set_current_user(username)
self.redirect("/")
else:
error_msg = "?error=" + tornado.escape.url_escape("Login incorrect")
self.redirect(login_url + error_msg)
def set_current_user(self, user):
if user:
self.set_cookie("user", tornado.escape.json_encode(user))
else:
self.clear_cookie("user")
# optional logout_url, available as curdoc().session_context.logout_url
logout_url = "/logout"
# optional logout handler for logout_url
|
LoginHandler
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_object_position09.py
|
{
"start": 315,
"end": 1685
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position09.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
chart.axis_ids = [60910208, 69231360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write("A1", "Foo", bold)
worksheet.write("B1", "Bar", italic)
worksheet.write_column("A2", data[0])
worksheet.write_column("B2", data[1])
worksheet.write_column("C2", data[2])
worksheet.set_row(12, None, None, {"hidden": True})
worksheet.set_column("F:F", 9, None, {"hidden": True})
chart.add_series({"values": "=Sheet1!$A$2:$A$6"})
chart.add_series({"values": "=Sheet1!$B$2:$B$6"})
chart.add_series({"values": "=Sheet1!$C$2:$C$6"})
worksheet.insert_chart("E9", chart, {"object_position": 4})
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
pypa__warehouse
|
warehouse/accounts/forms.py
|
{
"start": 17506,
"end": 17835
}
|
class ____(wtforms.Form):
    def __init__(self, *args, request, user_id, user_service, **kwargs):
        super().__init__(*args, **kwargs)
        self.request = request
        self.user_id = user_id
        self.user_service = user_service

    remember_device = wtforms.BooleanField(default=False)
|
_TwoFactorAuthenticationForm
|
python
|
pandas-dev__pandas
|
pandas/tests/libs/test_hashtable.py
|
{
"start": 25148,
"end": 27212
}
|
class ____:
def test_value_count(self, dtype):
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
keys, counts, _ = ht.value_count(values, True)
assert len(keys) == 0
keys, counts, _ = ht.value_count(values, False)
assert len(keys) == 1 and np.all(np.isnan(keys))
assert counts[0] == 3
def test_duplicated_first(self, dtype):
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
result = ht.duplicated(values)
expected = np.array([False, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype):
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([np.nan, np.nan], dtype=dtype)
result = ht.ismember(arr, values)
expected = np.array([True, True, True], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype):
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([1], dtype=dtype)
result = ht.ismember(arr, values)
expected = np.array([False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype):
values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
assert ht.mode(values, True)[0] == 42
assert np.isnan(ht.mode(values, False)[0])
def test_ismember_tuple_with_nans():
# GH-41836
values = np.empty(2, dtype=object)
values[:] = [("a", float("nan")), ("b", 1)]
comps = [("a", float("nan"))]
result = isin(values, comps)
expected = np.array([True, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_float_complex_int_are_equal_as_objects():
values = ["a", 5, 5.0, 5.0 + 0j]
comps = list(range(129))
result = isin(np.array(values, dtype=object), np.asarray(comps))
expected = np.array([False, True, True, True], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
|
TestHelpFunctionsWithNans
|
python
|
numba__numba
|
numba/tests/test_hashing.py
|
{
"start": 625,
"end": 2052
}
|
class ____(TestCase):
def test_warn_on_fnv(self):
# FNV hash alg variant is not supported, check Numba warns
work = """
import sys
import warnings
from collections import namedtuple
# hash_info is a StructSequence, mock as a named tuple
fields = ["width", "modulus", "inf", "nan", "imag", "algorithm",
"hash_bits", "seed_bits", "cutoff"]
hinfo = sys.hash_info
FAKE_HASHINFO = namedtuple('FAKE_HASHINFO', fields)
fd = dict()
for f in fields:
fd[f] = getattr(hinfo, f)
fd['algorithm'] = 'fnv'
fake_hashinfo = FAKE_HASHINFO(**fd)
# replace the hashinfo with the fnv version
sys.hash_info = fake_hashinfo
with warnings.catch_warnings(record=True) as warns:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
from numba import njit
@njit
def foo():
hash(1)
foo()
assert len(warns) > 0
expect = "FNV hashing is not implemented in Numba. See PEP 456"
for w in warns:
if expect in str(w.message):
break
else:
raise RuntimeError("Expected warning not found")
"""
subprocess.check_call([sys.executable, '-c', dedent(work)])
|
TestHashingSetup
|
python
|
pydantic__pydantic
|
tests/mypy/modules/plugin_success.py
|
{
"start": 3861,
"end": 3992
}
|
class ____(BaseModel, Generic[T]):
    data: T
    error: Optional[str]


response = Response[Model](data=model, error=None)
|
Response
|
python
|
scrapy__scrapy
|
scrapy/commands/startproject.py
|
{
"start": 940,
"end": 4418
}
|
class ____(ScrapyCommand):
requires_crawler_process = False
default_settings = {"LOG_ENABLED": False}
def syntax(self) -> str:
return "<project_name> [project_dir]"
def short_desc(self) -> str:
return "Create new project"
def _is_valid_name(self, project_name: str) -> bool:
def _module_exists(module_name: str) -> bool:
spec = find_spec(module_name)
return spec is not None and spec.loader is not None
if not re.search(r"^[_a-zA-Z]\w*$", project_name):
print(
"Error: Project names must begin with a letter and contain"
" only\nletters, numbers and underscores"
)
elif _module_exists(project_name):
print(f"Error: Module {project_name!r} already exists")
else:
return True
return False
def _copytree(self, src: Path, dst: Path) -> None:
"""
Since the original function always creates the directory, to resolve
the issue a new function had to be created. It's a simple copy and
was reduced for this case.
More info at:
https://github.com/scrapy/scrapy/pull/2005
"""
ignore = IGNORE
names = [x.name for x in src.iterdir()]
ignored_names = ignore(src, names)
if not dst.exists():
dst.mkdir(parents=True)
for name in names:
if name in ignored_names:
continue
srcname = src / name
dstname = dst / name
if srcname.is_dir():
self._copytree(srcname, dstname)
else:
copy2(srcname, dstname)
_make_writable(dstname)
copystat(src, dst)
_make_writable(dst)
def run(self, args: list[str], opts: argparse.Namespace) -> None:
if len(args) not in (1, 2):
raise UsageError
project_name = args[0]
project_dir = Path(args[-1])
if (project_dir / "scrapy.cfg").exists():
self.exitcode = 1
print(f"Error: scrapy.cfg already exists in {project_dir.resolve()}")
return
if not self._is_valid_name(project_name):
self.exitcode = 1
return
self._copytree(Path(self.templates_dir), project_dir.resolve())
move(project_dir / "module", project_dir / project_name)
for paths in TEMPLATES_TO_RENDER:
tplfile = Path(
project_dir,
*(
string.Template(s).substitute(project_name=project_name)
for s in paths
),
)
render_templatefile(
tplfile,
project_name=project_name,
ProjectName=string_camelcase(project_name),
)
print(
f"New Scrapy project '{project_name}', using template directory "
f"'{self.templates_dir}', created in:"
)
print(f" {project_dir.resolve()}\n")
print("You can start your first spider with:")
print(f" cd {project_dir}")
print(" scrapy genspider example example.com")
@property
def templates_dir(self) -> str:
assert self.settings is not None
return str(
Path(
self.settings["TEMPLATES_DIR"] or Path(scrapy.__path__[0], "templates"),
"project",
)
)
|
Command
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_discover_key_transactions.py
|
{
"start": 516,
"end": 951
}
|
class ____(APITestCase, SnubaTestCase):
    def setUp(self) -> None:
        super().setUp()
        self.login_as(user=self.user, superuser=False)
        self.org = self.create_organization(owner=self.user, name="foo")
        self.project = self.create_project(name="baz", organization=self.org)
        self.event_data = load_data("transaction")
        self.features = ["organizations:performance-view"]
|
TeamKeyTransactionTestBase
|
python
|
apache__airflow
|
providers/telegram/tests/unit/telegram/hooks/test_telegram.py
|
{
"start": 1304,
"end": 12744
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="telegram-webhook-without-token",
conn_type="http",
)
)
create_connection_without_db(
Connection(
conn_id="telegram_default",
conn_type="http",
password=TELEGRAM_TOKEN,
)
)
create_connection_without_db(
Connection(
conn_id="telegram-webhook-with-chat_id",
conn_type="http",
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_use_default_connection(self):
hook = TelegramHook()
assert hook.token == TELEGRAM_TOKEN
assert not hook.chat_id
@pytest.mark.db_test
def test_should_raise_exception_if_conn_id_doesnt_exist(self, sdk_connection_not_found):
with pytest.raises(airflow.exceptions.AirflowNotFoundException) as ctx:
TelegramHook(telegram_conn_id="telegram-webhook-non-existent")
assert str(ctx.value) == "The conn_id `telegram-webhook-non-existent` isn't defined"
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramHook(telegram_conn_id="telegram-webhook-without-token")
assert str(ctx.value) == "Missing token(password) in Telegram connection"
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
hook = TelegramHook(telegram_conn_id="telegram_default")
error_message = "'chat_id' must be provided for telegram message"
with pytest.raises(airflow.exceptions.AirflowException, match=error_message):
hook.send_message({"text": "test telegram message"})
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
hook = TelegramHook(telegram_conn_id="telegram_default")
error_message = "'text' must be provided for telegram message"
with pytest.raises(airflow.exceptions.AirflowException, match=error_message):
hook.send_message({"chat_id": "-420913222"})
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default")
hook.send_message({"chat_id": "-420913222", "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default", chat_id="-420913222")
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
excepted_retry_count = 5
mock_get_conn.return_value = AsyncMock(password="some_token")
mock_get_conn.return_value.send_message.side_effect = telegram_error_side_effect
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
with pytest.raises(tenacity.RetryError) as ctx:
hook.send_message({"text": "test telegram message"})
assert "state=finished raised TelegramError" in str(ctx.value)
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
assert excepted_retry_count == mock_get_conn.return_value.send_message.call_count
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id="-420913222")
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere_when_sending_file(
self, mock_get_conn
):
hook = TelegramHook(telegram_conn_id="telegram_default")
error_message = "'chat_id' must be provided for telegram document message"
with pytest.raises(airflow.exceptions.AirflowException, match=error_message):
hook.send_file({"file": "/file/to/path"})
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_file_path_is_not_provided_when_sending_file(self, mock_get_conn):
hook = TelegramHook(telegram_conn_id="telegram_default")
error_message = "'file' parameter must be provided for sending a Telegram document message"
with pytest.raises(airflow.exceptions.AirflowException, match=error_message):
hook.send_file({"chat_id": "-420913222"})
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_file_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default")
hook.send_file({"chat_id": "-420913222", "file": "/file/to/path"})
mock_get_conn.return_value.send_document.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_document.assert_called_once_with(
**{
"chat_id": "-420913222",
"document": "/file/to/path",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_file_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default", chat_id="-420913222")
hook.send_file({"file": "/file/to/path"})
mock_get_conn.return_value.send_document.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_document.assert_called_once_with(
**{
"chat_id": "-420913222",
"document": "/file/to/path",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_file_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
hook.send_file({"file": "/file/to/path"})
mock_get_conn.return_value.send_document.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_document.assert_called_once_with(
**{
"chat_id": "-420913222",
"document": "/file/to/path",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_retry_on_telegram_error_when_sending_file(self, mock_get_conn):
excepted_retry_count = 5
mock_get_conn.return_value = AsyncMock(password="some_token")
mock_get_conn.return_value.send_document.side_effect = telegram_error_side_effect
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
with pytest.raises(tenacity.RetryError) as ctx:
hook.send_file({"file": "/file/to/path"})
assert "state=finished raised TelegramError" in str(ctx.value)
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_document.assert_called_with(
**{
"chat_id": "-420913222",
"document": "/file/to/path",
}
)
assert excepted_retry_count == mock_get_conn.return_value.send_document.call_count
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_file_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = AsyncMock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id="-420913222")
hook.send_file({"file": "/file/to/path"})
mock_get_conn.return_value.send_document.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_document.assert_called_once_with(
**{
"chat_id": "-420913222",
"document": "/file/to/path",
}
)
|
TestTelegramHook
|
python
|
spyder-ide__spyder
|
external-deps/python-lsp-server/test/plugins/test_definitions.py
|
{
"start": 312,
"end": 4797
}
|
class ____(object):
def __init__(self):
self.members = dict()
def add_member(self, id, name):
self.members[id] = name
subscripted_before_reference = {}
subscripted_before_reference[0] = 0
subscripted_before_reference
def my_func():
print('called')
alias = my_func
my_list = [1, None, alias]
inception = my_list[2]
inception()
import numpy
numpy.ones
"""
def test_definitions(config, workspace) -> None:
# Over 'a' in print a
cursor_pos = {"line": 3, "character": 6}
# The definition of 'a'
def_range = {
"start": {"line": 0, "character": 4},
"end": {"line": 0, "character": 5},
}
doc = Document(DOC_URI, workspace, DOC)
assert [{"uri": DOC_URI, "range": def_range}] == pylsp_definitions(
config, doc, cursor_pos
)
def test_indirect_definitions(config, workspace) -> None:
# Over 'subscripted_before_reference'
cursor_pos = {"line": 16, "character": 0}
# The definition of 'subscripted_before_reference',
# skipping intermediate writes to the most recent definition
def_range = {
"start": {"line": 14, "character": 0},
"end": {"line": 14, "character": len("subscripted_before_reference")},
}
doc = Document(DOC_URI, workspace, DOC)
assert [{"uri": DOC_URI, "range": def_range}] == pylsp_definitions(
config, doc, cursor_pos
)
def test_definition_with_multihop_inference_goto(config, workspace) -> None:
# Over 'inception()'
cursor_pos = {"line": 26, "character": 0}
# The most recent definition of 'inception',
# ignoring alias hops
def_range = {
"start": {"line": 24, "character": 0},
"end": {"line": 24, "character": len("inception")},
}
doc = Document(DOC_URI, workspace, DOC)
assert [{"uri": DOC_URI, "range": def_range}] == pylsp_definitions(
config, doc, cursor_pos
)
def test_numpy_definition(config, workspace) -> None:
# Over numpy.ones
cursor_pos = {"line": 29, "character": 8}
doc = Document(DOC_URI, workspace, DOC)
defns = pylsp_definitions(config, doc, cursor_pos)
assert len(defns) > 0, defns
def test_builtin_definition(config, workspace) -> None:
# Over 'i' in dict
cursor_pos = {"line": 8, "character": 24}
doc = Document(DOC_URI, workspace, DOC)
orig_settings = config.settings()
# Check definition for `dict` goes to `builtins.pyi::dict`
follow_defns_setting = {"follow_builtin_definitions": True}
settings = {"plugins": {"jedi_definition": follow_defns_setting}}
config.update(settings)
defns = pylsp_definitions(config, doc, cursor_pos)
assert len(defns) == 1
assert defns[0]["uri"].endswith("builtins.pyi")
# Check no definitions for `dict`
follow_defns_setting["follow_builtin_definitions"] = False
config.update(settings)
defns = pylsp_definitions(config, doc, cursor_pos)
assert not defns
config.update(orig_settings)
def test_assignment(config, workspace) -> None:
# Over 's' in self.members[id]
cursor_pos = {"line": 11, "character": 19}
# The assignment of 'self.members'
def_range = {
"start": {"line": 8, "character": 13},
"end": {"line": 8, "character": 20},
}
doc = Document(DOC_URI, workspace, DOC)
assert [{"uri": DOC_URI, "range": def_range}] == pylsp_definitions(
config, doc, cursor_pos
)
def test_document_path_definitions(config, workspace_other_root_path, tmpdir) -> None:
# Create a dummy module out of the workspace's root_path and try to get
# a definition on it in another file placed next to it.
module_content = """
def foo():
pass
"""
p = tmpdir.join("mymodule.py")
p.write(module_content)
# Content of doc to test definition
doc_content = """from mymodule import foo"""
doc_path = str(tmpdir) + os.path.sep + "myfile.py"
doc_uri = uris.from_fs_path(doc_path)
doc = Document(doc_uri, workspace_other_root_path, doc_content)
# The range where is defined in mymodule.py
def_range = {
"start": {"line": 1, "character": 4},
"end": {"line": 1, "character": 7},
}
# The position where foo is called in myfile.py
cursor_pos = {"line": 0, "character": 24}
# The uri for mymodule.py
module_path = str(p)
module_uri = uris.from_fs_path(module_path)
assert [{"uri": module_uri, "range": def_range}] == pylsp_definitions(
config, doc, cursor_pos
)
|
Directory
|
python
|
google__flatbuffers
|
python/flatbuffers/reflection/BaseType.py
|
{
"start": 95,
"end": 414
}
|
class ____(object):
    None_ = 0
    UType = 1
    Bool = 2
    Byte = 3
    UByte = 4
    Short = 5
    UShort = 6
    Int = 7
    UInt = 8
    Long = 9
    ULong = 10
    Float = 11
    Double = 12
    String = 13
    Vector = 14
    Obj = 15
    Union = 16
    Array = 17
    Vector64 = 18
    MaxBaseType = 19
|
BaseType
|
python
|
celery__celery
|
t/unit/app/test_loaders.py
|
{
"start": 545,
"end": 3777
}
|
class ____:
message_options = {'subject': 'Subject',
'body': 'Body',
'sender': 'x@x.com',
'to': 'y@x.com'}
server_options = {'host': 'smtp.x.com',
'port': 1234,
'user': 'x',
'password': 'qwerty',
'timeout': 3}
def setup_method(self):
self.loader = DummyLoader(app=self.app)
def test_handlers_pass(self):
self.loader.on_task_init('foo.task', 'feedface-cafebabe')
self.loader.on_worker_init()
def test_now(self):
assert self.loader.now(utc=True)
assert self.loader.now(utc=False)
def test_read_configuration_no_env(self):
assert base.BaseLoader(app=self.app).read_configuration(
'FOO_X_S_WE_WQ_Q_WE') is None
def test_autodiscovery(self):
with patch('celery.loaders.base.autodiscover_tasks') as auto:
auto.return_value = [Mock()]
auto.return_value[0].__name__ = 'moo'
self.loader.autodiscover_tasks(['A', 'B'])
assert 'moo' in self.loader.task_modules
self.loader.task_modules.discard('moo')
def test_import_task_module(self):
assert sys == self.loader.import_task_module('sys')
def test_init_worker_process(self):
self.loader.on_worker_process_init()
m = self.loader.on_worker_process_init = Mock()
self.loader.init_worker_process()
m.assert_called_with()
def test_config_from_object_module(self):
self.loader.import_from_cwd = Mock(return_value={
"override_backends": {"db": "custom.backend.module"},
})
self.loader.config_from_object('module_name')
self.loader.import_from_cwd.assert_called_with('module_name')
assert self.loader.override_backends == {"db": "custom.backend.module"}
def test_conf_property(self):
assert self.loader.conf['foo'] == 'bar'
assert self.loader._conf['foo'] == 'bar'
assert self.loader.conf['foo'] == 'bar'
def test_import_default_modules(self):
def modnames(l):
return [m.__name__ for m in l]
self.app.conf.imports = ('os', 'sys')
assert (sorted(modnames(self.loader.import_default_modules())) ==
sorted(modnames([os, sys])))
def test_import_default_modules_with_exception(self):
""" Make sure exceptions are not silenced since this step is prior to
setup logging. """
def trigger_exception(**kwargs):
raise ImportError('Dummy ImportError')
from celery.signals import import_modules
x = import_modules.connect(trigger_exception)
self.app.conf.imports = ('os', 'sys')
with pytest.raises(ImportError):
self.loader.import_default_modules()
import_modules.disconnect(x)
def test_import_from_cwd_custom_imp(self):
imp = Mock(name='imp')
self.loader.import_from_cwd('foo', imp=imp)
imp.assert_called()
def test_cmdline_config_ValueError(self):
with pytest.raises(ValueError):
self.loader.cmdline_config_parser(['broker.port=foobar'])
|
test_LoaderBase
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/schedules/__init__.py
|
{
"start": 2844,
"end": 4426
}
|
class ____(graphene.Mutation):
"""Disable a schedule from launching runs for a job."""
Output = graphene.NonNull(GrapheneScheduleMutationResult)
class Arguments:
id = graphene.Argument(graphene.String) # Schedule / InstigationState id
schedule_origin_id = graphene.Argument(graphene.String)
schedule_selector_id = graphene.Argument(graphene.String)
class Meta:
name = "StopRunningScheduleMutation"
@capture_error
@require_permission_check(Permissions.STOP_RUNNING_SCHEDULE)
def mutate(
self,
graphene_info: ResolveInfo,
id: Optional[str] = None,
schedule_origin_id: Optional[str] = None,
schedule_selector_id: Optional[str] = None,
):
if id:
cid = CompoundID.from_string(id)
schedule_origin_id = cid.remote_origin_id
schedule_selector_id = cid.selector_id
elif schedule_origin_id and CompoundID.is_valid_string(schedule_origin_id):
# cross-push handle if InstigationState.id being passed through as origin id
cid = CompoundID.from_string(schedule_origin_id)
schedule_origin_id = cid.remote_origin_id
schedule_selector_id = cid.selector_id
elif schedule_origin_id is None or schedule_selector_id is None:
raise DagsterInvariantViolationError(
"Must specify id or scheduleOriginId and scheduleSelectorId"
)
return stop_schedule(graphene_info, schedule_origin_id, schedule_selector_id)
|
GrapheneStopRunningScheduleMutation
|
python
|
pytorch__pytorch
|
.ci/lumen_cli/cli/lib/core/vllm/vllm_build.py
|
{
"start": 970,
"end": 4595
}
|
class ____:
"""
Parameters defining the vllm external input configurations.
Combine with VllmDockerBuildArgs to define the vllm build environment
"""
# USE_TORCH_WHEEL: when true, use local Torch wheels; requires TORCH_WHEELS_PATH.
# Otherwise docker build pull torch nightly during build
# TORCH_WHEELS_PATH: directory containing local torch wheels when use_torch_whl is True
use_torch_whl: bool = env_bool_field("USE_TORCH_WHEEL", True)
torch_whls_path: Path = env_path_field("TORCH_WHEELS_PATH", "./dist")
# USE_LOCAL_BASE_IMAGE: when true, use an existing local Docker base image; requires BASE_IMAGE
# Otherwise, pull dockerfile's default image remotely
# BASE_IMAGE: name:tag (only needed when use_local_base_image is True)
use_local_base_image: bool = env_bool_field("USE_LOCAL_BASE_IMAGE", True)
base_image: str = env_str_field("BASE_IMAGE")
# USE_LOCAL_DOCKERFILE: when true("1"), use a local Dockerfile; requires DOCKERFILE_PATH.
# otherwise, use vllm's default dockerfile.torch_nightly for build
# DOCKERFILE_PATH: path to Dockerfile used when use_local_dockerfile is True"
use_local_dockerfile: bool = env_bool_field("USE_LOCAL_DOCKERFILE", True)
dockerfile_path: Path = env_path_field(
"DOCKERFILE_PATH", ".github/ci_configs/vllm/Dockerfile"
)
# the cleaning script to remove torch dependencies from pip
cleaning_script: Path = env_path_field(
"cleaning_script", ".github/ci_configs/vllm/use_existing_torch.py"
)
# OUTPUT_DIR: where docker buildx (local exporter) will write artifacts
output_dir: Path = env_path_field("OUTPUT_DIR", "external/vllm")
# --- Build args ----------------------------------------------------------
target_stage: str = env_str_field("TARGET_STAGE", "export-wheels")
tag_name: str = env_str_field("TAG", "vllm-wheels")
cuda_version: str = env_str_field("CUDA_VERSION", "12.8.1")
python_version: str = env_str_field("PYTHON_VERSION", "3.12")
max_jobs: str = env_str_field("MAX_JOBS", "64")
sccache_bucket: str = env_str_field("SCCACHE_BUCKET")
sccache_region: str = env_str_field("SCCACHE_REGION")
torch_cuda_arch_list: str = env_str_field("TORCH_CUDA_ARCH_LIST", "8.9")
def __post_init__(self):
checks = [
(
self.use_torch_whl, # flag
True, # trigger_value
"torch_whls_path", # resource
is_path_exist, # check_func
"TORCH_WHEELS_PATH is not provided, but USE_TORCH_WHEEL is set to 1",
),
(
self.use_local_base_image,
True,
"base_image",
local_image_exists,
f"BASE_IMAGE {self.base_image} does not found, but USE_LOCAL_BASE_IMAGE is set to 1",
),
(
self.use_local_dockerfile,
True,
"dockerfile_path",
is_path_exist,
" DOCKERFILE_PATH path does not found, but USE_LOCAL_DOCKERFILE is set to 1",
),
]
for flag, trigger_value, attr_name, check_func, error_msg in checks:
value = getattr(self, attr_name)
if flag == trigger_value:
if not value or not check_func(value):
raise ValueError(error_msg)
else:
logger.info("flag %s is not set", flag)
if not self.output_dir:
raise ValueError("missing required output_dir")
@with_params_help(VllmBuildParameters)
|
VllmBuildParameters
|
python
|
mlflow__mlflow
|
mlflow/webhooks/types.py
|
{
"start": 3044,
"end": 3694
}
|
class ____(TypedDict):
    """Payload sent when a tag is deleted from a model version.

    Example payload:

    .. code-block:: python

        {
            "name": "example_model",
            "version": "1",
            "key": "example_key",
        }
    """

    name: str
    """The name of the registered model."""

    version: str
    """The version of the model."""

    key: str
    """The tag key being deleted."""

    @classmethod
    def example(cls) -> "ModelVersionTagDeletedPayload":
        return cls(
            name="example_model",
            version="1",
            key="example_key",
        )
|
ModelVersionTagDeletedPayload
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 26023,
"end": 27835
}
|
class ____(OfflineTestCaseMixin, TestCase):
template_names = ["base.html", "base2.html", "test_compressor_offline.html"]
templates_dir = "test_block_super_base_compressed"
expected_hash_offline = ["e4e9263fa4c0", "9cecd41a505f", "d3f749e83c81"]
expected_hash = ["028c3fc42232", "2e9d3f5545a6", "d3f749e83c81"]
# Block.super not supported for Jinja2 yet.
engines = ("django",)
def setUp(self):
super().setUp()
self.template_paths = []
self.templates = []
for template_name in self.template_names:
template_path = os.path.join(
settings.TEMPLATES[0]["DIRS"][0], template_name
)
self.template_paths.append(template_path)
with io.open(template_path, encoding=self.CHARSET) as file_:
template = Template(file_.read())
self.templates.append(template)
def _render_template(self, template, engine):
if engine == "django":
return template.render(Context(settings.COMPRESS_OFFLINE_CONTEXT))
elif engine == "jinja2":
return template.render(settings.COMPRESS_OFFLINE_CONTEXT) + "\n"
else:
return None
def _test_offline(self, engine, verbosity=0):
count, result = CompressCommand().handle_inner(
engines=[engine], verbosity=verbosity
)
self.assertEqual(len(self.expected_hash), count)
for expected_hash, template in zip(self.expected_hash_offline, self.templates):
expected = self._render_script(expected_hash)
self.assertIn(expected, result)
rendered_template = self._render_template(template, engine)
self.assertEqual(rendered_template, self._render_result([expected]))
|
OfflineCompressBlockSuperBaseCompressed
|
python
|
django__django
|
tests/template_tests/filter_tests/test_join.py
|
{
"start": 162,
"end": 2884
}
|
class ____(SimpleTestCase):
@setup({"join01": '{{ a|join:", " }}'})
def test_join01(self):
output = self.engine.render_to_string("join01", {"a": ["alpha", "beta & me"]})
self.assertEqual(output, "alpha, beta & me")
@setup({"join02": '{% autoescape off %}{{ a|join:", " }}{% endautoescape %}'})
def test_join02(self):
output = self.engine.render_to_string("join02", {"a": ["alpha", "beta & me"]})
self.assertEqual(output, "alpha, beta & me")
@setup({"join03": '{{ a|join:" & " }}'})
def test_join03(self):
output = self.engine.render_to_string("join03", {"a": ["alpha", "beta & me"]})
self.assertEqual(output, "alpha & beta & me")
@setup({"join04": '{% autoescape off %}{{ a|join:" & " }}{% endautoescape %}'})
def test_join04(self):
output = self.engine.render_to_string("join04", {"a": ["alpha", "beta & me"]})
self.assertEqual(output, "alpha & beta & me")
# Joining with unsafe joiners doesn't result in unsafe strings.
@setup({"join05": "{{ a|join:var }}"})
def test_join05(self):
output = self.engine.render_to_string(
"join05", {"a": ["alpha", "beta & me"], "var": " & "}
)
self.assertEqual(output, "alpha & beta & me")
@setup({"join06": "{{ a|join:var }}"})
def test_join06(self):
output = self.engine.render_to_string(
"join06", {"a": ["alpha", "beta & me"], "var": mark_safe(" & ")}
)
self.assertEqual(output, "alpha & beta & me")
@setup({"join07": "{{ a|join:var|lower }}"})
def test_join07(self):
output = self.engine.render_to_string(
"join07", {"a": ["Alpha", "Beta & me"], "var": " & "}
)
self.assertEqual(output, "alpha & beta & me")
@setup({"join08": "{{ a|join:var|lower }}"})
def test_join08(self):
output = self.engine.render_to_string(
"join08", {"a": ["Alpha", "Beta & me"], "var": mark_safe(" & ")}
)
self.assertEqual(output, "alpha & beta & me")
@setup(
{
"join_autoescape_off": (
"{% autoescape off %}"
"{{ var_list|join:var_joiner }}"
"{% endautoescape %}"
),
}
)
def test_join_autoescape_off(self):
var_list = ["<p>Hello World!</p>", "beta & me", "<script>Hi!</script>"]
context = {"var_list": var_list, "var_joiner": "<br/>"}
output = self.engine.render_to_string("join_autoescape_off", context)
expected_result = "<p>Hello World!</p><br/>beta & me<br/><script>Hi!</script>"
self.assertEqual(output, expected_result)
|
JoinTests
|
python
|
sympy__sympy
|
sympy/physics/quantum/state.py
|
{
"start": 12564,
"end": 14222
}
|
class ____(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
Bra's know about their dual Ket's::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] https://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
|
Bra
|
python
|
Pylons__pyramid
|
src/pyramid/interfaces.py
|
{
"start": 23773,
"end": 24093
}
|
class ____(Interface):
    """ """

    def __call__(self, request):
        """Must return a tuple of IRequest, IResponse or raise an exception.
        The ``request`` argument will be an instance of an object that
        provides IRequest."""


IRequest.combined = IRequest  # for exception view lookups
|
IRequestHandler
|
python
|
celery__celery
|
t/smoke/tests/test_consumer.py
|
{
"start": 4532,
"end": 5703
}
|
class ____:
    @pytest.fixture
    def default_worker_app(self, default_worker_app: Celery) -> Celery:
        app = default_worker_app
        app.conf.worker_prefetch_multiplier = 1
        app.conf.worker_enable_prefetch_count_reduction = False
        app.conf.worker_cancel_long_running_tasks_on_connection_loss = True
        app.conf.task_acks_late = True
        return app

    def test_max_prefetch_not_passed_on_broker_restart(self, celery_setup: CeleryTestSetup):
        if isinstance(celery_setup.broker, RedisTestBroker):
            # When running in debug it works, when running from CLI it sometimes works
            pytest.xfail("Test is flaky with Redis broker")
        sig = group(long_running_task.s(10) for _ in range(WORKER_CONCURRENCY))
        r = sig.apply_async(queue=celery_setup.worker.worker_queue)
        celery_setup.broker.restart()
        noop.s().apply_async(queue=celery_setup.worker.worker_queue)
        assert "Task t.smoke.tasks.noop" not in celery_setup.worker.logs()
        r.get(timeout=RESULT_TIMEOUT)
        assert "Task t.smoke.tasks.noop" in celery_setup.worker.logs()
|
test_worker_enable_prefetch_count_reduction_false
|
python
|
huggingface__transformers
|
src/transformers/quantizers/quantizer_bitnet.py
|
{
"start": 904,
"end": 4254
}
|
class ____(HfQuantizer):
"""
1.58-bit quantization from BitNet quantization method:
Before loading: it converts the linear layers into BitLinear layers during loading.
Check out the paper introducing this method: https://huggingface.co/papers/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map")
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
elif device_map is not None:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: list[str] | None = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: dict[str, int | str]) -> dict[str, int | str]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
BitNetHfQuantizer
|
python
|
PyCQA__pylint
|
pylint/pyreverse/diadefslib.py
|
{
"start": 10295,
"end": 11456
}
|
class ____:
    """Get diagram definitions from user (i.e. xml files) or generate them."""

    def __init__(self, config: argparse.Namespace, args: Sequence[str]) -> None:
        self.config = config
        self.args = args

    def get_diadefs(self, project: Project, linker: Linker) -> list[ClassDiagram]:
        """Get the diagram's configuration data.

        :param project: The pyreverse project
        :type project: pyreverse.utils.Project
        :param linker: The linker
        :type linker: pyreverse.inspector.Linker(IdGeneratorMixIn, LocalsVisitor)

        :returns: The list of diagram definitions
        :rtype: list(:class:`pylint.pyreverse.diagrams.ClassDiagram`)
        """
        # read and interpret diagram definitions (Diadefs)
        diagrams = []
        generator = ClassDiadefGenerator(linker, self)
        for klass in self.config.classes:
            diagrams.append(generator.class_diagram(project, klass))
        if not diagrams:
            diagrams = DefaultDiadefGenerator(linker, self).visit(project)
        for diagram in diagrams:
            diagram.extract_relationships()
        return diagrams
|
DiadefsHandler
|
python
|
pypa__setuptools
|
setuptools/tests/test_build_py.py
|
{
"start": 9865,
"end": 14201
}
|
class ____:
PYPROJECTS = {
"default_pyproject": DALS(
"""
[project]
name = "foo"
version = "1"
"""
),
"dont_include_package_data": DALS(
"""
[project]
name = "foo"
version = "1"
[tool.setuptools]
include-package-data = false
"""
),
"exclude_type_info": DALS(
"""
[project]
name = "foo"
version = "1"
[tool.setuptools]
include-package-data = false
[tool.setuptools.exclude-package-data]
"*" = ["py.typed", "*.pyi"]
"""
),
}
EXAMPLES = {
"simple_namespace": {
"directory_structure": {
"foo": {
"bar.pyi": "",
"py.typed": "",
"__init__.py": "",
}
},
"expected_type_files": {"foo/bar.pyi", "foo/py.typed"},
},
"nested_inside_namespace": {
"directory_structure": {
"foo": {
"bar": {
"py.typed": "",
"mod.pyi": "",
}
}
},
"expected_type_files": {"foo/bar/mod.pyi", "foo/bar/py.typed"},
},
"namespace_nested_inside_regular": {
"directory_structure": {
"foo": {
"namespace": {
"foo.pyi": "",
},
"__init__.pyi": "",
"py.typed": "",
}
},
"expected_type_files": {
"foo/namespace/foo.pyi",
"foo/__init__.pyi",
"foo/py.typed",
},
},
}
@pytest.mark.parametrize(
"pyproject",
[
"default_pyproject",
pytest.param(
"dont_include_package_data",
marks=pytest.mark.xfail(reason="pypa/setuptools#4350"),
),
],
)
@pytest.mark.parametrize("example", EXAMPLES.keys())
def test_type_files_included_by_default(self, tmpdir_cwd, pyproject, example):
structure = {
**self.EXAMPLES[example]["directory_structure"],
"pyproject.toml": self.PYPROJECTS[pyproject],
}
expected_type_files = self.EXAMPLES[example]["expected_type_files"]
jaraco.path.build(structure)
build_py = get_finalized_build_py()
outputs = get_outputs(build_py)
assert expected_type_files <= outputs
@pytest.mark.parametrize("pyproject", ["exclude_type_info"])
@pytest.mark.parametrize("example", EXAMPLES.keys())
def test_type_files_can_be_excluded(self, tmpdir_cwd, pyproject, example):
structure = {
**self.EXAMPLES[example]["directory_structure"],
"pyproject.toml": self.PYPROJECTS[pyproject],
}
expected_type_files = self.EXAMPLES[example]["expected_type_files"]
jaraco.path.build(structure)
build_py = get_finalized_build_py()
outputs = get_outputs(build_py)
assert expected_type_files.isdisjoint(outputs)
def test_stub_only_package(self, tmpdir_cwd):
structure = {
"pyproject.toml": DALS(
"""
[project]
name = "foo-stubs"
version = "1"
"""
),
"foo-stubs": {"__init__.pyi": "", "bar.pyi": ""},
}
expected_type_files = {"foo-stubs/__init__.pyi", "foo-stubs/bar.pyi"}
jaraco.path.build(structure)
build_py = get_finalized_build_py()
outputs = get_outputs(build_py)
assert expected_type_files <= outputs
def get_finalized_build_py(script_name="%build_py-test%"):
dist = Distribution({"script_name": script_name})
dist.parse_config_files()
build_py = dist.get_command_obj("build_py")
build_py.finalize_options()
return build_py
def get_outputs(build_py):
build_dir = Path(build_py.build_lib)
return {
os.path.relpath(x, build_dir).replace(os.sep, "/")
for x in build_py.get_outputs()
}
|
TestTypeInfoFiles
|
python
|
ApeWorX__ape
|
src/ape/contracts/base.py
|
{
"start": 12904,
"end": 14405
}
|
class ____(ManagerAccessMixin):
def __init__(self, abi: "MethodABI", address: "AddressType") -> None:
super().__init__()
self.abi: MethodABI = abi
self.address: AddressType = address
@log_instead_of_fail(default="<ContractTransaction>")
def __repr__(self) -> str:
return self.abi.signature
def serialize_transaction(self, *args, **kwargs) -> "TransactionAPI":
if "sender" in kwargs and isinstance(kwargs["sender"], (ContractInstance, Address)):
# Automatically impersonate contracts (if API available) when sender
kwargs["sender"] = self.account_manager.test_accounts[kwargs["sender"].address]
arguments = self.conversion_manager.convert_method_args(self.abi, args)
converted_kwargs = self.conversion_manager.convert_method_kwargs(kwargs)
return self.provider.network.ecosystem.encode_transaction(
self.address, self.abi, *arguments, **converted_kwargs
)
def __call__(self, *args, **kwargs) -> "ReceiptAPI":
txn = self.serialize_transaction(*args, **kwargs)
private = kwargs.get("private", False)
if "sender" in kwargs and hasattr(kwargs["sender"], "call"):
return kwargs["sender"].call(txn, **kwargs)
txn = self.provider.prepare_transaction(txn)
return (
self.provider.send_private_transaction(txn)
if private
else self.provider.send_transaction(txn)
)
|
ContractTransaction
|
python
|
jazzband__django-simple-history
|
simple_history/models.py
|
{
"start": 36955,
"end": 37365
}
|
class ____(
    HistoricDescriptorMixin, ReverseOneToOneDescriptor
):
    """
    Overrides get_queryset to provide historic query support, should the
    instance be historic (and therefore was generated by a timepoint query)
    and the other side of the relation also uses a history manager.
    """

    def get_related_model(self):
        return self.related.related_model
|
HistoricReverseOneToOneDescriptor
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_textbox36.py
|
{
"start": 346,
"end": 1337
}
|
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("textbox36.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        worksheet.insert_textbox(
            "E9", "This is some text", {"url": "https://github.com/jmcnamara"}
        )
        workbook.close()
        self.assertExcelEqual()

    def test_create_file_with_url_object(self):
        """Test the creation of a simple XlsxWriter file with textbox(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        url = Url("https://github.com/jmcnamara")
        worksheet.insert_textbox("E9", "This is some text", {"url": url})
        workbook.close()
        self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/nn_ops/conv_ops_test.py
|
{
"start": 9859,
"end": 112791
}
|
class ____(parameterized.TestCase, test.TestCase):
def _DtypesToTest(self, use_gpu):
if test_util.IsMklEnabled():
return [dtypes.float32]
if use_gpu:
# It is important that float32 comes first, since we are using its
# gradients as a reference for fp16 gradients.
out = [dtypes.float32, dtypes.bfloat16]
if test_util.GpuSupportsHalfMatMulAndConv():
out.append(dtypes.float16)
if not test.is_built_with_rocm():
out.extend([dtypes.float64])
return out
return [dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16]
def _CreateNumpyTensor(self, shape):
total_size = 1
for s in shape:
total_size *= s
return np.arange(1, total_size + 1, dtype=np.float32).reshape(shape)
def _SetupValuesForDevice(
self,
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu,
op_name,
):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
dilations: Dilated rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
use_gpu: True if the operations should be run on GPU
op_name: Name of the op to be tested
Returns:
Symbolic tensor value that can be used to execute the computation
"""
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
padding = test_util.NHWCToNCHW(padding)
if op_name == "Conv2D":
conv = nn_ops.conv2d(
t1,
t2,
dilations=dilations,
strides=strides,
padding=padding,
data_format=data_format,
)
elif op_name == "Conv":
conv_format = (
"CHANNELS_LAST" if data_format == "NHWC" else "CHANNELS_FIRST"
)
conv_padding, explicit_paddings = nn_ops.convert_padding(padding)
conv = gen_nn_ops.conv(
t1,
t2,
strides=strides,
padding=conv_padding,
explicit_paddings=explicit_paddings,
data_format=conv_format,
dilations=dilations,
)
else:
raise ValueError("Invalid op name: %s" % op_name)
self.assertEqual(conv.dtype, dtype)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
def _SetupVal(data_format, use_gpu):
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
return conv
tensors = []
for (data_format, use_gpu) in GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = self.evaluate(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
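  # Computes a reference result with nn_ops.convolution and the value under
  # test with nn_ops.conv2d using explicit dilations, so callers can compare
  # the two dilated-convolution paths.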
def _ComputeReferenceDilatedConv(
self,
tensor_in_sizes,
filter_in_sizes,
stride,
dilation,
padding,
data_format,
use_gpu,
):
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections_abc.Iterable):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format,
)
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format,
)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _ComputeReferenceDilatedConvParameters(
self,
tensor_in_sizes,
filter_in_sizes,
stride,
dilation,
padding,
data_format,
use_gpu,
op_name,
):
x1 = self._CreateNumpyTensor(tensor_in_sizes)
x2 = self._CreateNumpyTensor(filter_in_sizes)
with test_util.device(use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
if isinstance(stride, collections_abc.Iterable):
strides = list(stride)
else:
strides = [stride, stride]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
full_strides = [1, 1] + strides
full_dilation = [1, 1] + dilation
else:
full_strides = [1] + strides + [1]
full_dilation = [1] + dilation + [1]
expected = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilation,
data_format=data_format)
if op_name == "Conv2D":
computed = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=padding,
data_format=data_format,
)
elif op_name == "Conv":
conv_format = (
"CHANNELS_LAST" if data_format == "NHWC" else "CHANNELS_FIRST"
)
conv_padding, explicit_paddings = nn_ops.convert_padding(padding)
computed = gen_nn_ops.conv(
t1,
t2,
strides=full_strides,
dilations=full_dilation,
padding=conv_padding,
explicit_paddings=explicit_paddings,
data_format=conv_format,
)
else:
raise ValueError("Invalid op name: %s" % op_name)
if data_format == "NCHW":
expected = test_util.NCHWToNHWC(expected)
computed = test_util.NCHWToNHWC(computed)
return expected, computed
def _VerifyDilatedConvValuesParameters(
self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
dilations,
data_format,
use_gpu,
op_name,
rtol=1e-4,
):
if use_gpu and not test.is_gpu_available():
self.skipTest("GPU not available")
expected_results = []
computed_results = []
expected, computed = self._ComputeReferenceDilatedConvParameters(
tensor_in_sizes,
filter_in_sizes,
strides,
dilations,
padding,
data_format,
use_gpu,
op_name,
)
expected_results.append(expected)
computed_results.append(computed)
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.debug("expected = %s", e_value)
tf_logging.debug("actual = %s", c_value)
self.assertAllCloseAccordingToType(
e_value.flatten(), c_value.flatten(), atol=1e-5, rtol=rtol
)
def _VerifyDilatedConvValues(self, tensor_in_sizes, filter_in_sizes, strides,
padding, dilations, rtol=1e-4):
expected_results = []
computed_results = []
for data_format, use_gpu in GetTestConfigs():
expected, computed = self._ComputeReferenceDilatedConv(
tensor_in_sizes,
filter_in_sizes,
strides,
dilations,
padding,
data_format,
use_gpu,
)
expected_results.append(expected)
computed_results.append(computed)
tolerance = 1e-2 if use_gpu else 1e-5
expected_values = self.evaluate(expected_results)
computed_values = self.evaluate(computed_results)
for e_value, c_value in zip(expected_values, computed_values):
tf_logging.debug("expected = %s", e_value)
tf_logging.debug("actual = %s", c_value)
self.assertAllClose(
e_value.flatten(), c_value.flatten(), atol=tolerance, rtol=rtol)
def _VerifyValues(self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
dilations=(1, 1),
gpu_only=False,
test_grappler_layout_optimizer=False,
tol=1e-5):
if gpu_only and not test.is_gpu_available():
return
tensors = []
dilations = list(dilations)
for data_format, use_gpu, op_name in GetTestConfigs():
if gpu_only and not use_gpu:
continue
dtypes_to_test = self._DtypesToTest(use_gpu)
if not test_grappler_layout_optimizer and data_format == "NHWC":
dtypes_to_test.append(dtypes.int32)
for dtype in dtypes_to_test:
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu,
op_name=op_name,
)
if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
# Grappler's layout optimizer will not optimize a fetch node, so
# this identity allows Grappler to optimize the Conv2D node.
result = array_ops.identity(result)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
if np.issubdtype(value.dtype, np.integer):
self.assertAllEqual(np.rint(expected), np.ravel(value))
else:
self.assertAllCloseAccordingToType(
expected, np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)
def _VerifyValuesParameters(
self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
data_format,
dtype,
use_gpu,
op_name,
dilations=(1, 1),
gpu_only=False,
test_grappler_layout_optimizer=False,
tol=1e-5,
):
if (gpu_only and not use_gpu) or not test.is_gpu_available():
self.skipTest("GPU not available")
if (
test_grappler_layout_optimizer or data_format != "NHWC"
) and dtype == dtypes.int32:
self.skipTest("int32 not supported")
tensors = []
dilations = list(dilations)
result = self._SetupValuesForDevice(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
dtype,
use_gpu=use_gpu,
op_name=op_name,
)
if test_grappler_layout_optimizer and data_format == "NHWC" and use_gpu:
# Grappler's layout optimizer will not optimize a fetch node, so
# this identity allows Grappler to optimize the Conv2D node.
result = array_ops.identity(result)
tensors.append(result)
values = self.evaluate(tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
if np.issubdtype(value.dtype, np.integer):
self.assertAllEqual(np.rint(expected), np.ravel(value))
else:
self.assertAllCloseAccordingToType(
expected, np.ravel(value), atol=tol, rtol=tol
)
self.assertShapeEqual(value, conv)
self.assertEqual(value.dtype, conv.dtype.as_numpy_dtype)
def _VerifyExplicitPaddings(
self,
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
data_format,
dtype,
use_gpu,
op_name,
dilations=(1, 1),
test_grappler_layout_optimizer=False,
tol=1e-5,
):
"""Verifies Conv2D with explicit padding generates correct values.
It does this by comparing with Conv2D without explicit padding. This
function assumes Conv2D without explicit padding works correctly.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
strides: [row_stride, col_stride] for the convolution;
padding: Explicit padding amounts.
data_format: "NCHW" or "NHWC"
dtype: data type to perform test
use_gpu: True if testing on the GPU
op_name: "Conv" or "Conv2D"
dilations: Dilation values
test_grappler_layout_optimizer: If True, allow the Grappler layout
optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.
tol: The absolute and relative tolerance.
"""
input_tensor = self._CreateNumpyTensor(tensor_in_sizes)
filter_tensor = self._CreateNumpyTensor(filter_in_sizes)
input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])
dilations = list(dilations)
conv2d_result = nn_ops.conv2d(
input_tensor,
filter_tensor, [1] + list(strides) + [1],
"VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))
self._VerifyValuesParameters(
tensor_in_sizes,
filter_in_sizes,
strides,
padding,
expected,
data_format,
dtype,
use_gpu,
op_name,
dilations,
test_grappler_layout_optimizer=test_grappler_layout_optimizer,
tol=tol,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x1Filter(self, data_format, dtype, use_gpu, op_name):
expected_output = [
30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@test_util.run_in_graph_and_eager_modes
def testConv2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.conv2d(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.conv2d(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
batch_dims = 2
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = gen_nn_ops.conv(
x1, filter_in, strides=[1, 1, 1, 1], padding="VALID"
)
conv2 = gen_nn_ops.conv(
x2,
filter_in,
strides=[1, 1, 1, 1],
padding="VALID",
batch_dims=batch_dims,
)
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(conv1, self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionClass2DExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
convolver1 = nn_ops.Convolution(
input_shape=x1.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver1.num_batch_dims, 1)
convolver2 = nn_ops.Convolution(
input_shape=x2.shape,
filter_shape=filter_in.shape,
strides=[1, 1],
padding="VALID")
self.assertEqual(convolver2.num_batch_dims, 2)
conv1 = convolver1(x1, filter_in)
conv2 = convolver2(x2, filter_in)
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@test_util.run_in_graph_and_eager_modes
def testConvolutionWith2SpatialDimensionsAndExpandedBatch(self):
tensor_in_sizes_batch = [10, 2, 3, 3]
tensor_in_sizes_expanded_batch = [2, 5, 2, 3, 3]
filter_in_sizes = [1, 1, 3, 3]
filter_in = self._CreateNumpyTensor(filter_in_sizes)
x1 = self._CreateNumpyTensor(tensor_in_sizes_batch)
x2 = x1.reshape(tensor_in_sizes_expanded_batch)
conv1 = nn_ops.convolution(
x1,
filter_in,
strides=[1, 1],
padding="VALID")
conv2 = nn_ops.convolution(
x2,
filter_in,
strides=[1, 1],
padding="VALID")
self.assertEqual(conv1.shape, tensor_in_sizes_batch)
self.assertEqual(conv2.shape, tensor_in_sizes_expanded_batch)
self.assertAllEqual(
conv1,
self.evaluate(conv2).reshape(conv1.shape))
@parameterized.named_parameters(*DILATED_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter2x1Dilation(self, data_format, use_gpu, op_name):
self._VerifyDilatedConvValuesParameters(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmpty(self, data_format, dtype, use_gpu, op_name):
expected_output = []
self._VerifyValuesParameters(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*DILATED_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyDilation(self, data_format, use_gpu, op_name):
self._VerifyDilatedConvValuesParameters(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Filter(self, data_format, dtype, use_gpu, op_name):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*DILATED_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterDilation(self, data_format, use_gpu, op_name):
self._VerifyDilatedConvValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2Filter(self, data_format, dtype, use_gpu, op_name):
# The outputs are computed using third_party/py/IPython/notebook.
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*DILATED_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D1x2FilterDilation(self, data_format, use_gpu, op_name):
self._VerifyDilatedConvValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2(self, data_format, dtype, use_gpu, op_name):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride2Same(
self, data_format, dtype, use_gpu, op_name
):
expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2FilterStride1x2(self, data_format, dtype, use_gpu, op_name):
expected_output = [58.0, 78.0, 98.0, 118.0, 138.0, 158.0]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideValid(
self, data_format, dtype, use_gpu, op_name
):
expected_output = [65, 95, 275, 305]
self._VerifyValuesParameters(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="VALID",
expected=expected_output,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSmallerThanStrideSame(
self, data_format, dtype, use_gpu, op_name
):
self._VerifyValuesParameters(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 7, 9],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyValuesParameters(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[2, 2],
padding="SAME",
expected=[1, 3, 9, 11],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyValuesParameters(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
strides=[3, 3],
padding="SAME",
expected=[44, 28, 41, 16],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSize(
self, data_format, dtype, use_gpu, op_name
):
self._VerifyValuesParameters(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
padding="VALID",
expected=[50, 60],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*DILATED_PARAMS)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeDilation(
self, data_format, use_gpu, op_name
):
self._VerifyDilatedConvValuesParameters(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[2, 2, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2D0x0Padding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[3, 4, 3, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2D1x1Padding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Padding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[2, 1, 2, 1],
strides=[1, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 2],
filter_in_sizes=[1, 1, 2, 1],
strides=[2, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyBottomPadding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[0, 3], [0, 0]],
tol=2e-5,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[2, 2, 4, 3],
filter_in_sizes=[1, 2, 3, 2],
strides=[2, 2],
padding=[[0, 3], [0, 0]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2DOnlyTopRightPadding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 2],
strides=[1, 1],
padding=[[1, 0], [0, 2]],
tol=5e-5,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 4, 2],
filter_in_sizes=[2, 2, 2, 2],
strides=[1, 3],
padding=[[1, 0], [0, 2]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2DLotsPadding(self, data_format, dtype, use_gpu, op_name):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 1, 1, 3],
filter_in_sizes=[2, 2, 3, 3],
strides=[1, 1],
padding=[[3, 4], [4, 2]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 1, 1],
filter_in_sizes=[2, 2, 1, 3],
strides=[2, 1],
padding=[[3, 4], [4, 2]],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2DExplicitPaddingWithDilations(
self, data_format, dtype, use_gpu, op_name
):
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
@test_util.run_in_graph_and_eager_modes()
def testConv2dOnlyPaddingReturnsZeros(
self, data_format, dtype, use_gpu, op_name
):
self._VerifyValuesParameters(
tensor_in_sizes=[1, 0, 2, 1],
filter_in_sizes=[1, 1, 1, 1],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
expected=[0, 0, 0, 0, 0, 0, 0, 0],
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
@parameterized.named_parameters(*TEST_PARAMS)
def testConv2DExplicitPaddingWithLayoutOptimizer(
self, data_format, dtype, use_gpu, op_name
):
# Test with Grappler's layout optimizer, to ensure the layout optimizer
# handles explicit padding correctly.
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 3, 2, 1],
filter_in_sizes=[1, 2, 1, 2],
strides=[1, 1],
padding=[[1, 0], [0, 1]],
dilations=[2, 1],
test_grappler_layout_optimizer=True,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
self._VerifyExplicitPaddings(
tensor_in_sizes=[1, 2, 3, 2],
filter_in_sizes=[3, 2, 2, 1],
strides=[1, 1],
padding=[[2, 1], [1, 2]],
dilations=[2, 3],
test_grappler_layout_optimizer=True,
data_format=data_format,
dtype=dtype,
use_gpu=use_gpu,
op_name=op_name,
)
def _VerifyGroupConvFwd(self, tensor_in_sizes, filter_in_sizes, dilations,
strides, padding, data_format, dtype):
"""Verify the output of group convolution is equal to a for-loop implementation.
Args:
tensor_in_sizes: Input tensor dimensions in [batch, input_rows,
input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,
input_depth, output_depth].
      dilations: Dilation rate: [col_dilation, row_dilation]
strides: Stride: [col_stride, row_stride]
padding: Padding type.
data_format: Format of the data tensors.
dtype: Data type for inputs and outputs.
"""
tensor_in = self._CreateNumpyTensor(tensor_in_sizes)
filter_in = self._CreateNumpyTensor(filter_in_sizes)
num_groups = tensor_in_sizes[3] // filter_in_sizes[2]
assert num_groups > 1 and \
filter_in_sizes[2] * num_groups == tensor_in_sizes[3]
with test_util.device(True):
t1 = constant_op.constant(tensor_in, dtype=dtype)
t2 = constant_op.constant(filter_in, dtype=dtype)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
t1_splits = array_ops.split(t1, num_groups, axis=1)
else:
t1_splits = array_ops.split(t1, num_groups, axis=3)
t2_splits = array_ops.split(t2, num_groups, axis=3)
def MakeConv2d(inputs, filters):
return nn_ops.conv2d(
inputs,
filters,
strides,
padding,
dilations=dilations,
data_format=data_format)
group_conv = MakeConv2d(t1, t2)
group_conv_loop = array_ops.concat(
[MakeConv2d(t1s, t2s) for t1s, t2s in zip(t1_splits, t2_splits)],
axis=1 if data_format == "NCHW" else 3)
results = self.evaluate([group_conv, group_conv_loop])
tol_to_use = 1e-5
self.assertAllClose(
results[0], results[1], atol=tol_to_use, rtol=tol_to_use)
@test_util.run_in_graph_and_eager_modes
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="MIOpen does not support group conv yet!",
)
def testConv2DGroupConvFwd(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
data_formats = ["NHWC", "NCHW"]
else:
data_formats = ["NHWC"]
for data_format in data_formats:
for dilation in [1, 2]:
for stride in [1, 2]:
for filter_dims in [[3, 3, 4, 8], [1, 1, 2, 16]]:
self._VerifyGroupConvFwd([10, 32, 32, 16], filter_dims,
dilations=[dilation, dilation],
strides=[stride, stride],
padding="SAME",
data_format=data_format,
dtype=dtypes.float32)
@test_util.deprecated_graph_mode_only
@test_util.run_gpu_only
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="MIOpen does not support group conv yet!",
)
def testInputGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
@test_util.deprecated_graph_mode_only
@test_util.run_gpu_only
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="MIOpen does not support group conv yet!",
)
def testFilterGradientGroupConv(self):
for data_format in ["NCHW", "NHWC"]:
for test_input in [True, False]:
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
num_groups=2,
padding="VALID",
in_depth=4,
out_depth=6,
stride_rows=1,
stride_cols=1,
test_input=test_input,
data_format=data_format,
use_gpu=True,
max_err=0.005)
# TODO(yzhwang): this currently fails.
# self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
# filter_in_sizes=[2, 2, 1, 1],
# strides=[4, 4], padding="SAME",
# expected=[72, 112, 392, 432])
# Testing for backprops
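  # Runs conv2d_backprop_input for the given shapes, strides and padding and
  # checks the result against the precomputed expected values.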
def _RunAndVerifyBackpropInput(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
err,
dilations=(1, 1)):
if use_gpu and not test.is_gpu_available():
return
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
with test_util.device(use_gpu):
if len(input_sizes) == 4:
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if isinstance(padding, (list, tuple)):
padding = [(0, 0)] + padding + [(0, 0)]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
dilations = test_util.NHWCToNCHW(dilations)
if isinstance(padding, (list, tuple)):
          padding = test_util.NHWCToNCHW(padding)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format,
dilations=dilations)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertAllCloseAccordingToType(expected, value.flatten(), atol=1e-5)
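  # Checks that conv2d_backprop_input produces the same values for every
  # (data_format, use_gpu) configuration returned by GetTestConfigs().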
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x1 = np.random.rand(*filter_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
if data_format == "NCHW":
new_input_sizes = test_util.NHWCToNCHW(input_sizes)
else:
new_input_sizes = input_sizes
t0 = constant_op.constant(new_input_sizes, shape=[len(new_input_sizes)])
t1 = constant_op.constant(x1, shape=filter_sizes)
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_input(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropInput(self):
expected_output = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropInput(self):
expected_output = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInput(self):
expected_output = [
14.0, 32.0, 50.0, 100.0, 163.0, 226.0, 167.0, 212.0, 257.0, 122.0,
140.0, 158.0, 478.0, 541.0, 604.0, 437.0, 482.0, 527.0
]
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
expected_output = [
1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 7.0, 12.0, 11.0, 18.0, 15.0, 24.0, 12.0,
16.0, 15.0, 20.0, 18.0, 24.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
expected_output = [
1.0, 0.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0, 0.0,
0.0, 0.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla("XLA requires input_sizes to be a 4D shape.")
def testConv2DInputSizesContainsOnlySpatialDimensionsBackpropInput(self):
expected_output = [5.0, 11.0, 17.0, 23.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=[2, 2],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla("b/239598470")
def testConv2DBackpropInputDegenerateBackpropInput(self):
input_sizes = [3, 1, 1, 2]
expected_output = np.zeros(input_sizes).flatten()
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInput(
input_sizes=input_sizes,
filter_sizes=[1, 3, 2, 3],
output_sizes=[3, 1, 0, 3],
strides=[1, 2],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
# Testing for backprops
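  # Runs conv2d_backprop_filter for every dtype from _DtypesToTest and checks
  # the result against the precomputed expected values.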
def _RunAndVerifyBackpropFilter(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
explicit_strides = [1] + strides + [1]
new_padding = padding
new_dilations = [1] + dilations + [1]
if isinstance(new_padding, (list, tuple)):
new_padding = [(0, 0)] + new_padding + [(0, 0)]
if data_format == "NCHW":
explicit_strides = test_util.NHWCToNCHW(explicit_strides)
new_dilations = test_util.NHWCToNCHW(new_dilations)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(new_padding)
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes, dtype=dtype)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes, dtype=dtype)
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=explicit_strides,
padding=new_padding,
dilations=new_dilations,
data_format=data_format)
value = self.evaluate(conv)
self.assertShapeEqual(value, conv)
tf_logging.debug("expected = %s", expected)
tf_logging.debug("actual = %s", value)
self.assertAllCloseAccordingToType(expected, value.flatten(), err)
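  # Checks that conv2d_backprop_filter produces the same values for every
  # (data_format, use_gpu) configuration returned by GetTestConfigs().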
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
conv_strides, padding):
x0 = np.random.rand(*input_sizes).astype(np.float32)
x2 = np.random.rand(*output_sizes).astype(np.float32)
def _GetVal(data_format, use_gpu):
with test_util.device(use_gpu):
t0 = constant_op.constant(x0, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = constant_op.constant(x2, shape=output_sizes)
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t0 = test_util.NHWCToNCHW(t0)
t2 = test_util.NHWCToNCHW(t2)
strides = test_util.NHWCToNCHW(strides)
conv = nn_ops.conv2d_backprop_filter(
t0,
t1,
t2,
strides=strides,
padding=padding,
data_format=data_format)
ret = self.evaluate(conv)
self.assertShapeEqual(ret, conv)
return ret
values = []
for (data_format, use_gpu) in GetTestConfigs():
values.append(_GetVal(data_format, use_gpu))
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=2e-4, atol=2e-4)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth1ValidBackpropFilter(self):
expected = [5.0, 8.0, 14.0, 17.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DEmptyBackpropFilter(self):
expected = []
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DBackpropFilterWithEmptyInput(self):
expected = [0, 0, 0, 0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilter(self):
expected = [
17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0, 32.0, 43.0, 54.0,
37.0, 50.0, 63.0, 42.0, 57.0, 72.0, 62.0, 85.0, 108.0, 67.0, 92.0,
117.0, 72.0, 99.0, 126.0, 77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0,
120.0, 153.0
]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 3, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
expected = [161.0, 182.0, 287.0, 308.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 2, 3, 1],
strides=[1, 2],
padding="VALID",
expected=expected,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
expected_output = [78.]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 4, 4, 1],
filter_sizes=[1, 1, 1, 1],
output_sizes=[1, 2, 2, 1],
strides=[2, 2],
padding="SAME",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes
def testConv2DKernelSizeMatchesInputSizeBackpropFilter(self):
expected_output = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0, 4.0, 8.0]
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilter(
input_sizes=[1, 2, 2, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
padding="VALID",
expected=expected_output,
data_format=data_format,
use_gpu=use_gpu)
# Testing for backprops
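  # Compares the input gradient of nn_ops.conv2d (explicit dilations) against
  # the input gradient of nn_ops.convolution (dilation_rate) on the same data.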
def _RunAndVerifyBackpropInputDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t1)[0]
conv_2 = gradients_impl.gradients(conv_forward_2, t1)[0]
# "values" consists of two tensors for two backprops
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
# Testing for backprops
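  # Compares the filter gradient of nn_ops.conv2d (explicit dilations) against
  # the filter gradient of nn_ops.convolution (dilation_rate) on the same data.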
def _RunAndVerifyBackpropFilterDilation(self, input_sizes, filter_sizes,
output_sizes, strides, dilations,
padding, data_format, use_gpu, err):
x1 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(filter_sizes)
default_dilations = (dilations[0] == 1 and dilations[1] == 1)
if default_dilations or use_gpu:
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
input_sizes = test_util.NHWCToNCHW(input_sizes)
t1 = constant_op.constant(x1, shape=input_sizes)
t2 = constant_op.constant(x2, shape=filter_sizes)
full_strides = [1] + strides + [1]
full_dilations = [1] + dilations + [1]
if data_format == "NCHW":
full_strides = test_util.NHWCToNCHW(full_strides)
full_dilations = test_util.NHWCToNCHW(full_dilations)
conv_forward = nn_ops.conv2d(
t1,
t2,
strides=full_strides,
dilations=full_dilations,
padding=padding,
data_format=data_format)
conv_forward_2 = nn_ops.convolution(
t1,
t2,
padding=padding,
strides=strides,
dilation_rate=dilations,
data_format=data_format)
if data_format == "NCHW":
conv_forward = test_util.NCHWToNHWC(conv_forward)
conv_forward_2 = test_util.NCHWToNHWC(conv_forward_2)
conv = gradients_impl.gradients(conv_forward, t2)[0]
        conv_2 = gradients_impl.gradients(conv_forward_2, t2)[0]
value = self.evaluate(conv)
value_2 = self.evaluate(conv_2)
self.assertShapeEqual(value, conv)
self.assertShapeEqual(value_2, conv_2)
tf_logging.debug("expected = %s", value_2)
tf_logging.debug("actual = %s", value)
self.assertArrayNear(value_2.flatten(), value.flatten(), err)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterStride1x1Dilation2x1(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropFilterDilation1x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropFilterDilation1x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 0],
output_sizes=[1, 1, 2, 0],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropFilterDilation2x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 4, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropFilterDilation2x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputStride1x1Dilation2x1(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 6, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 5, 1],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth1ValidBackpropInputDilation1x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2DEmptyBackpropInputDilation1x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[0, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[0, 1, 2, 1],
strides=[1, 1],
dilations=[1, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
@test_util.deprecated_graph_mode_only
def testConv2D2x2Depth3ValidBackpropInputDilation2x1(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
# The GPU version of this test is not very stable. So adjusting the
# error threshold to 1e-4.
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 2, 3],
filter_sizes=[2, 2, 3, 3],
output_sizes=[1, 1, 2, 3],
strides=[1, 1],
dilations=[2, 1],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
@test_util.deprecated_graph_mode_only
def testConv2DKernelSizeMatchesInputSizeBackpropInputDilation2x2(self):
if test.is_gpu_available() or test_util.IsMklEnabled():
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputDilation(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 1, 1, 2],
strides=[1, 1],
dilations=[2, 2],
padding="VALID",
data_format=data_format,
use_gpu=use_gpu,
err=1e-5)
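  # Derives the expected result by running conv2d_backprop_input with VALID
  # padding on the padded input sizes and slicing off the padding, then
  # delegates the actual check to _RunAndVerifyBackpropInput.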
def _RunAndVerifyBackpropInputExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=2e-5):
if use_gpu and not test.is_gpu_available():
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
x1 = self._CreateNumpyTensor(filter_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
padded_input_sizes = input_sizes[:]
padded_input_sizes[1] += padding[0][0] + padding[0][1]
padded_input_sizes[2] += padding[1][0] + padding[1][1]
c = nn_ops.conv2d_backprop_input(
padded_input_sizes,
x1,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
c = c[:, padding[0][0]:(c.shape[1] - padding[0][1]), padding[1][0]:(
c.shape[2] - padding[1][1]), :]
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropInput(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
err=err,
dilations=dilations)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
dilations=[2, 2], use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
dilations=[2, 3],
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropInput(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
err=5e-5,
use_gpu=use_gpu)
self._RunAndVerifyBackpropInputExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
dilations=[2, 1],
use_gpu=use_gpu)
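  # Derives the expected result by padding the input with np.pad and running
  # conv2d_backprop_filter with VALID padding, then delegates the actual check
  # to _RunAndVerifyBackpropFilter.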
def _RunAndVerifyBackpropFilterExplicitPadding(self,
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
data_format,
use_gpu,
dilations=(1, 1),
err=1e-5):
if use_gpu and not test.is_gpu_available():
return
if not use_gpu and dilations != (1, 1):
      return  # Non-default dilations are currently not supported on the CPU.
x0 = self._CreateNumpyTensor(input_sizes)
x2 = self._CreateNumpyTensor(output_sizes)
dilations = list(dilations)
x0 = np.pad(x0, [(0, 0)] + padding + [(0, 0)], "constant")
c = nn_ops.conv2d_backprop_filter(
x0,
filter_sizes,
x2,
strides=[1] + strides + [1],
padding="VALID",
dilations=[1] + dilations + [1])
expected = list(self.evaluate(array_ops.reshape(c, [-1])))
self._RunAndVerifyBackpropFilter(
input_sizes,
filter_sizes,
output_sizes,
strides,
padding,
expected,
data_format,
use_gpu=use_gpu,
dilations=dilations,
err=err)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding0x0BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 1, 2, 1],
strides=[1, 1],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 4, 2],
filter_sizes=[2, 2, 2, 3],
output_sizes=[1, 1, 2, 3],
strides=[2, 2],
padding=[[0, 0], [0, 0]],
data_format=data_format, use_gpu=use_gpu)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding1x1BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 2],
output_sizes=[1, 3, 4, 2],
strides=[1, 1],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
err=5e-5)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 2],
filter_sizes=[1, 1, 2, 1],
output_sizes=[1, 4, 3, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
use_gpu=use_gpu,
data_format=data_format)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 4, 2, 1],
strides=[1, 2],
padding=[[1, 1], [1, 1]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 2])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding2x2BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[2, 3, 1, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[2, 2, 5, 1],
strides=[3, 1],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 6, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 3, 4, 1],
strides=[1, 2],
padding=[[2, 2], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 3])
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_1_8_4_1_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 2, 3, 1],
filter_sizes=[2, 2, 1, 1],
output_sizes=[1, 10, 8, 1],
strides=[1, 1],
padding=[[1, 8], [4, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 5, 3, 1],
filter_sizes=[3, 2, 1, 1],
output_sizes=[1, 4, 8, 1],
strides=[3, 1],
padding=[[1, 8], [4, 2]],
use_gpu=use_gpu,
data_format=data_format)
@test_util.run_in_graph_and_eager_modes()
def testConv2D2x2Depth1Padding_5_0_2_2_BackpropFilter(self):
for (data_format, use_gpu) in GetTestConfigs():
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 3, 3, 1],
filter_sizes=[2, 1, 1, 1],
output_sizes=[1, 7, 7, 1],
strides=[1, 1],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
err=1e-4)
self._RunAndVerifyBackpropFilterExplicitPadding(
input_sizes=[1, 4, 2, 1],
filter_sizes=[3, 3, 1, 1],
output_sizes=[1, 5, 2, 1],
strides=[1, 2],
padding=[[5, 0], [2, 2]],
data_format=data_format,
use_gpu=use_gpu,
dilations=[2, 1])
# Gradient checkers
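  # Builds a conv2d graph for the requested shapes and compares theoretical
  # and numerical Jacobians via gradient_checker; fp16/bf16 gradients are
  # compared against the fp32 Jacobian because their numerical gradients are
  # too imprecise.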
def ConstructAndTestGradient(self,
batch,
input_rows,
input_cols,
filter_rows,
filter_cols,
in_depth,
out_depth,
stride_rows,
stride_cols,
padding,
test_input,
data_format,
use_gpu,
num_groups=1,
max_err=0.003):
assert in_depth % num_groups == 0 and out_depth % num_groups == 0
input_shape = [batch, input_rows, input_cols, in_depth]
filter_shape = [filter_rows, filter_cols, in_depth // num_groups, out_depth]
# TODO(yangke): re-factor the computation of output shape.
if padding == "VALID":
output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
elif padding == "SAME":
output_rows = (input_rows + stride_rows - 1) // stride_rows
output_cols = (input_cols + stride_cols - 1) // stride_cols
else:
self.assertIsInstance(padding, (list, tuple))
output_rows = (input_rows + padding[1][0] + padding[1][1] - filter_rows +
stride_rows) // stride_rows
output_cols = (input_cols + padding[2][0] + padding[2][1] - filter_cols +
stride_cols) // stride_cols
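# Editor's note (worked example, not part of the original test): with explicit padding
# the spatial output size follows out = (in + pad_before + pad_after - filter + stride) // stride.
# For instance input_rows=8, filter_rows=4, stride_rows=3 and row padding [1, 2] give
# (8 + 1 + 2 - 4 + 3) // 3 = 3 output rows; with zero pads this reduces to the VALID
# branch above.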
output_shape = [batch, output_rows, output_cols, out_depth]
input_size = 1
for x in input_shape:
input_size *= x
filter_size = 1
for x in filter_shape:
filter_size *= x
input_data = [x * 1.0 / input_size for x in range(0, input_size)]
filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
# Conv2DGrad functions are not compiled for double due to
# a problem in the way Eigen's Conv2DGrad works for double.
# So we disable the DOUBLE path. We should re-enable this
# when double support returns for CPU and/or GPU.
for dtype in self._DtypesToTest(use_gpu=use_gpu):
with self.cached_session(use_gpu=use_gpu):
input_tensor = constant_op.constant(
input_data, shape=input_shape, dtype=dtype, name="input")
filter_tensor = constant_op.constant(
filter_data, shape=filter_shape, dtype=dtype, name="filter")
strides = [1, stride_rows, stride_cols, 1]
new_padding = padding
if data_format == "NCHW":
new_input_tensor = test_util.NHWCToNCHW(input_tensor)
strides = test_util.NHWCToNCHW(strides)
if isinstance(padding, (list, tuple)):
new_padding = test_util.NHWCToNCHW(padding)
else:
new_input_tensor = input_tensor
conv = nn_ops.conv2d(
new_input_tensor,
filter_tensor,
strides,
new_padding,
data_format=data_format,
name="conv")
if data_format == "NCHW":
conv = test_util.NCHWToNHWC(conv)
self.assertEqual(output_shape, conv.get_shape())
if test_input:
jacob_t, jacob_n = gradient_checker.compute_gradient(input_tensor,
input_shape,
conv,
output_shape)
else:
jacob_t, jacob_n = gradient_checker.compute_gradient(filter_tensor,
filter_shape,
conv,
output_shape)
if dtype == dtypes.float32:
reference_jacob_t = jacob_t
err = np.fabs(jacob_t - jacob_n).max()
else:
# Compare fp16/bf16 theoretical gradients to fp32 gradients,
# since fp16/bf16 numerical gradients are too imprecise.
err = np.fabs(jacob_t - reference_jacob_t).max()
tf_logging.debug("conv_2d gradient error = %s", err)
self.assertLess(err, max_err)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005,
)
@test_util.deprecated_graph_mode_only
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005,
)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005,
)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradientKernelSizeMatchesInputSize(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=3,
filter_rows=4,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.0025)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1x1PaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.003)
@test_util.deprecated_graph_mode_only
def testFilterGradient2x2PaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding=[[0, 0], [2, 2], [2, 2], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005)
@test_util.deprecated_graph_mode_only
def testInputGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient1_2_3_4PaddingStride3x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=8,
input_cols=5,
filter_rows=4,
filter_cols=2,
in_depth=3,
out_depth=2,
stride_rows=3,
stride_cols=2,
padding=[[0, 0], [1, 2], [3, 4], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testInputGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testFilterGradient4_3_2_1PaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=3,
input_rows=5,
input_cols=7,
filter_rows=3,
filter_cols=2,
in_depth=1,
out_depth=2,
stride_rows=2,
stride_cols=1,
padding=[[0, 0], [4, 3], [2, 1], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005,
)
@test_util.deprecated_graph_mode_only
def testInputGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=True,
data_format=data_format,
use_gpu=use_gpu,
max_err=0.005,
)
@test_util.deprecated_graph_mode_only
def testFilterGradient0_0_0_5PaddingStride1x2(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(
batch=2,
input_rows=6,
input_cols=7,
filter_rows=3,
filter_cols=4,
in_depth=3,
out_depth=2,
stride_rows=1,
stride_cols=2,
padding=[[0, 0], [0, 0], [0, 5], [0, 0]],
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
@test_util.deprecated_graph_mode_only
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(
dtypes.float32, shape=[1, 3]),
strides=[1, 1, 1, 1],
padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(
dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(
dtypes.float32, shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1],
padding="SAME")
# Input depth divisible by filter depth (group convolution).
# No exceptions should appear.
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 8]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 16]),
strides=[1, 1, 1, 1],
padding="SAME")
# Negative padding.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, -1], [1, 2], [0, 0]])
# Nonzero padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[1, 0], [0, 0], [0, 0], [0, 0]])
# Nonzero NCHW padding in nonspatial dimension.
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 1], [0, 0], [0, 0]],
data_format="NCHW")
# Wrong amount of padding
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0, 0], [0, 0], [0, 0]])
# Only specify one padding amount per dimension
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[[0], [0], [0], [0]])
# Explicit padding elements are not lists
with self.assertRaises(ValueError):
nn_ops.conv2d(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding=[0, 0, 0, 0])
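# Editor's summary (not in the original file): as exercised above, explicit conv2d
# padding must be four [before, after] pairs, one per dimension of the data format
# (e.g. [[0, 0], [top, bottom], [left, right], [0, 0]] for NHWC); the batch and depth
# entries must stay [0, 0] and no value may be negative.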
def testOpEdgeCases(self):
# Illegal strides.
with self.assertRaisesRegex((ValueError, errors_impl.UnimplementedError),
"strides in the batch and depth"):
input_val = np.ones([2, 4, 10, 10])
filter_val = np.ones([2, 4, 10, 10])
self.evaluate(
nn_ops.conv2d(
input_val, filter_val, strides=[2, 1, 1, 1], padding="SAME"))
with self.assertRaisesRegex((ValueError, errors_impl.UnimplementedError),
"strides in the batch and depth"):
input_val = np.ones([2, 4, 10, 10])
filter_val = np.ones([2, 4, 10, 10])
self.evaluate(
nn_ops.conv2d(
input_val, filter_val, strides=[1, 1, 1, 2], padding="SAME"))
# TODO(b/195689143): Will enable when fixed for V2 behavior
# # Filter larger than input.
# with self.assertRaisesRegex(ValueError, "Negative dimension size"):
# input_val = np.ones([32, 20, 20, 3])
# filter_val = np.ones([20, 21, 3, 2])
# self.evaluate(
# nn_ops.conv2d(
# input_val, filter_val, strides=[1, 1, 1, 1], padding="VALID"))
# with self.assertRaisesRegex(ValueError, "Negative dimension size"):
# input_val = np.ones([32, 20, 20, 3])
# filter_val = np.ones([21, 20, 3, 2])
# self.evaluate(
# nn_ops.conv2d(
# input_val, filter_val, strides=[1, 1, 1, 1], padding="VALID"))
#
# # Filter larger than input + padding.
# with self.assertRaisesRegex(ValueError, "Negative dimension size"):
# input_val = np.ones([32, 20, 20, 3])
# filter_val = np.ones([24, 25, 3, 2])
# self.evaluate(
# nn_ops.conv2d(
# input_val,
# filter_val,
# strides=[1, 1, 1, 1],
# padding=[[0, 0], [2, 2], [2, 2], [0, 0]]))
# Filter dimensions must be greater than 0.
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError, "filter must not have zero elements"
"|has a non-positive dimension"):
input_val = np.ones([1, 1, 1, 1])
filter_val = np.ones([1, 0, 1, 1])
self.evaluate(
nn_ops.conv2d(
input_val, filter_val, strides=[1, 1, 1, 1], padding="SAME"))
# Negative padding during backprop.
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"All elements of explicit_paddings must be nonnegative"):
filter_val = np.ones([18, 18, 3, 2])
out_backprop_val = np.ones([32, 3, 2, 2])
self.evaluate(
nn_ops.conv2d_backprop_input([32, 20, 20, 3],
filter_val,
out_backprop_val,
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0], [0,
0]]))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"All elements of explicit_paddings must be nonnegative"):
input_val = np.ones([32, 20, 20, 3])
out_backprop_val = np.ones([32, 3, 2, 2])
self.evaluate(
nn_ops.conv2d_backprop_filter(
input_val, [18, 18, 3, 2],
out_backprop_val,
strides=[1, 1, 1, 1],
padding=[[0, 0], [-1, 0], [0, 0], [0, 0]]))
def testConvOpEdgeCases(self):
# Illegal strides.
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, errors_impl.UnimplementedError),
"strides in the batch and depth",
):
input_val = np.ones([2, 4, 10, 10])
filter_val = np.ones([2, 4, 10, 10])
self.evaluate(
gen_nn_ops.conv(
input_val, filter_val, strides=[2, 1, 1, 1], padding="SAME"
)
)
with self.assertRaisesRegex(
(errors_impl.InvalidArgumentError, errors_impl.UnimplementedError),
"strides in the batch and depth",
):
input_val = np.ones([2, 4, 10, 10])
filter_val = np.ones([2, 4, 10, 10])
self.evaluate(
gen_nn_ops.conv(
input_val, filter_val, strides=[1, 1, 1, 2], padding="SAME"
)
)
# Filter dimensions must be greater than 0.
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"filter must not have zero elements|has a non-positive dimension",
):
input_val = np.ones([1, 1, 1, 1])
filter_val = np.ones([1, 0, 1, 1])
self.evaluate(
gen_nn_ops.conv(
input_val, filter_val, strides=[1, 1, 1, 1], padding="SAME"
)
)
def testConv2DBackpropInputInvalidOutBackpropRaiseError(self):
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
with self.cached_session():
input_sizes = constant_op.constant([65534, 65534],
shape=[2],
dtype=dtypes.int32)
filters = constant_op.constant(
0.159749106, shape=[3, 3, 2, 2], dtype=dtypes.float32)
out_backprop = constant_op.constant(0, shape=[], dtype=dtypes.float32)
t = gen_nn_ops.conv2d_backprop_input(
input_sizes=input_sizes,
filter=filters,
out_backprop=out_backprop,
strides=[1, 1, 1, 1],
padding="SAME",
use_cudnn_on_gpu=True,
explicit_paddings=[],
data_format="NHWC",
dilations=[1, 1, 1, 1])
self.evaluate(t)
@test_util.run_all_without_tensor_float_32("Avoid TF32 conv on GPU")
|
Conv2DTest
|
python
|
huggingface__transformers
|
src/transformers/models/perceiver/modeling_perceiver.py
|
{
"start": 9779,
"end": 12498
}
|
class ____(nn.Module):
"""Attention module, including a dense block."""
def __init__(
self,
config,
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
use_query_residual=True,
):
super().__init__()
# MultiHead attention
if is_cross_attention and qk_channels is None:
if config.cross_attention_shape_for_attention == "q":
qk_channels = q_dim
elif config.cross_attention_shape_for_attention == "kv":
qk_channels = kv_dim
else:
raise ValueError(
f"Unknown value {config.cross_attention_shape_for_attention} for "
"cross_attention_shape_for_attention."
)
else:
if qk_channels is None:
qk_channels = q_dim
if v_channels is None:
v_channels = qk_channels
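# Editor's note: in short, an unspecified qk_channels defaults to the query width (or
# to the key/value width when the config shapes cross-attention after "kv"), and an
# unspecified v_channels defaults to qk_channels, so attention sizes track the inputs
# unless explicitly overridden.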
self.self = PerceiverSelfAttention(
config,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim,
)
# dense block
output_channels = None
if is_cross_attention:
output_channels = q_dim
else:
if output_channels is None:
output_channels = v_channels
self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
self.use_query_residual = use_query_residual
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
inputs,
inputs_mask,
output_attentions,
)
# Output projection
attention_output = self.output(self_outputs[0])
# Optionally include a residual to the original queries.
# Consider omitting the residual if the semantics of query and output
# are different, e.g. if queries are positions and outputs are pixels.
if self.use_query_residual:
attention_output = attention_output + hidden_states
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
|
PerceiverAttention
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 256323,
"end": 256718
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "migration_source")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
migration_source = sgqlc.types.Field(
"MigrationSource", graphql_name="migrationSource"
)
|
CreateMigrationSourcePayload
|
python
|
tiangolo__fastapi
|
tests/test_dependency_after_yield_raise.py
|
{
"start": 174,
"end": 1803
}
|
class ____(Exception):
pass
def catching_dep() -> Any:
try:
yield "s"
except CustomError as err:
raise HTTPException(status_code=418, detail="Session error") from err
def broken_dep() -> Any:
yield "s"
raise ValueError("Broken after yield")
app = FastAPI()
@app.get("/catching")
def catching(d: Annotated[str, Depends(catching_dep)]) -> Any:
raise CustomError("Simulated error during streaming")
@app.get("/broken")
def broken(d: Annotated[str, Depends(broken_dep)]) -> Any:
return {"message": "all good?"}
client = TestClient(app)
def test_catching():
response = client.get("/catching")
assert response.status_code == 418
assert response.json() == {"detail": "Session error"}
def test_broken_raise():
with pytest.raises(ValueError, match="Broken after yield"):
client.get("/broken")
def test_broken_no_raise():
"""
When a dependency with yield raises after the yield (not in an except block), the
response has already been "successfully" sent back to the client, but there is still
an error in the server afterwards: an exception is raised and either captured or
shown in the server logs.
"""
with TestClient(app, raise_server_exceptions=False) as client:
response = client.get("/broken")
assert response.status_code == 200
assert response.json() == {"message": "all good?"}
def test_broken_return_finishes():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/broken")
assert response.status_code == 200
assert response.json() == {"message": "all good?"}
|
CustomError
|
python
|
spack__spack
|
lib/spack/spack/database.py
|
{
"start": 8589,
"end": 10464
}
|
class ____(NamedTuple):
"""Data class to configure locks in Database objects
Args:
enable: whether to enable locks or not.
database_timeout: timeout for the database lock
package_timeout: timeout for the package lock
"""
enable: bool
database_timeout: Optional[int]
package_timeout: Optional[int]
#: Configure a database to avoid using locks
NO_LOCK: LockConfiguration = LockConfiguration(
enable=False, database_timeout=None, package_timeout=None
)
#: Configure the database to use locks without a timeout
NO_TIMEOUT: LockConfiguration = LockConfiguration(
enable=True, database_timeout=None, package_timeout=None
)
#: Default configuration for database locks
DEFAULT_LOCK_CFG: LockConfiguration = LockConfiguration(
enable=True,
database_timeout=_DEFAULT_DB_LOCK_TIMEOUT,
package_timeout=_DEFAULT_PKG_LOCK_TIMEOUT,
)
def lock_configuration(configuration):
"""Return a LockConfiguration from a spack.config.Configuration object."""
return LockConfiguration(
enable=configuration.get("config:locks", True),
database_timeout=configuration.get("config:db_lock_timeout"),
package_timeout=configuration.get("config:package_lock_timeout"),
)
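# Example (hypothetical values, editor's addition): a configuration where
# config:db_lock_timeout == 120 and config:package_lock_timeout is unset yields
# LockConfiguration(enable=True, database_timeout=120, package_timeout=None).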
def prefix_lock_path(root_dir: Union[str, pathlib.Path]) -> pathlib.Path:
"""Returns the path of the prefix lock file, given the root directory.
Args:
root_dir: root directory containing the database directory
"""
return pathlib.Path(root_dir) / _DB_DIRNAME / "prefix_lock"
def failures_lock_path(root_dir: Union[str, pathlib.Path]) -> pathlib.Path:
"""Returns the path of the failures lock file, given the root directory.
Args:
root_dir: root directory containing the database directory
"""
return pathlib.Path(root_dir) / _DB_DIRNAME / "prefix_failures"
|
LockConfiguration
|
python
|
getsentry__sentry
|
src/sentry/monitors/migrations/0009_backfill_monitor_detectors.py
|
{
"start": 2407,
"end": 3712
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("monitors", "0008_fix_processing_error_keys"),
]
operations = [
migrations.RunPython(
backfill_monitor_detectors,
migrations.RunPython.noop,
hints={"tables": ["sentry_monitor"]},
)
]
|
Migration
|
python
|
walkccc__LeetCode
|
solutions/3532. Path Existence Queries in a Graph I/3532.py
|
{
"start": 514,
"end": 878
}
|
class ____:
def pathExistenceQueries(
self,
n: int,
nums: list[int],
maxDiff: int,
queries: list[list[int]]
) -> list[bool]:
uf = UnionFind(n)
for i in range(1, n):
if abs(nums[i] - nums[i - 1]) <= maxDiff:
uf.unionByRank(i, i - 1)
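# Editor's note (assuming nums is sorted, which the adjacent-pair check relies on): if
# every consecutive gap within a run is <= maxDiff, any two indices in that run are
# connected through the chain of intermediate nodes, while a gap > maxDiff cannot be
# bridged by any edge, so unioning neighbours captures the full connectivity.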
return [uf.find(u) == uf.find(v)
for u, v in queries]
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/matching_files_dataset_test.py
|
{
"start": 1204,
"end": 5524
}
|
class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(MatchingFilesDatasetTest, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
super(MatchingFilesDatasetTest, self).tearDown()
def _touchTempFiles(self, filenames):
for filename in filenames:
open(os.path.join(self.tmp_dir, filename), 'a').close()
@combinations.generate(test_base.default_test_combinations())
def testNonExistingDirectory(self):
"""Test the MatchingFiles dataset with a non-existing directory."""
self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir')
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
@combinations.generate(test_base.default_test_combinations())
def testEmptyDirectory(self):
"""Test the MatchingFiles dataset with an empty directory."""
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
@combinations.generate(test_base.default_test_combinations())
def testSimpleDirectory(self):
"""Test the MatchingFiles dataset with a simple directory."""
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testFileSuffixes(self):
"""Test the MatchingFiles dataset using filename suffixes."""
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testFileMiddles(self):
"""Test the MatchingFiles dataset using the middle parts of filenames."""
filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, 'b*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:3]
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testNestedDirectories(self):
"""Test the MatchingFiles dataset with nested directories."""
filenames = []
width = 8
depth = 4
for i in range(width):
for j in range(depth):
new_base = os.path.join(self.tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
filenames.append(filename)
open(filename, 'w').close()
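# Editor's note (derived from the loop above): only the deepest level (j == depth - 1)
# receives '.txt'/'.log' files, so with width=8 and depth=4 the patterns below should
# match 8 * 2 = 16 files.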
patterns = [
os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]),
suffix) for suffix in ['*.txt', '*.log']
]
dataset = matching_files.MatchingFilesDataset(patterns)
next_element = self.getNext(dataset)
expected_filenames = [
compat.as_bytes(filename)
for filename in filenames
if filename.endswith('.txt') or filename.endswith('.log')
]
actual_filenames = []
while True:
try:
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
except errors.OutOfRangeError:
break
self.assertCountEqual(expected_filenames, actual_filenames)
|
MatchingFilesDatasetTest
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_function_base.py
|
{
"start": 67308,
"end": 71439
}
|
class ____(TestCase):
A = np.array(
[
[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
[0.9297531, 0.32296769, 0.19267156],
]
)
B = np.array(
[
[0.10377691, 0.5417086, 0.49807457],
[0.82872117, 0.77801674, 0.39226705],
[0.9314666, 0.66800209, 0.03538394],
]
)
res1 = np.array(
[
[1.0, 0.9379533, -0.04931983],
[0.9379533, 1.0, 0.30007991],
[-0.04931983, 0.30007991, 1.0],
]
)
res2 = np.array(
[
[1.0, 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
[0.9379533, 1.0, 0.30007991, -0.04781421, 0.88157256, 0.78052386],
[-0.04931983, 0.30007991, 1.0, -0.96717111, 0.71483595, 0.83053601],
[0.30151751, -0.04781421, -0.96717111, 1.0, -0.51366032, -0.66173113],
[0.66318558, 0.88157256, 0.71483595, -0.51366032, 1.0, 0.98317823],
[0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.0],
]
)
def test_non_array(self):
assert_almost_equal(
np.corrcoef([0, 1, 0], [1, 0, 1]), [[1.0, -1.0], [-1.0, 1.0]]
)
def test_simple(self):
tgt1 = corrcoef(self.A)
assert_almost_equal(tgt1, self.res1)
assert_(np.all(np.abs(tgt1) <= 1.0))
tgt2 = corrcoef(self.A, self.B)
assert_almost_equal(tgt2, self.res2)
assert_(np.all(np.abs(tgt2) <= 1.0))
@skip(reason="deprecated in numpy, ignore")
def test_ddof(self):
# ddof raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
sup.filter(DeprecationWarning)
# ddof has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
@skip(reason="deprecated in numpy, ignore")
def test_bias(self):
# bias raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
sup.filter(DeprecationWarning)
# bias has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
res = corrcoef(x)
tgt = np.array([[1.0, -1.0j], [1.0j, 1.0]])
assert_allclose(res, tgt)
assert_(np.all(np.abs(res) <= 1.0))
def test_xy(self):
x = np.array([[1, 2, 3]])
y = np.array([[1j, 2j, 3j]])
assert_allclose(np.corrcoef(x, y), np.array([[1.0, -1.0j], [1.0j, 1.0]]))
def test_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always", RuntimeWarning)
assert_array_equal(corrcoef(np.array([])), np.nan)
assert_array_equal(
corrcoef(np.array([]).reshape(0, 2)), np.array([]).reshape(0, 0)
)
assert_array_equal(
corrcoef(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]),
)
def test_extreme(self):
x = [[1e-100, 1e100], [1e100, 1e-100]]
c = corrcoef(x)
assert_array_almost_equal(c, np.array([[1.0, -1.0], [-1.0, 1.0]]))
assert_(np.all(np.abs(c) <= 1.0))
@parametrize("test_type", [np.half, np.single, np.double])
def test_corrcoef_dtype(self, test_type):
cast_A = self.A.astype(test_type)
res = corrcoef(cast_A, dtype=test_type)
assert test_type == res.dtype
@instantiate_parametrized_tests
|
TestCorrCoef
|
python
|
crytic__slither
|
slither/slithir/variables/tuple_ssa.py
|
{
"start": 231,
"end": 522
}
|
class ____(TupleVariable): # pylint: disable=too-few-public-methods
def __init__(self, t: TupleVariable) -> None:
super().__init__(t.node, t.index)
self._non_ssa_version = t
@property
def non_ssa_version(self):
return self._non_ssa_version
|
TupleVariableSSA
|
python
|
huggingface__transformers
|
src/transformers/models/edgetam_video/modular_edgetam_video.py
|
{
"start": 40216,
"end": 42155
}
|
class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.cross_attention = EdgeTamVideoPerceiverAttention(config)
self.mlp = EdgeTamVideoPerceiverMLP(config)
self.dropout = nn.Dropout(config.perceiver_resampler_hidden_dropout)
self.self_attention = EdgeTamVideoPerceiverAttention(config)
self.self_mlp = EdgeTamVideoPerceiverMLP(config)
# Layer norms moved from attention classes to here
self.layer_norm_input = nn.LayerNorm(config.perceiver_resampler_hidden_size)
self.layer_norm_latents = nn.LayerNorm(config.perceiver_resampler_hidden_size)
self.layer_norm_self = nn.LayerNorm(config.perceiver_resampler_hidden_size)
def forward(
self,
latents: torch.Tensor,
input_features: torch.Tensor,
positional_encoding: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# Cross attention with layer norms
normalized_latents = self.layer_norm_latents(latents)
normalized_input = self.layer_norm_input(input_features)
cross_attention_output = self.cross_attention(
query=normalized_latents,
key=normalized_input,
value=normalized_input,
positional_encoding=positional_encoding,
)
latents = latents + self.dropout(cross_attention_output)
mlp_output = self.mlp(latents)
latents = latents + mlp_output
# Self attention with layer norm
normalized_latents_self = self.layer_norm_self(latents)
self_attention_output = self.self_attention(
query=normalized_latents_self, key=normalized_latents_self, value=normalized_latents_self
)
latents = latents + self_attention_output
self_mlp_output = self.self_mlp(latents)
latents = latents + self_mlp_output
return latents
|
EdgeTamVideoPerceiverEncoderLayer
|
python
|
pytest-dev__pytest
|
src/_pytest/terminal.py
|
{
"start": 11068,
"end": 61369
}
|
class ____:
def __init__(self, config: Config, file: TextIO | None = None) -> None:
import _pytest.config
self.config = config
self._numcollected = 0
self._session: Session | None = None
self._showfspath: bool | None = None
self.stats: dict[str, list[Any]] = {}
self._main_color: str | None = None
self._known_types: list[str] | None = None
self.startpath = config.invocation_params.dir
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
self._screen_width = self._tw.fullwidth
self.currentfspath: None | Path | str | int = None
self.reportchars = getreportopt(config)
self.foldskipped = config.option.fold_skipped
self.hasmarkup = self._tw.hasmarkup
# isatty should be a method but was wrongly implemented as a boolean.
# We use CallableBool here to support both.
self.isatty = compat.CallableBool(file.isatty())
self._progress_nodeids_reported: set[str] = set()
self._timing_nodeids_reported: set[str] = set()
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write = timing.Instant()
self._already_displayed_warnings: int | None = None
self._keyboardinterrupt_memo: ExceptionRepr | None = None
def _determine_show_progress_info(
self,
) -> Literal["progress", "count", "times", False]:
"""Return whether we should display progress information based on the current config."""
# do not show progress if we are not capturing output (#3038) unless explicitly
# overridden by progress-even-when-capture-no
if (
self.config.getoption("capture", "no") == "no"
and self.config.getini("console_output_style")
!= "progress-even-when-capture-no"
):
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow", False):
return False
cfg: str = self.config.getini("console_output_style")
if cfg in {"progress", "progress-even-when-capture-no"}:
return "progress"
elif cfg == "count":
return "count"
elif cfg == "times":
return "times"
else:
return False
@property
def verbosity(self) -> int:
verbosity: int = self.config.option.verbose
return verbosity
@property
def showheader(self) -> bool:
return self.verbosity >= 0
@property
def no_header(self) -> bool:
return bool(self.config.option.no_header)
@property
def no_summary(self) -> bool:
return bool(self.config.option.no_summary)
@property
def showfspath(self) -> bool:
if self._showfspath is None:
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0
return self._showfspath
@showfspath.setter
def showfspath(self, value: bool | None) -> None:
self._showfspath = value
@property
def showlongtestinfo(self) -> bool:
return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0
@property
def reported_progress(self) -> int:
"""The number of items reported in the progress so far.
:meta private:
"""
return len(self._progress_nodeids_reported)
def hasopt(self, char: str) -> bool:
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None:
fspath = self.config.rootpath / nodeid.split("::")[0]
if self.currentfspath is None or fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
relfspath = bestrelpath(self.startpath, fspath)
self._tw.line()
self._tw.write(relfspath + " ")
self._tw.write(res, flush=True, **markup)
def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self) -> None:
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def wrap_write(
self,
content: str,
*,
flush: bool = False,
margin: int = 8,
line_sep: str = "\n",
**markup: bool,
) -> None:
"""Wrap message with margin for progress info."""
width_of_current_line = self._tw.width_of_current_line
wrapped = line_sep.join(
textwrap.wrap(
" " * width_of_current_line + content,
width=self._screen_width - margin,
drop_whitespace=True,
replace_whitespace=False,
),
)
wrapped = wrapped[width_of_current_line:]
self._tw.write(wrapped, flush=flush, **markup)
def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
self._tw.write(content, flush=flush, **markup)
def write_raw(self, content: str, *, flush: bool = False) -> None:
self._tw.write_raw(content, flush=flush)
def flush(self) -> None:
self._tw.flush()
def write_line(self, line: str | bytes, **markup: bool) -> None:
if not isinstance(line, str):
line = str(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
"""Rewinds the terminal cursor to the beginning and writes the given line.
:param erase:
If True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(
self,
sep: str,
title: str | None = None,
fullwidth: int | None = None,
**markup: bool,
) -> None:
self.ensure_newline()
self._tw.sep(sep, title, fullwidth, **markup)
def section(self, title: str, sep: str = "=", **kw: bool) -> None:
self._tw.sep(sep, title, **kw)
def line(self, msg: str, **kw: bool) -> None:
self._tw.line(msg, **kw)
def _add_stats(self, category: str, items: Sequence[Any]) -> None:
set_main_color = category not in self.stats
self.stats.setdefault(category, []).extend(items)
if set_main_color:
self._set_main_color()
def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return True
def pytest_warning_recorded(
self,
warning_message: warnings.WarningMessage,
nodeid: str,
) -> None:
from _pytest.warnings import warning_record_to_str
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
self._add_stats("warnings", [warning_report])
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
msg = f"PLUGIN registered: {plugin}"
# XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line.
self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
self._add_stats("deselected", items)
def pytest_runtest_logstart(
self, nodeid: str, location: tuple[str, int | None, str]
) -> None:
fspath, lineno, domain = location
# Ensure that the path is printed before the
# 1st test of a module starts running.
if self.showlongtestinfo:
line = self._locationline(nodeid, fspath, lineno, domain)
self.write_ensure_prefix(line, "")
self.flush()
elif self.showfspath:
self.write_fspath_result(nodeid, "")
self.flush()
def pytest_runtest_logreport(self, report: TestReport) -> None:
self._tests_ran = True
rep = report
res = TestShortLogReport(
*self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
)
category, letter, word = res.category, res.letter, res.word
if not isinstance(word, tuple):
markup = None
else:
word, markup = word
self._add_stats(category, [rep])
if not letter and not word:
# Probably passed setup/teardown.
return
if markup is None:
was_xfail = hasattr(report, "wasxfail")
if rep.passed and not was_xfail:
markup = {"green": True}
elif rep.passed and was_xfail:
markup = {"yellow": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
self._progress_nodeids_reported.add(rep.nodeid)
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0:
self._tw.write(letter, **markup)
# When running in xdist, the logreport and logfinish of multiple
# items are interspersed, e.g. `logreport`, `logreport`,
# `logfinish`, `logfinish`. To avoid the "past edge" calculation
# from getting confused and overflowing (#7166), do the past edge
# printing here and not in logfinish, except for the 100% which
# should only be printed after all teardowns are finished.
if self._show_progress_info and not self._is_last_item:
self._write_progress_information_if_past_edge()
else:
line = self._locationline(rep.nodeid, *rep.location)
running_xdist = hasattr(rep, "node")
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if rep.skipped or hasattr(report, "wasxfail"):
reason = _get_raw_skip_reason(rep)
if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2:
available_width = (
(self._tw.fullwidth - self._tw.width_of_current_line)
- len(" [100%]")
- 1
)
formatted_reason = _format_trimmed(
" ({})", reason, available_width
)
else:
formatted_reason = f" ({reason})"
if reason and formatted_reason is not None:
self.wrap_write(formatted_reason)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write(f"[{rep.node.gateway.id}]")
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
self.flush()
@property
def _is_last_item(self) -> bool:
assert self._session is not None
return self.reported_progress == self._session.testscollected
@hookimpl(wrapper=True)
def pytest_runtestloop(self) -> Generator[None, object, object]:
result = yield
# Write the final/100% progress -- deferred until the loop is complete.
if (
self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0
and self._show_progress_info
and self.reported_progress
):
self._write_progress_information_filling_space()
return result
def _get_progress_information_message(self) -> str:
assert self._session
collected = self._session.testscollected
if self._show_progress_info == "count":
if collected:
progress = self.reported_progress
counter_format = f"{{:{len(str(collected))}d}}"
format_string = f" [{counter_format}/{{}}]"
return format_string.format(progress, collected)
return f" [ {collected} / {collected} ]"
if self._show_progress_info == "times":
if not collected:
return ""
all_reports = (
self._get_reports_to_display("passed")
+ self._get_reports_to_display("xpassed")
+ self._get_reports_to_display("failed")
+ self._get_reports_to_display("xfailed")
+ self._get_reports_to_display("skipped")
+ self._get_reports_to_display("error")
+ self._get_reports_to_display("")
)
current_location = all_reports[-1].location[0]
not_reported = [
r for r in all_reports if r.nodeid not in self._timing_nodeids_reported
]
tests_in_module = sum(
i.location[0] == current_location for i in self._session.items
)
tests_completed = sum(
r.when == "setup"
for r in not_reported
if r.location[0] == current_location
)
last_in_module = tests_completed == tests_in_module
if self.showlongtestinfo or last_in_module:
self._timing_nodeids_reported.update(r.nodeid for r in not_reported)
return format_node_duration(
sum(r.duration for r in not_reported if isinstance(r, TestReport))
)
return ""
if collected:
return f" [{self.reported_progress * 100 // collected:3d}%]"
return " [100%]"
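# Editor's worked example (numbers are illustrative): with 8 collected items and 2
# reported so far, the default percent style prints " [ 25%]" and the "count" style
# prints " [2/8]"; with nothing collected the percent style falls back to " [100%]".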
def _write_progress_information_if_past_edge(self) -> None:
w = self._width_of_current_line
if self._show_progress_info == "count":
assert self._session
num_tests = self._session.testscollected
progress_length = len(f" [{num_tests}/{num_tests}]")
elif self._show_progress_info == "times":
progress_length = len(" 99h 59m")
else:
progress_length = len(" [100%]")
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
main_color, _ = self._get_main_color()
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", **{main_color: True})
def _write_progress_information_filling_space(self) -> None:
color, _ = self._get_main_color()
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), flush=True, **{color: True})
@property
def _width_of_current_line(self) -> int:
"""Return the width of the current line."""
return self._tw.width_of_current_line
def pytest_collection(self) -> None:
if self.isatty():
if self.config.option.verbose >= 0:
self.write("collecting ... ", flush=True, bold=True)
elif self.config.option.verbose >= 1:
self.write("collecting ... ", flush=True, bold=True)
def pytest_collectreport(self, report: CollectReport) -> None:
if report.failed:
self._add_stats("error", [report])
elif report.skipped:
self._add_stats("skipped", [report])
items = [x for x in report.result if isinstance(x, Item)]
self._numcollected += len(items)
if self.isatty():
self.report_collect()
def report_collect(self, final: bool = False) -> None:
if self.config.option.verbose < 0:
return
if not final:
# Only write the "collecting" report every `REPORT_COLLECTING_RESOLUTION`.
if (
self._collect_report_last_write.elapsed().seconds
< REPORT_COLLECTING_RESOLUTION
):
return
self._collect_report_last_write = timing.Instant()
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
selected = self._numcollected - deselected
line = "collected " if final else "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += f" / {errors} error{'s' if errors != 1 else ''}"
if deselected:
line += f" / {deselected} deselected"
if skipped:
line += f" / {skipped} skipped"
if self._numcollected > selected:
line += f" / {selected} selected"
if self.isatty():
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@hookimpl(trylast=True)
def pytest_sessionstart(self, session: Session) -> None:
self._session = session
self._session_start = timing.Instant()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
if not self.no_header:
msg = f"platform {sys.platform} -- Python {verinfo}"
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info:
verinfo = ".".join(map(str, pypy_version_info[:3]))
msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}"
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, start_path=self.startpath
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(
self, lines: Sequence[str | Sequence[str]]
) -> None:
for line_or_lines in reversed(lines):
if isinstance(line_or_lines, str):
self.write_line(line_or_lines)
else:
for line in line_or_lines:
self.write_line(line)
def pytest_report_header(self, config: Config) -> list[str]:
result = [f"rootdir: {config.rootpath}"]
if config.inipath:
warning = ""
if config._ignored_config_files:
warning = f" (WARNING: ignoring pytest config in {', '.join(config._ignored_config_files)}!)"
result.append(
"configfile: " + bestrelpath(config.rootpath, config.inipath) + warning
)
if config.args_source == Config.ArgsSource.TESTPATHS:
testpaths: list[str] = config.getini("testpaths")
result.append("testpaths: {}".format(", ".join(testpaths)))
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
result.append(
"plugins: {}".format(", ".join(_plugin_nameversions(plugininfo)))
)
return result
def pytest_collection_finish(self, session: Session) -> None:
self.report_collect(True)
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config,
start_path=self.startpath,
items=session.items,
)
self._write_report_lines_from_hooks(lines)
if self.config.getoption("collectonly"):
if session.items:
if self.config.option.verbose > -1:
self._tw.line("")
self._printcollecteditems(session.items)
failed = self.stats.get("failed")
if failed:
self._tw.sep("!", "collection failures")
for rep in failed:
rep.toterminal(self._tw)
def _printcollecteditems(self, items: Sequence[Item]) -> None:
test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES)
if test_cases_verbosity < 0:
if test_cases_verbosity < -1:
counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
for name, count in sorted(counts.items()):
self._tw.line(f"{name}: {count}")
else:
for item in items:
self._tw.line(item.nodeid)
return
stack: list[Node] = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
indent = (len(stack) - 1) * " "
self._tw.line(f"{indent}{col}")
if test_cases_verbosity >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None
if doc:
for line in doc.splitlines():
self._tw.line("{}{}".format(indent + " ", line))
@hookimpl(wrapper=True)
def pytest_sessionfinish(
self, session: Session, exitstatus: int | ExitCode
) -> Generator[None]:
result = yield
self._tw.line("")
summary_exit_codes = (
ExitCode.OK,
ExitCode.TESTS_FAILED,
ExitCode.INTERRUPTED,
ExitCode.USAGE_ERROR,
ExitCode.NO_TESTS_COLLECTED,
)
if exitstatus in summary_exit_codes and not self.no_summary:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus, config=self.config
)
if session.shouldfail:
self.write_sep("!", str(session.shouldfail), red=True)
if exitstatus == ExitCode.INTERRUPTED:
self._report_keyboardinterrupt()
self._keyboardinterrupt_memo = None
elif session.shouldstop:
self.write_sep("!", str(session.shouldstop), red=True)
self.summary_stats()
return result
@hookimpl(wrapper=True)
def pytest_terminal_summary(self) -> Generator[None]:
self.summary_errors()
self.summary_failures()
self.summary_xfailures()
self.summary_warnings()
self.summary_passes()
self.summary_xpasses()
try:
return (yield)
finally:
self.short_test_summary()
# Display any extra warnings from teardown here (if any).
self.summary_warnings()
def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self) -> None:
if self._keyboardinterrupt_memo is not None:
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self) -> None:
excrepr = self._keyboardinterrupt_memo
assert excrepr is not None
assert excrepr.reprcrash is not None
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --full-trace)",
yellow=True,
)
def _locationline(
self, nodeid: str, fspath: str, lineno: int | None, domain: str
) -> str:
def mkrel(nodeid: str) -> str:
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# fspath comes from testid which has a "/"-normalized path.
if fspath:
res = mkrel(nodeid)
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + bestrelpath(self.startpath, Path(fspath))
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
head_line = rep.head_line
if head_line:
return head_line
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# Summaries for sessionfinish.
#
def getreports(self, name: str):
return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")]
def summary_warnings(self) -> None:
if self.hasopt("w"):
all_warnings: list[WarningReport] | None = self.stats.get("warnings")
if not all_warnings:
return
final = self._already_displayed_warnings is not None
if final:
warning_reports = all_warnings[self._already_displayed_warnings :]
else:
warning_reports = all_warnings
self._already_displayed_warnings = len(warning_reports)
if not warning_reports:
return
reports_grouped_by_message: dict[str, list[WarningReport]] = {}
for wr in warning_reports:
reports_grouped_by_message.setdefault(wr.message, []).append(wr)
def collapsed_location_report(reports: list[WarningReport]) -> str:
locations = []
for w in reports:
location = w.get_location(self.config)
if location:
locations.append(location)
if len(locations) < 10:
return "\n".join(map(str, locations))
counts_by_filename = Counter(
str(loc).split("::", 1)[0] for loc in locations
)
return "\n".join(
"{}: {} warning{}".format(k, v, "s" if v > 1 else "")
for k, v in counts_by_filename.items()
)
title = "warnings summary (final)" if final else "warnings summary"
self.write_sep("=", title, yellow=True, bold=False)
for message, message_reports in reports_grouped_by_message.items():
maybe_location = collapsed_location_report(message_reports)
if maybe_location:
self._tw.line(maybe_location)
lines = message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line(
"-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
)
def summary_passes(self) -> None:
self.summary_passes_combined("passed", "PASSES", "P")
def summary_xpasses(self) -> None:
self.summary_passes_combined("xpassed", "XPASSES", "X")
def summary_passes_combined(
self, which_reports: str, sep_title: str, needed_opt: str
) -> None:
if self.config.option.tbstyle != "no":
if self.hasopt(needed_opt):
reports: list[TestReport] = self.getreports(which_reports)
if not reports:
return
self.write_sep("=", sep_title)
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, green=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def _get_teardown_reports(self, nodeid: str) -> list[TestReport]:
reports = self.getreports("")
return [
report
for report in reports
if report.when == "teardown" and report.nodeid == nodeid
]
def _handle_teardown_sections(self, nodeid: str) -> None:
for report in self._get_teardown_reports(nodeid):
self.print_teardown_sections(report)
def print_teardown_sections(self, rep: TestReport) -> None:
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self) -> None:
style = self.config.option.tbstyle
self.summary_failures_combined("failed", "FAILURES", style=style)
def summary_xfailures(self) -> None:
show_tb = self.config.option.xfail_tb
style = self.config.option.tbstyle if show_tb else "no"
self.summary_failures_combined("xfailed", "XFAILURES", style=style)
def summary_failures_combined(
self,
which_reports: str,
sep_title: str,
*,
style: str,
needed_opt: str | None = None,
) -> None:
if style != "no":
if not needed_opt or self.hasopt(needed_opt):
reports: list[BaseReport] = self.getreports(which_reports)
if not reports:
return
self.write_sep("=", sep_title)
if style == "line":
for rep in reports:
line = self._getcrashline(rep)
self._outrep_summary(rep)
self.write_line(line)
else:
for rep in reports:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
self._handle_teardown_sections(rep.nodeid)
def summary_errors(self) -> None:
if self.config.option.tbstyle != "no":
reports: list[BaseReport] = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
msg = f"ERROR at {rep.when} of {msg}"
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
def _outrep_summary(self, rep: BaseReport) -> None:
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self) -> None:
if self.verbosity < -1:
return
session_duration = self._session_start.elapsed()
(parts, main_color) = self.build_summary_stats_line()
line_parts = []
display_sep = self.verbosity >= 0
if display_sep:
fullwidth = self._tw.fullwidth
for text, markup in parts:
with_markup = self._tw.markup(text, **markup)
if display_sep:
fullwidth += len(with_markup) - len(text)
line_parts.append(with_markup)
msg = ", ".join(line_parts)
main_markup = {main_color: True}
duration = f" in {format_session_duration(session_duration.seconds)}"
duration_with_markup = self._tw.markup(duration, **main_markup)
if display_sep:
fullwidth += len(duration_with_markup) - len(duration)
msg += duration_with_markup
if display_sep:
markup_for_end_sep = self._tw.markup("", **main_markup)
if markup_for_end_sep.endswith("\x1b[0m"):
markup_for_end_sep = markup_for_end_sep[:-4]
fullwidth += len(markup_for_end_sep)
msg += markup_for_end_sep
if display_sep:
self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
else:
self.write_line(msg, **main_markup)
def short_test_summary(self) -> None:
if not self.reportchars:
return
def show_simple(lines: list[str], *, stat: str) -> None:
failed = self.stats.get(stat, [])
if not failed:
return
config = self.config
for rep in failed:
color = _color_for_type.get(stat, _color_for_type_default)
line = _get_line_with_reprcrash_message(
config, rep, self._tw, {color: True}
)
lines.append(line)
def show_xfailed(lines: list[str]) -> None:
xfailed = self.stats.get("xfailed", [])
for rep in xfailed:
verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
self.config, {_color_for_type["warnings"]: True}
)
markup_word = self._tw.markup(verbose_word, **verbose_markup)
nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
line = f"{markup_word} {nodeid}"
reason = rep.wasxfail
if reason:
line += " - " + str(reason)
lines.append(line)
def show_xpassed(lines: list[str]) -> None:
xpassed = self.stats.get("xpassed", [])
for rep in xpassed:
verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
self.config, {_color_for_type["warnings"]: True}
)
markup_word = self._tw.markup(verbose_word, **verbose_markup)
nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
line = f"{markup_word} {nodeid}"
reason = rep.wasxfail
if reason:
line += " - " + str(reason)
lines.append(line)
def show_skipped_folded(lines: list[str]) -> None:
skipped: list[CollectReport] = self.stats.get("skipped", [])
fskips = _folded_skips(self.startpath, skipped) if skipped else []
if not fskips:
return
verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup(
self.config, {_color_for_type["warnings"]: True}
)
markup_word = self._tw.markup(verbose_word, **verbose_markup)
prefix = "Skipped: "
for num, fspath, lineno, reason in fskips:
if reason.startswith(prefix):
reason = reason[len(prefix) :]
if lineno is not None:
lines.append(f"{markup_word} [{num}] {fspath}:{lineno}: {reason}")
else:
lines.append(f"{markup_word} [{num}] {fspath}: {reason}")
def show_skipped_unfolded(lines: list[str]) -> None:
skipped: list[CollectReport] = self.stats.get("skipped", [])
for rep in skipped:
assert rep.longrepr is not None
assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr)
assert len(rep.longrepr) == 3, (rep, rep.longrepr)
verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
self.config, {_color_for_type["warnings"]: True}
)
markup_word = self._tw.markup(verbose_word, **verbose_markup)
nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
line = f"{markup_word} {nodeid}"
reason = rep.longrepr[2]
if reason:
line += " - " + str(reason)
lines.append(line)
def show_skipped(lines: list[str]) -> None:
if self.foldskipped:
show_skipped_folded(lines)
else:
show_skipped_unfolded(lines)
REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = {
"x": show_xfailed,
"X": show_xpassed,
"f": partial(show_simple, stat="failed"),
"s": show_skipped,
"p": partial(show_simple, stat="passed"),
"E": partial(show_simple, stat="error"),
}
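# Editor's note (assumed behaviour of pytest's `-r` option, not stated in this
# file): the keys of the mapping above are the report chars, e.g. `-rfs` lists
# failed and skipped tests in the short summary, while `-ra` shows every group
# except passes; "P" (passed with output) is intentionally not handled here,
# as the comment below notes.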
lines: list[str] = []
for char in self.reportchars:
action = REPORTCHAR_ACTIONS.get(char)
if action: # skipping e.g. "P" (passed with output) here.
action(lines)
if lines:
self.write_sep("=", "short test summary info", cyan=True, bold=True)
for line in lines:
self.write_line(line)
def _get_main_color(self) -> tuple[str, list[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types
def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color
def _set_main_color(self) -> None:
unknown_types: list[str] = []
for found_type in self.stats:
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
unknown_types.append(found_type)
self._known_types = list(KNOWN_TYPES) + unknown_types
self._main_color = self._determine_main_color(bool(unknown_types))
def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]:
"""
Build the parts used in the last summary stats line.
The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
This function builds a list of the "parts" that make up the text in that line, in
the example above it would be::
[
("12 passed", {"green": True}),
("2 errors", {"red": True}
]
That last dict for each line is a "markup dictionary", used by TerminalWriter to
color output.
The final color of the line is also determined by this function, and is the second
element of the returned tuple.
"""
if self.config.getoption("collectonly"):
return self._build_collect_only_summary_stats_line()
else:
return self._build_normal_summary_stats_line()
def _get_reports_to_display(self, key: str) -> list[Any]:
"""Get test/collection reports for the given status key, such as `passed` or `error`."""
reports = self.stats.get(key, [])
return [x for x in reports if getattr(x, "count_towards_summary", True)]
def _build_normal_summary_stats_line(
self,
) -> tuple[list[tuple[str, dict[str, bool]]], str]:
main_color, known_types = self._get_main_color()
parts = []
for key in known_types:
reports = self._get_reports_to_display(key)
if reports:
count = len(reports)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % pluralize(count, key), markup)) # noqa: UP031
if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]
return parts, main_color
def _build_collect_only_summary_stats_line(
self,
) -> tuple[list[tuple[str, dict[str, bool]]], str]:
deselected = len(self._get_reports_to_display("deselected"))
errors = len(self._get_reports_to_display("error"))
if self._numcollected == 0:
parts = [("no tests collected", {"yellow": True})]
main_color = "yellow"
elif deselected == 0:
main_color = "green"
collected_output = "%d %s collected" % pluralize(self._numcollected, "test") # noqa: UP031
parts = [(collected_output, {main_color: True})]
else:
all_tests_were_deselected = self._numcollected == deselected
if all_tests_were_deselected:
main_color = "yellow"
collected_output = f"no tests collected ({deselected} deselected)"
else:
main_color = "green"
selected = self._numcollected - deselected
collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
parts = [(collected_output, {main_color: True})]
if errors:
main_color = _color_for_type["error"]
parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] # noqa: UP031
return parts, main_color
def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport):
nodeid = config.cwd_relative_nodeid(rep.nodeid)
path, *parts = nodeid.split("::")
if parts:
parts_markup = tw.markup("::".join(parts), bold=True)
return path + "::" + parts_markup
else:
return path
def _format_trimmed(format: str, msg: str, available_width: int) -> str | None:
"""Format msg into format, ellipsizing it if doesn't fit in available_width.
Returns None if even the ellipsis can't fit.
"""
# Only use the first line.
i = msg.find("\n")
if i != -1:
msg = msg[:i]
ellipsis = "..."
format_width = wcswidth(format.format(""))
if format_width + len(ellipsis) > available_width:
return None
if format_width + wcswidth(msg) > available_width:
available_width -= len(ellipsis)
msg = msg[:available_width]
while format_width + wcswidth(msg) > available_width:
msg = msg[:-1]
msg += ellipsis
return format.format(msg)
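# Editor's note: a minimal usage sketch of the helper above (inputs are
# assumed, not taken from pytest's own tests). With enough width the message
# is kept, with a narrow width it is ellipsized, and when even the ellipsis
# cannot fit the function returns None:
# _format_trimmed(" - {}", "assert 1 == 2", 40) -> " - assert 1 == 2"
# _format_trimmed(" - {}", "assert 1 == 2", 10) -> " - asse..."
# _format_trimmed(" - {}", "assert 1 == 2", 2)  -> None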
def _get_line_with_reprcrash_message(
config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool]
) -> str:
"""Get summary line for a report, trying to add reprcrash message."""
verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
config, word_markup
)
word = tw.markup(verbose_word, **verbose_markup)
node = _get_node_id_with_markup(tw, config, rep)
line = f"{word} {node}"
line_width = wcswidth(line)
msg: str | None
try:
if isinstance(rep.longrepr, str):
msg = rep.longrepr
else:
# Type ignored intentionally -- possible AttributeError expected.
msg = rep.longrepr.reprcrash.message # type: ignore[union-attr]
except AttributeError:
pass
else:
if (
running_on_ci() or config.option.verbose >= 2
) and not config.option.force_short_summary:
msg = f" - {msg}"
else:
available_width = tw.fullwidth - line_width
msg = _format_trimmed(" - {}", msg, available_width)
if msg is not None:
line += msg
return line
def _folded_skips(
startpath: Path,
skipped: Sequence[CollectReport],
) -> list[tuple[int, str, int | None, str]]:
d: dict[tuple[str, int | None, str], list[CollectReport]] = {}
for event in skipped:
assert event.longrepr is not None
assert isinstance(event.longrepr, tuple), (event, event.longrepr)
assert len(event.longrepr) == 3, (event, event.longrepr)
fspath, lineno, reason = event.longrepr
# For consistency, report all fspaths in relative form.
fspath = bestrelpath(startpath, Path(fspath))
keywords = getattr(event, "keywords", {})
# Folding reports with global pytestmark variable.
# This is a workaround, because for now we cannot identify the scope of a skip marker
# TODO: Revisit after marks scope would be fixed.
if (
event.when == "setup"
and "skip" in keywords
and "pytestmark" not in keywords
):
key: tuple[str, int | None, str] = (fspath, None, reason)
else:
key = (fspath, lineno, reason)
d.setdefault(key, []).append(event)
values: list[tuple[int, str, int | None, str]] = []
for key, events in d.items():
values.append((len(events), *key))
return values
_color_for_type = {
"failed": "red",
"error": "red",
"warnings": "yellow",
"passed": "green",
"subtests passed": "green",
"subtests failed": "red",
}
_color_for_type_default = "yellow"
def pluralize(count: int, noun: str) -> tuple[int, str]:
# No need to pluralize words such as `failed` or `passed`.
if noun not in ["error", "warnings", "test"]:
return count, noun
# The `warnings` key is plural. To avoid API breakage, we keep it that way but
# set it to singular here so we can determine plurality in the same way as we do
# for `error`.
noun = noun.replace("warnings", "warning")
return count, noun + "s" if count != 1 else noun
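# Editor's note: illustrative values (not from the original source). The helper
# leaves result words that already read correctly alone and only pluralizes the
# count nouns pytest reports:
# pluralize(1, "error") -> (1, "error");    pluralize(2, "error") -> (2, "errors")
# pluralize(1, "warnings") -> (1, "warning"); pluralize(2, "warnings") -> (2, "warnings")
# pluralize(3, "failed") -> (3, "failed")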
def _plugin_nameversions(plugininfo) -> list[str]:
values: list[str] = []
for plugin, dist in plugininfo:
# Gets us name and version!
name = f"{dist.project_name}-{dist.version}"
# Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
# We decided to print python package names since they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
return f"{seconds:.2f}s"
else:
dt = datetime.timedelta(seconds=int(seconds))
return f"{seconds:.2f}s ({dt})"
def format_node_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the test progress."""
# The formatting is designed to be compact and readable, with at most 7 characters
# for durations below 100 hours.
if seconds < 0.00001:
return f" {seconds * 1000000:.3f}us"
if seconds < 0.0001:
return f" {seconds * 1000000:.2f}us"
if seconds < 0.001:
return f" {seconds * 1000000:.1f}us"
if seconds < 0.01:
return f" {seconds * 1000:.3f}ms"
if seconds < 0.1:
return f" {seconds * 1000:.2f}ms"
if seconds < 1:
return f" {seconds * 1000:.1f}ms"
if seconds < 60:
return f" {seconds:.3f}s"
if seconds < 3600:
return f" {seconds // 60:.0f}m {seconds % 60:.0f}s"
return f" {seconds // 3600:.0f}h {(seconds % 3600) // 60:.0f}m"
def _get_raw_skip_reason(report: TestReport) -> str:
"""Get the reason string of a skip/xfail/xpass test report.
The string is just the part given by the user.
"""
if hasattr(report, "wasxfail"):
reason = report.wasxfail
if reason.startswith("reason: "):
reason = reason[len("reason: ") :]
return reason
else:
assert report.skipped
assert isinstance(report.longrepr, tuple)
_, _, reason = report.longrepr
if reason.startswith("Skipped: "):
reason = reason[len("Skipped: ") :]
elif reason == "Skipped":
reason = ""
return reason
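# Editor's note: assumed illustrative inputs for the helper above:
# a report with wasxfail "reason: flaky upstream" yields "flaky upstream";
# a skipped report whose longrepr is (path, lineno, "Skipped: requires POSIX")
# yields "requires POSIX".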
|
TerminalReporter
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_line06.py
|
{
"start": 315,
"end": 1375
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_line06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line", "subtype": "percent_stacked"})
chart.axis_ids = [108321408, 108634112]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_asset_condition_evaluations.py
|
{
"start": 13375,
"end": 31233
}
|
class ____(ExecutingGraphQLContextTestMatrix):
def test_auto_materialize_sensor(self, graphql_context: WorkspaceRequestContext):
sensor_origin = RemoteInstigatorOrigin(
repository_origin=infer_repository(graphql_context).get_remote_origin(),
instigator_name="my_auto_materialize_sensor",
)
check.not_none(graphql_context.instance.schedule_storage).add_instigator_state(
InstigatorState(
sensor_origin,
InstigatorType.SENSOR,
status=InstigatorStatus.RUNNING,
instigator_data=SensorInstigatorData(
sensor_type=SensorType.AUTO_MATERIALIZE,
cursor=asset_daemon_cursor_to_instigator_serialized_cursor(
AssetDaemonCursor.empty(12345)
),
),
)
)
with patch(
graphql_context.instance.__class__.__module__
+ "."
+ graphql_context.instance.__class__.__name__
+ ".auto_materialize_use_sensors",
new_callable=PropertyMock,
) as mock_my_property:
mock_my_property.return_value = False
results = execute_dagster_graphql(
graphql_context,
AUTO_MATERIALIZE_POLICY_SENSORS_QUERY,
variables={
"assetKey": {"path": ["fresh_diamond_bottom"]},
},
)
assert results.data["assetNodeOrError"]["currentAutoMaterializeEvaluationId"] == "0"
with patch(
graphql_context.instance.__class__.__module__
+ "."
+ graphql_context.instance.__class__.__name__
+ ".auto_materialize_use_sensors",
new_callable=PropertyMock,
) as mock_my_property:
mock_my_property.return_value = True
results = execute_dagster_graphql(
graphql_context,
AUTO_MATERIALIZE_POLICY_SENSORS_QUERY,
variables={
"assetKey": {"path": ["fresh_diamond_bottom"]},
},
)
assert any(
instigator["name"] == "my_auto_materialize_sensor"
for instigator in results.data["assetNodeOrError"]["targetingInstigators"]
)
assert results.data["assetNodeOrError"]["currentAutoMaterializeEvaluationId"] == "12345"
def _get_node(
self, unique_id: str, evaluations: Sequence[Mapping[str, Any]]
) -> Mapping[str, Any]:
return next(iter([node for node in evaluations if node["uniqueId"] == unique_id]))
def _get_condition_evaluation(
self,
asset_key: AssetKey,
description: str,
partitions_def: PartitionsDefinition,
true_partition_keys: Sequence[str],
candidate_partition_keys: Optional[Sequence[str]] = None,
child_evaluations: Optional[Sequence[AutomationConditionEvaluation]] = None,
) -> AutomationConditionEvaluation:
return AutomationConditionEvaluation(
condition_snapshot=AutomationConditionNodeSnapshot(
class_name="...",
description=description,
unique_id=str(random.randint(0, 100000000)),
),
true_subset=SerializableEntitySubset(
key=asset_key,
value=partitions_def.subset_with_partition_keys(true_partition_keys),
),
candidate_subset=SerializableEntitySubset(
key=asset_key,
value=partitions_def.subset_with_partition_keys(candidate_partition_keys),
)
if candidate_partition_keys
else HistoricalAllPartitionsSubsetSentinel(),
start_timestamp=123,
end_timestamp=456,
child_evaluations=child_evaluations or [],
subsets_with_metadata=[],
)
def test_get_evaluations_with_partitions(self, graphql_context: WorkspaceRequestContext):
asset_key = AssetKey("upstream_static_partitioned_asset")
partitions_def = StaticPartitionsDefinition(["a", "b", "c", "d", "e", "f"])
results = execute_dagster_graphql(
graphql_context,
LEGACY_QUERY,
variables={
"assetKey": {"path": ["upstream_static_partitioned_asset"]},
"limit": 10,
"cursor": None,
},
)
assert results.data["assetConditionEvaluationRecordsOrError"] == {"records": []}
evaluation = self._get_condition_evaluation(
asset_key,
"All of",
partitions_def,
["a", "b"],
child_evaluations=[
self._get_condition_evaluation(
asset_key,
"Any of",
partitions_def,
["a", "b", "c"],
child_evaluations=[
self._get_condition_evaluation(
asset_key, "parent_updated", partitions_def, ["a", "c"]
),
self._get_condition_evaluation(asset_key, "missing", partitions_def, ["b"]),
self._get_condition_evaluation(asset_key, "other", partitions_def, []),
],
),
self._get_condition_evaluation(
asset_key,
"Not",
partitions_def,
["a", "b"],
candidate_partition_keys=["a", "b", "c"],
child_evaluations=[
self._get_condition_evaluation(
asset_key,
"Any of",
partitions_def,
["c"],
["a", "b", "c"],
child_evaluations=[
self._get_condition_evaluation(
asset_key,
"parent missing",
partitions_def,
["c"],
["a", "b", "c"],
),
self._get_condition_evaluation(
asset_key,
"parent outdated",
partitions_def,
[],
["a", "b", "c"],
),
],
),
],
),
],
)
check.not_none(
graphql_context.instance.schedule_storage
).add_auto_materialize_asset_evaluations(
evaluation_id=10,
asset_evaluations=[
AutomationConditionEvaluationWithRunIds(
evaluation=evaluation, run_ids=frozenset({"runid1", "runid2"})
)
],
)
results = execute_dagster_graphql(
graphql_context,
LEGACY_QUERY,
variables={
"assetKey": {"path": ["upstream_static_partitioned_asset"]},
"limit": 10,
"cursor": None,
},
)
records = results.data["assetConditionEvaluationRecordsOrError"]["records"]
assert len(records) == 1
assert records[0]["numRequested"] == 2
evaluation = records[0]["evaluation"]
# all nodes in the tree
assert len(evaluation["evaluationNodes"]) == 9
rootNode = evaluation["evaluationNodes"][0]
assert rootNode["uniqueId"] == evaluation["rootUniqueId"]
assert rootNode["description"] == "All of"
assert rootNode["numTrue"] == 2
assert len(rootNode["childUniqueIds"]) == 2
notNode = self._get_node(rootNode["childUniqueIds"][1], evaluation["evaluationNodes"])
assert notNode["description"] == "Not"
assert notNode["numTrue"] == 2
skipNode = self._get_node(notNode["childUniqueIds"][0], evaluation["evaluationNodes"])
assert skipNode["description"] == "Any of"
assert skipNode["numTrue"] == 1
evaluationId = records[0]["evaluationId"]
uniqueId = skipNode["uniqueId"]
results = execute_dagster_graphql(
graphql_context,
TRUE_PARTITIONS_QUERY,
variables={
"assetKey": {"path": ["upstream_static_partitioned_asset"]},
"evaluationId": evaluationId,
"nodeUniqueId": uniqueId,
},
)
assert set(results.data["truePartitionsForAutomationConditionEvaluationNode"]) == {"c"}
# test one of the true partitions
specific_result = execute_dagster_graphql(
graphql_context,
LEGACY_QUERY_FOR_SPECIFIC_PARTITION,
variables={
"assetKey": {"path": ["upstream_static_partitioned_asset"]},
"partition": "b",
"evaluationId": 10,
},
)
evaluation = specific_result.data["assetConditionEvaluationForPartition"]
assert len(evaluation["evaluationNodes"]) == 9
rootNode = evaluation["evaluationNodes"][0]
assert rootNode["uniqueId"] == evaluation["rootUniqueId"]
assert rootNode["description"] == "All of"
assert rootNode["status"] == "TRUE"
assert len(rootNode["childUniqueIds"]) == 2
notNode = self._get_node(rootNode["childUniqueIds"][1], evaluation["evaluationNodes"])
assert notNode["description"] == "Not"
assert notNode["status"] == "TRUE"
skipNode = self._get_node(notNode["childUniqueIds"][0], evaluation["evaluationNodes"])
assert skipNode["description"] == "Any of"
assert skipNode["status"] == "FALSE"
# test one of the false partitions
specific_result = execute_dagster_graphql(
graphql_context,
LEGACY_QUERY_FOR_SPECIFIC_PARTITION,
variables={
"assetKey": {"path": ["upstream_static_partitioned_asset"]},
"partition": "d",
"evaluationId": 10,
},
)
evaluation = specific_result.data["assetConditionEvaluationForPartition"]
assert len(evaluation["evaluationNodes"]) == 9
rootNode = evaluation["evaluationNodes"][0]
assert rootNode["uniqueId"] == evaluation["rootUniqueId"]
assert rootNode["description"] == "All of"
assert rootNode["status"] == "FALSE"
assert len(rootNode["childUniqueIds"]) == 2
notNode = self._get_node(rootNode["childUniqueIds"][1], evaluation["evaluationNodes"])
assert notNode["description"] == "Not"
assert notNode["status"] == "SKIPPED"
skipNode = self._get_node(notNode["childUniqueIds"][0], evaluation["evaluationNodes"])
assert skipNode["description"] == "Any of"
assert skipNode["status"] == "SKIPPED"
def test_get_evaluations_with_partitions_updated(
self, graphql_context: WorkspaceRequestContext
):
@asset(
partitions_def=StaticPartitionsDefinition(["a", "b", "c", "d"]),
automation_condition=AutomationCondition.eager().with_label("blah"),
deps=["up"],
)
def A() -> None: ...
results = execute_dagster_graphql(
graphql_context,
QUERY,
variables={"assetKey": {"path": ["A"]}, "limit": 10, "cursor": None},
)
assert results.data == {"assetConditionEvaluationRecordsOrError": {"records": []}}
result = evaluate_automation_conditions([A], DagsterInstance.ephemeral())
check.not_none(
graphql_context.instance.schedule_storage
).add_auto_materialize_asset_evaluations(
evaluation_id=10,
asset_evaluations=[
AutomationConditionEvaluationWithRunIds(
evaluation=result.results[0].serializable_evaluation,
run_ids=frozenset({"runid1"}),
)
],
)
results = execute_dagster_graphql(
graphql_context,
QUERY,
variables={"assetKey": {"path": ["A"]}, "limit": 10, "cursor": None},
)
records = results.data["assetConditionEvaluationRecordsOrError"]["records"]
assert len(records) == 1
record = records[0]
assert not record["isLegacy"]
assert record["numRequested"] == 0
# all nodes in the tree
assert len(record["evaluationNodes"]) == 35
rootNode = record["evaluationNodes"][0]
assert rootNode["uniqueId"] == record["rootUniqueId"]
assert rootNode["userLabel"] == "blah"
assert rootNode["expandedLabel"] == [
"(in_latest_time_window)",
"AND",
"(((newly_missing) OR (any_deps_updated)) SINCE (handled))",
"AND",
"(NOT (any_deps_missing))",
"AND",
"(NOT (any_deps_in_progress))",
"AND",
"(NOT (in_progress))",
]
assert rootNode["numTrue"] == 0
assert rootNode["operatorType"] == "and"
assert len(rootNode["childUniqueIds"]) == 5
def _get_node(id):
return next(n for n in record["evaluationNodes"] if n["uniqueId"] == id)
not_any_deps_missing_node = _get_node(rootNode["childUniqueIds"][2])
any_deps_missing_node = _get_node(not_any_deps_missing_node["childUniqueIds"][0])
up_node = _get_node(any_deps_missing_node["childUniqueIds"][0])
assert up_node["expandedLabel"] == ["up", "((missing) AND (NOT (will_be_requested)))"]
evaluationId = record["evaluationId"]
uniqueId = rootNode["uniqueId"]
results = execute_dagster_graphql(
graphql_context,
TRUE_PARTITIONS_QUERY,
variables={
"assetKey": {"path": ["A"]},
"evaluationId": evaluationId,
"nodeUniqueId": uniqueId,
},
)
assert set(results.data["truePartitionsForAutomationConditionEvaluationNode"]) == set()
childNode = record["evaluationNodes"][1]
assert childNode["userLabel"] is None
assert childNode["expandedLabel"] == ["in_latest_time_window"]
assert childNode["numTrue"] == 4
assert len(childNode["childUniqueIds"]) == 0
evaluationId = record["evaluationId"]
uniqueId = childNode["uniqueId"]
results = execute_dagster_graphql(
graphql_context,
TRUE_PARTITIONS_QUERY,
variables={
"assetKey": {"path": ["A"]},
"evaluationId": evaluationId,
"nodeUniqueId": uniqueId,
},
)
assert set(results.data["truePartitionsForAutomationConditionEvaluationNode"]) == {
"a",
"b",
"c",
"d",
}
def test_since_metadata_field(self, graphql_context: WorkspaceRequestContext):
"""Test that the sinceMetadata field is correctly populated for SinceCondition evaluations."""
asset_key = AssetKey("test_asset")
partitions_def = StaticPartitionsDefinition(["a", "b"])
# Create an evaluation with SinceCondition metadata
since_metadata = SinceConditionData(
trigger_evaluation_id=5,
trigger_timestamp=1234567890.0,
reset_evaluation_id=3,
reset_timestamp=1234567880.0,
).to_metadata()
evaluation: AutomationConditionEvaluation[EntityKey] = AutomationConditionEvaluation(
condition_snapshot=AutomationConditionNodeSnapshot(
class_name="SinceCondition",
description="since_test",
unique_id="since_node_123",
),
true_subset=SerializableEntitySubset(
key=asset_key,
value=partitions_def.subset_with_partition_keys(["a"]),
),
candidate_subset=HistoricalAllPartitionsSubsetSentinel(),
start_timestamp=123.0,
end_timestamp=456.0,
child_evaluations=[],
subsets_with_metadata=[],
metadata=since_metadata,
)
check.not_none(
graphql_context.instance.schedule_storage
).add_auto_materialize_asset_evaluations(
evaluation_id=200,
asset_evaluations=[
AutomationConditionEvaluationWithRunIds(evaluation=evaluation, run_ids=frozenset())
],
)
# Query with the sinceMetadata field
results = execute_dagster_graphql(
graphql_context,
QUERY_WITH_SINCE_METADATA,
variables={"assetKey": {"path": ["test_asset"]}, "limit": 10, "cursor": None},
)
records = results.data["assetConditionEvaluationRecordsOrError"]["records"]
assert len(records) == 1
# Find the SinceCondition node
evaluation_nodes = records[0]["evaluationNodes"]
assert len(evaluation_nodes) == 1
since_node = evaluation_nodes[0]
# Verify sinceMetadata is populated correctly
assert since_node["sinceMetadata"] is not None
since_metadata_result = since_node["sinceMetadata"]
assert since_metadata_result["triggerEvaluationId"] == 5
assert since_metadata_result["triggerTimestamp"] == 1234567890.0
assert since_metadata_result["resetEvaluationId"] == 3
assert since_metadata_result["resetTimestamp"] == 1234567880.0
|
TestAssetConditionEvaluations
|
python
|
crytic__slither
|
slither/detectors/variables/var_read_using_this.py
|
{
"start": 292,
"end": 2163
}
|
class ____(AbstractDetector):
ARGUMENT = "var-read-using-this"
HELP = "Contract reads its own variable using `this`"
IMPACT = DetectorClassification.OPTIMIZATION
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#public-variable-read-in-external-context"
WIKI_TITLE = "Public variable read in external context"
WIKI_DESCRIPTION = "The contract reads its own variable using `this`, adding overhead of an unnecessary STATICCALL."
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract C {
mapping(uint => address) public myMap;
function test(uint x) external returns(address) {
return this.myMap(x);
}
}
```
"""
WIKI_RECOMMENDATION = "Read the variable directly from storage instead of calling the contract."
def _detect(self) -> List[Output]:
results = []
for c in self.contracts:
for func in c.functions:
for node in self._detect_var_read_using_this(func):
info: DETECTOR_INFO = [
"The function ",
func,
" reads ",
node,
" with `this` which adds an extra STATICCALL.\n",
]
json = self.generate_result(info)
results.append(json)
return results
@staticmethod
def _detect_var_read_using_this(func: Function) -> List[Node]:
results: List[Node] = []
for _, ir in func.high_level_calls:
if (
ir.destination == SolidityVariable("this")
and ir.is_static_call()
and ir.function.visibility == "public"
):
results.append(ir.node)
return sorted(results, key=lambda x: x.node_id)
|
VarReadUsingThis
|
python
|
django__django
|
tests/force_insert_update/models.py
|
{
"start": 587,
"end": 751
}
|
class ____(Counter):
other_counter_ptr = models.OneToOneField(
Counter, primary_key=True, parent_link=True, on_delete=models.CASCADE
)
|
OtherSubCounter
|
python
|
scipy__scipy
|
scipy/stats/_relative_risk.py
|
{
"start": 428,
"end": 9571
}
|
class ____:
"""
Result of `scipy.stats.contingency.relative_risk`.
Attributes
----------
relative_risk : float
This is::
(exposed_cases/exposed_total) / (control_cases/control_total)
exposed_cases : int
The number of "cases" (i.e. occurrence of disease or other event
of interest) among the sample of "exposed" individuals.
exposed_total : int
The total number of "exposed" individuals in the sample.
control_cases : int
The number of "cases" among the sample of "control" or non-exposed
individuals.
control_total : int
The total number of "control" individuals in the sample.
Methods
-------
confidence_interval :
Compute the confidence interval for the relative risk estimate.
"""
relative_risk: float
exposed_cases: int
exposed_total: int
control_cases: int
control_total: int
def confidence_interval(self, confidence_level=0.95):
"""
Compute the confidence interval for the relative risk.
The confidence interval is computed using the Katz method
(i.e. "Method C" of [1]_; see also [2]_, section 3.1.2).
Parameters
----------
confidence_level : float, optional
The confidence level to use for the confidence interval.
Default is 0.95.
Returns
-------
ci : ConfidenceInterval instance
The return value is an object with attributes ``low`` and
``high`` that hold the confidence interval.
References
----------
.. [1] D. Katz, J. Baptista, S. P. Azen and M. C. Pike, "Obtaining
confidence intervals for the risk ratio in cohort studies",
Biometrics, 34, 469-474 (1978).
.. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
CRC Press LLC, Boca Raton, FL, USA (1996).
Examples
--------
>>> from scipy.stats.contingency import relative_risk
>>> result = relative_risk(exposed_cases=10, exposed_total=75,
... control_cases=12, control_total=225)
>>> result.relative_risk
2.5
>>> result.confidence_interval()
ConfidenceInterval(low=1.1261564003469628, high=5.549850800541033)
"""
if not 0 <= confidence_level <= 1:
raise ValueError('confidence_level must be in the interval '
'[0, 1].')
# Handle edge cases where either exposed_cases or control_cases
# is zero. We follow the convention of the R function riskratio
# from the epitools library.
if self.exposed_cases == 0 and self.control_cases == 0:
# relative risk is nan.
return ConfidenceInterval(low=np.nan, high=np.nan)
elif self.exposed_cases == 0:
# relative risk is 0.
return ConfidenceInterval(low=0.0, high=np.nan)
elif self.control_cases == 0:
# relative risk is inf
return ConfidenceInterval(low=np.nan, high=np.inf)
alpha = 1 - confidence_level
z = ndtri(1 - alpha/2)
rr = self.relative_risk
# Estimate of the variance of log(rr) is
# var(log(rr)) = 1/exposed_cases - 1/exposed_total +
# 1/control_cases - 1/control_total
# and the standard error is the square root of that.
se = np.sqrt(1/self.exposed_cases - 1/self.exposed_total +
1/self.control_cases - 1/self.control_total)
delta = z*se
katz_lo = rr*np.exp(-delta)
katz_hi = rr*np.exp(delta)
return ConfidenceInterval(low=katz_lo, high=katz_hi)
def relative_risk(exposed_cases, exposed_total, control_cases, control_total):
"""
Compute the relative risk (also known as the risk ratio).
This function computes the relative risk associated with a 2x2
contingency table ([1]_, section 2.2.3; [2]_, section 3.1.2). Instead
of accepting a table as an argument, the individual numbers that are
used to compute the relative risk are given as separate parameters.
This is to avoid the ambiguity of which row or column of the contingency
table corresponds to the "exposed" cases and which corresponds to the
"control" cases. Unlike, say, the odds ratio, the relative risk is not
invariant under an interchange of the rows or columns.
Parameters
----------
exposed_cases : nonnegative int
The number of "cases" (i.e. occurrence of disease or other event
of interest) among the sample of "exposed" individuals.
exposed_total : positive int
The total number of "exposed" individuals in the sample.
control_cases : nonnegative int
The number of "cases" among the sample of "control" or non-exposed
individuals.
control_total : positive int
The total number of "control" individuals in the sample.
Returns
-------
result : instance of `~scipy.stats._result_classes.RelativeRiskResult`
The object has the float attribute ``relative_risk``, which is::
rr = (exposed_cases/exposed_total) / (control_cases/control_total)
The object also has the method ``confidence_interval`` to compute
the confidence interval of the relative risk for a given confidence
level.
See Also
--------
odds_ratio
Notes
-----
The R package epitools has the function `riskratio`, which accepts
a table with the following layout::
disease=0 disease=1
exposed=0 (ref) n00 n01
exposed=1 n10 n11
With a 2x2 table in the above format, the estimate of the CI is
computed by `riskratio` when the argument method="wald" is given,
or with the function `riskratio.wald`.
For example, in a test of the incidence of lung cancer among a
sample of smokers and nonsmokers, the "exposed" category would
correspond to "is a smoker" and the "disease" category would
correspond to "has or had lung cancer".
To pass the same data to ``relative_risk``, use::
relative_risk(n11, n10 + n11, n01, n00 + n01)
.. versionadded:: 1.7.0
References
----------
.. [1] Alan Agresti, An Introduction to Categorical Data Analysis
(second edition), Wiley, Hoboken, NJ, USA (2007).
.. [2] Hardeo Sahai and Anwer Khurshid, Statistics in Epidemiology,
CRC Press LLC, Boca Raton, FL, USA (1996).
Examples
--------
>>> from scipy.stats.contingency import relative_risk
This example is from Example 3.1 of [2]_. The results of a heart
disease study are summarized in the following table::
High CAT Low CAT Total
-------- ------- -----
CHD 27 44 71
No CHD 95 443 538
Total 122 487 609
CHD is coronary heart disease, and CAT refers to the level of
circulating catecholamine. CAT is the "exposure" variable, and
high CAT is the "exposed" category. So the data from the table
to be passed to ``relative_risk`` is::
exposed_cases = 27
exposed_total = 122
control_cases = 44
control_total = 487
>>> result = relative_risk(27, 122, 44, 487)
>>> result.relative_risk
2.4495156482861398
Find the confidence interval for the relative risk.
>>> result.confidence_interval(confidence_level=0.95)
ConfidenceInterval(low=1.5836990926700116, high=3.7886786315466354)
The interval does not contain 1, so the data supports the statement
that high CAT is associated with greater risk of CHD.
"""
# Relative risk is a trivial calculation. The nontrivial part is in the
# `confidence_interval` method of the RelativeRiskResult class.
exposed_cases = _validate_int(exposed_cases, 0, "exposed_cases")
exposed_total = _validate_int(exposed_total, 1, "exposed_total")
control_cases = _validate_int(control_cases, 0, "control_cases")
control_total = _validate_int(control_total, 1, "control_total")
if exposed_cases > exposed_total:
raise ValueError('exposed_cases must not exceed exposed_total.')
if control_cases > control_total:
raise ValueError('control_cases must not exceed control_total.')
if exposed_cases == 0 and control_cases == 0:
# relative risk is 0/0.
rr = np.nan
elif exposed_cases == 0:
# relative risk is 0/nonzero
rr = 0.0
elif control_cases == 0:
# relative risk is nonzero/0.
rr = np.inf
else:
p1 = exposed_cases / exposed_total
p2 = control_cases / control_total
rr = p1 / p2
return RelativeRiskResult(relative_risk=rr,
exposed_cases=exposed_cases,
exposed_total=exposed_total,
control_cases=control_cases,
control_total=control_total)
|
RelativeRiskResult
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/structured_query.py
|
{
"start": 289,
"end": 2295
}
|
class ____(ABC):
"""Defines interface for IR translation using a visitor pattern."""
allowed_comparators: Sequence[Comparator] | None = None
"""Allowed comparators for the visitor."""
allowed_operators: Sequence[Operator] | None = None
"""Allowed operators for the visitor."""
def _validate_func(self, func: Operator | Comparator) -> None:
if (
isinstance(func, Operator)
and self.allowed_operators is not None
and func not in self.allowed_operators
):
msg = (
f"Received disallowed operator {func}. Allowed "
f"comparators are {self.allowed_operators}"
)
raise ValueError(msg)
if (
isinstance(func, Comparator)
and self.allowed_comparators is not None
and func not in self.allowed_comparators
):
msg = (
f"Received disallowed comparator {func}. Allowed "
f"comparators are {self.allowed_comparators}"
)
raise ValueError(msg)
@abstractmethod
def visit_operation(self, operation: Operation) -> Any:
"""Translate an Operation.
Args:
operation: Operation to translate.
"""
@abstractmethod
def visit_comparison(self, comparison: Comparison) -> Any:
"""Translate a Comparison.
Args:
comparison: Comparison to translate.
"""
@abstractmethod
def visit_structured_query(self, structured_query: StructuredQuery) -> Any:
"""Translate a StructuredQuery.
Args:
structured_query: StructuredQuery to translate.
"""
def _to_snake_case(name: str) -> str:
"""Convert a name into snake_case."""
snake_case = ""
for i, char in enumerate(name):
if char.isupper() and i != 0:
snake_case += "_" + char.lower()
else:
snake_case += char.lower()
return snake_case
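# Editor's note: a small illustrative call (input string is assumed):
# _to_snake_case("StructuredQuery") -> "structured_query"
# In this module it is presumably used to map expression class names such as
# "Comparison" to visitor method names like visit_comparison.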
|
Visitor
|
python
|
realpython__materials
|
python-guitar-synthesizer/source_code_final/src/digitar/pitch.py
|
{
"start": 130,
"end": 783
}
|
class ____:
frequency: Hertz
@classmethod
def from_scientific_notation(cls, notation: str) -> Self:
if match := re.fullmatch(r"([A-G]#?)(-?\d+)?", notation):
note = match.group(1)
octave = int(match.group(2) or 0)
semitones = "C C# D D# E F F# G G# A A# B".split()
index = octave * 12 + semitones.index(note) - 57
return cls(frequency=440.0 * 2 ** (index / 12))
else:
raise ValueError(f"Invalid scientific pitch notation: {notation}")
def adjust(self, num_semitones: int) -> Self:
return Pitch(self.frequency * 2 ** (num_semitones / 12))
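# Editor's note: a minimal usage sketch for the Pitch helper above (values
# follow from the 440 Hz equal-temperament formula in the code):
# Pitch.from_scientific_notation("A4").frequency             -> 440.0
# Pitch.from_scientific_notation("C4").frequency             -> ~261.63 (middle C)
# Pitch.from_scientific_notation("A4").adjust(12).frequency  -> 880.0 (one octave up)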
|
Pitch
|
python
|
plotly__plotly.py
|
plotly/graph_objs/_bar.py
|
{
"start": 215,
"end": 94673
}
|
class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "bar"
_valid_props = {
"alignmentgroup",
"base",
"basesrc",
"cliponaxis",
"constraintext",
"customdata",
"customdatasrc",
"dx",
"dy",
"error_x",
"error_y",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextanchor",
"insidetextfont",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"marker",
"meta",
"metasrc",
"name",
"offset",
"offsetgroup",
"offsetsrc",
"opacity",
"orientation",
"outsidetextfont",
"selected",
"selectedpoints",
"showlegend",
"stream",
"text",
"textangle",
"textfont",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatefallback",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"width",
"widthsrc",
"x",
"x0",
"xaxis",
"xcalendar",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"y",
"y0",
"yaxis",
"ycalendar",
"yhoverformat",
"yperiod",
"yperiod0",
"yperiodalignment",
"ysrc",
"zorder",
}
@property
def alignmentgroup(self):
"""
Set several traces linked to the same position axis or matching
axes to the same alignmentgroup. This controls whether bars
compute their positional range dependently or independently.
The 'alignmentgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["alignmentgroup"]
@alignmentgroup.setter
def alignmentgroup(self, val):
self["alignmentgroup"] = val
@property
def base(self):
"""
Sets where the bar base is drawn (in position axis units). In
"stack" or "relative" barmode, traces that set "base" will be
excluded and drawn in "overlay" mode instead.
The 'base' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["base"]
@base.setter
def base(self, val):
self["base"] = val
@property
def basesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `base`.
The 'basesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["basesrc"]
@basesrc.setter
def basesrc(self, val):
self["basesrc"] = val
@property
def cliponaxis(self):
"""
Determines whether the text nodes are clipped about the subplot
axes. To show the text nodes above axis lines and tick labels,
make sure to set `xaxis.layer` and `yaxis.layer` to *below
traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cliponaxis"]
@cliponaxis.setter
def cliponaxis(self, val):
self["cliponaxis"] = val
@property
def constraintext(self):
"""
Constrain the size of text inside or outside a bar to be no
larger than the bar itself.
The 'constraintext' property is an enumeration that may be specified as:
- One of the following enumeration values:
['inside', 'outside', 'both', 'none']
Returns
-------
Any
"""
return self["constraintext"]
@constraintext.setter
def constraintext(self, val):
self["constraintext"] = val
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items in the markers
DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def dx(self):
"""
Sets the x coordinate step. See `x0` for more info.
The 'dx' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dx"]
@dx.setter
def dx(self, val):
self["dx"] = val
@property
def dy(self):
"""
Sets the y coordinate step. See `y0` for more info.
The 'dy' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dy"]
@dy.setter
def dy(self, val):
self["dy"] = val
@property
def error_x(self):
"""
The 'error_x' property is an instance of ErrorX
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.ErrorX`
- A dict of string/value properties that will be passed
to the ErrorX constructor
Returns
-------
plotly.graph_objs.bar.ErrorX
"""
return self["error_x"]
@error_x.setter
def error_x(self, val):
self["error_x"] = val
@property
def error_y(self):
"""
The 'error_y' property is an instance of ErrorY
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.ErrorY`
- A dict of string/value properties that will be passed
to the ErrorY constructor
Returns
-------
plotly.graph_objs.bar.ErrorY
"""
return self["error_y"]
@error_y.setter
def error_y(self, val):
self["error_y"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.bar.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, %{_xother}, %{_xother_}, %{xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables `value`
and `label`. Anything contained in tag `<extra>` is displayed
in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Sets hover text elements associated with each (x,y) pair. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order to
this trace's (x,y) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def insidetextanchor(self):
"""
Determines if texts are kept at center or start/end points in
`textposition` "inside" mode.
The 'insidetextanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['end', 'middle', 'start']
Returns
-------
Any
"""
return self["insidetextanchor"]
@insidetextanchor.setter
def insidetextanchor(self, val):
self["insidetextanchor"] = val
@property
def insidetextfont(self):
"""
Sets the font used for `text` lying inside the bar.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Returns
-------
plotly.graph_objs.bar.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.bar.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When there are
unranked or equal-rank items, shapes are displayed after traces,
i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.bar.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def offset(self):
"""
Shifts the position where the bar is drawn (in position axis
units). In "group" barmode, traces that set "offset" will be
excluded and drawn in "overlay" mode instead.
The 'offset' property is a number and may be specified as:
- An int or float
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["offset"]
@offset.setter
def offset(self, val):
self["offset"] = val
@property
def offsetgroup(self):
"""
Set several traces linked to the same position axis or matching
axes to the same offsetgroup where bars of the same position
coordinate will line up.
The 'offsetgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["offsetgroup"]
@offsetgroup.setter
def offsetgroup(self, val):
self["offsetgroup"] = val
@property
def offsetsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `offset`.
The 'offsetsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["offsetsrc"]
@offsetsrc.setter
def offsetsrc(self, val):
self["offsetsrc"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def orientation(self):
"""
Sets the orientation of the bars. With "v" ("h"), the value of
each bar spans along the vertical (horizontal).
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['v', 'h']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
@property
def outsidetextfont(self):
"""
Sets the font used for `text` lying outside the bar.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Returns
-------
plotly.graph_objs.bar.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Returns
-------
plotly.graph_objs.bar.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas any other non-array value
means no selection at all, where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.bar.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair. If a single
string, the same string appears over all the data points. If an
array of strings, the items are mapped in order to this
trace's (x,y) coordinates. If trace `hoverinfo` contains a
"text" flag and "hovertext" is not set, these elements will be
seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textangle(self):
"""
Sets the angle of the text labels with respect to the bar. For
example, a `textangle` of -90 draws the text labels vertically.
With "auto" the texts may automatically be rotated to fit with
the maximum size in bars.
The 'textangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["textangle"]
@textangle.setter
def textangle(self, val):
self["textangle"] = val
@property
def textfont(self):
"""
Sets the font used for `text`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.bar.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def textposition(self):
"""
Specifies the location of the `text`. "inside" positions `text`
inside, next to the bar end (rotated and scaled if needed).
"outside" positions `text` outside, next to the bar end (scaled
if needed), unless there is another bar stacked on this one,
then the text gets pushed inside. "auto" tries to position
`text` inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If "none", no
text appears.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['inside', 'outside', 'auto', 'none']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`textposition`.
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appears on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`) are
available. Finally, the template string has access to variables
`value` and `label`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'texttemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["texttemplatefallback"]
@texttemplatefallback.setter
def texttemplatefallback(self, val):
self["texttemplatefallback"] = val
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Returns
-------
plotly.graph_objs.bar.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def width(self):
"""
Sets the bar width (in position axis units).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def x(self):
"""
Sets the x coordinates.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def x0(self):
"""
Alternate to `x`. Builds a linear space of x coordinates. Use
with `dx` where `x0` is the starting coordinate and `dx` the
step.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
@property
def xhoverformat(self):
"""
Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
@property
def xperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the x axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'xperiod' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod"]
@xperiod.setter
def xperiod(self, val):
self["xperiod"] = val
@property
def xperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the x0
axis. When `x0period` is a round number of weeks, the `x0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'xperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod0"]
@xperiod0.setter
def xperiod0(self, val):
self["xperiod0"] = val
@property
def xperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
The 'xperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["xperiodalignment"]
@xperiodalignment.setter
def xperiodalignment(self, val):
self["xperiodalignment"] = val
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
@property
def y(self):
"""
Sets the y coordinates.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def y0(self):
"""
Alternate to `y`. Builds a linear space of y coordinates. Use
with `dy` where `y0` is the starting coordinate and `dy` the
step.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
@property
def yhoverformat(self):
"""
Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
@property
def yperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the y axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'yperiod' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod"]
@yperiod.setter
def yperiod(self, val):
self["yperiod"] = val
@property
def yperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the y0
axis. When `y0period` is a round number of weeks, the `y0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'yperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod0"]
@yperiod0.setter
def yperiod0(self, val):
self["yperiod0"] = val
@property
def yperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
The 'yperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["yperiodalignment"]
@yperiodalignment.setter
def yperiodalignment(self, val):
self["yperiodalignment"] = val
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `y`.
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
@property
def zorder(self):
"""
Sets the layer on which this trace is displayed, relative to
other SVG traces on the same subplot. SVG traces with higher
`zorder` appear in front of those with lower `zorder`.
The 'zorder' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["zorder"]
@zorder.setter
def zorder(self, val):
self["zorder"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
base
Sets where the bar base is drawn (in position axis
units). In "stack" or "relative" barmode, traces that
set "base" will be excluded and drawn in "overlay" mode
instead.
basesrc
Sets the source reference on Chart Studio Cloud for
`base`.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
error_x
:class:`plotly.graph_objects.bar.ErrorX` instance or
dict with compatible properties
error_y
:class:`plotly.graph_objects.bar.ErrorY` instance or
dict with compatible properties
hoverinfo
Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.bar.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`. Anything contained in
tag `<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.bar.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When there are
unranked or equal-rank items, shapes are displayed
after traces, i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.bar.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
offsetsrc
Sets the source reference on Chart Studio Cloud for
`offset`.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of each bar spans along the vertical
(horizontal).
outsidetextfont
Sets the font used for `text` lying outside the bar.
selected
:class:`plotly.graph_objects.bar.Selected` instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.bar.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the text labels with respect to the
bar. For example, a `textangle` of -90 draws the text
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.bar.Unselected` instance
or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
self,
arg=None,
alignmentgroup=None,
base=None,
basesrc=None,
cliponaxis=None,
constraintext=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
error_x=None,
error_y=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextanchor=None,
insidetextfont=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
marker=None,
meta=None,
metasrc=None,
name=None,
offset=None,
offsetgroup=None,
offsetsrc=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selected=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatefallback=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
width=None,
widthsrc=None,
x=None,
x0=None,
xaxis=None,
xcalendar=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
ycalendar=None,
yhoverformat=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
zorder=None,
**kwargs,
):
"""
Construct a new Bar object
The data visualized by the span of the bars is set in `y` if
`orientation` is set to "v" (the default) and the labels are
set in `x`. By setting `orientation` to "h", the roles are
interchanged.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Bar`
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
base
Sets where the bar base is drawn (in position axis
units). In "stack" or "relative" barmode, traces that
set "base" will be excluded and drawn in "overlay" mode
instead.
basesrc
Sets the source reference on Chart Studio Cloud for
`base`.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that "scatter" traces also append customdata items to
the markers' DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
error_x
:class:`plotly.graph_objects.bar.ErrorX` instance or
dict with compatible properties
error_y
:class:`plotly.graph_objects.bar.ErrorY` instance or
dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.bar.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`. Anything contained in
tag `<extra>` is displayed in the secondary box, for
example `<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.bar.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When there are
unranked or equal-rank items, shapes are displayed
after traces, i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
marker
:class:`plotly.graph_objects.bar.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
offsetsrc
Sets the source reference on Chart Studio Cloud for
`offset`.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the bars. With "v" ("h"), the
value of each bar spans along the vertical
(horizontal).
outsidetextfont
Sets the font used for `text` lying outside the bar.
selected
:class:`plotly.graph_objects.bar.Selected` instance or
dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.bar.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the text labels with respect to the
bar. For example, a `textangle` of -90 draws the text
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. All attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `value` and `label`.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.bar.Unselected` instance
or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ycalendar
Sets the calendar system to use with `y` date data.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Bar
"""
super().__init__("bar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Bar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Bar`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("alignmentgroup", arg, alignmentgroup)
self._set_property("base", arg, base)
self._set_property("basesrc", arg, basesrc)
self._set_property("cliponaxis", arg, cliponaxis)
self._set_property("constraintext", arg, constraintext)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("dx", arg, dx)
self._set_property("dy", arg, dy)
self._set_property("error_x", arg, error_x)
self._set_property("error_y", arg, error_y)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("insidetextanchor", arg, insidetextanchor)
self._set_property("insidetextfont", arg, insidetextfont)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("marker", arg, marker)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("offset", arg, offset)
self._set_property("offsetgroup", arg, offsetgroup)
self._set_property("offsetsrc", arg, offsetsrc)
self._set_property("opacity", arg, opacity)
self._set_property("orientation", arg, orientation)
self._set_property("outsidetextfont", arg, outsidetextfont)
self._set_property("selected", arg, selected)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textangle", arg, textangle)
self._set_property("textfont", arg, textfont)
self._set_property("textposition", arg, textposition)
self._set_property("textpositionsrc", arg, textpositionsrc)
self._set_property("textsrc", arg, textsrc)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("texttemplatesrc", arg, texttemplatesrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("unselected", arg, unselected)
self._set_property("visible", arg, visible)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._set_property("x", arg, x)
self._set_property("x0", arg, x0)
self._set_property("xaxis", arg, xaxis)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xperiod", arg, xperiod)
self._set_property("xperiod0", arg, xperiod0)
self._set_property("xperiodalignment", arg, xperiodalignment)
self._set_property("xsrc", arg, xsrc)
self._set_property("y", arg, y)
self._set_property("y0", arg, y0)
self._set_property("yaxis", arg, yaxis)
self._set_property("ycalendar", arg, ycalendar)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("yperiod", arg, yperiod)
self._set_property("yperiod0", arg, yperiod0)
self._set_property("yperiodalignment", arg, yperiodalignment)
self._set_property("ysrc", arg, ysrc)
self._set_property("zorder", arg, zorder)
self._props["type"] = "bar"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Bar
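As a point of reference, a minimal usage sketch for the constructor documented above might look like the following; it is illustrative only (not part of the dataset row) and assumes a plotly release that supports the `xperiod` and `yhoverformat` properties described in the docstring.
import plotly.graph_objects as go
# Hypothetical monthly data; xperiod="M1" positions each bar on a one-month period.
fig = go.Figure(
    go.Bar(
        x=["2016-10-01", "2016-11-01", "2016-12-01"],
        y=[3.0, 7.0, 5.0],
        xperiod="M1",
        xperiodalignment="middle",
        yhoverformat=".2f",  # d3-style formatting rule for hovered y values
        name="monthly totals",
    )
)
fig.show()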
|
python
|
allegroai__clearml
|
clearml/backend_interface/metrics/events.py
|
{
"start": 5236,
"end": 5704
}
|
class ____(MetricsEventAdapter):
"""Scalar event adapter"""
def __init__(self, metric: str, variant: str, value: float, iter: int, **kwargs: Any) -> None:
self._value = self._convert_np_nan_inf(value)
super(ScalarEvent, self).__init__(metric=metric, variant=variant, iter=iter, **kwargs)
def get_api_event(self) -> "events.MetricsScalarEvent":
return events.MetricsScalarEvent(value=self._value, **self._get_base_dict())
|
ScalarEvent
|
python
|
xlwings__xlwings
|
xlwings/_xlmac.py
|
{
"start": 10648,
"end": 13440
}
|
class ____(base_classes.Books):
def __init__(self, app):
self.app = app
@property
def api(self):
return None
@property
def active(self):
return Book(self.app, self.app.xl.active_workbook.name.get())
def __call__(self, name_or_index):
b = Book(self.app, name_or_index)
if not b.xl.exists():
raise KeyError(name_or_index)
return b
def __contains__(self, key):
return Book(self.app, key).xl.exists()
def __len__(self):
return self.app.xl.count(each=kw.workbook)
def add(self):
if self.app.visible:
self.app.activate()
xl = self.app.xl.make(new=kw.workbook)
wb = Book(self.app, xl.name.get())
return wb
def open(
self,
fullname,
update_links=None,
read_only=None,
format=None,
password=None,
write_res_password=None,
ignore_read_only_recommended=None,
origin=None,
delimiter=None,
editable=None,
notify=None,
converter=None,
add_to_mru=None,
local=None,
corrupt_load=None,
):
# TODO: format and origin currently require a native appscript keyword,
# read_only doesn't seem to work
# Unsupported params
if local is not None:
# TODO: replace with specific error when Exceptions are refactored
raise xlwings.XlwingsError("local is not supported on macOS")
if corrupt_load is not None:
# TODO: replace with specific error when Exceptions are refactored
raise xlwings.XlwingsError("corrupt_load is not supported on macOS")
# update_links: on Windows only constants 0 and 3 seem to be supported in
# this context
if update_links:
update_links = kw.update_remote_and_external_links
else:
update_links = kw.do_not_update_links
if self.app.visible:
self.app.activate()
filename = os.path.basename(fullname)
self.app.xl.open_workbook(
workbook_file_name=fullname,
update_links=update_links,
read_only=read_only,
format=format,
password=password,
write_reserved_password=write_res_password,
ignore_read_only_recommended=ignore_read_only_recommended,
origin=origin,
delimiter=delimiter,
editable=editable,
notify=notify,
converter=converter,
add_to_mru=add_to_mru,
timeout=-1,
)
wb = Book(self.app, filename)
return wb
def __iter__(self):
n = len(self)
for i in range(n):
yield Book(self.app, i + 1)
|
Books
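The row above is the macOS (appscript) backend for workbook collections; the public xlwings API that ultimately drives it looks roughly like the sketch below (the file name is hypothetical).
import xlwings as xw
app = xw.App(visible=False)  # start a hidden Excel instance
new_wb = app.books.add()  # backed by Books.add() above
existing_wb = app.books.open("report.xlsx")  # backed by Books.open(...) above
print(len(app.books), existing_wb.name)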
|
python
|
huggingface__transformers
|
src/transformers/models/auto/modeling_auto.py
|
{
"start": 91056,
"end": 91164
}
|
class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING
|
AutoModelForAudioXVector
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py
|
{
"start": 40,
"end": 98
}
|
class ____:
def __str__(self):
return 3.05
|
Float
|
python
|
huggingface__transformers
|
src/transformers/models/trocr/modeling_trocr.py
|
{
"start": 17829,
"end": 18021
}
|
class ____(PreTrainedModel):
config: TrOCRConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["TrOCRDecoderLayer"]
|
TrOCRPreTrainedModel
|
python
|
huggingface__transformers
|
tests/models/nougat/test_tokenization_nougat.py
|
{
"start": 3368,
"end": 5178
}
|
class ____(unittest.TestCase):
def test_equation_tag(self):
input_text = "(3.2) \\[Equation Text\\]"
excepted_output = "\\[Equation Text \\tag{3.2}\\]"
self.assertEqual(markdown_compatible(input_text), excepted_output)
def test_equation_tag_letters(self):
input_text = "(18a) \\[Equation Text\\]"
excepted_output = "\\[Equation Text \\tag{18a}\\]"
self.assertEqual(markdown_compatible(input_text), excepted_output)
def test_bold_formatting(self):
input_text = r"This is \bm{bold} text."
expected_output = r"This is \mathbf{bold} text."
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_url_conversion(self):
input_text = "Visit my website at https://www.example.com"
expected_output = "Visit my website at [https://www.example.com](https://www.example.com)"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_algorithm_code_block(self):
input_text = "```python\nprint('Hello, world!')\n```"
expected_output = "```\npython\nprint('Hello, world!')\n```"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_escape_characters(self):
input_text = r"Escaped characters like \n should not be \\[affected\\]"
expected_output = r"Escaped characters like \n should not be \\[affected\\]"
self.assertEqual(markdown_compatible(input_text), expected_output)
def test_nested_tags(self):
input_text = r"This is a super nested \bm{\bm{\bm{\bm{\bm{bold}}}}} tag."
expected_output = r"This is a super nested \mathbf{\mathbf{\mathbf{\mathbf{\mathbf{bold}}}}} tag."
self.assertEqual(markdown_compatible(input_text), expected_output)
|
MarkdownCompatibleTest
|
python
|
FactoryBoy__factory_boy
|
factory/declarations.py
|
{
"start": 15737,
"end": 18278
}
|
class ____(BaseDeclaration):
def __init__(self, decider, yes_declaration=SKIP, no_declaration=SKIP):
super().__init__()
if enums.get_builder_phase(decider) is None:
# No builder phase => flat value
decider = SelfAttribute(decider, default=None)
self.decider = decider
self.yes = yes_declaration
self.no = no_declaration
phases = {
'yes_declaration': enums.get_builder_phase(yes_declaration),
'no_declaration': enums.get_builder_phase(no_declaration),
}
used_phases = {phase for phase in phases.values() if phase is not None}
if len(used_phases) > 1:
raise TypeError(f"Inconsistent phases for {self!r}: {phases!r}")
self.FACTORY_BUILDER_PHASE = used_phases.pop() if used_phases else enums.BuilderPhase.ATTRIBUTE_RESOLUTION
def evaluate_post(self, instance, step, overrides):
"""Handle post-generation declarations"""
decider_phase = enums.get_builder_phase(self.decider)
if decider_phase == enums.BuilderPhase.ATTRIBUTE_RESOLUTION:
# Note: we work on the *builder stub*, not on the actual instance.
# This gives us access to all Params-level definitions.
choice = self.decider.evaluate_pre(
instance=step.stub, step=step, overrides=overrides)
else:
assert decider_phase == enums.BuilderPhase.POST_INSTANTIATION
choice = self.decider.evaluate_post(
instance=instance, step=step, overrides={})
target = self.yes if choice else self.no
if enums.get_builder_phase(target) == enums.BuilderPhase.POST_INSTANTIATION:
return target.evaluate_post(
instance=instance,
step=step,
overrides=overrides,
)
else:
# Flat value (can't be ATTRIBUTE_RESOLUTION, checked in __init__)
return target
def evaluate_pre(self, instance, step, overrides):
choice = self.decider.evaluate_pre(instance=instance, step=step, overrides={})
target = self.yes if choice else self.no
# The value can't be POST_INSTANTIATION, checked in __init__;
# evaluate it as `evaluate_pre`
return self._unwrap_evaluate_pre(
target,
instance=instance,
step=step,
overrides=overrides,
)
def __repr__(self):
return f'Maybe({self.decider!r}, yes={self.yes!r}, no={self.no!r})'
|
Maybe
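As a rough usage sketch (the factory and field names are made up, not taken from the row above), Maybe switches between two declarations based on another attribute:
import factory
class UserFactory(factory.DictFactory):
    is_superuser = False
    # yes_declaration is used when is_superuser evaluates truthy, no_declaration otherwise.
    role = factory.Maybe(
        "is_superuser",
        yes_declaration="admin",
        no_declaration="member",
    )
assert UserFactory()["role"] == "member"
assert UserFactory(is_superuser=True)["role"] == "admin"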
|
python
|
ansible__ansible
|
test/integration/targets/var_precedence/ansible-var-precedence-check.py
|
{
"start": 2311,
"end": 4454
}
|
class ____(object):
BASESCRIPT = '''#!/usr/bin/python
import json
data = """{{ data }}"""
data = json.loads(data)
print(json.dumps(data, indent=2, sort_keys=True))
'''
BASEINV = {
'_meta': {
'hostvars': {
'testhost': {}
}
}
}
def __init__(self, features):
self.ENV = Environment()
self.features = features
self.fpath = None
self.inventory = self.BASEINV.copy()
self.build()
def build(self):
xhost = 'testhost'
if 'script_host' in self.features:
self.inventory['_meta']['hostvars'][xhost]['findme'] = 'script_host'
else:
self.inventory['_meta']['hostvars'][xhost] = {}
if 'script_child' in self.features:
self.inventory['child'] = {
'hosts': [xhost],
'vars': {'findme': 'script_child'}
}
if 'script_parent' in self.features:
self.inventory['parent'] = {
'vars': {'findme': 'script_parent'}
}
if 'script_child' in self.features:
self.inventory['parent']['children'] = ['child']
else:
self.inventory['parent']['hosts'] = [xhost]
if 'script_all' in self.features:
self.inventory['all'] = {
'hosts': [xhost],
'vars': {
'findme': 'script_all'
},
}
else:
self.inventory['all'] = {
'hosts': [xhost],
}
def write_script(self):
fdir = os.path.join(TESTDIR, 'inventory')
if not os.path.isdir(fdir):
os.makedirs(fdir)
fpath = os.path.join(fdir, 'hosts')
# fpath = os.path.join(TESTDIR, 'inventory')
self.fpath = fpath
data = json.dumps(self.inventory)
t = self.ENV.from_string(self.BASESCRIPT)
fdata = t.render(data=data)
with open(fpath, 'w') as f:
f.write(fdata + '\n')
st = os.stat(fpath)
os.chmod(fpath, st.st_mode | stat.S_IEXEC)
|
DynamicInventory
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
|
{
"start": 332,
"end": 1041
}
|
class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
max_workers: Optional[int] = Field(
None,
description=(
"The maximum number of workers to which the cluster can scale up when"
" overloaded. max_workers must be strictly greater than min_workers."
),
)
min_workers: Optional[int] = Field(
None,
description=(
"The minimum number of workers to which the cluster can scale down when"
" underutilized. It is also the initial number of workers the cluster has"
" after creation."
),
)
|
AutoScale
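A trivial construction sketch for this Pydantic model (the worker counts are made up):
from prefect_databricks.models.jobs import AutoScale
autoscale = AutoScale(min_workers=2, max_workers=8)
print(autoscale.model_dump(exclude_none=True))  # {'min_workers': 2, 'max_workers': 8}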
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/coordinator/values.py
|
{
"start": 13872,
"end": 14272
}
|
class ____(PerWorkerValues):
"""Distributed iterator for `ClusterCoordinator`."""
def __next__(self):
return self.get_next()
def get_next(self, name=None):
"""Returns the next input from the iterator for all replicas."""
raise NotImplementedError("Iterating over an `AsyncDistributedIterator` "
"is not supported right now.")
|
PerWorkerDistributedIterator
|
python
|
django__django
|
tests/messages_tests/urls.py
|
{
"start": 1730,
"end": 1847
}
|
class ____(forms.Form):
name = forms.CharField(required=True)
slug = forms.SlugField(required=True)
|
ContactForm
|
python
|
facelessuser__soupsieve
|
tests/test_level1/test_list.py
|
{
"start": 91,
"end": 1036
}
|
class ____(util.TestCase):
"""Test selector lists."""
def test_multiple_tags(self):
"""Test multiple selectors."""
self.assert_selector(
"""
<div>
<p>Some text <span id="1"> in a paragraph</span>.
<a id="2" href="http://google.com">Link</a>
</p>
</div>
""",
"span, a",
["1", "2"],
flags=util.HTML
)
def test_invalid_start_comma(self):
"""Test that selectors cannot start with a comma."""
self.assert_raises(', p', SelectorSyntaxError)
def test_invalid_end_comma(self):
"""Test that selectors cannot end with a comma."""
self.assert_raises('p,', SelectorSyntaxError)
def test_invalid_double_comma(self):
"""Test that selectors cannot have double combinators."""
self.assert_raises('div,, a', SelectorSyntaxError)
|
TestSelectorLists
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 63755,
"end": 63992
}
|
class ____(BaseModel, extra="forbid"):
"""
Select points with null payload for a specified field
"""
is_null: "PayloadField" = Field(..., description="Select points with null payload for a specified field")
|
IsNullCondition
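As an illustrative sketch, this condition is normally nested inside a qdrant filter; the payload field name below is made up.
from qdrant_client.http import models
# Match points whose "middle_name" payload field is explicitly null.
null_filter = models.Filter(
    must=[models.IsNullCondition(is_null=models.PayloadField(key="middle_name"))]
)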
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/tests/models.py
|
{
"start": 6118,
"end": 6288
}
|
class ____(QuerySet):
def my_queryset_foo(self):
# Just a method to prove the existence of the custom queryset.
return self.all()
|
PlainMyManagerQuerySet
|
python
|
getsentry__sentry
|
src/sentry_plugins/github/webhooks/events/installation.py
|
{
"start": 147,
"end": 847
}
|
class ____(Webhook):
# https://developer.github.com/v3/activity/events/types/#installationevent
def __call__(self, event, organization):
action = event["action"]
installation = event["installation"]
# TODO(jess): handle uninstalls
if action == "created":
try:
with transaction.atomic(router.db_for_write(Integration)):
Integration.objects.create(
provider="github_apps",
external_id=installation["id"],
name=installation["account"]["login"],
)
except IntegrityError:
pass
|
InstallationEventWebhook
|
python
|
donnemartin__interactive-coding-challenges
|
graphs_trees/tree_lca/test_lca.py
|
{
"start": 18,
"end": 1312
}
|
class ____(unittest.TestCase):
def test_lca(self):
node10 = Node(10)
node5 = Node(5)
node12 = Node(12)
node3 = Node(3)
node1 = Node(1)
node8 = Node(8)
node9 = Node(9)
node18 = Node(18)
node20 = Node(20)
node40 = Node(40)
node3.left = node1
node3.right = node8
node5.left = node12
node5.right = node3
node20.left = node40
node9.left = node18
node9.right = node20
node10.left = node5
node10.right = node9
root = node10
node0 = Node(0)
binary_tree = BinaryTree()
self.assertEqual(binary_tree.lca(root, node0, node5), None)
self.assertEqual(binary_tree.lca(root, node5, node0), None)
self.assertEqual(binary_tree.lca(root, node1, node8), node3)
self.assertEqual(binary_tree.lca(root, node12, node8), node5)
self.assertEqual(binary_tree.lca(root, node12, node40), node10)
self.assertEqual(binary_tree.lca(root, node9, node20), node9)
self.assertEqual(binary_tree.lca(root, node3, node5), node5)
print('Success: test_lca')
def main():
test = TestLowestCommonAncestor()
test.test_lca()
if __name__ == '__main__':
main()
|
TestLowestCommonAncestor
|
python
|
huggingface__transformers
|
src/transformers/models/data2vec/modeling_data2vec_text.py
|
{
"start": 34601,
"end": 37757
}
|
class ____(Data2VecTextPreTrainedModel):
_tied_weights_keys = {
"lm_head.decoder.weight": "data2vec_text.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `Data2VecTextForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.data2vec_text = Data2VecTextModel(config, add_pooling_layer=False)
self.lm_head = Data2VecTextLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.data2vec_text(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(prediction_scores.device)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Data2VecText Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
|
Data2VecTextForMaskedLM
|
python
|
pallets__werkzeug
|
tests/test_datastructures.py
|
{
"start": 17447,
"end": 19178
}
|
class ____:
storage_class = ds.CombinedMultiDict
def test_basic_interface(self):
d1 = ds.MultiDict([("foo", "1")])
d2 = ds.MultiDict([("bar", "2"), ("bar", "3")])
d = self.storage_class([d1, d2])
# lookup
assert d["foo"] == "1"
assert d["bar"] == "2"
assert d.getlist("bar") == ["2", "3"]
assert sorted(d.items()) == [("bar", "2"), ("foo", "1")]
assert sorted(d.items(multi=True)) == [("bar", "2"), ("bar", "3"), ("foo", "1")]
assert "missingkey" not in d
assert "foo" in d
# type lookup
assert d.get("foo", type=int) == 1
assert d.getlist("bar", type=int) == [2, 3]
# get key errors for missing stuff
with pytest.raises(KeyError):
d["missing"]
# make sure that they are immutable
with pytest.raises(TypeError):
d["foo"] = "blub"
# copies are mutable
d = d.copy()
d["foo"] = "blub"
# make sure lists merges
md1 = ds.MultiDict((("foo", "bar"), ("foo", "baz")))
md2 = ds.MultiDict((("foo", "blafasel"),))
x = self.storage_class((md1, md2))
assert list(x.lists()) == [("foo", ["bar", "baz", "blafasel"])]
# make sure dicts are created properly
assert x.to_dict() == {"foo": "bar"}
assert x.to_dict(flat=False) == {"foo": ["bar", "baz", "blafasel"]}
def test_length(self):
d1 = ds.MultiDict([("foo", "1")])
d2 = ds.MultiDict([("bar", "2")])
assert len(d1) == len(d2) == 1
d = self.storage_class([d1, d2])
assert len(d) == 2
d1.clear()
assert len(d1) == 0
assert len(d) == 1
|
TestCombinedMultiDict
|
python
|
PrefectHQ__prefect
|
tests/telemetry/instrumentation_tester.py
|
{
"start": 2169,
"end": 3792
}
|
class ____:
tracer_provider: TracerProvider
memory_exporter: InMemorySpanExporter
meter_provider: MeterProvider
memory_metrics_reader: InMemoryMetricReader
def __init__(self):
self.tracer_provider, self.memory_exporter = create_tracer_provider()
# This is done because set_tracer_provider cannot override the
# current tracer provider.
reset_trace_globals()
trace_api.set_tracer_provider(self.tracer_provider)
self.memory_exporter.clear()
# This is done because set_meter_provider cannot override the
# current meter provider.
reset_metrics_globals()
self.meter_provider, self.memory_metrics_reader = create_meter_provider()
metrics_api.set_meter_provider(self.meter_provider)
def reset(self):
reset_trace_globals()
reset_metrics_globals()
def get_finished_spans(self):
return self.memory_exporter.get_finished_spans()
@staticmethod
def assert_has_attributes(obj: HasAttributes, attributes: Dict[str, Any]):
assert obj.attributes is not None
for key, val in attributes.items():
assert key in obj.attributes, f"Key {key!r} not found in attributes"
assert obj.attributes[key] == val, f"Value for key {key!r} does not match"
@staticmethod
def assert_span_instrumented_for(span: Union[Span, ReadableSpan], module):
assert span.instrumentation_scope is not None
assert span.instrumentation_scope.name == module.__name__
assert span.instrumentation_scope.version == module.__version__
|
InstrumentationTester
|
python
|
ansible__ansible
|
lib/ansible/_internal/_errors/_handler.py
|
{
"start": 282,
"end": 724
}
|
class ____(enum.Enum):
"""Action to take when an error is encountered."""
IGNORE = enum.auto()
WARNING = enum.auto()
ERROR = enum.auto()
@classmethod
def from_config(cls, setting: str, variables: dict[str, t.Any] | None = None) -> t.Self:
"""Return an `ErrorAction` enum from the specified Ansible config setting."""
return cls[config.get_config_value(setting, variables=variables).upper()]
|
ErrorAction
|
python
|
getsentry__sentry
|
src/sentry/utils/arroyo.py
|
{
"start": 4359,
"end": 6999
}
|
class ____:
def __init__(self, num_processes: int, initializer: Callable[[], None] | None = None) -> None:
self.__initializer = initializer
if settings.KAFKA_CONSUMER_FORCE_DISABLE_MULTIPROCESSING:
self.__pool = None
else:
self.__pool = ArroyoMultiprocessingPool(
num_processes, _get_arroyo_subprocess_initializer(initializer)
)
@property
def initializer(self) -> Callable[[], None] | None:
return self.__initializer
@property
def pool(self) -> ArroyoMultiprocessingPool | None:
return self.__pool
def close(self) -> None:
if self.__pool is not None:
self.__pool.close()
def run_task_with_multiprocessing(
*,
pool: MultiprocessingPool,
function: Callable[[Message[TStrategyPayload]], TResult],
**kwargs: Any,
) -> (
RunTask[TStrategyPayload, TResult] | ArroyoRunTaskWithMultiprocessing[TStrategyPayload, TResult]
):
"""
A variant of arroyo's RunTaskWithMultiprocessing that can switch between
multiprocessing and non-multiprocessing mode based on the
`KAFKA_CONSUMER_FORCE_DISABLE_MULTIPROCESSING` setting.
"""
if settings.KAFKA_CONSUMER_FORCE_DISABLE_MULTIPROCESSING:
kwargs.pop("num_processes", None)
kwargs.pop("input_block_size", None)
kwargs.pop("output_block_size", None)
kwargs.pop("max_batch_size", None)
kwargs.pop("max_batch_time", None)
if pool.initializer is not None:
pool.initializer()
# Assert that initializer can be pickled and loaded again from subprocesses.
pickle.loads(pickle.dumps(pool.initializer))
pickle.loads(pickle.dumps(function))
return RunTask(function=function, **kwargs)
else:
assert pool.pool is not None
return ArroyoRunTaskWithMultiprocessing(pool=pool.pool, function=function, **kwargs)
def _import_and_run(
initializer: Callable[[], None],
main_fn_pickle: bytes,
args_pickle: bytes,
*additional_args: Any,
) -> None:
initializer()
# explicitly use pickle so that we can be sure arguments get unpickled
# after sentry gets initialized
main_fn = pickle.loads(main_fn_pickle)
args = pickle.loads(args_pickle)
main_fn(*args, *additional_args)
def run_with_initialized_sentry(main_fn: Callable[..., None], *args: Any) -> Callable[..., None]:
main_fn_pickle = pickle.dumps(main_fn)
args_pickle = pickle.dumps(args)
return partial(
_import_and_run, _get_arroyo_subprocess_initializer(None), main_fn_pickle, args_pickle
)
|
MultiprocessingPool
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/wrappers/common.py
|
{
"start": 1149,
"end": 6138
}
|
class ____(
gym.Wrapper[ObsType, ActType, ObsType, ActType], gym.utils.RecordConstructorArgs
):
"""Limits the number of steps for an environment through truncating the environment if a maximum number of timesteps is exceeded.
If a truncation is not defined inside the environment itself, this is the only place that the truncation signal is issued.
Critically, this is different from the `terminated` signal that originates from the underlying environment as part of the MDP.
No vector wrapper exists.
Example using the TimeLimit wrapper:
>>> from gymnasium.wrappers import TimeLimit
>>> from gymnasium.envs.classic_control import CartPoleEnv
>>> spec = gym.spec("CartPole-v1")
>>> spec.max_episode_steps
500
>>> env = gym.make("CartPole-v1")
>>> env # TimeLimit is included within the environment stack
<TimeLimit<OrderEnforcing<PassiveEnvChecker<CartPoleEnv<CartPole-v1>>>>>
>>> env.spec # doctest: +ELLIPSIS
EnvSpec(id='CartPole-v1', ..., max_episode_steps=500, ...)
>>> env = gym.make("CartPole-v1", max_episode_steps=3)
>>> env.spec # doctest: +ELLIPSIS
EnvSpec(id='CartPole-v1', ..., max_episode_steps=3, ...)
>>> env = TimeLimit(CartPoleEnv(), max_episode_steps=10)
>>> env
<TimeLimit<CartPoleEnv instance>>
Example of `TimeLimit` determining the episode step
>>> env = gym.make("CartPole-v1", max_episode_steps=3)
>>> _ = env.reset(seed=123)
>>> _ = env.action_space.seed(123)
>>> _, _, terminated, truncated, _ = env.step(env.action_space.sample())
>>> terminated, truncated
(False, False)
>>> _, _, terminated, truncated, _ = env.step(env.action_space.sample())
>>> terminated, truncated
(False, False)
>>> _, _, terminated, truncated, _ = env.step(env.action_space.sample())
>>> terminated, truncated
(False, True)
Change logs:
* v0.10.6 - Initially added
* v0.25.0 - With the step API update, the termination and truncation signal is returned separately.
"""
def __init__(
self,
env: gym.Env,
max_episode_steps: int,
):
"""Initializes the :class:`TimeLimit` wrapper with an environment and the number of steps after which truncation will occur.
Args:
env: The environment to apply the wrapper
max_episode_steps: the environment step after which the episode is truncated (``elapsed >= max_episode_steps``)
"""
assert (
isinstance(max_episode_steps, int) and max_episode_steps > 0
), f"Expect the `max_episode_steps` to be positive, actually: {max_episode_steps}"
gym.utils.RecordConstructorArgs.__init__(
self, max_episode_steps=max_episode_steps
)
gym.Wrapper.__init__(self, env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Steps through the environment and if the number of steps elapsed exceeds ``max_episode_steps`` then truncate.
Args:
action: The environment step action
Returns:
The environment step ``(observation, reward, terminated, truncated, info)`` with `truncated=True`
if the number of steps elapsed >= max episode steps
"""
observation, reward, terminated, truncated, info = self.env.step(action)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
truncated = True
return observation, reward, terminated, truncated, info
def reset(
self, *, seed: int | None = None, options: dict[str, Any] | None = None
) -> tuple[ObsType, dict[str, Any]]:
"""Resets the environment with :param:`**kwargs` and sets the number of steps elapsed to zero.
Args:
seed: Seed for the environment
options: Options for the environment
Returns:
The reset environment
"""
self._elapsed_steps = 0
return super().reset(seed=seed, options=options)
@property
def spec(self) -> EnvSpec | None:
"""Modifies the environment spec to include the `max_episode_steps=self._max_episode_steps`."""
if self._cached_spec is not None:
return self._cached_spec
env_spec = self.env.spec
if env_spec is not None:
try:
env_spec = deepcopy(env_spec)
env_spec.max_episode_steps = self._max_episode_steps
except Exception as e:
gym.logger.warn(
f"An exception occurred ({e}) while copying the environment spec={env_spec}"
)
return None
self._cached_spec = env_spec
return env_spec
|
TimeLimit
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/storage.py
|
{
"start": 242,
"end": 361
}
|
class ____(BuildMediaFileSystemStorageTest):
internal_redirect_root_path = "proxito-static"
|
StaticFileSystemStorageTest
|
python
|
ApeWorX__ape
|
src/ape/types/private_mempool.py
|
{
"start": 1122,
"end": 1888
}
|
class ____(str, Enum):
"""
Hints on what data should be shared about the bundle and its transactions.
"""
CALLDATA = "calldata"
"""
The calldata of the bundle's transactions should be shared.
"""
CONTRACT_ADDRESS = "contract_address"
"""
The address of the bundle's transactions should be shared.
"""
LOGS = "logs"
"""
The logs of the bundle's transactions should be shared.
"""
FUNCTION_SELECTOR = "function_selector"
"""
The function selector of the bundle's transactions should be shared.
"""
HASH = "hash"
"""
The hash of the bundle's transactions should be shared.
"""
TX_HASH = "tx_hash"
"""
The hash of the bundle should be shared.
"""
|
PrivacyHint
|
python
|
getsentry__sentry
|
src/sentry/integrations/middleware/metrics.py
|
{
"start": 225,
"end": 694
}
|
class ____(StrEnum):
"""Different types of operations that middleware can perform."""
ENSURE_CONTROL_SILO = "ensure_control_silo"
GET_CONTROL_RESPONSE = "get_control_response"
GET_REGION_RESPONSE = "get_region_response"
GET_RESPONSE_FROM_FIRST_REGION = "get_response_from_first_region"
GET_RESPONSE_FROM_ALL_REGIONS = "get_response_from_all_regions"
GET_ORGS_FROM_INTEGRATION = "get_orgs_from_integration"
@dataclass
|
MiddlewareOperationType
|