| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, length 6-201) | class_span (dict) | source (string, length 21-2.38M) | target (string, length 1-96) |
|---|---|---|---|---|---|
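Each row pairs a masked class definition (`source`, with the class name replaced by `____`) with the removed name (`target`). A minimal sketch of how the columns fit together, assuming rows are available as plain dicts keyed by the column names above (the loader is not specified here, and `class_span` is presumably the character range of the class within the file at `path`):

```python
# Sketch: reassemble a row into the original class definition.
# Assumption: each row is a dict with the columns listed in the header above.

def reconstruct_class(row: dict) -> str:
    """Fill the `____` placeholder in `source` with the `target` class name."""
    return row["source"].replace("____", row["target"], 1)


# Values taken from the marshmallow-code__marshmallow row below.
example_row = {
    "language": "python",
    "repo": "marshmallow-code__marshmallow",
    "path": "src/marshmallow/exceptions.py",
    "class_span": {"start": 2101, "end": 2273},  # presumed character offsets into the file
    "source": (
        "class ____(MarshmallowError, TypeError):\n"
        '    """Raised when an argument is passed to a field class '
        'that cannot be resolved to a Field instance."""'
    ),
    "target": "_FieldInstanceResolutionError",
}

print(reconstruct_class(example_row))
# -> class _FieldInstanceResolutionError(MarshmallowError, TypeError): ...
```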
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_escapeall.py
|
{
"start": 53,
"end": 886
}
|
class ____(util.MdCase):
"""Test escaping cases."""
extension = [
'pymdownx.escapeall'
]
extension_configs = {}
def test_html_special_char_lt(self):
"""Test `<`."""
self.check_markdown(
r'\<pre>foo',
'<p>&lt;pre&gt;foo</p>'
)
def test_html_special_char_gt(self):
"""Test `>`."""
self.check_markdown(
r'<span\>foo',
'<p>&lt;span&gt;foo</p>'
)
def test_html_special_char_amp(self):
"""Test `&`."""
self.check_markdown(
r'This \& that',
'<p>This &amp; that</p>'
)
def test_normal_escape(self):
"""Test normal escapes."""
self.check_markdown(
r'This & \that',
'<p>This &amp; that</p>'
)
|
TestEscapeAll
|
python
|
mlflow__mlflow
|
mlflow/models/resources.py
|
{
"start": 1623,
"end": 2608
}
|
class ____(Resource, ABC):
"""
Base class to define all the Databricks resources to serve a model.
Example usage: https://docs.databricks.com/en/generative-ai/log-agent.html#specify-resources-for-pyfunc-or-langchain-agent
"""
@property
def target_uri(self) -> str:
return "databricks"
@property
def type(self) -> ResourceType:
raise NotImplementedError("Subclasses must implement the 'type' property.")
def __init__(self, name: str, on_behalf_of_user: bool | None = None):
self.name = name
self.on_behalf_of_user = on_behalf_of_user
def to_dict(self):
result = {self.type.value: [{"name": self.name}]}
if self.on_behalf_of_user is not None:
result[self.type.value][0]["on_behalf_of_user"] = self.on_behalf_of_user
return result
@classmethod
def from_dict(cls, data: dict[str, str]):
return cls(data["name"], data.get("on_behalf_of_user"))
|
DatabricksResource
|
python
|
celery__celery
|
t/unit/events/test_cursesmon.py
|
{
"start": 122,
"end": 2304
}
|
class ____:
def setup_method(self):
from celery.events import cursesmon
self.monitor = cursesmon.CursesMonitor(object(), app=self.app)
self.win = MockWindow()
self.monitor.win = self.win
def test_format_row_with_default_widths(self):
self.win.x, self.win.y = 91, 24
row = self.monitor.format_row(
'783da208-77d0-40ca-b3d6-37dd6dbb55d3',
'task.task.task.task.task.task.task.task.task.tas',
'workerworkerworkerworkerworkerworkerworkerworker',
'21:13:20',
'SUCCESS')
assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 '
'workerworker... task.task.[.]tas 21:13:20 SUCCESS ' == row)
def test_format_row_with_truncated_uuid(self):
self.win.x, self.win.y = 80, 24
row = self.monitor.format_row(
'783da208-77d0-40ca-b3d6-37dd6dbb55d3',
'task.task.task.task.task.task.task.task.task.tas',
'workerworkerworkerworkerworkerworkerworkerworker',
'21:13:20',
'SUCCESS')
expected = ('783da208-77d0-40ca-b3d... workerworker... '
'task.task.[.]tas 21:13:20 SUCCESS ')
assert row == expected
def test_format_title_row(self):
self.win.x, self.win.y = 80, 24
row = self.monitor.format_row('UUID', 'TASK',
'WORKER', 'TIME', 'STATE')
assert ('UUID WORKER '
'TASK TIME STATE ' == row)
def test_format_row_for_wide_screen_with_short_uuid(self):
self.win.x, self.win.y = 140, 24
row = self.monitor.format_row(
'783da208-77d0-40ca-b3d6-37dd6dbb55d3',
'task.task.task.task.task.task.task.task.task.tas',
'workerworkerworkerworkerworkerworkerworkerworker',
'21:13:20',
'SUCCESS')
assert len(row) == 136
assert ('783da208-77d0-40ca-b3d6-37dd6dbb55d3 '
'workerworkerworkerworkerworkerworker... '
'task.task.task.task.task.task.task.[.]tas '
'21:13:20 SUCCESS ' == row)
|
test_CursesDisplay
|
python
|
tornadoweb__tornado
|
tornado/test/httpclient_test.py
|
{
"start": 36111,
"end": 36804
}
|
class ____(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_plain_error(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
def test_error_with_response(self):
resp = HTTPResponse(HTTPRequest("http://example.com/"), 403)
with self.assertRaises(HTTPError) as cm:
resp.rethrow()
e = cm.exception
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
|
HTTPErrorTestCase
|
python
|
ansible__ansible
|
lib/ansible/modules/unarchive.py
|
{
"start": 38274,
"end": 38511
}
|
class ____(TgzArchive):
def __init__(self, src, b_dest, file_args, module):
super(TarXzArchive, self).__init__(src, b_dest, file_args, module)
self.zipflag = '-J'
# Class to handle zstd compressed tar files
|
TarXzArchive
|
python
|
nedbat__coveragepy
|
coverage/html.py
|
{
"start": 24794,
"end": 31524
}
|
class ____:
"""Logic and data to support incremental reporting.
When generating an HTML report, often only a few of the source files have
changed since the last time we made the HTML report. This means previously
created HTML pages can be reused without generating them again, speeding
the command.
This class manages a JSON data file that captures enough information to
know whether an HTML page for a .py file needs to be regenerated or not.
The data file also needs to store all the information needed to create the
entry for the file on the index page so that if the HTML page is reused,
the index page can still be created to refer to it.
The data looks like::
{
"note": "This file is an internal implementation detail ...",
// A fixed number indicating the data format. STATUS_FORMAT
"format": 5,
// The version of coverage.py
"version": "7.4.4",
// A hash of a number of global things, including the configuration
// settings and the pyfile.html template itself.
"globals": "540ee119c15d52a68a53fe6f0897346d",
"files": {
// An entry for each source file keyed by the flat_rootname().
"z_7b071bdc2a35fa80___init___py": {
// Hash of the source, the text of the .py file.
"hash": "e45581a5b48f879f301c0f30bf77a50c",
// Information for the index.html file.
"index": {
"url": "z_7b071bdc2a35fa80___init___py.html",
"file": "cogapp/__init__.py",
"description": "",
// The Numbers for this file.
"nums": { "precision": 2, "n_files": 1, "n_statements": 43, ... }
}
},
...
}
}
"""
STATUS_FILE = "status.json"
STATUS_FORMAT = 5
NOTE = (
"This file is an internal implementation detail to speed up HTML report"
+ " generation. Its format can change at any time. You might be looking"
+ " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json"
)
def __init__(self, directory: str) -> None:
self.directory = directory
self._reset()
def _reset(self) -> None:
"""Initialize to empty. Causes all files to be reported."""
self.globals = ""
self.files: dict[str, FileInfo] = {}
def read(self) -> None:
"""Read the information we stored last time."""
try:
status_file = os.path.join(self.directory, self.STATUS_FILE)
with open(status_file, encoding="utf-8") as fstatus:
status = json.load(fstatus)
except (OSError, ValueError):
# Status file is missing or malformed.
usable = False
else:
if status["format"] != self.STATUS_FORMAT:
usable = False
elif status["version"] != coverage.__version__:
usable = False
else:
usable = True
if usable:
self.files = {}
for filename, filedict in status["files"].items():
indexdict = filedict["index"]
index_item = IndexItem(**indexdict)
index_item.nums = Numbers(**indexdict["nums"])
fileinfo = FileInfo(
hash=filedict["hash"],
index=index_item,
)
self.files[filename] = fileinfo
self.globals = status["globals"]
else:
self._reset()
def write(self) -> None:
"""Write the current status."""
status_file = os.path.join(self.directory, self.STATUS_FILE)
status_data = {
"note": self.NOTE,
"format": self.STATUS_FORMAT,
"version": coverage.__version__,
"globals": self.globals,
"files": {fname: dataclasses.asdict(finfo) for fname, finfo in self.files.items()},
}
with open(status_file, "w", encoding="utf-8") as fout:
json.dump(status_data, fout, separators=(",", ":"))
def check_global_data(self, *data: Any) -> None:
"""Check the global data that can affect incremental reporting.
Pass in whatever global information could affect the content of the
HTML pages. If the global data has changed since last time, this will
clear the data so that all files are regenerated.
"""
m = Hasher()
for d in data:
m.update(d)
these_globals = m.hexdigest()
if self.globals != these_globals:
self._reset()
self.globals = these_globals
def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool:
"""Can we skip reporting this file?
`data` is a CoverageData object, `fr` is a `FileReporter`, and
`rootname` is the name being used for the file.
Returns True if the HTML page is fine as-is, False if we need to recreate
the HTML page.
"""
m = Hasher()
m.update(fr.source().encode("utf-8"))
add_data_to_hash(data, fr.filename, m)
this_hash = m.hexdigest()
file_info = self.files.setdefault(rootname, FileInfo())
if this_hash == file_info.hash:
# Nothing has changed to require the file to be reported again.
return True
else:
# File has changed, record the latest hash and force regeneration.
file_info.hash = this_hash
return False
def index_info(self, fname: str) -> IndexItem:
"""Get the information for index.html for `fname`."""
return self.files.get(fname, FileInfo()).index
def set_index_info(self, fname: str, info: IndexItem) -> None:
"""Set the information for index.html for `fname`."""
self.files.setdefault(fname, FileInfo()).index = info
# Helpers for templates and generating HTML
def escape(t: str) -> str:
"""HTML-escape the text in `t`.
This is only suitable for HTML text, not attributes.
"""
# Convert HTML special chars into HTML entities.
return t.replace("&", "&amp;").replace("<", "&lt;")
def pair(ratio: tuple[int, int]) -> str:
"""Format a pair of numbers so JavaScript can read them in an attribute."""
return "{} {}".format(*ratio)
def pretty_file(filename: str) -> str:
"""Return a prettier version of `filename` for display."""
return re.sub(r"[/\\]", "\N{THIN SPACE}\\g<0>\N{THIN SPACE}", filename)
|
IncrementalChecker
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_session_create_response.py
|
{
"start": 8384,
"end": 8925
}
|
class ____(BaseModel):
read_only: Optional[bool] = None
"""Indicates whether or not a tool modifies data or is read-only.
If an MCP server is
[annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
it will match this filter.
"""
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
ToolMcpToolAllowedTools: TypeAlias = Union[List[str], ToolMcpToolAllowedToolsMcpToolFilter, None]
|
ToolMcpToolAllowedToolsMcpToolFilter
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/output_parsers/base.py
|
{
"start": 113,
"end": 235
}
|
class ____:
"""Structured output class."""
raw_output: str
parsed_output: Optional[Any] = None
|
StructuredOutput
|
python
|
ansible__ansible
|
test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py
|
{
"start": 4729,
"end": 4911
}
|
class ____(json.JSONEncoder):
def default(self, o):
if isinstance(o, Exception):
return str(o)
return json.JSONEncoder.default(self, o)
|
ReporterEncoder
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/rolling.py
|
{
"start": 5215,
"end": 5794
}
|
class ____(Methods):
params = (
["DataFrame", "Series"],
["50s", "1h", "1d"],
["int", "float"],
["median", "mean", "max", "min", "std", "count", "skew", "kurt", "sum", "sem"],
)
param_names = ["constructor", "window", "dtype", "method"]
def setup(self, constructor, window, dtype, method):
N = 10**5
arr = (100 * np.random.random(N)).astype(dtype)
index = pd.date_range("2017-01-01", periods=N, freq="5s")
self.window = getattr(pd, constructor)(arr, index=index).rolling(window)
|
VariableWindowMethods
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-kyriba/source_kyriba/source.py
|
{
"start": 4878,
"end": 5020
}
|
class ____(KyribaStream):
primary_key = "uuid"
use_cache = True
def path(self, **kwargs) -> str:
return "accounts"
|
Accounts
|
python
|
networkx__networkx
|
networkx/algorithms/approximation/tests/test_vertex_cover.py
|
{
"start": 185,
"end": 1942
}
|
class ____:
"""Unit tests for the approximate minimum weighted vertex cover
function,
:func:`~networkx.algorithms.approximation.vertex_cover.min_weighted_vertex_cover`.
"""
def test_unweighted_directed(self):
# Create a star graph in which half the nodes are directed in
# and half are directed out.
G = nx.DiGraph()
G.add_edges_from((0, v) for v in range(1, 26))
G.add_edges_from((v, 0) for v in range(26, 51))
cover = min_weighted_vertex_cover(G)
assert 1 == len(cover)
assert is_cover(G, cover)
def test_unweighted_undirected(self):
# create a simple star graph
size = 50
sg = nx.star_graph(size)
cover = min_weighted_vertex_cover(sg)
assert 1 == len(cover)
assert is_cover(sg, cover)
def test_weighted(self):
wg = nx.Graph()
wg.add_node(0, weight=10)
wg.add_node(1, weight=1)
wg.add_node(2, weight=1)
wg.add_node(3, weight=1)
wg.add_node(4, weight=1)
wg.add_edge(0, 1)
wg.add_edge(0, 2)
wg.add_edge(0, 3)
wg.add_edge(0, 4)
wg.add_edge(1, 2)
wg.add_edge(2, 3)
wg.add_edge(3, 4)
wg.add_edge(4, 1)
cover = min_weighted_vertex_cover(wg, weight="weight")
csum = sum(wg.nodes[node]["weight"] for node in cover)
assert 4 == csum
assert is_cover(wg, cover)
def test_unweighted_self_loop(self):
slg = nx.Graph()
slg.add_node(0)
slg.add_node(1)
slg.add_node(2)
slg.add_edge(0, 1)
slg.add_edge(2, 2)
cover = min_weighted_vertex_cover(slg)
assert 2 == len(cover)
assert is_cover(slg, cover)
|
TestMWVC
|
python
|
charliermarsh__ruff
|
scripts/ty_benchmark/src/benchmark/tool.py
|
{
"start": 555,
"end": 1450
}
|
class ____(abc.ABC):
def write_config(self, project: Project, venv: Venv) -> None:
"""Write the tool's configuration file."""
if config := self.config(project, venv):
config_name, config_text = config
config_path = venv.project_path / config_name
config_path.write_text(dedent(config_text))
def config(self, project: Project, venv: Venv) -> tuple[Path, str] | None:
"""Returns the path to the tool's configuration file with the configuration
content or `None` if the tool requires no configuration file.
We write a configuration file rather than passing CLI arguments because
most LSPs don't accept configuration via the CLI.
"""
return None
@abc.abstractmethod
def command(self, project: Project, venv: Venv, single_threaded: bool) -> Command:
"""Generate a command to benchmark a given tool."""
|
Tool
|
python
|
PyCQA__pylint
|
tests/functional/u/unused/unused_import_class_def_keyword.py
|
{
"start": 344,
"end": 413
}
|
class ____:
def __init_subclass__(cls, **kwargs):
pass
|
Child
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/map/callable.py
|
{
"start": 592,
"end": 1933
}
|
class ____(MapDataPipe[_T_co]):
r"""
Apply the input function over each item from the source DataPipe (functional name: ``map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
datapipe: Source MapDataPipe
fn: Function being applied to each item
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.map import SequenceWrapper, Mapper
>>> def add_one(x):
... return x + 1
>>> dp = SequenceWrapper(range(10))
>>> map_dp_1 = dp.map(add_one)
>>> list(map_dp_1)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> map_dp_2 = Mapper(dp, lambda x: x + 1)
>>> list(map_dp_2)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
datapipe: MapDataPipe
fn: Callable
def __init__(
self,
datapipe: MapDataPipe,
fn: Callable = default_fn,
) -> None:
super().__init__()
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
def __len__(self) -> int:
# pyrefly: ignore [bad-argument-type]
return len(self.datapipe)
def __getitem__(self, index) -> _T_co:
return self.fn(self.datapipe[index])
|
MapperMapDataPipe
|
python
|
doocs__leetcode
|
lcof/面试题25. 合并两个排序的链表/Solution.py
|
{
"start": 136,
"end": 530
}
|
class ____:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
dummy = cur = ListNode(0)
while l1 and l2:
if l1.val <= l2.val:
cur.next = l1
l1 = l1.next
else:
cur.next = l2
l2 = l2.next
cur = cur.next
cur.next = l1 or l2
return dummy.next
|
Solution
|
python
|
scipy__scipy
|
scipy/signal/_ltisys.py
|
{
"start": 25312,
"end": 27419
}
|
class ____(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.5 seconds:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, dt=0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
|
TransferFunctionDiscrete
|
python
|
allegroai__clearml
|
clearml/utilities/version.py
|
{
"start": 401,
"end": 550
}
|
class ____(object):
epoch = attrib()
release = attrib()
dev = attrib()
pre = attrib()
post = attrib()
local = attrib()
|
_Version
|
python
|
google__pytype
|
pytype/vm_utils.py
|
{
"start": 3691,
"end": 3848
}
|
class ____(abc.ABC):
"""Base class for detailed name error messages."""
@abc.abstractmethod
def to_error_message(self) -> str:
...
|
_NameErrorDetails
|
python
|
PyCQA__pylint
|
tests/functional/ext/docparams/return/missing_return_doc_Google.py
|
{
"start": 2169,
"end": 2492
}
|
class ____:
"""test_ignores_return_in_abstract_method_google
Example of an abstract method documenting the return type that an
implementation should return.
"""
@abc.abstractmethod
def foo_method(self):
"""docstring ...
Returns:
int: Ten
"""
return 10
|
Foo
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vision.py
|
{
"start": 13316,
"end": 14203
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionAddProductToProductSetOperator(
location=LOCATION_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
task_id="id",
)
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.add_product_to_product_set.assert_called_once_with(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOCATION_TEST,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
|
TestCloudVisionAddProductToProductSetOperator
|
python
|
RaRe-Technologies__gensim
|
gensim/models/ldaseqmodel.py
|
{
"start": 48810,
"end": 62167
}
|
class ____(utils.SaveLoad):
"""Posterior values associated with each set of documents.
TODO: use **Hoffman, Blei, Bach: Online Learning for Latent Dirichlet Allocation, NIPS 2010.**
to update phi, gamma. End game would be to somehow replace LdaPost entirely with LdaModel.
"""
def __init__(self, doc=None, lda=None, max_doc_len=None, num_topics=None, gamma=None, lhood=None):
"""Initialize the posterior value structure for the given LDA model.
Parameters
----------
doc : list of (int, int)
A BOW representation of the document. Each element in the list is a pair of a word's ID and its number
of occurrences in the document.
lda : :class:`~gensim.models.ldamodel.LdaModel`, optional
The underlying LDA model.
max_doc_len : int, optional
The maximum number of words in a document.
num_topics : int, optional
Number of topics discovered by the LDA model.
gamma : numpy.ndarray, optional
Topic weight variational parameters for each document. If not supplied, it will be inferred from the model.
lhood : float, optional
The log likelihood lower bound.
"""
self.doc = doc
self.lda = lda
self.gamma = gamma
self.lhood = lhood
if self.gamma is None:
self.gamma = np.zeros(num_topics)
if self.lhood is None:
self.lhood = np.zeros(num_topics + 1)
if max_doc_len is not None and num_topics is not None:
self.phi = np.zeros((max_doc_len, num_topics))
self.log_phi = np.zeros((max_doc_len, num_topics))
# the following are class variables which are to be integrated during Document Influence Model
self.doc_weight = None
self.renormalized_doc_weight = None
def update_phi(self, doc_number, time):
"""Update variational multinomial parameters, based on a document and a time-slice.
This is done based on the original Blei-LDA paper, where:
log_phi := beta * exp(Ψ(gamma)), over every topic for every word.
TODO: incorporate lee-sueng trick used in
**Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.
Parameters
----------
doc_number : int
Document number. Unused.
time : int
Time slice. Unused.
Returns
-------
(list of float, list of float)
Multinomial parameters, and their logarithm, for each word in the document.
"""
num_topics = self.lda.num_topics
# digamma values
dig = np.zeros(num_topics)
for k in range(num_topics):
dig[k] = digamma(self.gamma[k])
n = 0 # keep track of iterations for phi, log_phi
for word_id, count in self.doc:
for k in range(num_topics):
self.log_phi[n][k] = dig[k] + self.lda.topics[word_id][k]
log_phi_row = self.log_phi[n]
phi_row = self.phi[n]
# log normalize
v = log_phi_row[0]
for i in range(1, len(log_phi_row)):
v = np.logaddexp(v, log_phi_row[i])
# subtract every element by v
log_phi_row = log_phi_row - v
phi_row = np.exp(log_phi_row)
self.log_phi[n] = log_phi_row
self.phi[n] = phi_row
n += 1 # increase iteration
return self.phi, self.log_phi
def update_gamma(self):
"""Update variational dirichlet parameters.
This operation is described in the original Blei LDA paper:
gamma = alpha + sum(phi), over every topic for every word.
Returns
-------
list of float
The updated gamma parameters for each word in the document.
"""
self.gamma = np.copy(self.lda.alpha)
n = 0 # keep track of number of iterations for phi, log_phi
for word_id, count in self.doc:
phi_row = self.phi[n]
for k in range(self.lda.num_topics):
self.gamma[k] += phi_row[k] * count
n += 1
return self.gamma
def init_lda_post(self):
"""Initialize variational posterior. """
total = sum(count for word_id, count in self.doc)
self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
# doc_weight used during DIM
# ldapost.doc_weight = None
def compute_lda_lhood(self):
"""Compute the log likelihood bound.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
num_topics = self.lda.num_topics
gamma_sum = np.sum(self.gamma)
# to be used in DIM
# sigma_l = 0
# sigma_d = 0
lhood = gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)
self.lhood[num_topics] = lhood
# influence_term = 0
digsum = digamma(gamma_sum)
model = "DTM" # noqa:F841
for k in range(num_topics):
# below code only to be used in DIM mode
# if ldapost.doc_weight is not None and (model == "DIM" or model == "fixed"):
# influence_topic = ldapost.doc_weight[k]
# influence_term = \
# - ((influence_topic * influence_topic + sigma_l * sigma_l) / 2.0 / (sigma_d * sigma_d))
e_log_theta_k = digamma(self.gamma[k]) - digsum
lhood_term = \
(self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k + \
gammaln(self.gamma[k]) - gammaln(self.lda.alpha[k])
# TODO: check why there's an IF
n = 0
for word_id, count in self.doc:
if self.phi[n][k] > 0:
lhood_term += \
count * self.phi[n][k] * (e_log_theta_k + self.lda.topics[word_id][k] - self.log_phi[n][k])
n += 1
self.lhood[k] = lhood_term
lhood += lhood_term
# in case of DIM add influence term
# lhood += influence_term
return lhood
def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-8,
lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
"""Posterior inference for lda.
Parameters
----------
doc_number : int
The documents number.
time : int
Time slice.
ldaseq : object
Unused.
LDA_INFERENCE_CONVERGED : float
Epsilon value used to check whether the inference step has sufficiently converged.
lda_inference_max_iter : int
Maximum number of iterations in the inference step.
g : object
Unused. Will be useful when the DIM model is implemented.
g3_matrix: object
Unused. Will be useful when the DIM model is implemented.
g4_matrix: object
Unused. Will be useful when the DIM model is implemented.
g5_matrix: object
Unused. Will be useful when the DIM model is implemented.
Returns
-------
float
The optimal lower bound for the true posterior using the approximate distribution.
"""
self.init_lda_post()
# sum of counts in a doc
total = sum(count for word_id, count in self.doc)
model = "DTM"
if model == "DIM":
# if in DIM then we initialise some variables here
pass
lhood = self.compute_lda_lhood()
lhood_old = 0
converged = 0
iter_ = 0
# first iteration starts here
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
while converged > LDA_INFERENCE_CONVERGED and iter_ <= lda_inference_max_iter:
iter_ += 1
lhood_old = lhood
self.gamma = self.update_gamma()
model = "DTM"
if model == "DTM" or sslm is None:
self.phi, self.log_phi = self.update_phi(doc_number, time)
elif model == "DIM" and sslm is not None:
self.phi, self.log_phi = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix)
lhood = self.compute_lda_lhood()
converged = np.fabs((lhood_old - lhood) / (lhood_old * total))
return lhood
def update_lda_seq_ss(self, time, doc, topic_suffstats):
"""Update lda sequence sufficient statistics from an lda posterior.
This is very similar to the :meth:`~gensim.models.ldaseqmodel.LdaPost.update_gamma` method and uses
the same formula.
Parameters
----------
time : int
The time slice.
doc : list of (int, float)
Unused but kept here for backwards compatibility. The document set in the constructor (`self.doc`) is used
instead.
topic_suffstats : list of float
Sufficient statistics for each topic.
Returns
-------
list of float
The updated sufficient statistics for each topic.
"""
num_topics = self.lda.num_topics
for k in range(num_topics):
topic_ss = topic_suffstats[k]
n = 0
for word_id, count in self.doc:
topic_ss[word_id][time] += count * self.phi[n][k]
n += 1
topic_suffstats[k] = topic_ss
return topic_suffstats
# the following functions are used in update_obs as the objective function.
def f_obs(x, *args):
"""Function which we are optimising for minimizing obs.
Parameters
----------
x : list of float
The obs values for this word.
sslm : :class:`~gensim.models.ldaseqmodel.sslm`
The State Space Language Model for DTM.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : list of float
Mean derivative for each time slice.
word : int
The word's ID.
deriv : list of float
Mean derivative for each time slice.
Returns
-------
list of float
The value of the objective function evaluated at point `x`.
"""
sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
# flag
init_mult = 1000
T = len(x)
val = 0
term1 = 0
term2 = 0
# term 3 and 4 for DIM
term3 = 0
term4 = 0
sslm.obs[word] = x
sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
mean = sslm.mean[word]
variance = sslm.variance[word]
# only used for DIM mode
# w_phi_l = sslm.w_phi_l[word]
# m_update_coeff = sslm.m_update_coeff[word]
for t in range(1, T + 1):
mean_t = mean[t]
mean_t_prev = mean[t - 1]
val = mean_t - mean_t_prev
term1 += val * val
term2 += word_counts[t - 1] * mean_t - totals[t - 1] * np.exp(mean_t + variance[t] / 2) / sslm.zeta[t - 1]
model = "DTM"
if model == "DIM":
# stuff happens
pass
if sslm.chain_variance > 0.0:
term1 = - (term1 / (2 * sslm.chain_variance))
term1 = term1 - mean[0] * mean[0] / (2 * init_mult * sslm.chain_variance)
else:
term1 = 0.0
final = -(term1 + term2 + term3 + term4)
return final
def df_obs(x, *args):
"""Derivative of the objective function which optimises obs.
Parameters
----------
x : list of float
The obs values for this word.
sslm : :class:`~gensim.models.ldaseqmodel.sslm`
The State Space Language Model for DTM.
word_counts : list of int
Total word counts for each time slice.
totals : list of int of length `len(self.time_slice)`
The totals for each time slice.
mean_deriv_mtx : list of float
Mean derivative for each time slice.
word : int
The word's ID.
deriv : list of float
Mean derivative for each time slice.
Returns
-------
list of float
The derivative of the objective function evaluated at point `x`.
"""
sslm, word_counts, totals, mean_deriv_mtx, word, deriv = args
sslm.obs[word] = x
sslm.mean[word], sslm.fwd_mean[word] = sslm.compute_post_mean(word, sslm.chain_variance)
model = "DTM"
if model == "DTM":
deriv = sslm.compute_obs_deriv(word, word_counts, totals, mean_deriv_mtx, deriv)
elif model == "DIM":
deriv = sslm.compute_obs_deriv_fixed(
p.word, p.word_counts, p.totals, p.sslm, p.mean_deriv_mtx, deriv) # noqa:F821
return np.negative(deriv)
|
LdaPost
|
python
|
chroma-core__chroma
|
chromadb/server/fastapi/__init__.py
|
{
"start": 4766,
"end": 5879
}
|
class ____(fastapi.APIRouter): # type: ignore
# A simple subclass of fastapi's APIRouter which treats URLs with a
# trailing "/" the same as URLs without. Docs will only contain URLs
# without trailing "/"s.
def add_api_route(self, path: str, *args: Any, **kwargs: Any) -> None:
# If kwargs["include_in_schema"] isn't passed OR is True, we should
# only include the non-"/" path. If kwargs["include_in_schema"] is
# False, include neither.
exclude_from_schema = (
"include_in_schema" in kwargs and not kwargs["include_in_schema"]
)
def include_in_schema(path: str) -> bool:
nonlocal exclude_from_schema
return not exclude_from_schema and not path.endswith("/")
kwargs["include_in_schema"] = include_in_schema(path)
super().add_api_route(path, *args, **kwargs)
if path.endswith("/"):
path = path[:-1]
else:
path = path + "/"
kwargs["include_in_schema"] = include_in_schema(path)
super().add_api_route(path, *args, **kwargs)
|
ChromaAPIRouter
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
|
{
"start": 67349,
"end": 67794
}
|
class ____(test_util.TensorFlowTestCase):
def testInvertPermutation(self):
for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
with self.subTest(dtype=dtype, use_gpu=True):
x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
y = gen_array_ops.snapshot(x)
self.assertAllEqual(y, [0, 1, 2, 3])
@test_util.with_eager_op_as_function
@test_util.run_all_in_graph_and_eager_modes
|
SnapshotOpTest
|
python
|
pypa__warehouse
|
tests/unit/search/test_services.py
|
{
"start": 113,
"end": 413
}
|
class ____:
def test_null_service(self):
service = NullSearchService.create_service(pretend.stub(), pretend.stub())
config = pretend.stub()
assert service.reindex(config, ["foo", "bar"]) is None
assert service.unindex(config, ["foo", "bar"]) is None
|
TestSearchService
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/pygments/formatters/rtf.py
|
{
"start": 485,
"end": 11957
}
|
class ____(Formatter):
"""
Format tokens as RTF markup. This formatter automatically outputs full RTF
documents with color information and other useful stuff. Perfect for Copy and
Paste into Microsoft(R) Word(R) documents.
Please note that ``encoding`` and ``outencoding`` options are ignored.
The RTF format is ASCII natively, but handles unicode characters correctly
thanks to escape sequences.
.. versionadded:: 0.6
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`fontface`
The used font family, for example ``Bitstream Vera Sans``. Defaults to
some generic font which is supposed to have fixed width.
`fontsize`
Size of the font used. Size is specified in half points. The
default is 24 half-points, giving a size 12 font.
.. versionadded:: 2.0
`linenos`
Turn on line numbering (default: ``False``).
.. versionadded:: 2.18
`lineno_fontsize`
Font size for line numbers. Size is specified in half points
(default: `fontsize`).
.. versionadded:: 2.18
`lineno_padding`
Number of spaces between the (inline) line numbers and the
source code (default: ``2``).
.. versionadded:: 2.18
`linenostart`
The line number for the first line (default: ``1``).
.. versionadded:: 2.18
`linenostep`
If set to a number n > 1, only every nth line number is printed.
.. versionadded:: 2.18
`lineno_color`
Color for line numbers specified as a hex triplet, e.g. ``'5e5e5e'``.
Defaults to the style's line number color if it is a hex triplet,
otherwise ansi bright black.
.. versionadded:: 2.18
`hl_lines`
Specify a list of lines to be highlighted, as line numbers separated by
spaces, e.g. ``'3 7 8'``. The line numbers are relative to the input
(i.e. the first line is line 1) unless `hl_linenostart` is set.
.. versionadded:: 2.18
`hl_color`
Color for highlighting the lines specified in `hl_lines`, specified as
a hex triplet (default: style's `highlight_color`).
.. versionadded:: 2.18
`hl_linenostart`
If set to ``True`` line numbers in `hl_lines` are specified
relative to `linenostart` (default ``False``).
.. versionadded:: 2.18
"""
name = 'RTF'
aliases = ['rtf']
filenames = ['*.rtf']
def __init__(self, **options):
r"""
Additional options accepted:
``fontface``
Name of the font used. Could for example be ``'Courier New'``
to further specify the default which is ``'\fmodern'``. The RTF
specification claims that ``\fmodern`` are "Fixed-pitch serif
and sans serif fonts". Hope every RTF implementation thinks
the same about modern...
"""
Formatter.__init__(self, **options)
self.fontface = options.get('fontface') or ''
self.fontsize = get_int_opt(options, 'fontsize', 0)
self.linenos = get_bool_opt(options, 'linenos', False)
self.lineno_fontsize = get_int_opt(options, 'lineno_fontsize',
self.fontsize)
self.lineno_padding = get_int_opt(options, 'lineno_padding', 2)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.hl_linenostart = get_bool_opt(options, 'hl_linenostart', False)
self.hl_color = options.get('hl_color', '')
if not self.hl_color:
self.hl_color = self.style.highlight_color
self.hl_lines = []
for lineno in get_list_opt(options, 'hl_lines', []):
try:
lineno = int(lineno)
if self.hl_linenostart:
lineno = lineno - self.linenostart + 1
self.hl_lines.append(lineno)
except ValueError:
pass
self.lineno_color = options.get('lineno_color', '')
if not self.lineno_color:
if self.style.line_number_color == 'inherit':
# style color is the css value 'inherit'
# default to ansi bright-black
self.lineno_color = _ansimap['ansibrightblack']
else:
# style color is assumed to be a hex triplet as other
# colors in pygments/style.py
self.lineno_color = self.style.line_number_color
self.color_mapping = self._create_color_mapping()
def _escape(self, text):
return text.replace('\\', '\\\\') \
.replace('{', '\\{') \
.replace('}', '\\}')
def _escape_text(self, text):
# empty strings, should give a small performance improvement
if not text:
return ''
# escape text
text = self._escape(text)
buf = []
for c in text:
cn = ord(c)
if cn < (2**7):
# ASCII character
buf.append(str(c))
elif (2**7) <= cn < (2**16):
# single unicode escape sequence
buf.append('{\\u%d}' % cn)
elif (2**16) <= cn:
# RTF limits unicode to 16 bits.
# Force surrogate pairs
buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
return ''.join(buf).replace('\n', '\\par')
@staticmethod
def hex_to_rtf_color(hex_color):
if hex_color[0] == "#":
hex_color = hex_color[1:]
return '\\red%d\\green%d\\blue%d;' % (
int(hex_color[0:2], 16),
int(hex_color[2:4], 16),
int(hex_color[4:6], 16)
)
def _split_tokens_on_newlines(self, tokensource):
"""
Split tokens containing newline characters into multiple tokens,
each representing a line of the input file. Needed for numbering
lines of e.g. multiline comments.
"""
for ttype, value in tokensource:
if value == '\n':
yield (ttype, value)
elif "\n" in value:
lines = value.split("\n")
for line in lines[:-1]:
yield (ttype, line+"\n")
if lines[-1]:
yield (ttype, lines[-1])
else:
yield (ttype, value)
def _create_color_mapping(self):
"""
Create a mapping of style hex colors to index/offset in
the RTF color table.
"""
color_mapping = OrderedDict()
offset = 1
if self.linenos:
color_mapping[self.lineno_color] = offset
offset += 1
if self.hl_lines:
color_mapping[self.hl_color] = offset
offset += 1
for _, style in self.style:
for color in style['color'], style['bgcolor'], style['border']:
if color and color not in color_mapping:
color_mapping[color] = offset
offset += 1
return color_mapping
@property
def _lineno_template(self):
if self.lineno_fontsize != self.fontsize:
return '{{\\fs{} \\cf{} %s{}}}'.format(self.lineno_fontsize,
self.color_mapping[self.lineno_color],
" " * self.lineno_padding)
return '{{\\cf{} %s{}}}'.format(self.color_mapping[self.lineno_color],
" " * self.lineno_padding)
@property
def _hl_open_str(self):
return rf'{{\highlight{self.color_mapping[self.hl_color]} '
@property
def _rtf_header(self):
lines = []
# rtf 1.8 header
lines.append('{\\rtf1\\ansi\\uc0\\deff0'
'{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
% (self.fontface and ' '
+ self._escape(self.fontface) or ''))
# color table
lines.append('{\\colortbl;')
for color, _ in self.color_mapping.items():
lines.append(self.hex_to_rtf_color(color))
lines.append('}')
# font and fontsize
lines.append('\\f0\\sa0')
if self.fontsize:
lines.append('\\fs%d' % self.fontsize)
# ensure Libre Office Writer imports and renders consecutive
# space characters the same width, needed for line numbering.
# https://bugs.documentfoundation.org/show_bug.cgi?id=144050
lines.append('\\dntblnsbdb')
return lines
def format_unencoded(self, tokensource, outfile):
for line in self._rtf_header:
outfile.write(line + "\n")
tokensource = self._split_tokens_on_newlines(tokensource)
# first pass of tokens to count lines, needed for line numbering
if self.linenos:
line_count = 0
tokens = [] # for copying the token source generator
for ttype, value in tokensource:
tokens.append((ttype, value))
if value.endswith("\n"):
line_count += 1
# width of line number strings (for padding with spaces)
linenos_width = len(str(line_count+self.linenostart-1))
tokensource = tokens
# highlight stream
lineno = 1
start_new_line = True
for ttype, value in tokensource:
if start_new_line and lineno in self.hl_lines:
outfile.write(self._hl_open_str)
if start_new_line and self.linenos:
if (lineno-self.linenostart+1)%self.linenostep == 0:
current_lineno = lineno + self.linenostart - 1
lineno_str = str(current_lineno).rjust(linenos_width)
else:
lineno_str = "".rjust(linenos_width)
outfile.write(self._lineno_template % lineno_str)
while not self.style.styles_token(ttype) and ttype.parent:
ttype = ttype.parent
style = self.style.style_for_token(ttype)
buf = []
if style['bgcolor']:
buf.append('\\cb%d' % self.color_mapping[style['bgcolor']])
if style['color']:
buf.append('\\cf%d' % self.color_mapping[style['color']])
if style['bold']:
buf.append('\\b')
if style['italic']:
buf.append('\\i')
if style['underline']:
buf.append('\\ul')
if style['border']:
buf.append('\\chbrdr\\chcfpat%d' %
self.color_mapping[style['border']])
start = ''.join(buf)
if start:
outfile.write(f'{{{start} ')
outfile.write(self._escape_text(value))
if start:
outfile.write('}')
start_new_line = False
# complete line of input
if value.endswith("\n"):
# close line highlighting
if lineno in self.hl_lines:
outfile.write('}')
# newline in RTF file after closing }
outfile.write("\n")
start_new_line = True
lineno += 1
outfile.write('}\n')
|
RtfFormatter
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/packages/backports/weakref_finalize.py
|
{
"start": 257,
"end": 5343
}
|
class ____(object):
"""Class for finalization of weakrefable objects
finalize(obj, func, *args, **kwargs) returns a callable finalizer
object which will be called when obj is garbage collected. The
first time the finalizer is called it evaluates func(*arg, **kwargs)
and returns the result. After this the finalizer is dead, and
calling it just returns None.
When the program exits any remaining finalizers for which the
atexit attribute is true will be run in reverse order of creation.
By default atexit is true.
"""
# Finalizer objects don't have any state of their own. They are
# just used as keys to lookup _Info objects in the registry. This
# ensures that they cannot be part of a ref-cycle.
__slots__ = ()
_registry = {}
_shutdown = False
_index_iter = itertools.count()
_dirty = False
_registered_with_atexit = False
class _Info(object):
__slots__ = ("weakref", "func", "args", "kwargs", "atexit", "index")
def __init__(self, obj, func, *args, **kwargs):
if not self._registered_with_atexit:
# We may register the exit function more than once because
# of a thread race, but that is harmless
import atexit
atexit.register(self._exitfunc)
weakref_finalize._registered_with_atexit = True
info = self._Info()
info.weakref = ref(obj, self)
info.func = func
info.args = args
info.kwargs = kwargs or None
info.atexit = True
info.index = next(self._index_iter)
self._registry[self] = info
weakref_finalize._dirty = True
def __call__(self, _=None):
"""If alive then mark as dead and return func(*args, **kwargs);
otherwise return None"""
info = self._registry.pop(self, None)
if info and not self._shutdown:
return info.func(*info.args, **(info.kwargs or {}))
def detach(self):
"""If alive then mark as dead and return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None and self._registry.pop(self, None):
return (obj, info.func, info.args, info.kwargs or {})
def peek(self):
"""If alive then return (obj, func, args, kwargs);
otherwise return None"""
info = self._registry.get(self)
obj = info and info.weakref()
if obj is not None:
return (obj, info.func, info.args, info.kwargs or {})
@property
def alive(self):
"""Whether finalizer is alive"""
return self in self._registry
@property
def atexit(self):
"""Whether finalizer should be called at exit"""
info = self._registry.get(self)
return bool(info) and info.atexit
@atexit.setter
def atexit(self, value):
info = self._registry.get(self)
if info:
info.atexit = bool(value)
def __repr__(self):
info = self._registry.get(self)
obj = info and info.weakref()
if obj is None:
return "<%s object at %#x; dead>" % (type(self).__name__, id(self))
else:
return "<%s object at %#x; for %r at %#x>" % (
type(self).__name__,
id(self),
type(obj).__name__,
id(obj),
)
@classmethod
def _select_for_exit(cls):
# Return live finalizers marked for exit, oldest first
L = [(f, i) for (f, i) in cls._registry.items() if i.atexit]
L.sort(key=lambda item: item[1].index)
return [f for (f, i) in L]
@classmethod
def _exitfunc(cls):
# At shutdown invoke finalizers for which atexit is true.
# This is called once all other non-daemonic threads have been
# joined.
reenable_gc = False
try:
if cls._registry:
import gc
if gc.isenabled():
reenable_gc = True
gc.disable()
pending = None
while True:
if pending is None or weakref_finalize._dirty:
pending = cls._select_for_exit()
weakref_finalize._dirty = False
if not pending:
break
f = pending.pop()
try:
# gc is disabled, so (assuming no daemonic
# threads) the following is the only line in
# this function which might trigger creation
# of a new finalizer
f()
except Exception:
sys.excepthook(*sys.exc_info())
assert f not in cls._registry
finally:
# prevent any more finalizers from executing during shutdown
weakref_finalize._shutdown = True
if reenable_gc:
gc.enable()
|
weakref_finalize
|
python
|
astropy__astropy
|
astropy/io/ascii/core.py
|
{
"start": 7475,
"end": 7572
}
|
class ____(NoType):
"""
Indicates that a column consists of numerical data.
"""
|
NumType
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_geometry_to_be_near_shape.py
|
{
"start": 465,
"end": 3462
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.geometry.near_shape"
condition_value_keys = (
"shape",
"shape_format",
"column_shape_format",
"distance_tol",
)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
shape = kwargs.get("shape")
shape_format = kwargs.get("shape_format")
column_shape_format = kwargs.get("column_shape_format")
distance_tol = kwargs.get("distance_tol")
# Check that shape is given and given in the correct format
if shape is not None:
try:
if shape_format == "wkt":
shape_ref = geos.from_wkt(shape)
elif shape_format == "wkb":
shape_ref = geos.from_wkb(shape)
elif shape_format == "geojson":
shape_ref = geos.from_geojson(shape)
else:
raise NotImplementedError( # noqa: TRY301
"Shape constructor method not implemented. Must be in WKT, WKB, or GeoJSON format."
)
except Exception:
raise Exception("A valid reference shape was not given.") # noqa: TRY002, TRY003
else:
raise Exception("A shape must be provided for this method.") # noqa: TRY002, TRY003
# Load the column into a pygeos Geometry vector from numpy array (Series not supported).
if column_shape_format == "wkt":
shape_test = geos.from_wkt(column.to_numpy(), on_invalid="ignore")
elif column_shape_format == "wkb":
shape_test = geos.from_wkb(column.to_numpy(), on_invalid="ignore")
else:
raise NotImplementedError("Column values shape format not implemented.")
# Allow for an array of reference shapes to be provided. Return a union of all the shapes in the array (Polygon or Multipolygon)
shape_ref = geos.union_all(shape_ref)
# Prepare the geometries
geos.prepare(shape_ref)
geos.prepare(shape_test)
# Return whether the distance is below the tolerance.
return pd.Series(geos.dwithin(shape_test, shape_ref, distance_tol))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesGeometryNearShape
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-weaviate/destination_weaviate/config.py
|
{
"start": 509,
"end": 1081
}
|
class ____(BaseModel):
mode: Literal["username_password"] = Field("username_password", const=True)
username: str = Field(..., title="Username", description="Username for the Weaviate cluster", order=1)
password: str = Field(..., title="Password", description="Password for the Weaviate cluster", airbyte_secret=True, order=2)
class Config(OneOfOptionConfig):
title = "Username/Password"
description = "Authenticate using username and password (suitable for self-managed Weaviate clusters)"
discriminator = "mode"
|
UsernamePasswordAuth
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-self-rag/llama_index/packs/self_rag/base.py
|
{
"start": 9493,
"end": 10222
}
|
class ____(BaseLlamaPack):
"""Simple short form Self-RAG pack."""
def __init__(
self,
model_path: str,
retriever: BaseRetriever,
verbose: bool = False,
**kwargs: Any,
) -> None:
"""Init params."""
self.query_engine = SelfRAGQueryEngine(model_path, retriever, verbose)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"query_engine": self.query_engine,
"llm": self.query_engine.llm,
"retriever": self.query_engine.retriever,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
SelfRAGPack
|
python
|
marshmallow-code__marshmallow
|
src/marshmallow/exceptions.py
|
{
"start": 2101,
"end": 2273
}
|
class ____(MarshmallowError, TypeError):
"""Raised when an argument is passed to a field class that cannot be resolved to a Field instance."""
|
_FieldInstanceResolutionError
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/class_interval.py
|
{
"start": 5806,
"end": 5872
}
|
class ____(A14):
def m2(self):
return _test_source()
|
C14
|
python
|
html5lib__html5lib-python
|
html5lib/tests/test_stream.py
|
{
"start": 2009,
"end": 2101
}
|
class ____(HTMLUnicodeInputStream):
_defaultChunkSize = 2
|
HTMLUnicodeInputStreamShortChunk
|
python
|
getsentry__sentry
|
src/sentry/testutils/cases.py
|
{
"start": 112850,
"end": 115282
}
|
class ____(APITestCase):
def _create_monitor(self, **kwargs):
if "owner_user_id" not in kwargs:
kwargs["owner_user_id"] = self.user.id
return Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": "* * * * *",
"schedule_type": ScheduleType.CRONTAB,
"checkin_margin": None,
"max_runtime": None,
},
**kwargs,
)
def _create_monitor_environment(self, monitor, name="production", **kwargs):
environment = Environment.get_or_create(project=self.project, name=name)
monitorenvironment_defaults = {
"status": monitor.status,
**kwargs,
}
return MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=environment.id,
**monitorenvironment_defaults,
)
def _create_issue_alert_rule(self, monitor, exclude_slug_filter=False):
conditions = [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
},
{
"id": "sentry.rules.conditions.regression_event.RegressionEventCondition",
},
]
if not exclude_slug_filter:
conditions.append(
{
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "monitor.slug",
"match": "eq",
"value": monitor.slug,
},
)
actions = [
{
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": self.user.id,
"targetType": "Member",
"uuid": str(uuid4()),
},
]
rule = ProjectRuleCreator(
name="New Cool Rule",
project=self.project,
conditions=conditions,
filter_match="all",
action_match="any",
actions=actions,
frequency=5,
environment=self.environment.id,
).run()
rule.update(source=RuleSource.CRON_MONITOR)
config = monitor.config
config["alert_rule_id"] = rule.id
monitor.config = config
monitor.save()
return rule
|
MonitorTestCase
|
python
|
getsentry__sentry
|
src/sentry/statistical_detectors/redis.py
|
{
"start": 430,
"end": 2371
}
|
class ____(DetectorStore):
def __init__(
self,
regression_type: RegressionType,
client: RedisCluster | StrictRedis | None = None,
ttl=STATE_TTL,
):
self.regression_type = regression_type
self.ttl = ttl
self._client: RedisCluster | StrictRedis | None = None
@property
def client(
self,
client: RedisCluster | StrictRedis | None = None,
) -> RedisCluster | StrictRedis:
if self._client is None:
self._client = self.get_redis_client() if client is None else client
return self._client
def bulk_read_states(
self, payloads: list[DetectorPayload]
) -> list[Mapping[str | bytes, bytes | float | int | str]]:
with self.client.pipeline() as pipeline:
for payload in payloads:
key = self.make_key(payload)
pipeline.hgetall(key)
return pipeline.execute()
def bulk_write_states(
self,
payloads: list[DetectorPayload],
states: list[Mapping[str | bytes, bytes | float | int | str] | None],
) -> None:
# the number of new states must match the number of payloads
assert len(states) == len(payloads)
with self.client.pipeline() as pipeline:
for state, payload in zip(states, payloads):
if state is None:
continue
key = self.make_key(payload)
pipeline.hmset(key, state)
pipeline.expire(key, self.ttl)
pipeline.execute()
def make_key(self, payload: DetectorPayload) -> str:
return (
f"sd:p:{payload.project_id}:{self.regression_type.abbreviate()}:{payload.fingerprint}"
)
@staticmethod
def get_redis_client() -> RedisCluster | StrictRedis:
return redis.redis_clusters.get(settings.SENTRY_STATISTICAL_DETECTORS_REDIS_CLUSTER)
|
RedisDetectorStore
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/initializers/initializers_v1.py
|
{
"start": 2146,
"end": 2392
}
|
class ____(init_ops.VarianceScaling):
def __init__(self, seed=None):
super(LecunUniform, self).__init__(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
def get_config(self):
return {'seed': self.seed}
|
LecunUniform
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatter3d/line/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8549
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.line.colorbar"
_path_str = "scatter3d.line.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter3d.line
.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.line.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.line.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickformatstop
|
python
|
bokeh__bokeh
|
src/bokeh/models/widgets/inputs.py
|
{
"start": 3258,
"end": 3827
}
|
class ____(ModelEvent):
"""
Notifies the input widget that its input/value needs to be cleared.
    This is especially useful for widgets whose value can't simply be cleared by
assigning to ``value`` (or equivalent) property.
"""
event_name = "clear_input"
def __init__(self, model: InputWidget) -> None:
if not isinstance(model, InputWidget):
raise ValueError(f"{self.__class__.__name__} event only applies to input models, i.e. instances of bokeh.models.widgets.InputWidget")
super().__init__(model=model)
|
ClearInput
|
python
|
catalyst-team__catalyst
|
examples/self_supervised/src/common.py
|
{
"start": 2104,
"end": 7101
}
|
class ____(torch.nn.Module):
"""Contrastive model with projective head.
Args:
model: projective head for the train time
encoder: model for the future uses
"""
def __init__(self, model, encoder):
super(ContrastiveModel, self).__init__()
self.model = model
self.encoder = encoder
def forward(self, x):
"""Forward method.
Args:
x: input for the encoder
Returns:
(embeddings, projections)
"""
emb = self.encoder(x)
projection = self.model(emb)
return emb, projection
def get_loaders(
dataset: str, batch_size: int, num_workers: Optional[int]
) -> Dict[str, DataLoader]:
"""Init loaders based on parsed parametrs.
Args:
dataset: dataset for the experiment
batch_size: batch size for loaders
num_workers: number of workers to process loaders
Returns:
{"train":..., "valid":...}
"""
transforms = DATASETS[dataset]["train_transform"]
transform_original = DATASETS[dataset]["valid_transform"]
try:
train_data = DATASETS[dataset]["dataset"](root="data", train=True, download=True)
valid_data = DATASETS[dataset]["dataset"](
root="data", train=False, download=True
)
except:
train_data = DATASETS[dataset]["dataset"](
root="data", split="train", download=True
)
valid_data = DATASETS[dataset]["dataset"](
root="data", split="test", download=True
)
train_data = SelfSupervisedDatasetWrapper(
train_data,
transforms=transforms,
transform_original=transform_original,
)
valid_data = SelfSupervisedDatasetWrapper(
valid_data,
transforms=transforms,
transform_original=transform_original,
)
train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
valid_loader = DataLoader(valid_data, batch_size=batch_size, num_workers=num_workers)
return {"train": train_loader, "valid": valid_loader}
def conv_block(in_channels, out_channels, pool=False):
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
if pool:
layers.append(nn.MaxPool2d(2))
return nn.Sequential(*layers)
def resnet_mnist(in_size: int, in_channels: int, out_features: int, size: int = 16):
sz, sz2, sz4 = size, size * 2, size * 4
out_size = (((in_size // 16) * 16) ** 2 * 4) // size
return nn.Sequential(
conv_block(in_channels, sz),
conv_block(sz, sz2, pool=True),
ResidualBlock(nn.Sequential(conv_block(sz2, sz2), conv_block(sz2, sz2))),
conv_block(sz2, sz4, pool=True),
ResidualBlock(nn.Sequential(conv_block(sz4, sz4), conv_block(sz4, sz4))),
nn.Sequential(
nn.MaxPool2d(4),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(out_size, out_features),
),
)
def resnet9(in_size: int, in_channels: int, out_features: int, size: int = 16):
sz, sz2, sz4, sz8 = size, size * 2, size * 4, size * 8
assert (
in_size >= 32
), "The graph is not valid for images with resolution lower then 32x32."
out_size = (((in_size // 32) * 32) ** 2 * 2) // size
return nn.Sequential(
conv_block(in_channels, sz),
conv_block(sz, sz2, pool=True),
ResidualBlock(nn.Sequential(conv_block(sz2, sz2), conv_block(sz2, sz2))),
conv_block(sz2, sz4, pool=True),
conv_block(sz4, sz8, pool=True),
ResidualBlock(nn.Sequential(conv_block(sz8, sz8), conv_block(sz8, sz8))),
nn.Sequential(
nn.MaxPool2d(4),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(out_size, out_features),
),
)
def get_contrastive_model(
in_size: int,
in_channels: int,
feature_dim: int,
encoder_dim: int = 512,
hidden_dim: int = 512,
) -> ContrastiveModel:
"""Init contrastive model based on parsed parametrs.
Args:
in_size: size of an image (in_size x in_size)
in_channels: number of channels in an image
feature_dim: dimensinality of contrative projection
encoder_dim: dimensinality of encoder output
hidden_dim: dimensinality of encoder-contrative projection
Returns:
ContrstiveModel instance
"""
try:
encoder = resnet9(
in_size=in_size, in_channels=in_channels, out_features=encoder_dim
)
except:
encoder = resnet_mnist(
in_size=in_size, in_channels=in_channels, out_features=encoder_dim
)
projection_head = nn.Sequential(
nn.Linear(encoder_dim, hidden_dim, bias=False),
nn.ReLU(inplace=True),
nn.Linear(hidden_dim, feature_dim, bias=True),
)
model = ContrastiveModel(projection_head, encoder)
return model
|
ContrastiveModel
|
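A small, self-contained sketch of how the ContrastiveModel above could be wired up; the tiny encoder, head, and tensor shapes below are illustrative stand-ins, not taken from the record.
import torch
import torch.nn as nn

# hypothetical encoder and projection head, sized arbitrarily for illustration
encoder = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 16))
projection_head = nn.Linear(16, 8)
model = ContrastiveModel(model=projection_head, encoder=encoder)

emb, proj = model(torch.randn(4, 1, 28, 28))
print(emb.shape, proj.shape)  # torch.Size([4, 16]) torch.Size([4, 8])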
python
|
crytic__slither
|
slither/vyper_parsing/declarations/event.py
|
{
"start": 335,
"end": 1154
}
|
class ____: # pylint: disable=too-few-public-methods
"""
Event class
"""
def __init__(self, event: Event, event_def: EventDef) -> None:
self._event = event
self._event.name = event_def.name
self._elemsNotParsed = event_def.body
def analyze(self, contract) -> None:
for elem_to_parse in self._elemsNotParsed:
if not isinstance(elem_to_parse, AnnAssign):
assert isinstance(elem_to_parse, Pass)
continue
elem = EventVariable()
elem.set_offset(elem_to_parse.src, self._event.contract.compilation_unit)
event_parser = EventVariableVyper(elem, elem_to_parse)
event_parser.analyze(contract)
self._event.elems.append(elem)
self._elemsNotParsed = []
|
EventVyper
|
python
|
geekcomputers__Python
|
nitkarshchourasia/to_sort/django_projects/ToDo_webapp/todo/apps.py
|
{
"start": 36,
"end": 140
}
|
class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "todo"
|
TodoConfig
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/filters.py
|
{
"start": 33693,
"end": 34035
}
|
class ____(PrefectBaseModel):
"""Filter by `ArtifactCollection.type`."""
any_: Optional[List[str]] = Field(
default=None, description="A list of artifact types to include"
)
not_any_: Optional[List[str]] = Field(
default=None, description="A list of artifact types to exclude"
)
|
ArtifactCollectionFilterType
|
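A brief sketch of constructing the filter model above, assuming Prefect's PrefectBaseModel and Field imports from the source module are in scope; the artifact type names are made up for illustration.
# hypothetical usage of the filter defined in the record above
flt = ArtifactCollectionFilterType(any_=["table", "markdown"])
print(flt.any_)      # ['table', 'markdown']
print(flt.not_any_)  # None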
python
|
walkccc__LeetCode
|
solutions/1408. String Matching in an Array/1408.py
|
{
"start": 0,
"end": 233
}
|
class ____:
def stringMatching(self, words: list[str]) -> list[str]:
ans = []
for a in words:
for b in words:
if len(a) < len(b) and b.find(a) != -1:
ans.append(a)
break
return ans
|
Solution
|
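A quick check of the brute-force matcher above, assuming the masked class is named Solution as the target column indicates; the input mirrors LeetCode 1408's first example.
# hypothetical usage of the class defined in the record above
print(Solution().stringMatching(["mass", "as", "hero", "superhero"]))  # ['as', 'hero']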
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/string_conversion.py
|
{
"start": 1472,
"end": 2537
}
|
class ____:
g: str = ""
def __str__(self):
return self.g
def join_source_and_attribute_source(i: int):
if i > 0:
a: str = request.GET["tainted"]
else:
a: C = C()
eval(f"{a}") # noqa: P204
def multiple_targets_for_single_expression_2(a: Union[int, B, C]):
eval(f"{a}") # noqa: P204
def joined_base():
a = request.GET["tainted"]
b = "benign"
eval(f"{a}{b}") # noqa: P204
def analyze_implicit_call():
b = B()
b.f = request.GET["tainted"]
# Require inferring a tito model for `B.__str__`
eval(f"{str(b)}") # noqa: P204.
# Require analyzing an implicit call to `str(b)`
eval(f"{b}") # noqa: P204.
def multiple_targets_for_single_expression_3(b_or_c: Union[B, C], d: int):
a = 1
# Require the proper accumulation of tito information under implicit `str`
return f"{a}{b_or_c}{d}"
def tito_f(x):
return x
def tito_g(y):
return y
def compute_tito(x, y):
# Require the proper accumulation of tito information
return f"{tito_g(y)}{tito_f(x)}"
|
C
|
python
|
pypa__warehouse
|
warehouse/admin/views/banners.py
|
{
"start": 4068,
"end": 5297
}
|
class ____(wtforms.Form):
name = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=100),
wtforms.validators.InputRequired(),
],
)
text = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=280),
wtforms.validators.InputRequired(),
],
)
link_url = wtforms.fields.StringField(
validators=[
wtforms.validators.InputRequired(),
URIValidator(),
]
)
link_label = wtforms.fields.StringField(
validators=[
wtforms.validators.Optional(),
],
default=Banner.DEFAULT_BTN_LABEL,
)
fa_icon = wtforms.fields.StringField(
validators=[
wtforms.validators.Length(max=40),
wtforms.validators.Optional(),
],
default=Banner.DEFAULT_FA_ICON,
)
active = wtforms.fields.BooleanField(
validators=[wtforms.validators.Optional()], default=False
)
dismissable = wtforms.fields.BooleanField(
validators=[wtforms.validators.Optional()], default=False
)
end = wtforms.fields.DateField(validators=[wtforms.validators.InputRequired()])
|
BannerForm
|
python
|
kamyu104__LeetCode-Solutions
|
Python/path-in-zigzag-labelled-binary-tree.py
|
{
"start": 35,
"end": 419
}
|
class ____(object):
def pathInZigZagTree(self, label):
"""
:type label: int
:rtype: List[int]
"""
count = 2**label.bit_length()
result = []
while label >= 1:
result.append(label)
label = ((count//2) + ((count-1)-label)) // 2
count //= 2
result.reverse()
return result
|
Solution
|
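A one-line check for the zigzag-path solution above (class name Solution per the target column); label 14 lies on the fourth level of the zigzag-labelled tree.
# hypothetical usage of the class defined in the record above
print(Solution().pathInZigZagTree(14))  # [1, 3, 4, 14]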
python
|
apache__airflow
|
providers/common/compat/src/airflow/providers/common/compat/lineage/entities.py
|
{
"start": 2173,
"end": 2668
}
|
class ____:
"""Table entity."""
database: str = attr.ib()
cluster: str = attr.ib()
name: str = attr.ib()
tags: list[Tag] = []
description: str | None = None
columns: list[Column] = []
owners: list[User] = []
extra: dict[str, Any] = {}
type_hint: str | None = None
template_fields: ClassVar = (
"database",
"cluster",
"name",
"tags",
"description",
"columns",
"owners",
"extra",
)
|
Table
|
python
|
python-excel__xlwt
|
tests/test_simple.py
|
{
"start": 265,
"end": 2070
}
|
class ____(unittest.TestCase):
def create_simple_xls(self, **kw):
font0 = xlwt.Font()
font0.name = 'Times New Roman'
font0.colour_index = 2
font0.bold = True
style0 = xlwt.XFStyle()
style0.font = font0
style1 = xlwt.XFStyle()
style1.num_format_str = 'D-MMM-YY'
wb = xlwt.Workbook(**kw)
ws = wb.add_sheet('A Test Sheet')
ws.write(0, 0, 'Test', style0)
ws.write(1, 0, datetime(2010, 12, 5), style1)
ws.write(2, 0, 1)
ws.write(2, 1, 1)
ws.write(2, 2, xlwt.Formula("A3+B3"))
return wb, ws
def test_create_simple_xls(self):
wb, _ = self.create_simple_xls()
wb.save(in_tst_output_dir('simple.xls'))
self.assertTrue(filecmp.cmp(in_tst_dir('simple.xls'),
in_tst_output_dir('simple.xls'),
shallow=False))
def test_create_less_simple_xls(self):
wb, ws = self.create_simple_xls()
more_content=[
[
'A{0}'.format(i),
'Zażółć gęślą jaźń {0} {1}'.format(i, LOREM_IPSUM),
]
for idx, i in enumerate(range(1000, 1050))
]
for r_idx, content_row in enumerate(more_content, 3):
for c_idx, cell in enumerate(content_row):
ws.write(r_idx, c_idx, cell)
wb.save(in_tst_output_dir('less_simple.xls'))
self.assertTrue(filecmp.cmp(in_tst_dir('less_simple.xls'),
in_tst_output_dir('less_simple.xls'),
shallow=False))
def test_font_compression(self):
wb, ws = self.create_simple_xls(style_compression = 2)
wb.save(in_tst_output_dir('simple.xls'), )
|
TestSimple
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/lib/test_index_tricks.py
|
{
"start": 20782,
"end": 21557
}
|
class ____(TestCase):
@xfail # (reason="ndindex not implemented")
def test_ndindex(self):
x = list(ndindex(1, 2, 3))
expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
x = list(ndindex((1, 2, 3)))
assert_array_equal(x, expected)
# Test use of scalars and tuples
x = list(ndindex((3,)))
assert_array_equal(x, list(ndindex(3)))
# Make sure size argument is optional
x = list(ndindex())
assert_equal(x, [()])
x = list(ndindex(()))
assert_equal(x, [()])
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
if __name__ == "__main__":
run_tests()
|
TestNdIndex
|
python
|
doocs__leetcode
|
solution/1600-1699/1672.Richest Customer Wealth/Solution.py
|
{
"start": 0,
"end": 124
}
|
class ____:
def maximumWealth(self, accounts: List[List[int]]) -> int:
return max(sum(v) for v in accounts)
|
Solution
|
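A usage check for the one-liner above; it assumes the class is named Solution and that `List` was imported from `typing` before the class definition, as in the original file.
# hypothetical usage of the class defined in the record above
print(Solution().maximumWealth([[1, 2, 3], [3, 2, 1]]))  # 6 (each customer holds 6)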
python
|
doocs__leetcode
|
solution/1900-1999/1993.Operations on Tree/Solution.py
|
{
"start": 0,
"end": 1366
}
|
class ____:
def __init__(self, parent: List[int]):
n = len(parent)
self.locked = [-1] * n
self.parent = parent
self.children = [[] for _ in range(n)]
for son, fa in enumerate(parent[1:], 1):
self.children[fa].append(son)
def lock(self, num: int, user: int) -> bool:
if self.locked[num] == -1:
self.locked[num] = user
return True
return False
def unlock(self, num: int, user: int) -> bool:
if self.locked[num] == user:
self.locked[num] = -1
return True
return False
def upgrade(self, num: int, user: int) -> bool:
def dfs(x: int):
nonlocal find
for y in self.children[x]:
if self.locked[y] != -1:
self.locked[y] = -1
find = True
dfs(y)
x = num
while x != -1:
if self.locked[x] != -1:
return False
x = self.parent[x]
find = False
dfs(num)
if not find:
return False
self.locked[num] = user
return True
# Your LockingTree object will be instantiated and called as such:
# obj = LockingTree(parent)
# param_1 = obj.lock(num,user)
# param_2 = obj.unlock(num,user)
# param_3 = obj.upgrade(num,user)
|
LockingTree
|
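A short walkthrough matching LeetCode 1993's sample calls, assuming the masked class is LockingTree and that `List` is imported from `typing` before the class definition, as in the original file.
# hypothetical usage following the commented driver in the record above
tree = LockingTree([-1, 0, 0, 1, 1, 2, 2])
print(tree.lock(2, 2))     # True
print(tree.unlock(2, 3))   # False (node 2 is locked by user 2, not 3)
print(tree.unlock(2, 2))   # True
print(tree.lock(4, 5))     # True
print(tree.upgrade(0, 1))  # True (unlocks descendant 4, then locks the root)
print(tree.lock(0, 1))     # False (the root is already locked by the upgrade)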
python
|
pytorch__pytorch
|
test/test_stateless.py
|
{
"start": 36086,
"end": 37195
}
|
class ____(TestCase):
def test_private_stateless_warns(self):
script = """
import torch
import warnings
with warnings.catch_warnings(record=True) as w:
from torch.nn.utils import _stateless
exit(len(w))
"""
try:
subprocess.check_output(
[sys.executable, '-W', 'always', '-c', script],
stderr=subprocess.STDOUT,
# On Windows, opening the subprocess with the default CWD makes `import torch`
# fail, so just set CWD to this script's directory
cwd=os.path.dirname(os.path.realpath(__file__)),)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, 1)
else:
self.assertTrue(False, "No warning was raised.")
def test_stateless_functional_call_warns(self):
m = torch.nn.Linear(1, 1)
params = dict(m.named_parameters())
x = torch.randn(3, 1)
with self.assertWarnsRegex(FutureWarning, "Please use `torch.func.functional_call`"):
stateless.functional_call(m, params, x)
|
TestStatelessDeprecation
|
python
|
great-expectations__great_expectations
|
great_expectations/types/__init__.py
|
{
"start": 466,
"end": 8315
}
|
class ____:
"""A convenience class for migrating away from untyped dictionaries to stronger typed objects.
Can be instantiated with arguments:
my_A = MyClassA(
foo="a string",
bar=1,
)
Can be instantiated from a dictionary:
my_A = MyClassA(
**{
"foo": "a string",
"bar": 1,
}
)
Can be accessed using both dictionary and dot notation
my_A.foo == "a string"
my_A.bar == 1
my_A["foo"] == "a string"
my_A["bar"] == 1
Pairs nicely with @dataclass:
@dataclass()
class MyClassA(DictDot):
foo: str
bar: int
Can be made immutable:
@dataclass(frozen=True)
class MyClassA(DictDot):
foo: str
bar: int
For more examples of usage, please see `test_dataclass_serializable_dot_dict_pattern.py` in the tests folder.
""" # noqa: E501 # FIXME CoP
include_field_names: ClassVar[Set[str]] = set()
exclude_field_names: ClassVar[Set[str]] = set()
def __getitem__(self, item):
if isinstance(item, int):
return list(self.__dict__.keys())[item]
return getattr(self, item)
def __setitem__(self, key, value) -> None:
setattr(self, key, value)
def __delitem__(self, key) -> None:
delattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def __len__(self):
return len(self.__dict__)
def keys(self):
return self.__dict__.keys()
def values(self):
return self.to_raw_dict().values()
def items(self):
return self.to_raw_dict().items()
def get(self, key, default_value=None):
if self.__contains__(key=key):
return self.__getitem__(item=key)
return self.__dict__.get(key, default_value)
def to_raw_dict(self) -> dict: # noqa: C901 # FIXME CoP
"""Convert this object into a standard dictionary, recursively.
This is often convenient for serialization, and in cases where an untyped version of the object is required.
""" # noqa: E501 # FIXME CoP
new_dict = safe_deep_copy(data=self.__dict__)
# This is needed to play nice with pydantic.
if "__initialised__" in new_dict:
del new_dict["__initialised__"]
# DictDot's to_raw_dict method works recursively, when a DictDot contains other DictDots.
for key, value in new_dict.items():
# Recursive conversion works on keys that are DictDots...
if isinstance(value, DictDot):
new_dict[key] = value.to_raw_dict()
# ...and Enums...
if isinstance(value, Enum):
new_dict[key] = value.value
# ...and when DictDots and Enums are nested one layer deeper in lists or tuples
if isinstance(value, (list, tuple)):
new_dict[key] = [temp_element for temp_element in value]
for i, element in enumerate(value):
if isinstance(element, DictDot):
new_dict[key][i] = element.to_raw_dict()
if isinstance(element, Enum):
new_dict[key][i] = element.value
# Note: conversion will not work automatically if there are additional layers in between. # noqa: E501 # FIXME CoP
return new_dict
def to_dict(self) -> dict: # noqa: C901 # FIXME CoP
new_dict = {
key: self[key]
for key in self.property_names(
include_keys=self.include_field_names,
exclude_keys=self.exclude_field_names,
)
}
for key, value in new_dict.items():
if isinstance(value, pydantic.BaseModel):
new_dict[key] = value.dict()
if isinstance(value, DictDot):
new_dict[key] = value.to_dict()
if isinstance(value, Enum):
new_dict[key] = value.value
if isinstance(value, (list, tuple)):
new_dict[key] = [temp_element for temp_element in value]
for i, element in enumerate(value):
if isinstance(value, pydantic.BaseModel):
new_dict[key][i] = element.dict()
if isinstance(element, DictDot):
new_dict[key][i] = element.to_dict()
if isinstance(element, Enum):
new_dict[key][i] = element.value
return new_dict
def property_names( # noqa: C901 # FIXME CoP
self,
include_keys: Optional[Set[str]] = None,
exclude_keys: Optional[Set[str]] = None,
) -> Set[str]:
"""
Assuming that -- by convention -- names of private properties of an object are prefixed by "_" (a single
underscore character), return these property names as public property names. To support this convention, the
        extending classes must implement property accessors, corresponding to the property names, returned by this method.
:param include_keys: inclusion list ("include only these properties, while excluding all the rest")
:param exclude_keys: exclusion list ("exclude only these properties, while include all the rest")
:return: property names, subject to inclusion/exclusion filtering
""" # noqa: E501 # FIXME CoP
if include_keys is None:
include_keys = set()
if exclude_keys is None:
exclude_keys = set()
if include_keys & exclude_keys:
raise ValueError( # noqa: TRY003 # FIXME CoP
"Common keys between sets of include_keys and exclude_keys filtering directives are illegal." # noqa: E501 # FIXME CoP
)
key: str
# Gather private fields:
# By Python convention, properties of non-trivial length, prefixed by underscore ("_") character, are private. # noqa: E501 # FIXME CoP
private_fields: Set[str] = set(
filter(
lambda name: len(name) > 1,
[key[1:] for key in self.keys() if key[0] == "_"],
)
)
# Gather public fields.
public_fields: Set[str] = {key for key in self.keys() if key[0] != "_"}
# Combine private and public fields using the "Set Union" operation.
property_names: Set[str] = public_fields | private_fields
keys_for_exclusion: list = []
def assert_valid_keys(keys: Set[str], purpose: str) -> None:
name: str
for name in keys:
try:
_ = self[name]
except AttributeError:
try:
_ = self[f"_{name}"]
except AttributeError:
raise ValueError( # noqa: TRY003 # FIXME CoP
f'Property "{name}", marked for {purpose} on object "{type(self)!s}", does not exist.' # noqa: E501 # FIXME CoP
)
if include_keys:
# Make sure that all properties, marked for inclusion, actually exist on the object.
assert_valid_keys(keys=include_keys, purpose="inclusion")
keys_for_exclusion.extend([key for key in property_names if key not in include_keys])
if exclude_keys:
# Make sure that all properties, marked for exclusion, actually exist on the object.
assert_valid_keys(keys=exclude_keys, purpose="exclusion")
keys_for_exclusion.extend([key for key in property_names if key in exclude_keys])
keys_for_exclusion = list(set(keys_for_exclusion))
return {key for key in property_names if key not in keys_for_exclusion}
|
DictDot
|
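The docstring above already sketches the intended pattern; the snippet below just restates it as runnable code, reusing the docstring's hypothetical MyClassA name.
from dataclasses import dataclass

# minimal sketch of the DictDot + dataclass pattern described in the docstring above
@dataclass
class MyClassA(DictDot):
    foo: str
    bar: int

my_A = MyClassA(foo="a string", bar=1)
assert my_A.foo == "a string" and my_A["bar"] == 1  # dot and dict access agree
print(len(my_A), list(my_A.keys()))  # 2 ['foo', 'bar']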
python
|
ansible__ansible
|
test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/test/test_test.py
|
{
"start": 145,
"end": 253
}
|
class ____:
def tests(self):
return {
'test_name_ok': test_name_ok,
}
|
TestModule
|
python
|
openai__openai-python
|
src/openai/resources/beta/assistants.py
|
{
"start": 46357,
"end": 46958
}
|
class ____:
def __init__(self, assistants: Assistants) -> None:
self._assistants = assistants
self.create = to_streamed_response_wrapper(
assistants.create,
)
self.retrieve = to_streamed_response_wrapper(
assistants.retrieve,
)
self.update = to_streamed_response_wrapper(
assistants.update,
)
self.list = to_streamed_response_wrapper(
assistants.list,
)
self.delete = to_streamed_response_wrapper(
assistants.delete,
)
|
AssistantsWithStreamingResponse
|
python
|
ipython__ipython
|
IPython/terminal/pt_inputhooks/wx.py
|
{
"start": 1576,
"end": 7126
}
|
class ____:
def Run(self, time, input_is_ready):
self.input_is_ready = input_is_ready
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if self.input_is_ready():
self.timer.Stop()
self.evtloop.Exit()
@ignore_keyboardinterrupts
def inputhook_wx2(context):
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10, # CHANGE time here to control polling interval
input_is_ready=context.input_is_ready)
return 0
@ignore_keyboardinterrupts
def inputhook_wx3(context):
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
time.sleep is inserted. This is needed, otherwise, CPU usage is at 100%.
This sleep time should be tuned though for best performance.
"""
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not context.input_is_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
            # low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print('Sleep for 1 s') # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print('Sleep for 0.05 s') # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
return 0
@ignore_keyboardinterrupts
def inputhook_wxphoenix(context):
"""Run the wx event loop until the user provides more input.
This input hook is suitable for use with wxPython >= 4 (a.k.a. Phoenix).
It uses the same approach to that used in
ipykernel.eventloops.loop_wx. The wx.MainLoop is executed, and a wx.Timer
is used to periodically poll the context for input. As soon as input is
ready, the wx.MainLoop is stopped.
"""
app = wx.GetApp()
if app is None:
return
if context.input_is_ready():
return
assert wx.IsMainThread()
# Wx uses milliseconds
poll_interval = 100
# Use a wx.Timer to periodically check whether input is ready - as soon as
# it is, we exit the main loop
timer = wx.Timer()
def poll(ev):
if context.input_is_ready():
timer.Stop()
app.ExitMainLoop()
timer.Start(poll_interval)
timer.Bind(wx.EVT_TIMER, poll)
# The import of wx on Linux sets the handler for signal.SIGINT to 0. This
# is a bug in wx or gtk. We fix by just setting it back to the Python
# default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
# The SetExitOnFrameDelete call allows us to run the wx mainloop without
# having a frame open.
app.SetExitOnFrameDelete(False)
app.MainLoop()
# Get the major wx version number to figure out what input hook we should use.
major_version = 3
try:
major_version = int(wx.__version__[0])
except Exception:
pass
# Use the phoenix hook on all platforms for wxpython >= 4
if major_version >= 4:
inputhook = inputhook_wxphoenix
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
elif sys.platform == 'darwin':
inputhook = inputhook_wx2
else:
inputhook = inputhook_wx3
|
EventLoopRunner
|
python
|
django__django
|
tests/contenttypes_tests/models.py
|
{
"start": 1807,
"end": 2088
}
|
class ____(models.Model):
text = models.CharField(max_length=200)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
question = GenericForeignKey()
class Meta:
order_with_respect_to = "question"
|
Answer
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol45.py
|
{
"start": 235,
"end": 306
}
|
class ____(Protocol):
def __call__(self, item: S, /) -> S: ...
|
Proto1
|
python
|
jazzband__prettytable
|
src/prettytable/prettytable.py
|
{
"start": 2097,
"end": 2180
}
|
class ____(IntEnum):
FRAME = 0
ALL = 1
NONE = 2
HEADER = 3
|
HRuleStyle
|
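A tiny illustration of the enum above, assuming `IntEnum` is imported from `enum` as in the module header.
# hypothetical usage of the horizontal-rule style enum defined in the record above
print(int(HRuleStyle.ALL))                # 1
print(HRuleStyle(0) is HRuleStyle.FRAME)  # True
print(HRuleStyle.NONE.name)               # NONE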
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/frames.py
|
{
"start": 446662,
"end": 463444
}
|
class ____(Request):
"""
    Get the unique source IDs that exist in the frames in the given dataview
:param dataview: Dataview specification
:type dataview: Dataview
:param max_count: Number of source IDs to return. default=100, Optional
:type max_count: int
"""
_service = "frames"
_action = "get_source_ids_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"dataview": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/dv_augmentation"},
{"type": "null"},
],
},
"filters": {
"description": "List of FilterRule ('OR' relationship)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": ["object", "null"],
},
"mapping": {
"description": "Mapping parameters",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
"versions": {
"description": "View dataset versions",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/dv_augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"dataview": {
"$ref": "#/definitions/dataview",
"description": "Dataview specification",
},
"max_count": {
"default": 100,
"description": "Number of source IDs to return. default=100, Optional",
"type": "integer",
},
},
"required": ["dataview"],
}
def __init__(self, dataview, max_count=100, **kwargs):
super(GetSourceIdsForDataviewRequest, self).__init__(**kwargs)
self.dataview = dataview
self.max_count = max_count
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
@schema_property("max_count")
def max_count(self):
return self._property_max_count
@max_count.setter
def max_count(self, value):
if value is None:
self._property_max_count = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "max_count", six.integer_types)
self._property_max_count = value
|
GetSourceIdsForDataviewRequest
|
python
|
pytorch__pytorch
|
torch/fx/experimental/symbolic_shapes.py
|
{
"start": 77084,
"end": 77483
}
|
class ____:
"""
Data structure specifying how we should create symbols in
``create_symbolic_sizes_strides_storage_offset``; e.g., should
they be static or dynamic.
This is an abstract base class because we are probably going to add
another version of this that says "use exactly these SymInts, don't
allocate fresh symbols."
"""
@dataclass(frozen=True)
|
SymbolicContext
|
python
|
psf__black
|
tests/data/cases/remove_newline_after_code_block_open.py
|
{
"start": 1981,
"end": 3218
}
|
class ____:
def bar(self):
print("The newline above me should be kept!")
for i in range(5):
print(f"{i}) The line above me should be kept!")
for i in range(5):
print(f"{i}) The lines above me should be kept!")
for i in range(5):
for j in range(7):
print(f"{i}) The lines above me should be kept!")
if random.randint(0, 3) == 0:
print("The new line above me will be kept!")
if random.randint(0, 3) == 0:
print("The new lines above me will be kept!")
if random.randint(0, 3) == 0:
if random.uniform(0, 1) > 0.5:
print("Two lines above me will be kept!")
while True:
print("The newline above me should be kept!")
while True:
print("The newlines above me should be kept!")
while True:
while False:
print("The newlines above me should be kept!")
with open("/path/to/file.txt", mode="w") as file:
file.write("The new line above me will be kept!")
with open("/path/to/file.txt", mode="w") as file:
file.write("The new lines above me will be kept!")
with open("/path/to/file.txt", mode="r") as read_file:
with open("/path/to/output_file.txt", mode="w") as write_file:
write_file.writelines(read_file.readlines())
|
Foo
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 17310,
"end": 17849
}
|
class ____(DagsterError):
"""An exception has occurred in one or more of the child processes dagster manages.
This error forwards the message and stack trace for all of the collected errors.
"""
def __init__(self, *args, **kwargs):
from dagster._utils.error import SerializableErrorInfo
self.subprocess_error_infos = check.list_param(
kwargs.pop("subprocess_error_infos"), "subprocess_error_infos", SerializableErrorInfo
)
super().__init__(*args, **kwargs)
|
DagsterSubprocessError
|
python
|
wandb__wandb
|
wandb/sdk/mailbox/mailbox_handle.py
|
{
"start": 335,
"end": 437
}
|
class ____(Exception):
"""The handle has no response and has been abandoned."""
|
HandleAbandonedError
|
python
|
crytic__slither
|
slither/visitors/expression/find_calls.py
|
{
"start": 1526,
"end": 4644
}
|
class ____(ExpressionVisitor):
def __init__(self, expression: Expression) -> None:
self._result: Optional[List[Expression]] = None
super().__init__(expression)
def result(self) -> List[Expression]:
if self._result is None:
self._result = list(set(get(self.expression)))
return self._result
def _post_assignement_operation(self, expression: AssignmentOperation) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
set_val(expression, val)
def _post_binary_operation(self, expression: BinaryOperation) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
set_val(expression, val)
def _post_call_expression(self, expression: CallExpression) -> None:
called = get(expression.called)
argss = [get(a) for a in expression.arguments if a]
args = [item for sublist in argss for item in sublist]
val = called + args
val += [expression]
set_val(expression, val)
def _post_conditional_expression(self, expression: ConditionalExpression) -> None:
if_expr = get(expression.if_expression)
else_expr = get(expression.else_expression)
then_expr = get(expression.then_expression)
val = if_expr + else_expr + then_expr
set_val(expression, val)
def _post_elementary_type_name_expression(
self, expression: ElementaryTypeNameExpression
) -> None:
set_val(expression, [])
# save only identifier expression
def _post_identifier(self, expression: Identifier) -> None:
set_val(expression, [])
def _post_index_access(self, expression: IndexAccess) -> None:
left = get(expression.expression_left)
right = get(expression.expression_right)
val = left + right
set_val(expression, val)
def _post_literal(self, expression: Literal) -> None:
set_val(expression, [])
def _post_member_access(self, expression: MemberAccess) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
def _post_new_array(self, expression: NewArray) -> None:
set_val(expression, [])
def _post_new_contract(self, expression: NewContract) -> None:
set_val(expression, [])
def _post_new_elementary_type(self, expression: NewElementaryType) -> None:
set_val(expression, [])
def _post_tuple_expression(self, expression: TupleExpression) -> None:
expressions = [get(e) for e in expression.expressions if e]
val = [item for sublist in expressions for item in sublist]
set_val(expression, val)
def _post_type_conversion(self, expression: TypeConversion) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
def _post_unary_operation(self, expression: UnaryOperation) -> None:
expr = get(expression.expression)
val = expr
set_val(expression, val)
|
FindCalls
|
python
|
google__pytype
|
pytype/tests/test_utils.py
|
{
"start": 2075,
"end": 2632
}
|
class ____:
"""Util class for generating fake Opcode for testing."""
def __init__(self, filename, line, endline, col, endcol, methodname):
self.code = FakeCode(filename, methodname)
self.line = line
self.endline = endline
self.col = col
self.endcol = endcol
self.name = "FAKE_OPCODE"
def to_stack(self):
return [frame_state.SimpleFrame(self)]
def fake_stack(length):
return [
frame_state.SimpleFrame(
FakeOpcode("foo.py", i, i, i, i, "function%d" % i)
)
for i in range(length)
]
|
FakeOpcode
|
python
|
tornadoweb__tornado
|
tornado/netutil.py
|
{
"start": 18566,
"end": 19956
}
|
class ____(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
_threadpool = None # type: ignore
_threadpool_pid = None # type: int
def initialize(self, num_threads: int = 10) -> None: # type: ignore
threadpool = ThreadedResolver._create_threadpool(num_threads)
super().initialize(executor=threadpool, close_executor=False)
@classmethod
def _create_threadpool(
cls, num_threads: int
) -> concurrent.futures.ThreadPoolExecutor:
pid = os.getpid()
if cls._threadpool_pid != pid:
# Threads cannot survive after a fork, so if our pid isn't what it
# was when we created the pool then delete it.
cls._threadpool = None
if cls._threadpool is None:
cls._threadpool = concurrent.futures.ThreadPoolExecutor(num_threads)
cls._threadpool_pid = pid
return cls._threadpool
|
ThreadedResolver
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/internal/conjecture/shrinking/bytes.py
|
{
"start": 562,
"end": 880
}
|
class ____(Collection):
def __init__(self, initial, predicate, **kwargs):
super().__init__(
# implicit conversion from bytes to list of integers here
list(initial),
lambda val: predicate(bytes(val)),
ElementShrinker=Integer,
**kwargs,
)
|
Bytes
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/admin.py
|
{
"start": 1174,
"end": 1503
}
|
class ____:
"""Make admin inlines read-only."""
show_change_link = True
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
|
ReadOnlyInlineMixin
|
python
|
pytorch__pytorch
|
torch/ao/quantization/experimental/fake_quantize.py
|
{
"start": 337,
"end": 1821
}
|
class ____(FakeQuantizeBase):
alpha: Tensor
gamma: Tensor
quantization_levels: Tensor
level_indices: Tensor
def __init__(self, observer: Callable = APoTObserver, **observer_kwargs: Any):
super().__init__()
self.activation_post_process = observer(**observer_kwargs)
self.dtype = self.activation_post_process.dtype
def calculate_qparams( # type: ignore[override]
self, signed: bool = False
) -> tuple[Tensor, Tensor, Tensor, Tensor]:
return self.activation_post_process.calculate_qparams(signed=signed)
def forward(self, X: torch.Tensor) -> Tensor: # type: ignore[override]
if self.observer_enabled[0] == 1:
self.activation_post_process.forward(X)
result = self.activation_post_process.calculate_qparams(signed=False)
self.alpha = result[0]
self.gamma = result[1]
self.quantization_levels = result[2]
self.level_indices = result[3]
if self.fake_quant_enabled[0] == 1:
if (
self.alpha is None
or self.gamma is None
or self.quantization_levels is None
or self.level_indices is None
):
raise AssertionError("Must set qparams for fake quant")
X = fake_quantize_function.apply(
X, self.alpha, self.gamma, self.quantization_levels, self.level_indices
)
return X
|
APoTFakeQuantize
|
python
|
huggingface__transformers
|
tests/utils/test_hf_argparser.py
|
{
"start": 1631,
"end": 1734
}
|
class ____:
foo: bool = False
baz: bool = True
opt: bool | None = None
|
WithDefaultBoolExample
|
python
|
paramiko__paramiko
|
paramiko/pkey.py
|
{
"start": 33863,
"end": 36719
}
|
class ____:
"""
OpenSSH plain public key or OpenSSH signed public key (certificate).
Tries to be as dumb as possible and barely cares about specific
per-key-type data.
.. note::
Most of the time you'll want to call `from_file`, `from_string` or
`from_message` for useful instantiation, the main constructor is
basically "I should be using ``attrs`` for this."
"""
def __init__(self, type_, blob, comment=None):
"""
Create a new public blob of given type and contents.
:param str type_: Type indicator, eg ``ssh-rsa``.
:param bytes blob: The blob bytes themselves.
:param str comment: A comment, if one was given (e.g. file-based.)
"""
self.key_type = type_
self.key_blob = blob
self.comment = comment
@classmethod
def from_file(cls, filename):
"""
Create a public blob from a ``-cert.pub``-style file on disk.
"""
with open(filename) as f:
string = f.read()
return cls.from_string(string)
@classmethod
def from_string(cls, string):
"""
Create a public blob from a ``-cert.pub``-style string.
"""
fields = string.split(None, 2)
if len(fields) < 2:
msg = "Not enough fields for public blob: {}"
raise ValueError(msg.format(fields))
key_type = fields[0]
key_blob = decodebytes(b(fields[1]))
try:
comment = fields[2].strip()
except IndexError:
comment = None
# Verify that the blob message first (string) field matches the
# key_type
m = Message(key_blob)
blob_type = m.get_text()
if blob_type != key_type:
deets = "key type={!r}, but blob type={!r}".format(
key_type, blob_type
)
raise ValueError("Invalid PublicBlob contents: {}".format(deets))
# All good? All good.
return cls(type_=key_type, blob=key_blob, comment=comment)
@classmethod
def from_message(cls, message):
"""
Create a public blob from a network `.Message`.
Specifically, a cert-bearing pubkey auth packet, because by definition
        OpenSSH-style certificates 'are' their own network representation.
"""
type_ = message.get_text()
return cls(type_=type_, blob=message.asbytes())
def __str__(self):
ret = "{} public key/certificate".format(self.key_type)
if self.comment:
ret += "- {}".format(self.comment)
return ret
def __eq__(self, other):
# Just piggyback on Message/BytesIO, since both of these should be one.
return self and other and self.key_blob == other.key_blob
def __ne__(self, other):
return not self == other
|
PublicBlob
|
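A sketch of feeding a synthetic blob through from_string, assuming paramiko's Message helper is available; the key material below is fabricated for illustration and is not a usable key.
from binascii import b2a_base64
from paramiko.message import Message

# build a minimal blob whose leading string field matches the declared key type
m = Message()
m.add_string("ssh-rsa")
encoded = b2a_base64(m.asbytes()).decode().strip()

blob = PublicBlob.from_string(f"ssh-rsa {encoded} example-comment")
print(blob.key_type, blob.comment)  # ssh-rsa example-comment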
python
|
neetcode-gh__leetcode
|
python/0039-combination-sum.py
|
{
"start": 0,
"end": 508
}
|
class ____:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
res = []
def dfs(i, cur, total):
if total == target:
res.append(cur.copy())
return
if i >= len(candidates) or total > target:
return
cur.append(candidates[i])
dfs(i, cur, total + candidates[i])
cur.pop()
dfs(i + 1, cur, total)
dfs(0, [], 0)
return res
|
Solution
|
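A usage check for the backtracking solution above, assuming the class is named Solution and that `List` is imported from `typing` before the class definition; the input is LeetCode 39's first example.
# hypothetical usage of the class defined in the record above
print(Solution().combinationSum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]]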
python
|
explosion__spaCy
|
spacy/lang/lij/__init__.py
|
{
"start": 182,
"end": 330
}
|
class ____(BaseDefaults):
tokenizer_exceptions = TOKENIZER_EXCEPTIONS
infixes = TOKENIZER_INFIXES
stop_words = STOP_WORDS
|
LigurianDefaults
|
python
|
falconry__falcon
|
falcon/routing/compiled.py
|
{
"start": 43741,
"end": 43916
}
|
class ____(_CxChild):
def src(self, indentation: int) -> str:
return '{0}groups = match.groupdict()'.format(_TAB_STR * indentation)
|
_CxPrefetchGroupsFromPatternMatch
|
python
|
django__django
|
tests/auth_tests/test_management.py
|
{
"start": 49789,
"end": 53213
}
|
class ____(TestCase):
def setUp(self):
self._original_permissions = Permission._meta.permissions[:]
self._original_default_permissions = Permission._meta.default_permissions
self.app_config = apps.get_app_config("auth")
def tearDown(self):
Permission._meta.permissions = self._original_permissions
Permission._meta.default_permissions = self._original_default_permissions
ContentType.objects.clear_cache()
def test_default_permissions(self):
permission_content_type = ContentType.objects.get_by_natural_key(
"auth", "permission"
)
Permission._meta.permissions = [
("my_custom_permission", "Some permission"),
]
create_permissions(self.app_config, verbosity=0)
# view/add/change/delete permission by default + custom permission
self.assertEqual(
Permission.objects.filter(
content_type=permission_content_type,
).count(),
5,
)
Permission.objects.filter(content_type=permission_content_type).delete()
Permission._meta.default_permissions = []
create_permissions(self.app_config, verbosity=0)
# custom permission only since default permissions is empty
self.assertEqual(
Permission.objects.filter(
content_type=permission_content_type,
).count(),
1,
)
def test_unavailable_models(self):
"""
#24075 - Permissions shouldn't be created or deleted if the ContentType
or Permission models aren't available.
"""
state = migrations.state.ProjectState()
# Unavailable contenttypes.ContentType
with self.assertNumQueries(0):
create_permissions(self.app_config, verbosity=0, apps=state.apps)
# Unavailable auth.Permission
state = migrations.state.ProjectState(real_apps={"contenttypes"})
with self.assertNumQueries(0):
create_permissions(self.app_config, verbosity=0, apps=state.apps)
def test_create_permissions_checks_contenttypes_created(self):
"""
`post_migrate` handler ordering isn't guaranteed. Simulate a case
where create_permissions() is called before create_contenttypes().
"""
# Warm the manager cache.
ContentType.objects.get_for_model(Group)
# Apply a deletion as if e.g. a database 'flush' had been executed.
ContentType.objects.filter(app_label="auth", model="group").delete()
# This fails with a foreign key constraint without the fix.
create_permissions(apps.get_app_config("auth"), interactive=False, verbosity=0)
def test_permission_with_proxy_content_type_created(self):
"""
A proxy model's permissions use its own content type rather than the
content type of the concrete model.
"""
opts = UserProxy._meta
codename = get_permission_codename("add", opts)
self.assertTrue(
Permission.objects.filter(
content_type__model=opts.model_name,
content_type__app_label=opts.app_label,
codename=codename,
).exists()
)
@override_settings(
MIGRATION_MODULES=dict(
settings.MIGRATION_MODULES,
auth_tests="auth_tests.operations_migrations",
),
)
|
CreatePermissionsTests
|
python
|
qdrant__qdrant-client
|
tools/async_client_generator/transformers/remote/import_from_transformer.py
|
{
"start": 41,
"end": 1011
}
|
class ____(ast.NodeTransformer):
def __init__(self, import_replace_map: Optional[dict[str, str]] = None):
self.import_replace_map = import_replace_map if import_replace_map is not None else {}
def visit_ImportFrom(self, node: ast.ImportFrom) -> ast.AST:
# update module name
for old_value, new_value in self.import_replace_map.items():
if node.module is not None:
node.module = node.module.replace(old_value, new_value)
# update imported item name
for i, alias in enumerate(node.names):
if hasattr(alias, "name"):
for old_value, new_value in self.import_replace_map.items():
alias.name = alias.name.replace(old_value, new_value)
if alias.name == "get_channel":
alias.name = "get_async_channel"
alias.asname = "get_channel"
return self.generic_visit(node)
|
RemoteImportFromTransformer
|
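A minimal sketch of running the transformer above on one parsed import; the module path and replace map are made up for illustration, and `Optional` from `typing` is assumed to be in scope for the class definition, as in the source module.
import ast

# hypothetical rewrite of a sync import into its async counterpart
tree = ast.parse("from qdrant_client.connection import get_channel")
tree = RemoteImportFromTransformer({"qdrant_client": "qdrant_client.async_client"}).visit(tree)
print(ast.unparse(tree))
# from qdrant_client.async_client.connection import get_async_channel as get_channel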
python
|
Textualize__textual
|
docs/examples/how-to/containers03.py
|
{
"start": 272,
"end": 657
}
|
class ____(App):
"""Simple app to play with containers."""
CSS = """
.with-border {
border: heavy green;
}
"""
def compose(self) -> ComposeResult:
with Horizontal(classes="with-border"): # (1)!
yield Box()
yield Box()
yield Box()
if __name__ == "__main__":
app = ContainerApp()
app.run()
|
ContainerApp
|
python
|
numpy__numpy
|
numpy/distutils/tests/test_fcompiler_gnu.py
|
{
"start": 1643,
"end": 2136
}
|
class ____:
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_gfortran(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, _ in g77_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
|
TestGFortranVersions
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 196536,
"end": 197447
}
|
class ____(ParseElementEnhance):
r"""Matches if an expression matches at the beginning of a line within
the parse string
Example:
.. testcode::
test = '''\
BBB this line
BBB and this line
BBB but not this one
A BBB and definitely not this one
'''
for t in (AtLineStart('BBB') + rest_of_line).search_string(test):
print(t)
prints:
.. testoutput::
['BBB', ' this line']
['BBB', ' and this line']
"""
def __init__(self, expr: Union[ParserElement, str]) -> None:
super().__init__(expr)
self.callPreparse = False
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if col(loc, instring) != 1:
raise ParseException(instring, loc, "not found at line start")
return super().parseImpl(instring, loc, do_actions)
|
AtLineStart
|
python
|
scipy__scipy
|
scipy/stats/tests/test_distributions.py
|
{
"start": 308815,
"end": 313693
}
|
class ____:
def test_pdf_bounds(self):
# test bounds
y = stats.truncweibull_min.pdf([0.1, 2.0], 2.0, 0.11, 1.99)
assert_equal(y, [0.0, 0.0])
def test_logpdf(self):
y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, np.inf)
assert_equal(y, 0.0)
# hand calculation
y = stats.truncweibull_min.logpdf(2.0, 1.0, 2.0, 4.0)
assert_allclose(y, 0.14541345786885884)
def test_ppf_bounds(self):
# test bounds
y = stats.truncweibull_min.ppf([0.0, 1.0], 2.0, 0.1, 2.0)
assert_equal(y, [0.1, 2.0])
def test_cdf_to_ppf(self):
q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
x = stats.truncweibull_min.ppf(q, 2., 0., 3.)
q_out = stats.truncweibull_min.cdf(x, 2., 0., 3.)
assert_allclose(q, q_out)
def test_sf_to_isf(self):
q = [0., 0.1, .25, 0.50, 0.75, 0.90, 1.]
x = stats.truncweibull_min.isf(q, 2., 0., 3.)
q_out = stats.truncweibull_min.sf(x, 2., 0., 3.)
assert_allclose(q, q_out)
def test_munp(self):
c = 2.
a = 1.
b = 3.
def xnpdf(x, n):
return x**n*stats.truncweibull_min.pdf(x, c, a, b)
m0 = stats.truncweibull_min.moment(0, c, a, b)
assert_equal(m0, 1.)
m1 = stats.truncweibull_min.moment(1, c, a, b)
m1_expected, _ = quad(lambda x: xnpdf(x, 1), a, b)
assert_allclose(m1, m1_expected)
m2 = stats.truncweibull_min.moment(2, c, a, b)
m2_expected, _ = quad(lambda x: xnpdf(x, 2), a, b)
assert_allclose(m2, m2_expected)
m3 = stats.truncweibull_min.moment(3, c, a, b)
m3_expected, _ = quad(lambda x: xnpdf(x, 3), a, b)
assert_allclose(m3, m3_expected)
m4 = stats.truncweibull_min.moment(4, c, a, b)
m4_expected, _ = quad(lambda x: xnpdf(x, 4), a, b)
assert_allclose(m4, m4_expected)
def test_reference_values(self):
a = 1.
b = 3.
c = 2.
x_med = np.sqrt(1 - np.log(0.5 + np.exp(-(8. + np.log(2.)))))
cdf = stats.truncweibull_min.cdf(x_med, c, a, b)
assert_allclose(cdf, 0.5)
lc = stats.truncweibull_min.logcdf(x_med, c, a, b)
assert_allclose(lc, -np.log(2.))
ppf = stats.truncweibull_min.ppf(0.5, c, a, b)
assert_allclose(ppf, x_med)
sf = stats.truncweibull_min.sf(x_med, c, a, b)
assert_allclose(sf, 0.5)
ls = stats.truncweibull_min.logsf(x_med, c, a, b)
assert_allclose(ls, -np.log(2.))
isf = stats.truncweibull_min.isf(0.5, c, a, b)
assert_allclose(isf, x_med)
def test_compare_weibull_min(self):
# Verify that the truncweibull_min distribution gives the same results
# as the original weibull_min
x = 1.5
c = 2.0
a = 0.0
b = np.inf
scale = 3.0
p = stats.weibull_min.pdf(x, c, scale=scale)
p_trunc = stats.truncweibull_min.pdf(x, c, a, b, scale=scale)
assert_allclose(p, p_trunc)
lp = stats.weibull_min.logpdf(x, c, scale=scale)
lp_trunc = stats.truncweibull_min.logpdf(x, c, a, b, scale=scale)
assert_allclose(lp, lp_trunc)
cdf = stats.weibull_min.cdf(x, c, scale=scale)
cdf_trunc = stats.truncweibull_min.cdf(x, c, a, b, scale=scale)
assert_allclose(cdf, cdf_trunc)
lc = stats.weibull_min.logcdf(x, c, scale=scale)
lc_trunc = stats.truncweibull_min.logcdf(x, c, a, b, scale=scale)
assert_allclose(lc, lc_trunc)
s = stats.weibull_min.sf(x, c, scale=scale)
s_trunc = stats.truncweibull_min.sf(x, c, a, b, scale=scale)
assert_allclose(s, s_trunc)
ls = stats.weibull_min.logsf(x, c, scale=scale)
ls_trunc = stats.truncweibull_min.logsf(x, c, a, b, scale=scale)
assert_allclose(ls, ls_trunc)
# # Also test using a large value x, for which computing the survival
# # function using the CDF would result in 0.
s = stats.truncweibull_min.sf(30, 2, a, b, scale=3)
assert_allclose(s, np.exp(-100))
ls = stats.truncweibull_min.logsf(30, 2, a, b, scale=3)
assert_allclose(ls, -100)
def test_compare_weibull_min2(self):
# Verify that the truncweibull_min distribution PDF and CDF results
# are the same as those calculated from truncating weibull_min
c, a, b = 2.5, 0.25, 1.25
x = np.linspace(a, b, 100)
pdf1 = stats.truncweibull_min.pdf(x, c, a, b)
cdf1 = stats.truncweibull_min.cdf(x, c, a, b)
norm = stats.weibull_min.cdf(b, c) - stats.weibull_min.cdf(a, c)
pdf2 = stats.weibull_min.pdf(x, c) / norm
cdf2 = (stats.weibull_min.cdf(x, c) - stats.weibull_min.cdf(a, c))/norm
np.testing.assert_allclose(pdf1, pdf2)
np.testing.assert_allclose(cdf1, cdf2)
|
TestTruncWeibull
|
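The comparison tests above lean on the identity that a truncated density is the parent density renormalized by the probability mass inside [a, b]. A short, hedged check of that identity with SciPy (not part of the test file):
import numpy as np
from scipy import stats

c, a, b = 2.5, 0.25, 1.25
x = np.linspace(a, b, 5)

# Renormalize weibull_min over [a, b] and compare with truncweibull_min.
mass = stats.weibull_min.cdf(b, c) - stats.weibull_min.cdf(a, c)
pdf_manual = stats.weibull_min.pdf(x, c) / mass
pdf_trunc = stats.truncweibull_min.pdf(x, c, a, b)
assert np.allclose(pdf_manual, pdf_trunc)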
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/function.py
|
{
"start": 1770,
"end": 8986
}
|
class ____(object):
"""Obsolete. Slated for deletion. Please use tf.function instead.
Known feature gaps while migrating to tf.function (could be outdated):
- tf.function doesn’t support Send/Recv capability since it doesn’t share
rendezvous with the main graph but always creates a new one.
- tf.function doesn’t support custom gradient function directly, instead you
need to define the function inside a tf.custom_gradient wrapper together
with the gradient function.
- Unlike Defun, Keras layers used inside a tf.function need to be created only
once to avoid variable recreation.
- Defun respects the device assignments and applies them to the function body
but tf.function needs it to be done manually.
- Defun might prune out unused ops automatically but tf.function doesn't.
Limitations of Defun:
- Original source locations are not preserved so errors do not include
full/valid stack traces.
- Only supports linear sequence of arguments and return values, putting the
burden on the caller to pack/unpack everything across a Defun boundary into
tuples (as opposed to passing list and dict-like structures directly).
- Does not support overloading or late-bound specializations.
- Has its own way for defining gradient overrides which does not follow
current conventions.
- Cannot support imperative control flow or automatic control dependencies.
- Does not reflect statefulness in the graph and has a calling convention that
differs from how more modern tools interact.
- Is only compatible with graph building mode.
Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it adds the `call` ops to the
default graph. In addition, it adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This must be a
`_DefinedFunction` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is, it must
implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
out_names - (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError(f"Function {func} must be a callable.")
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError(
"Functions with argument defaults or keywords arguments are not "
f"supported. {func} has defaults {argspec.defaults} and keywords "
f"{argspec.keywords}.")
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The number of tf.function input types is not compatible with the "
f"allowed arguments of {func}. The tf.function have {num} input "
f"types, while the python function allows minimum {min_args} and "
f"maximum {max_args} arguments.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
|
Defun
|
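Since the docstring above mostly points readers at tf.function, here is a hedged sketch of the equivalent TF2 usage for the MyFunc example; it assumes TensorFlow 2.x with eager execution enabled.
import tensorflow as tf

@tf.function
def my_func(x, y):
    # Same arithmetic as the Defun example; traced into a graph on first call.
    return x + y, x - y

a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = my_func(a, b)  # c == [3.0], d == [-1.0]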
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/SignalProxy.py
|
{
"start": 178,
"end": 3832
}
|
class ____(QtCore.QObject):
"""Object which collects rapid-fire signals and condenses them
into a single signal or a rate-limited stream of signals.
Used, for example, to prevent a SpinBox from generating multiple
signals when the mouse wheel is rolled over it.
Emits sigDelayed after input signals have stopped for a certain period of
time.
"""
sigDelayed = QtCore.Signal(object)
def __init__(self, signal, delay=0.3, rateLimit=0, slot=None, *, threadSafe=True):
"""Initialization arguments:
signal - a bound Signal or pyqtSignal instance
delay - Time (in seconds) to wait for signals to stop before emitting (default 0.3s)
slot - Optional function to connect sigDelayed to.
rateLimit - (signals/sec) if greater than 0, this allows signals to stream out at a
steady rate while they are being received.
threadSafe - Specify if thread-safety is required. For backwards compatibility, it
defaults to True.
"""
QtCore.QObject.__init__(self)
self.delay = delay
self.rateLimit = rateLimit
self.args = None
Timer = ThreadsafeTimer if threadSafe else QtCore.QTimer
self.timer = Timer()
self.timer.timeout.connect(self.flush)
self.lastFlushTime = None
self.signal = signal
self.signal.connect(self.signalReceived)
if slot is not None:
self.blockSignal = False
self.sigDelayed.connect(slot)
self.slot = weakref.ref(slot)
else:
self.blockSignal = True
self.slot = None
def setDelay(self, delay):
self.delay = delay
@QtCore.Slot()
@QtCore.Slot(object)
@QtCore.Slot(object, object)
def signalReceived(self, *args):
"""Received signal. Cancel previous timer and store args to be
forwarded later."""
if self.blockSignal:
return
self.args = args
if self.rateLimit == 0:
self.timer.stop()
self.timer.start(int(self.delay * 1000) + 1)
else:
now = perf_counter()
if self.lastFlushTime is None:
leakTime = 0
else:
lastFlush = self.lastFlushTime
leakTime = max(0, (lastFlush + (1.0 / self.rateLimit)) - now)
self.timer.stop()
self.timer.start(int(min(leakTime, self.delay) * 1000) + 1)
@QtCore.Slot()
def flush(self):
"""If there is a signal queued up, send it now."""
if self.args is None or self.blockSignal:
return False
args, self.args = self.args, None
self.timer.stop()
self.lastFlushTime = perf_counter()
self.sigDelayed.emit(args)
return True
def disconnect(self):
self.blockSignal = True
try:
self.signal.disconnect(self.signalReceived)
except:
pass
try:
slot = self.slot()
if slot is not None:
self.sigDelayed.disconnect(slot)
except:
pass
finally:
self.slot = None
def connectSlot(self, slot):
"""Connect the `SignalProxy` to an external slot"""
assert self.slot is None, "Slot was already connected!"
self.slot = weakref.ref(slot)
self.sigDelayed.connect(slot)
self.blockSignal = False
def block(self):
"""Return a SignalBlocker that temporarily blocks input signals to
this proxy.
"""
return SignalBlock(self.signal, self.signalReceived)
|
SignalProxy
|
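A hedged usage sketch for the proxy above: condense pyqtgraph's rapid sigMouseMoved stream into at most roughly 30 callbacks per second. The plotting setup is illustrative, and older Qt bindings may spell the event loop call exec_().
import pyqtgraph as pg
from pyqtgraph.Qt import QtWidgets

app = QtWidgets.QApplication([])
plot = pg.PlotWidget()

def on_mouse_moved(args):
    # args is the tuple of arguments emitted by the original signal.
    print("mouse at", args[0])

# Rate-limit the mouse-move signal to roughly 30 emissions per second.
proxy = pg.SignalProxy(plot.scene().sigMouseMoved, rateLimit=30, slot=on_mouse_moved)

plot.show()
app.exec()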
python
|
getsentry__sentry
|
src/sentry/migrations/0917_convert_org_saved_searches_to_views.py
|
{
"start": 535,
"end": 1888
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0916_delete_open_period_rows"),
]
operations = [
migrations.RunPython(
convert_org_saved_searches_to_views,
reverse_code=migrations.RunPython.noop,
hints={"tables": ["sentry_groupsearchview", "sentry_savedsearch"]},
)
]
|
Migration
|
python
|
realpython__materials
|
python-microservices-with-grpc/recommendations/recommendations.py
|
{
"start": 1129,
"end": 2439
}
|
class ____(recommendations_pb2_grpc.RecommendationsServicer):
def Recommend(self, request, context):
if request.category not in books_by_category:
raise NotFound("Category not found")
books_for_category = books_by_category[request.category]
num_results = min(request.max_results, len(books_for_category))
books_to_recommend = random.sample(books_for_category, num_results)
return RecommendationResponse(recommendations=books_to_recommend)
def serve():
interceptors = [ExceptionToStatusInterceptor()]
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10), interceptors=interceptors
)
recommendations_pb2_grpc.add_RecommendationsServicer_to_server(
RecommendationService(), server
)
with open("server.key", "rb") as fp:
server_key = fp.read()
with open("server.pem", "rb") as fp:
server_cert = fp.read()
with open("ca.pem", "rb") as fp:
ca_cert = fp.read()
creds = grpc.ssl_server_credentials(
[(server_key, server_cert)],
root_certificates=ca_cert,
require_client_auth=True,
)
server.add_secure_port("[::]:443", creds)
server.start()
server.wait_for_termination()
if __name__ == "__main__":
serve()
|
RecommendationService
|
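For symmetry with the server above, a hedged sketch of a client opening a mutually authenticated channel; the certificate file names mirror the server snippet, and the target host name is an assumption.
import grpc

with open("ca.pem", "rb") as fp:
    ca_cert = fp.read()
with open("client.key", "rb") as fp:
    client_key = fp.read()
with open("client.pem", "rb") as fp:
    client_cert = fp.read()

# Present the client certificate and trust the same CA the server uses.
creds = grpc.ssl_channel_credentials(
    root_certificates=ca_cert,
    private_key=client_key,
    certificate_chain=client_cert,
)
channel = grpc.secure_channel("recommendations:443", creds)  # host name is a placeholder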
python
|
django__django
|
django/test/client.py
|
{
"start": 34403,
"end": 45283
}
|
class ____(ClientMixin, RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(
self,
enforce_csrf_checks=False,
raise_request_exception=True,
*,
headers=None,
query_params=None,
**defaults,
):
super().__init__(headers=headers, query_params=query_params, **defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
self.extra = None
self.headers = None
def request(self, **request):
"""
Make a generic request. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
response = self.handler(environ)
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
# Check for signaled exceptions.
self.check_exception(response)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response.
urlconf = getattr(response.wsgi_request, "urlconf", None)
response.resolver_match = SimpleLazyObject(
lambda: resolve(request["PATH_INFO"], urlconf=urlconf),
)
# Flatten a single context. Not really necessary anymore thanks to the
# __getattr__ flattening in ContextList, but has some edge case
# backwards compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(
self,
path,
data=None,
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Request a response from the server using GET."""
self.extra = extra
self.headers = headers
response = super().get(
path,
data=data,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, query_params=query_params, **extra
)
return response
def post(
self,
path,
data=None,
content_type=MULTIPART_CONTENT,
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Request a response from the server using POST."""
self.extra = extra
self.headers = headers
response = super().post(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
return response
def head(
self,
path,
data=None,
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Request a response from the server using HEAD."""
self.extra = extra
self.headers = headers
response = super().head(
path,
data=data,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, query_params=query_params, **extra
)
return response
def options(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Request a response from the server using OPTIONS."""
self.extra = extra
self.headers = headers
response = super().options(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
return response
def put(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Send a resource to the server using PUT."""
self.extra = extra
self.headers = headers
response = super().put(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
return response
def patch(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Send a resource to the server using PATCH."""
self.extra = extra
self.headers = headers
response = super().patch(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
return response
def delete(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Send a DELETE request to the server."""
self.extra = extra
self.headers = headers
response = super().delete(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
return response
def trace(
self,
path,
data="",
follow=False,
secure=False,
*,
headers=None,
query_params=None,
**extra,
):
"""Send a TRACE request to the server."""
self.extra = extra
self.headers = headers
response = super().trace(
path,
data=data,
secure=secure,
headers=headers,
query_params=query_params,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, query_params=query_params, **extra
)
return response
def _handle_redirects(
self,
response,
data="",
content_type="",
headers=None,
query_params=None,
**extra,
):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
while response.status_code in REDIRECT_STATUS_CODES:
redirect_chain = response.redirect_chain
response = self._follow_redirect(
response,
data=data,
content_type=content_type,
headers=headers,
query_params=query_params,
**extra,
)
response.redirect_chain = redirect_chain
self._ensure_redirects_not_cyclic(response)
return response
|
Client
|
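A brief, hedged illustration of the client above inside a Django test case; the URL and template name are placeholders.
from django.test import TestCase

class HomePageTests(TestCase):
    def test_get_home(self):
        # self.client is an instance of the Client class shown above.
        response = self.client.get("/", follow=True)
        self.assertEqual(response.status_code, 200)
        # Rendered templates and contexts are annotated onto the response.
        self.assertIn("index.html", [t.name for t in response.templates])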
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http11.py
|
{
"start": 1593,
"end": 1672
}
|
class ____(HTTP11DownloadHandlerMixin, TestHttpProxyBase):
pass
|
TestHttp11Proxy
|
python
|
palantir__python-language-server
|
pyls/plugins/pyflakes_lint.py
|
{
"start": 807,
"end": 2620
}
|
class ____(object):
def __init__(self, lines):
self.lines = lines
self.diagnostics = []
def unexpectedError(self, _filename, msg): # pragma: no cover
err_range = {
'start': {'line': 0, 'character': 0},
'end': {'line': 0, 'character': 0},
}
self.diagnostics.append({
'source': 'pyflakes',
'range': err_range,
'message': msg,
'severity': lsp.DiagnosticSeverity.Error,
})
def syntaxError(self, _filename, msg, lineno, offset, text):
# We've seen that lineno and offset can sometimes be None
lineno = lineno or 1
offset = offset or 0
err_range = {
'start': {'line': lineno - 1, 'character': offset},
'end': {'line': lineno - 1, 'character': offset + len(text)},
}
self.diagnostics.append({
'source': 'pyflakes',
'range': err_range,
'message': msg,
'severity': lsp.DiagnosticSeverity.Error,
})
def flake(self, message):
""" Get message like <filename>:<lineno>: <msg> """
err_range = {
'start': {'line': message.lineno - 1, 'character': message.col},
'end': {'line': message.lineno - 1, 'character': len(self.lines[message.lineno - 1])},
}
severity = lsp.DiagnosticSeverity.Warning
for message_type in PYFLAKES_ERROR_MESSAGES:
if isinstance(message, message_type):
severity = lsp.DiagnosticSeverity.Error
break
self.diagnostics.append({
'source': 'pyflakes',
'range': err_range,
'message': message.message % message.message_args,
'severity': severity
})
|
PyflakesDiagnosticReport
|
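A hedged sketch of how a reporter like the one above is fed: pyflakes' check() takes the source text, a filename, and a reporter object, and calls flake()/syntaxError() on it. The ListReporter here is a simplified stand-in, not the LSP class itself.
from pyflakes import api

source = "import os\n"  # unused import -> one pyflakes message

class ListReporter(object):
    def __init__(self):
        self.messages = []
    def unexpectedError(self, filename, msg):
        self.messages.append(("error", msg))
    def syntaxError(self, filename, msg, lineno, offset, text):
        self.messages.append(("syntax", msg, lineno, offset))
    def flake(self, message):
        self.messages.append(("flake", message.message % message.message_args))

reporter = ListReporter()
api.check(source, "example.py", reporter=reporter)
print(reporter.messages)  # [('flake', "'os' imported but unused")]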
python
|
coleifer__peewee
|
tests/schema.py
|
{
"start": 1793,
"end": 27964
}
|
class ____(ModelDatabaseTestCase):
database = get_in_memory_db()
requires = [Article, CacheData, Category, Note, Person, Relationship,
TMUnique, TMSequence, TMIndexes, TMConstraints,
TMNamedConstraints, User]
def test_database_required(self):
class MissingDB(Model):
data = TextField()
self.assertRaises(ImproperlyConfigured, MissingDB.create_table)
def assertCreateTable(self, model_class, expected):
sql, params = model_class._schema._create_table(False).query()
self.assertEqual(params, [])
indexes = []
for create_index in model_class._schema._create_indexes(False):
isql, params = create_index.query()
self.assertEqual(params, [])
indexes.append(isql)
self.assertEqual([sql] + indexes, expected)
def assertIndexes(self, model_class, expected):
indexes = []
for create_index in model_class._schema._create_indexes(False):
indexes.append(create_index.query())
self.assertEqual(indexes, expected)
def test_model_fk_schema(self):
class Base(TestModel):
class Meta:
database = self.database
class User(Base):
username = TextField()
class Meta:
schema = 'foo'
class Tweet(Base):
user = ForeignKeyField(User)
content = TextField()
class Meta:
schema = 'bar'
self.assertCreateTable(User, [
('CREATE TABLE "foo"."user" ("id" INTEGER NOT NULL PRIMARY KEY, '
'"username" TEXT NOT NULL)')])
self.assertCreateTable(Tweet, [
('CREATE TABLE "bar"."tweet" ("id" INTEGER NOT NULL PRIMARY KEY, '
'"user_id" INTEGER NOT NULL, "content" TEXT NOT NULL, '
'FOREIGN KEY ("user_id") REFERENCES "foo"."user" ("id"))'),
('CREATE INDEX "bar"."tweet_user_id" ON "tweet" ("user_id")')])
def test_model_indexes_with_schema(self):
# Attach cache database so we can reference "cache." as the schema.
self.database.execute_sql("attach database ':memory:' as cache;")
self.assertCreateTable(CacheData, [
('CREATE TABLE "cache"."cache_data" ('
'"id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, '
'"value" TEXT NOT NULL)'),
('CREATE UNIQUE INDEX "cache"."cache_data_key" ON "cache_data" '
'("key")')])
# Actually create the table to verify it works correctly.
CacheData.create_table()
# Introspect the database and get indexes for the "cache" schema.
indexes = self.database.get_indexes('cache_data', 'cache')
self.assertEqual(len(indexes), 1)
index_metadata = indexes[0]
self.assertEqual(index_metadata.name, 'cache_data_key')
# Verify the index does not exist in the main schema.
self.assertEqual(len(self.database.get_indexes('cache_data')), 0)
class TestDatabase(Database):
index_schema_prefix = False
# When "index_schema_prefix == False", the index name is not prefixed
# with the schema, and the schema is referenced via the table name.
with CacheData.bind_ctx(TestDatabase(None)):
self.assertCreateTable(CacheData, [
('CREATE TABLE "cache"."cache_data" ('
'"id" INTEGER NOT NULL PRIMARY KEY, "key" TEXT NOT NULL, '
'"value" TEXT NOT NULL)'),
('CREATE UNIQUE INDEX "cache_data_key" ON "cache"."cache_data"'
' ("key")')])
def test_model_indexes(self):
self.assertIndexes(Article, [
('CREATE UNIQUE INDEX "article_name" ON "article" ("name")', []),
('CREATE INDEX "article_timestamp_status" ON "article" ('
'"timestamp" DESC, "status")', []),
('CREATE INDEX "article_name_timestamp" ON "article" ('
'"name", "timestamp", ("flags" & 4)) '
'WHERE ("status" = 1)', []),
('CREATE INDEX "article_foo" ON "article" ("flags" & 3)', []),
])
def test_model_index_types(self):
class Event(TestModel):
key = TextField()
timestamp = TimestampField(index=True, index_type='BRIN')
class Meta:
database = self.database
self.assertIndexes(Event, [
('CREATE INDEX "event_timestamp" ON "event" '
'USING BRIN ("timestamp")', [])])
# Check that we support MySQL-style USING clause.
idx, = Event._meta.fields_to_index()
self.assertSQL(idx, (
'CREATE INDEX IF NOT EXISTS "event_timestamp" '
'USING BRIN ON "event" ("timestamp")'), [],
index_using_precedes_table=True)
def test_model_indexes_custom_tablename(self):
class KV(TestModel):
key = TextField()
value = TextField()
timestamp = TimestampField(index=True)
class Meta:
database = self.database
indexes = (
(('key', 'value'), True),
)
table_name = 'kvs'
self.assertIndexes(KV, [
('CREATE INDEX "kvs_timestamp" ON "kvs" ("timestamp")', []),
('CREATE UNIQUE INDEX "kvs_key_value" ON "kvs" ("key", "value")',
[])])
def test_model_indexes_computed_columns(self):
class FuncIdx(TestModel):
a = IntegerField()
b = IntegerField()
class Meta:
database = self.database
i = FuncIdx.index(FuncIdx.a, FuncIdx.b, fn.SUM(FuncIdx.a + FuncIdx.b))
FuncIdx.add_index(i)
self.assertIndexes(FuncIdx, [
('CREATE INDEX "func_idx_a_b" ON "func_idx" '
'("a", "b", SUM("a" + "b"))', []),
])
def test_model_indexes_complex_columns(self):
class Taxonomy(TestModel):
name = CharField()
name_class = CharField()
class Meta:
database = self.database
name = NodeList((fn.LOWER(Taxonomy.name), SQL('varchar_pattern_ops')))
index = (Taxonomy
.index(name, Taxonomy.name_class)
.where(Taxonomy.name_class == 'scientific name'))
Taxonomy.add_index(index)
self.assertIndexes(Taxonomy, [
('CREATE INDEX "taxonomy_name_class" ON "taxonomy" ('
'LOWER("name") varchar_pattern_ops, "name_class") '
'WHERE ("name_class" = ?)', ['scientific name']),
])
def test_legacy_model_table_and_indexes(self):
class Base(Model):
class Meta:
database = self.database
class WebHTTPRequest(Base):
timestamp = DateTimeField(index=True)
data = TextField()
self.assertTrue(WebHTTPRequest._meta.legacy_table_names)
self.assertCreateTable(WebHTTPRequest, [
('CREATE TABLE "webhttprequest" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"timestamp" DATETIME NOT NULL, "data" TEXT NOT NULL)'),
('CREATE INDEX "webhttprequest_timestamp" ON "webhttprequest" '
'("timestamp")')])
# Table name is explicit, but legacy table names == false, so we get
# the new index name format.
class FooBar(Base):
data = IntegerField(unique=True)
class Meta:
legacy_table_names = False
table_name = 'foobar_tbl'
self.assertFalse(FooBar._meta.legacy_table_names)
self.assertCreateTable(FooBar, [
('CREATE TABLE "foobar_tbl" ("id" INTEGER NOT NULL PRIMARY KEY, '
'"data" INTEGER NOT NULL)'),
('CREATE UNIQUE INDEX "foobar_tbl_data" ON "foobar_tbl" ("data")'),
])
# Table name is explicit and legacy table names == true, so we get
# the old index name format.
class FooBar2(Base):
data = IntegerField(unique=True)
class Meta:
table_name = 'foobar2_tbl'
self.assertTrue(FooBar2._meta.legacy_table_names)
self.assertCreateTable(FooBar2, [
('CREATE TABLE "foobar2_tbl" ("id" INTEGER NOT NULL PRIMARY KEY, '
'"data" INTEGER NOT NULL)'),
('CREATE UNIQUE INDEX "foobar2_data" ON "foobar2_tbl" ("data")')])
def test_without_pk(self):
class NoPK(TestModel):
data = TextField()
class Meta:
database = self.database
primary_key = False
self.assertCreateTable(NoPK, [
('CREATE TABLE "no_pk" ("data" TEXT NOT NULL)')])
def test_without_rowid(self):
class NoRowid(TestModel):
key = TextField(primary_key=True)
value = TextField()
class Meta:
database = self.database
without_rowid = True
self.assertCreateTable(NoRowid, [
('CREATE TABLE "no_rowid" ('
'"key" TEXT NOT NULL PRIMARY KEY, '
'"value" TEXT NOT NULL) WITHOUT ROWID')])
# Subclasses do not inherit "without_rowid" setting.
class SubNoRowid(NoRowid): pass
self.assertCreateTable(SubNoRowid, [
('CREATE TABLE "sub_no_rowid" ('
'"key" TEXT NOT NULL PRIMARY KEY, '
'"value" TEXT NOT NULL)')])
def test_strict_tables(self):
class Strict(TestModel):
key = TextField(primary_key=True)
value = TextField()
class Meta:
database = self.database
strict_tables = True
self.assertCreateTable(Strict, [
('CREATE TABLE "strict" ('
'"key" TEXT NOT NULL PRIMARY KEY, '
'"value" TEXT NOT NULL) STRICT')])
# Subclasses *do* inherit "strict_tables" setting.
class SubStrict(Strict): pass
self.assertCreateTable(SubStrict, [
('CREATE TABLE "sub_strict" ('
'"key" TEXT NOT NULL PRIMARY KEY, '
'"value" TEXT NOT NULL) STRICT')])
def test_without_rowid_strict(self):
class KV(TestModel):
key = TextField(primary_key=True)
class Meta:
database = self.database
strict_tables = True
without_rowid = True
self.assertCreateTable(KV, [
('CREATE TABLE "kv" ("key" TEXT NOT NULL PRIMARY KEY) '
'STRICT, WITHOUT ROWID')])
class SKV(KV):
pass
self.assertCreateTable(SKV, [
('CREATE TABLE "skv" ("key" TEXT NOT NULL PRIMARY KEY) STRICT')])
def test_table_name(self):
class A(TestModel):
class Meta:
database = self.database
table_name = 'A_tbl'
class B(TestModel):
a = ForeignKeyField(A, backref='bs')
class Meta:
database = self.database
table_name = 'B_tbl'
self.assertCreateTable(A, [
'CREATE TABLE "A_tbl" ("id" INTEGER NOT NULL PRIMARY KEY)'])
self.assertCreateTable(B, [
('CREATE TABLE "B_tbl" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"a_id" INTEGER NOT NULL, '
'FOREIGN KEY ("a_id") REFERENCES "A_tbl" ("id"))'),
'CREATE INDEX "B_tbl_a_id" ON "B_tbl" ("a_id")'])
def test_temporary_table(self):
sql, params = User._schema._create_table(temporary=True).query()
self.assertEqual(sql, (
'CREATE TEMPORARY TABLE IF NOT EXISTS "users" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"username" VARCHAR(255) NOT NULL)'))
def test_model_temporary_table(self):
class TempUser(User):
class Meta:
temporary = True
self.reset_sql_history()
TempUser.create_table()
TempUser.drop_table()
queries = [x.msg for x in self.history]
self.assertEqual(queries, [
('CREATE TEMPORARY TABLE IF NOT EXISTS "temp_user" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"username" VARCHAR(255) NOT NULL)', []),
('DROP TABLE IF EXISTS "temp_user"', [])])
def test_drop_table(self):
sql, params = User._schema._drop_table().query()
self.assertEqual(sql, 'DROP TABLE IF EXISTS "users"')
sql, params = User._schema._drop_table(cascade=True).query()
self.assertEqual(sql, 'DROP TABLE IF EXISTS "users" CASCADE')
sql, params = User._schema._drop_table(restrict=True).query()
self.assertEqual(sql, 'DROP TABLE IF EXISTS "users" RESTRICT')
def test_table_constraints(self):
class UKV(TestModel):
key = TextField()
value = TextField()
status = IntegerField()
class Meta:
constraints = [
SQL('CONSTRAINT ukv_kv_uniq UNIQUE (key, value)'),
Check('status > 0')]
database = self.database
table_name = 'ukv'
self.assertCreateTable(UKV, [
('CREATE TABLE "ukv" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"key" TEXT NOT NULL, '
'"value" TEXT NOT NULL, '
'"status" INTEGER NOT NULL, '
'CONSTRAINT ukv_kv_uniq UNIQUE (key, value), '
'CHECK (status > 0))')])
def test_table_settings(self):
class KVSettings(TestModel):
key = TextField(primary_key=True)
value = TextField()
timestamp = TimestampField()
class Meta:
database = self.database
table_settings = ('PARTITION BY RANGE (timestamp)',
'WITHOUT ROWID')
self.assertCreateTable(KVSettings, [
('CREATE TABLE "kv_settings" ('
'"key" TEXT NOT NULL PRIMARY KEY, '
'"value" TEXT NOT NULL, '
'"timestamp" INTEGER NOT NULL) '
'PARTITION BY RANGE (timestamp) '
'WITHOUT ROWID')])
def test_table_options(self):
class TOpts(TestModel):
key = TextField()
class Meta:
database = self.database
options = {
'CHECKSUM': 1,
'COMPRESSION': 'lz4'}
self.assertCreateTable(TOpts, [
('CREATE TABLE "t_opts" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"key" TEXT NOT NULL, '
'CHECKSUM=1, COMPRESSION=lz4)')])
def test_table_and_index_creation(self):
self.assertCreateTable(Person, [
('CREATE TABLE "person" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"first" VARCHAR(255) NOT NULL, '
'"last" VARCHAR(255) NOT NULL, '
'"dob" DATE NOT NULL)'),
'CREATE INDEX "person_dob" ON "person" ("dob")',
('CREATE UNIQUE INDEX "person_first_last" ON '
'"person" ("first", "last")')])
self.assertCreateTable(Note, [
('CREATE TABLE "note" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"author_id" INTEGER NOT NULL, '
'"content" TEXT NOT NULL, '
'FOREIGN KEY ("author_id") REFERENCES "person" ("id"))'),
'CREATE INDEX "note_author_id" ON "note" ("author_id")'])
self.assertCreateTable(Category, [
('CREATE TABLE "category" ('
'"name" VARCHAR(20) NOT NULL PRIMARY KEY, '
'"parent_id" VARCHAR(20), '
'FOREIGN KEY ("parent_id") REFERENCES "category" ("name"))'),
'CREATE INDEX "category_parent_id" ON "category" ("parent_id")'])
self.assertCreateTable(Relationship, [
('CREATE TABLE "relationship" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"from_person_id" INTEGER NOT NULL, '
'"to_person_id" INTEGER NOT NULL, '
'FOREIGN KEY ("from_person_id") REFERENCES "person" ("id"), '
'FOREIGN KEY ("to_person_id") REFERENCES "person" ("id"))'),
('CREATE INDEX "relationship_from_person_id" '
'ON "relationship" ("from_person_id")'),
('CREATE INDEX "relationship_to_person_id" '
'ON "relationship" ("to_person_id")')])
self.assertCreateTable(TMUnique, [
('CREATE TABLE "tm_unique" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" TEXT NOT NULL)'),
'CREATE UNIQUE INDEX "tm_unique_data" ON "tm_unique" ("data")'])
self.assertCreateTable(TMSequence, [
('CREATE TABLE "tm_sequence" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"value" INTEGER NOT NULL DEFAULT NEXTVAL(\'test_seq\'))')])
self.assertCreateTable(TMIndexes, [
('CREATE TABLE "tm_indexes" ("id" INTEGER NOT NULL PRIMARY KEY, '
'"alpha" INTEGER NOT NULL, "beta" INTEGER NOT NULL, '
'"gamma" INTEGER NOT NULL)'),
('CREATE UNIQUE INDEX "tm_indexes_alpha_beta" '
'ON "tm_indexes" ("alpha", "beta")'),
('CREATE INDEX "tm_indexes_beta_gamma" '
'ON "tm_indexes" ("beta", "gamma")')])
self.assertCreateTable(TMConstraints, [
('CREATE TABLE "tm_constraints" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" INTEGER CHECK (data < 5), '
'"value" TEXT NOT NULL COLLATE NOCASE)')])
self.assertCreateTable(TMNamedConstraints, [
('CREATE TABLE "tm_named_constraints" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"fk_id" INTEGER, '
'"k" TEXT NOT NULL, '
'"v" INTEGER NOT NULL '
'CHECK (v in (1, 2)), '
'CONSTRAINT "tmc_fk" FOREIGN KEY ("fk_id") '
'REFERENCES "tm_named_constraints" ("id"), '
'CONSTRAINT "chk_k" CHECK (k != \'kx\'))'),
('CREATE INDEX "tm_named_constraints_fk_id" '
'ON "tm_named_constraints" ("fk_id")')])
sql, params = (TMNamedConstraints
._schema
._create_foreign_key(TMNamedConstraints.fk)
.query())
self.assertEqual(sql, (
'ALTER TABLE "tm_named_constraints" ADD CONSTRAINT "tmc_fk" '
'FOREIGN KEY ("fk_id") REFERENCES "tm_named_constraints" ("id")'))
def test_index_name_truncation(self):
class LongIndex(TestModel):
a123456789012345678901234567890 = CharField()
b123456789012345678901234567890 = CharField()
c123456789012345678901234567890 = CharField()
class Meta:
database = self.database
fields = LongIndex._meta.sorted_fields[1:]
self.assertEqual(len(fields), 3)
idx = ModelIndex(LongIndex, fields)
ctx = LongIndex._schema._create_index(idx)
self.assertSQL(ctx, (
'CREATE INDEX IF NOT EXISTS "'
'long_index_a123456789012345678901234567890_b123456789012_9dd2139'
'" ON "long_index" ('
'"a123456789012345678901234567890", '
'"b123456789012345678901234567890", '
'"c123456789012345678901234567890")'), [])
def test_fk_non_pk_ddl(self):
class A(Model):
cf = CharField(max_length=100, unique=True)
df = DecimalField(
max_digits=4,
decimal_places=2,
auto_round=True,
unique=True)
class Meta:
database = self.database
class CF(TestModel):
a = ForeignKeyField(A, field='cf')
class Meta:
database = self.database
class DF(TestModel):
a = ForeignKeyField(A, field='df')
class Meta:
database = self.database
sql, params = CF._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "cf" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"a_id" VARCHAR(100) NOT NULL, '
'FOREIGN KEY ("a_id") REFERENCES "a" ("cf"))'))
sql, params = DF._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "df" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"a_id" DECIMAL(4, 2) NOT NULL, '
'FOREIGN KEY ("a_id") REFERENCES "a" ("df"))'))
def test_deferred_foreign_key(self):
class Language(TestModel):
name = CharField()
selected_snippet = DeferredForeignKey('Snippet', null=True)
class Meta:
database = self.database
class Snippet(TestModel):
code = TextField()
language = ForeignKeyField(Language, backref='snippets')
class Meta:
database = self.database
self.assertEqual(Snippet._meta.fields['language'].rel_model, Language)
self.assertEqual(Language._meta.fields['selected_snippet'].rel_model,
Snippet)
sql, params = Snippet._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "snippet" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"code" TEXT NOT NULL, '
'"language_id" INTEGER NOT NULL, '
'FOREIGN KEY ("language_id") REFERENCES "language" ("id"))'))
sql, params = Language._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "language" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"name" VARCHAR(255) NOT NULL, '
'"selected_snippet_id" INTEGER)'))
sql, params = (Language
._schema
._create_foreign_key(Language.selected_snippet)
.query())
self.assertEqual(sql, (
'ALTER TABLE "language" ADD CONSTRAINT '
'"fk_language_selected_snippet_id_refs_snippet" '
'FOREIGN KEY ("selected_snippet_id") REFERENCES "snippet" ("id")'))
class SnippetComment(TestModel):
snippet_long_foreign_key_identifier = ForeignKeyField(Snippet)
comment = TextField()
class Meta:
database = self.database
sql, params = SnippetComment._schema._create_table(safe=True).query()
self.assertEqual(sql, (
'CREATE TABLE IF NOT EXISTS "snippet_comment" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"snippet_long_foreign_key_identifier_id" INTEGER NOT NULL, '
'"comment" TEXT NOT NULL, '
'FOREIGN KEY ("snippet_long_foreign_key_identifier_id") '
'REFERENCES "snippet" ("id"))'))
sql, params = (SnippetComment._schema
._create_foreign_key(
SnippetComment.snippet_long_foreign_key_identifier)
.query())
self.assertEqual(sql, (
'ALTER TABLE "snippet_comment" ADD CONSTRAINT "'
'fk_snippet_comment_snippet_long_foreign_key_identifier_i_2a8b87d"'
' FOREIGN KEY ("snippet_long_foreign_key_identifier_id") '
'REFERENCES "snippet" ("id")'))
def test_deferred_foreign_key_inheritance(self):
class Base(TestModel):
class Meta:
database = self.database
class WithTimestamp(Base):
timestamp = TimestampField()
class Tweet(Base):
user = DeferredForeignKey('DUser')
content = TextField()
class TimestampTweet(Tweet, WithTimestamp): pass
class DUser(Base):
username = TextField()
sql, params = Tweet._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "tweet" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"content" TEXT NOT NULL, '
'"user_id" INTEGER NOT NULL)'))
sql, params = TimestampTweet._schema._create_table(safe=False).query()
self.assertEqual(sql, (
'CREATE TABLE "timestamp_tweet" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"timestamp" INTEGER NOT NULL, '
'"content" TEXT NOT NULL, '
'"user_id" INTEGER NOT NULL)'))
def test_identity_field(self):
class PG10Identity(TestModel):
id = IdentityField()
data = TextField()
class Meta:
database = self.database
self.assertCreateTable(PG10Identity, [
('CREATE TABLE "pg10_identity" ('
'"id" INT GENERATED BY DEFAULT AS IDENTITY NOT NULL PRIMARY KEY, '
'"data" TEXT NOT NULL)'),
])
def test_self_fk_inheritance(self):
class BaseCategory(TestModel):
parent = ForeignKeyField('self', backref='children')
class Meta:
database = self.database
class CatA1(BaseCategory):
name_a1 = TextField()
class CatA2(CatA1):
name_a2 = TextField()
self.assertTrue(CatA1.parent.rel_model is CatA1)
self.assertTrue(CatA2.parent.rel_model is CatA2)
self.assertCreateTable(CatA1, [
('CREATE TABLE "cat_a1" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"parent_id" INTEGER NOT NULL, '
'"name_a1" TEXT NOT NULL, '
'FOREIGN KEY ("parent_id") REFERENCES "cat_a1" ("id"))'),
('CREATE INDEX "cat_a1_parent_id" ON "cat_a1" ("parent_id")')])
self.assertCreateTable(CatA2, [
('CREATE TABLE "cat_a2" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"parent_id" INTEGER NOT NULL, '
'"name_a1" TEXT NOT NULL, '
'"name_a2" TEXT NOT NULL, '
'FOREIGN KEY ("parent_id") REFERENCES "cat_a2" ("id"))'),
('CREATE INDEX "cat_a2_parent_id" ON "cat_a2" ("parent_id")')])
|
TestModelDDL
|
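As a hedged illustration of what these assertions inspect, the same private helpers the tests call can print the CREATE TABLE statement for a small model bound to an in-memory SQLite database; the model here is made up.
from peewee import Model, SqliteDatabase, TextField

db = SqliteDatabase(":memory:")

class Note(Model):
    content = TextField()
    class Meta:
        database = db

sql, params = Note._schema._create_table(False).query()
print(sql)
# CREATE TABLE "note" ("id" INTEGER NOT NULL PRIMARY KEY, "content" TEXT NOT NULL)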
python
|
joblib__joblib
|
joblib/externals/loky/process_executor.py
|
{
"start": 9455,
"end": 9633
}
|
class ____:
def __init__(self, work_id, exception=None, result=None):
self.work_id = work_id
self.exception = exception
self.result = result
|
_ResultItem
|
python
|
numpy__numpy
|
numpy/linalg/tests/test_linalg.py
|
{
"start": 13670,
"end": 14308
}
|
class ____(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
|
HermitianGeneralizedTestCase
|
python
|
sphinx-doc__sphinx
|
sphinx/errors.py
|
{
"start": 2256,
"end": 2351
}
|
class ____(SphinxError):
"""Document error."""
category = 'Document error'
|
DocumentError
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 123132,
"end": 130785
}
|
class ____:
# In gh-5747, the R package `circular` was used to calculate reference
# values for the circular variance, e.g.:
# library(circular)
# options(digits=16)
# x = c(0, 2*pi/3, 5*pi/3)
# var.circular(x)
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 0.006455174000787767),
(stats.circstd, 6.520702116)])
def test_circfuncs(self, test_func, expected, xp):
x = xp.asarray([355., 5., 2., 359., 10., 350.])
xp_assert_close(test_func(x, high=360), xp.asarray(expected))
def test_circfuncs_small(self, xp):
# Default tolerances won't work here because the reference values
# are approximations. Ensure all array types work in float64 to
# avoid needing separate float32 and float64 tolerances.
x = xp.asarray([20, 21, 22, 18, 19, 20.5, 19.2], dtype=xp.float64)
M1 = xp.mean(x)
M2 = stats.circmean(x, high=360)
xp_assert_close(M2, M1, rtol=1e-5)
V1 = xp.var(x*xp.pi/180, correction=0)
# for small variations, circvar is approximately half the
# linear variance
V1 = V1 / 2.
V2 = stats.circvar(x, high=360)
xp_assert_close(V2, V1, rtol=1e-4)
S1 = xp.std(x, correction=0)
S2 = stats.circstd(x, high=360)
xp_assert_close(S2, S1, rtol=1e-4)
@pytest.mark.parametrize("test_func, numpy_func",
[(stats.circmean, np.mean),
(stats.circvar, np.var),
(stats.circstd, np.std)])
def test_circfuncs_close(self, test_func, numpy_func, xp):
# circfuncs should handle very similar inputs (gh-12740)
x = np.asarray([0.12675364631578953] * 10 + [0.12675365920187928] * 100)
circstat = test_func(xp.asarray(x))
normal = xp.asarray(numpy_func(x))
xp_assert_close(circstat, normal, atol=2e-8)
@pytest.mark.parametrize('circfunc', [stats.circmean,
stats.circvar,
stats.circstd])
def test_circmean_axis(self, xp, circfunc):
x = xp.asarray([[355, 5, 2, 359, 10, 350],
[351, 7, 4, 352, 9, 349],
[357, 9, 8, 358, 4, 356.]])
res = circfunc(x, high=360)
ref = circfunc(xp.reshape(x, (-1,)), high=360)
xp_assert_close(res, xp.asarray(ref))
res = circfunc(x, high=360, axis=1)
ref = [circfunc(x[i, :], high=360) for i in range(x.shape[0])]
xp_assert_close(res, xp.stack(ref))
res = circfunc(x, high=360, axis=0)
ref = [circfunc(x[:, i], high=360) for i in range(x.shape[1])]
xp_assert_close(res, xp.stack(ref))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean, 0.167690146),
(stats.circvar, 0.006455174270186603),
(stats.circstd, 6.520702116)])
def test_circfuncs_array_like(self, test_func, expected, xp):
x = xp.asarray([355, 5, 2, 359, 10, 350.])
xp_assert_close(test_func(x, high=360), xp.asarray(expected))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_empty(self, test_func, xp):
dtype = xp.float64
x = xp.asarray([], dtype=dtype)
with eager_warns(SmallSampleWarning, match=too_small_1d_not_omit, xp=xp):
res = test_func(x)
xp_assert_equal(res, xp.asarray(xp.nan, dtype=dtype))
@pytest.mark.parametrize("test_func", [stats.circmean, stats.circvar,
stats.circstd])
def test_nan_propagate(self, test_func, xp):
x = xp.asarray([355, 5, 2, 359, 10, 350, np.nan])
xp_assert_equal(test_func(x, high=360), xp.asarray(xp.nan))
@pytest.mark.parametrize("test_func,expected",
[(stats.circmean,
{None: np.nan, 0: 355.66582264, 1: 0.28725053}),
(stats.circvar,
{None: np.nan,
0: 0.002570671054089924,
1: 0.005545914017677123}),
(stats.circstd,
{None: np.nan, 0: 4.11093193, 1: 6.04265394})])
def test_nan_propagate_array(self, test_func, expected, xp):
x = xp.asarray([[355, 5, 2, 359, 10, 350, 1],
[351, 7, 4, 352, 9, 349, np.nan],
[1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
for axis in expected.keys():
out = test_func(x, high=360, axis=axis)
if axis is None:
xp_assert_equal(out, xp.asarray(xp.nan))
else:
xp_assert_close(out[0], xp.asarray(expected[axis]))
xp_assert_equal(out[1:], xp.full_like(out[1:], xp.nan))
def test_circmean_scalar(self, xp):
x = xp.asarray(1.)[()]
M1 = x
M2 = stats.circmean(x)
xp_assert_close(M2, M1, rtol=1e-5)
def test_circmean_range(self, xp):
# regression test for gh-6420: circmean(..., high, low) must be
# between `high` and `low`
m = stats.circmean(xp.arange(0, 2, 0.1), xp.pi, -xp.pi)
xp_assert_less(m, xp.asarray(xp.pi))
xp_assert_less(-m, xp.asarray(xp.pi))
def test_circfuncs_uint8(self, xp):
# regression test for gh-7255: overflow when working with
# numpy uint8 data type
x = xp.asarray([150, 10], dtype=xp.uint8)
xp_assert_close(stats.circmean(x, high=180), xp.asarray(170.0))
xp_assert_close(stats.circvar(x, high=180), xp.asarray(0.2339555554617))
xp_assert_close(stats.circstd(x, high=180), xp.asarray(20.91551378))
def test_circstd_zero(self, xp):
# circstd() of a single number should return positive zero.
y = stats.circstd(xp.asarray([0]))
assert math.copysign(1.0, y) == 1.0
def test_circmean_accuracy_tiny_input(self, xp):
# For tiny x such that sin(x) == x and cos(x) == 1.0 numerically,
# circmean(x) should return x because atan2(sin(x), cos(x)) == x.
# This test verifies this.
#
# The purpose of this test is not to show that circmean() is
# accurate in the last digit for certain input, because this is
# neither guaranteed nor particularly useful. Rather, it is a
# "white-box" sanity check that no undue loss of precision is
# introduced by conversion between (high - low) and (2 * pi).
x = xp.linspace(1e-9, 6e-9, 50)
assert xp.all(xp.sin(x) == x) and xp.all(xp.cos(x) == 1.0)
m = (x * (2 * xp.pi) / (2 * xp.pi)) != x
assert xp.any(m)
x = x[m]
y = stats.circmean(x[:, None], axis=1)
assert xp.all(y == x)
def test_circmean_accuracy_huge_input(self, xp):
# White-box test that circmean() does not introduce undue loss of
# numerical accuracy by eagerly rotating the input. This is detected
# by supplying a huge input x such that (x - low) == x numerically.
x = xp.asarray(1e17, dtype=xp.float64)
y = math.atan2(xp.sin(x), xp.cos(x)) # -2.6584887370946806
expected = xp.asarray(y, dtype=xp.float64)
actual = stats.circmean(x, high=xp.pi, low=-xp.pi)
xp_assert_close(actual, expected, rtol=1e-15, atol=0.0)
|
TestCircFuncs
|
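The tests above exercise SciPy's circular statistics; as a hedged reference, the circular mean is atan2 of the averaged sine and cosine, rescaled between the [low, high) range and radians. A small check against the first reference value in the table:
import numpy as np
from scipy import stats

x = np.array([355., 5., 2., 359., 10., 350.])
high, low = 360.0, 0.0

# Map samples to angles, average the unit vectors, map the mean angle back.
ang = (x - low) * 2 * np.pi / (high - low)
mean_ang = np.arctan2(np.sin(ang).mean(), np.cos(ang).mean())
circ_mean = (mean_ang * (high - low) / (2 * np.pi)) % (high - low) + low

assert np.isclose(circ_mean, stats.circmean(x, high=360))
print(circ_mean)  # ~0.16769, matching the reference value above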
python
|
getsentry__sentry
|
src/sentry/incidents/models/alert_rule.py
|
{
"start": 9181,
"end": 10474
}
|
class ____(BaseManager["AlertRuleTrigger"]):
CACHE_KEY = "alert_rule_triggers:alert_rule:%s"
@classmethod
def _build_trigger_cache_key(cls, alert_rule_id: int) -> str:
return cls.CACHE_KEY % alert_rule_id
def get_for_alert_rule(self, alert_rule: AlertRule) -> list[AlertRuleTrigger]:
"""
Fetches the AlertRuleTriggers associated with an AlertRule. Attempts to fetch
from cache, then hits the database.
"""
cache_key = self._build_trigger_cache_key(alert_rule.id)
triggers = cache.get(cache_key)
if triggers is None:
triggers = list(AlertRuleTrigger.objects.filter(alert_rule=alert_rule))
cache.set(cache_key, triggers, 3600)
return triggers
@classmethod
def clear_trigger_cache(cls, instance: AlertRuleTrigger, **kwargs: Any) -> None:
cache.delete(cls._build_trigger_cache_key(instance.alert_rule_id))
assert cache.get(cls._build_trigger_cache_key(instance.alert_rule_id)) is None
@classmethod
def clear_alert_rule_trigger_cache(cls, instance: AlertRuleTrigger, **kwargs: Any) -> None:
cache.delete(cls._build_trigger_cache_key(instance.id))
assert cache.get(cls._build_trigger_cache_key(instance.id)) is None
|
AlertRuleTriggerManager
|
python
|
google__jax
|
jax/_src/dtypes.py
|
{
"start": 1584,
"end": 1948
}
|
class ____(np.generic):
"""Scalar class for extended dtypes.
This is an abstract class that should never be instantiated, but rather
exists for the sake of `jnp.issubdtype`.
Examples:
>>> from jax import random
>>> from jax import dtypes
>>> key = random.key(0)
>>> jnp.issubdtype(key.dtype, dtypes.extended)
True
"""
@export
|
extended
|
python
|
huggingface__transformers
|
src/transformers/models/wavlm/modeling_wavlm.py
|
{
"start": 15473,
"end": 18405
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = WavLMPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_attention_mask] = 0
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
position_bias = None
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = layer(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
index=i,
)
hidden_states, position_bias = layer_outputs[:2]
if skip_the_layer:
layer_outputs = (None, None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
|
WavLMEncoder
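The layer loop above implements LayerDrop (https://huggingface.co/papers/1909.11556): during training, every layer except the first is skipped with probability config.layerdrop, unless synced_gpus forces all ranks to run every layer. A simplified, self-contained sketch of just that skip logic, with toy nn.Linear layers standing in for the real encoder layers:

import torch
from torch import nn

layerdrop = 0.1          # skip probability, mirroring config.layerdrop
training = True
synced_gpus = False      # under FSDP/ZeRO-3 every rank must run every layer
layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(4)])

hidden_states = torch.randn(2, 8)
for i, layer in enumerate(layers):
    dropout_probability = torch.rand([])  # scalar in [0, 1)
    skip_the_layer = training and i > 0 and (dropout_probability < layerdrop)
    if not skip_the_layer or synced_gpus:
        hidden_states = layer(hidden_states)
print(hidden_states.shape)  # torch.Size([2, 8])

At inference time (training = False) no layers are skipped, so the stochastic depth only regularizes training.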
|
python
|
huggingface__transformers
|
src/transformers/models/patchtst/modeling_patchtst.py
|
{
"start": 38188,
"end": 40185
}
|
class ____(ModelOutput):
r"""
sequences (`torch.FloatTensor` of shape `(batch_size, num_samples, prediction_length, num_targets)`):
Sampled values from the chosen distribution.
"""
    sequences: Optional[torch.FloatTensor] = None


# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll
def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor:
"""
Computes the negative log likelihood loss from input distribution with respect to target.
"""
    return -input.log_prob(target)


# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average
def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor:
"""
Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,
meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.
Args:
input_tensor (`torch.FloatTensor`):
Input tensor, of which the average must be computed.
weights (`torch.FloatTensor`, *optional*):
Weights tensor, of the same shape as `input_tensor`.
dim (`int`, *optional*):
The dim along which to average `input_tensor`.
Returns:
`torch.FloatTensor`: The tensor with values averaged along the specified `dim`.
"""
if weights is not None:
weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))
sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)
return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights
else:
        return input_tensor.mean(dim=dim)


# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->PatchTST,TimeSeries->PatchTST
|
SamplePatchTSTOutput
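The weighted_average helper above exists to avoid nan * 0 = nan when a weight mask zeroes out padded positions. A standalone check of that behaviour, replicating the masking and clamping steps outside the model code:

import torch

values = torch.tensor([1.0, float("nan"), 3.0])
weights = torch.tensor([1.0, 0.0, 1.0])

# a naive weighted mean lets the masked NaN poison the result
naive = (values * weights).sum() / weights.sum()

# the masked variant zeroes weighted-out entries before summing
masked = torch.where(weights != 0, values * weights, torch.zeros_like(values))
safe = masked.sum() / torch.clamp(weights.sum(), min=1.0)

print(naive)  # tensor(nan)
print(safe)   # tensor(2.)

Note that the "if dim" truthiness check in the helper treats dim=0 the same as dim=None, summing over every dimension.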
|
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/admin_generator.py
|
{
"start": 9569,
"end": 12164
}
|
class ____(LabelCommand):
    help = """Generate an `admin.py` file for the given app (models)"""
# args = "[app_name]"
    can_import_settings = True

    def add_arguments(self, parser):
parser.add_argument("app_name")
parser.add_argument("model_name", nargs="*")
parser.add_argument(
"-s",
"--search-field",
action="append",
default=SEARCH_FIELD_NAMES,
help="Fields named like this will be added to `search_fields`"
" [default: %(default)s]",
)
parser.add_argument(
"-d",
"--date-hierarchy",
action="append",
default=DATE_HIERARCHY_NAMES,
help="A field named like this will be set as `date_hierarchy`"
" [default: %(default)s]",
)
parser.add_argument(
"-p",
"--prepopulated-fields",
action="append",
default=PREPOPULATED_FIELD_NAMES,
            help="These fields will be prepopulated by the other field. "
"The field names can be specified like `spam=eggA,eggB,eggC`"
" [default: %(default)s]",
)
parser.add_argument(
"-l",
"--list-filter-threshold",
type=int,
default=LIST_FILTER_THRESHOLD,
metavar="LIST_FILTER_THRESHOLD",
help="If a foreign key has less than LIST_FILTER_THRESHOLD items "
"it will be added to `list_filter` [default: %(default)s]",
)
parser.add_argument(
"-r",
"--raw-id-threshold",
type=int,
default=RAW_ID_THRESHOLD,
metavar="RAW_ID_THRESHOLD",
help="If a foreign key has more than RAW_ID_THRESHOLD items "
"it will be added to `list_filter` [default: %(default)s]",
        )

    @signalcommand
def handle(self, *args, **options):
app_name = options["app_name"]
try:
app = apps.get_app_config(app_name)
except LookupError:
self.stderr.write("This command requires an existing app name as argument")
self.stderr.write("Available apps:")
app_labels = [app.label for app in apps.get_app_configs()]
for label in sorted(app_labels):
self.stderr.write(" %s" % label)
return
model_res = []
for arg in options["model_name"]:
model_res.append(re.compile(arg, re.IGNORECASE))
self.stdout.write(AdminApp(app, model_res, **options).__str__())
|
Command
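The options above combine action="append" with non-empty defaults (SEARCH_FIELD_NAMES, DATE_HIERARCHY_NAMES, and so on). With argparse, values passed on the command line are appended after those defaults rather than replacing them, which matters when overriding -s or -d. A standalone demonstration with placeholder defaults (not the command's real values):

import argparse

SEARCH_FIELD_NAMES = ["name", "slug"]  # placeholder defaults for illustration

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--search-field", action="append", default=SEARCH_FIELD_NAMES)

print(parser.parse_args([]).search_field)               # ['name', 'slug']
print(parser.parse_args(["-s", "title"]).search_field)  # ['name', 'slug', 'title']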
|
python
|
ansible__ansible
|
test/integration/targets/collections_plugin_namespace/collection_root/ansible_collections/my_ns/my_col/plugins/filter/test_filter.py
|
{
"start": 79,
"end": 224
}
|
class ____(object):
def filters(self):
filters = {
'filter_name': filter_name,
}
return filters
|
FilterModule
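An Ansible filter plugin exposes its Jinja2 filters through FilterModule.filters(), a mapping from filter name to a plain Python callable; filter_name itself is defined elsewhere in the plugin file. A hypothetical self-contained version, invoked the way the template engine would look it up:

def filter_name(value):
    # placeholder body; the real filter lives outside the excerpt above
    return value


class FilterModule(object):
    def filters(self):
        return {"filter_name": filter_name}


filters = FilterModule().filters()
print(filters["filter_name"]("my_ns.my_col"))  # -> my_ns.my_col

In a playbook the filter would be referenced by its fully qualified collection name, for example {{ some_value | my_ns.my_col.filter_name }}.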
|