| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_fancylists.py
|
{ "start": 52, "end": 21855 }
|
class ____(util.MdCase):
"""Test fancy lists."""
extension = ['pymdownx.fancylists', 'pymdownx.saneheaders']
extension_configs = {}
def test_fail_case(self):
"""Test failed case."""
self.check_markdown(
"""
1. foo
. bar
1) foo
) bar
""",
"""
<ol type="1">
<li>foo
. bar</li>
</ol>
<ol type="1">
<li>foo
) bar</li>
</ol>
""",
True
)
def test_unordered(self):
"""Test unordered lists."""
self.check_markdown(
R'''
- item 1
* item 2
+ item 3
''',
r'''
<ul>
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ul>
''',
True
)
def test_ordered(self):
"""Test ordered lists."""
self.check_markdown(
R'''
1. item 1
2. item 2
3. item 3
''',
r'''
<ol type="1">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_ordered_paren(self):
"""Test ordered lists with parenthesis."""
self.check_markdown(
R'''
1) item 1
2) item 2
3) item 3
''',
r'''
<ol type="1">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_ordered_generic(self):
"""Test generic ordered list."""
self.check_markdown(
R'''
#. item 1
#. item 2
#. item 3
''',
r'''
<ol type="1">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_ordered_generic_paren(self):
"""Test generic ordered list with parenthesis."""
self.check_markdown(
R'''
#) item 1
#) item 2
#) item 3
''',
r'''
<ol type="1">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_ordered_generic_switch(self):
"""Test generic ordered list switching over to other style."""
self.check_markdown(
R'''
#. item 1
#. item 2
#) item 1
#) item 2
''',
r'''
<ol type="1">
<li>item 1</li>
<li>item 2</li>
</ol>
<ol type="1">
<li>item 1</li>
<li>item 2</li>
</ol>
''',
True
)
def test_ordered_generic_inherit(self):
"""Test generic ordered list inheritance."""
self.check_markdown(
R'''
i. item i
#. item ii
#. item iii
I. New list
''',
r'''
<ol type="i">
<li>item i</li>
<li>item ii</li>
<li>item iii</li>
</ol>
<ol type="I">
<li>New list</li>
</ol>
''',
True
)
def test_ordered_generic_inherit_no_change(self):
"""Test generic ordered list inheritance no new list if same type."""
self.check_markdown(
R'''
i. item i
#. item ii
#. item iii
iv. item iv
''',
r'''
<ol type="i">
<li>item i</li>
<li>item ii</li>
<li>item iii</li>
<li>item iv</li>
</ol>
''',
True
)
def test_roman(self):
"""Test Roman numeral lists."""
self.check_markdown(
R'''
i. item 1
ii. item 2
iii. item 3
iv. item 4
v. item 5
vi. item 6
''',
r'''
<ol type="i">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
<li>item 4</li>
<li>item 5</li>
<li>item 6</li>
</ol>
''',
True
)
def test_roman_paren(self):
"""Test Roman numeral lists with parenthesis."""
self.check_markdown(
R'''
i) item 1
ii) item 2
iii) item 3
''',
r'''
<ol type="i">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_roman_upper(self):
"""Test Roman numeral uppercase lists."""
self.check_markdown(
R'''
I. item 1
II. item 2
III. item 3
''',
r'''
<ol type="I">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_roman_upper_paren(self):
"""Test Roman numeral uppercase lists with parenthesis."""
self.check_markdown(
R'''
I) item 1
II) item 2
III) item 3
''',
r'''
<ol type="I">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_alpha(self):
"""Test alphabetical lists."""
self.check_markdown(
R'''
a. item 1
b. item 2
c. item 3
''',
r'''
<ol type="a">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_alpha_paren(self):
"""Test alphabetical lists with parenthesis."""
self.check_markdown(
R'''
a) item 1
b) item 2
c) item 3
''',
r'''
<ol type="a">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_alpha_upper(self):
"""Test alphabetical uppercase lists."""
self.check_markdown(
R'''
A. item 1
B. item 2
C. item 3
''',
r'''
<ol type="A">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_alpha_upper_paren(self):
"""Test alphabetical uppercase lists with parenthesis."""
self.check_markdown(
R'''
A) item 1
B) item 2
C) item 3
''',
r'''
<ol type="A">
<li>item 1</li>
<li>item 2</li>
<li>item 3</li>
</ol>
''',
True
)
def test_alpha_two_spaces(self):
"""Test alphabetical two space uppercase requirement."""
self.check_markdown(
R'''
A. item 1
A. item 2
''',
R'''
<ol type="A">
<li>item 1
A. item 2</li>
</ol>
''',
True
)
def test_roman_alpha(self):
"""Test behavior of alphabetical and Roman numeral."""
self.check_markdown(
R'''
v) item 1
a) item 2
i. item 1
a. item 1
b. item 2
''',
r'''
<ol start="22" type="a">
<li>item 1</li>
<li>item 2</li>
</ol>
<ol type="i">
<li>item 1</li>
</ol>
<ol type="a">
<li>item 1</li>
<li>item 2</li>
</ol>
''',
True
)
def test_bad_roman(self):
"""Test bad Roman numeral."""
self.check_markdown(
R'''
iviv. item 1
''',
R'''
<p>iviv. item 1</p>
''',
True
)
def test_roman_start(self):
"""Test bad Roman numeral."""
self.check_markdown(
R'''
iv. item 1
v. item 2
''',
R'''
<ol start="4" type="i">
<li>item 1</li>
<li>item 2</li>
</ol>
''',
True
)
def test_roman_two_spaces(self):
"""Test Roman numeral two space uppercase requirement."""
self.check_markdown(
R'''
I. item 1
I. item 2
''',
R'''
<ol type="I">
<li>item 1
I. item 2</li>
</ol>
''',
True
)
def test_roman_relaxed(self):
"""Test cases related to our less strict approach."""
self.check_markdown(
R'''
iiii. item 1
MCCCCCCVI. item 1
''',
R'''
<ol start="4" type="i">
<li>item 1</li>
</ol>
<ol start="1606" type="I">
<li>item 1</li>
</ol>
''',
True
)
def test_list_nested_same_line(self):
"""Test list nested on same line."""
self.check_markdown(
R'''
a) a) subitem1
b) subitem2
''',
R'''
<ol type="a">
<li>
<ol type="a">
<li>subitem1</li>
<li>subitem2</li>
</ol>
</li>
</ol>
''',
True
)
def test_indented_content(self):
"""Test indented content."""
self.check_markdown(
R'''
#. A paragraph
that is short.
Another paragraph.
#. A sublist
Another paragraph.
#. A new list item.
''',
R'''
<ol type="1">
<li>
<p>A paragraph
that is short.</p>
<p>Another paragraph.</p>
<ol type="1">
<li>A sublist</li>
</ol>
<p>Another paragraph.</p>
</li>
<li>
<p>A new list item.</p>
</li>
</ol>
''',
True
)
def test_unindented_content(self):
"""Test indented content with unindented lines."""
self.check_markdown(
R'''
#. A paragraph
that is short.
Another paragraph
with unindented lines.
#. A sublist
with unindented lines.
#. A new list item.
''',
R'''
<ol type="1">
<li>
<p>A paragraph
that is short.</p>
<p>Another paragraph
with unindented lines.</p>
<ol type="1">
<li>A sublist
with unindented lines.</li>
</ol>
</li>
<li>
<p>A new list item.</p>
</li>
</ol>
''',
True
)
def test_header_case(self):
"""Test case where list follows header."""
self.check_markdown(
R'''
Tight List:
* # Header1
Line 1-2 - **not** a header *or* paragraph!
* # Header2
Line 2-2 - not a header or paragraph!
Loose List:
* # Header1
Line 1-2 - *a* paragraph
* # Header2
Line 2-2 - a paragraph
''',
R'''
<p>Tight List:</p>
<ul>
<li>
<h1>Header1</h1>
Line 1-2 - <strong>not</strong> a header <em>or</em> paragraph!</li>
<li>
<h1>Header2</h1>
Line 2-2 - not a header or paragraph!</li>
</ul>
<p>Loose List:</p>
<ul>
<li>
<h1>Header1</h1>
<p>Line 1-2 - <em>a</em> paragraph</p>
</li>
<li>
<h1>Header2</h1>
<p>Line 2-2 - a paragraph</p>
</li>
</ul>
''',
True
)
def test_mixed_nesting_case(self):
"""Test mixed nesting case."""
self.check_markdown(
R'''
* item 1
* item 1-1
* item 1-2
* item 1-2-1
* item 2
* item 3
* item 4
* item 4-1
* item 4-2
* item 4-3
Paragraph under item 4-3
Paragraph under item 4
''',
R'''
<ul>
<li>item 1<ul>
<li>item 1-1</li>
<li>item 1-2<ul>
<li>item 1-2-1</li>
</ul>
</li>
</ul>
</li>
<li>item 2</li>
<li>item 3</li>
<li>
<p>item 4</p>
<ul>
<li>item 4-1</li>
<li>item 4-2</li>
<li>
<p>item 4-3</p>
<p>Paragraph under item 4-3</p>
</li>
</ul>
<p>Paragraph under item 4</p>
</li>
</ul>
''',
True
)
def test_tight_loose_mix(self):
"""Test inconsistent loose and tight list item spacing."""
self.check_markdown(
R'''
* item 1
* item 2
* item 3
* item 4
''',
R'''
<ul>
<li>item 1</li>
<li>
<p>item 2</p>
</li>
<li>
<p>item 3</p>
</li>
<li>item 4</li>
</ul>
''',
True
)
def test_roman_mitigation(self):
"""Test mitigation for conflict case with alphabetical lists."""
self.check_markdown(
R'''
IIIII. Roman numeral V
---
VIIIII. Roman numeral X
---
XXXXX. Roman numeral L
---
LXXXXX. Roman numeral C
---
CCCCC. Roman numeral D
---
DCCCCC. Roman numeral M
/// fancylists | start=5 type=I
V. Alternate work around
///
''',
R'''
<ol start="5" type="I">
<li>Roman numeral V</li>
</ol>
<hr />
<ol start="10" type="I">
<li>Roman numeral X</li>
</ol>
<hr />
<ol start="50" type="I">
<li>Roman numeral L</li>
</ol>
<hr />
<ol start="100" type="I">
<li>Roman numeral C</li>
</ol>
<hr />
<ol start="500" type="I">
<li>Roman numeral D</li>
</ol>
<hr />
<ol start="1000" type="I">
<li>Roman numeral M</li>
</ol>
<ol start="5" type="I">
<li>Alternate work around</li>
</ol>
''',
True
)
def test_alpha_mitigation_start(self):
"""Test mitigation for conflict case with Roman numeral lists with explicit start."""
self.check_markdown(
R'''
/// fancylists | start=3 type=a
i. Workaround
j. Workaround
///
''',
R'''
<ol start="3" type="a">
<li>Workaround</li>
<li>Workaround</li>
</ol>
''',
True
)
def test_alpha_mitigation(self):
"""Test mitigation for conflict case with Roman numeral lists with explicit start."""
self.check_markdown(
R'''
/// fancylists | type=a
i. Workaround
j. Workaround
///
/// fancylists | type=a
i) Workaround
j) Workaround
///
''',
R'''
<ol start="9" type="a">
<li>Workaround</li>
<li>Workaround</li>
</ol>
<ol start="9" type="a">
<li>Workaround</li>
<li>Workaround</li>
</ol>
''',
True
)
def test_alpha_mitigation_switch(self):
"""Test mitigation for conflict case with Roman numeral lists."""
self.check_markdown(
R'''
/// fancylists | type=a
i. Workaround
j) Workaround
///
''',
R'''
<ol start="9" type="a">
<li>Workaround</li>
</ol>
<ol start="10" type="a">
<li>Workaround</li>
</ol>
''',
True
)
def test_alpha_mitigation_complex(self):
"""Test mitigation for complex conflict case with Roman numeral lists."""
self.check_markdown(
R'''
/// fancylists | type=a
i. Workaround
Workaround
j. Workaround
///
''',
R'''
<ol start="9" type="a">
<li>
<p>Workaround</p>
<p>Workaround</p>
</li>
<li>
<p>Workaround</p>
</li>
</ol>
''',
True
)
def test_alpha_mitigation_ul_ignored(self):
"""Test that mitigation won't target unordered lists."""
self.check_markdown(
R'''
/// fancylists | type=a
- Workaround
Workaround
- Workaround
///
''',
R'''
<ul>
<li>
<p>Workaround</p>
<p>Workaround</p>
</li>
<li>
<p>Workaround</p>
</li>
</ul>
''',
True
)
def test_alpha_mitigation_generic(self):
"""Test that mitigation works on generic."""
self.check_markdown(
R'''
/// fancylists | start=9 type=a
#. Workaround
Workaround
#. Workaround
///
''',
R'''
<ol start="9" type="a">
<li>
<p>Workaround</p>
<p>Workaround</p>
</li>
<li>
<p>Workaround</p>
</li>
</ol>
''',
True
)
def test_alpha_mitigation_generic_no_start(self):
"""Test that mitigation works on generic with no start."""
self.check_markdown(
R'''
/// fancylists | type=a
#. Workaround
Workaround
#. Workaround
///
''',
R'''
<ol type="a">
<li>
<p>Workaround</p>
<p>Workaround</p>
</li>
<li>
<p>Workaround</p>
</li>
</ol>
''',
True
)
def test_mitigation_no_force_alpha(self):
"""Test case that can't be forced."""
self.check_markdown(
R'''
/// fancylists | type=a
1. Can't force
///
''',
R'''
<ol type="a">
<li>Can't force</li>
</ol>
''',
True
)
def test_mitigation_bad_type(self):
"""Test bad type in mitigation."""
self.check_markdown(
R'''
/// fancylists | type=j
1. Can't force
///
''',
R'''
<p>/// fancylists | type=j</p>
<ol type="1">
<li>Can't force
///</li>
</ol>
''',
True
)
def test_mitigation_no_type(self):
"""Test no type in mitigation."""
self.check_markdown(
R'''
/// fancylists |
a. Can't force
///
''',
R'''
<p>/// fancylists |</p>
<ol type="a">
<li>Can't force
///</li>
</ol>
''',
True
)
def test_mitigation_only_start(self):
"""Test no type in mitigation."""
self.check_markdown(
R'''
/// fancylists | start=5
a. item 5
///
''',
R'''
<ol start="5" type="1">
<li>item 5</li>
</ol>
''',
True
)
|
TestFancyLists
|
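The expected HTML in the tests above comes from running Python-Markdown with the same two extensions listed in `extension`. A minimal sketch of that setup, using the generic `#.` marker from `test_ordered_generic` as an illustrative input rather than any exact test fixture:

```python
import markdown  # Python-Markdown, which pymdown-extensions plugs into

text = "#. item 1\n#. item 2\n#. item 3\n"
html = markdown.markdown(
    text,
    extensions=["pymdownx.fancylists", "pymdownx.saneheaders"],
)
print(html)  # per test_ordered_generic above: an <ol type="1"> with three items
```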
python
|
scipy__scipy
|
scipy/stats/_multivariate.py
|
{ "start": 86237, "end": 105822 }
|
class ____(multi_rv_generic):
r"""A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix). These arguments must satisfy the relationship
``df > scale.ndim - 1``, but see notes on using the `rvs` method with
``df < scale.ndim``.
Methods
-------
pdf(x, df, scale)
Probability density function.
logpdf(x, df, scale)
Log of the probability density function.
rvs(df, scale, size=1, random_state=None)
Draw random samples from a Wishart distribution.
entropy()
Compute the differential entropy of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
%(_doc_random_state)s
Raises
------
scipy.linalg.LinAlgError
If the scale matrix `scale` is not positive definite.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported. Symmetry is not checked; only the lower triangular
portion is used.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
The algorithm [2]_ implemented by the `rvs` method may
produce numerically singular matrices with :math:`p - 1 < \nu < p`; the
user may wish to check for this condition and generate replacement samples
as necessary.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
>>> plt.show()
The input quantiles can be any shape of array, as long as the last
axis labels the components.
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
>>> rv = wishart(df=1, scale=1)
>>> # Frozen object with the same methods but holding the given
>>> # degrees of freedom and scale fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis, np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two dimensional,"
f" but scale.scale = {str(scale.shape)}.")
elif scale.ndim > 2:
raise ValueError(f"Array 'scale' must be at most two-dimensional, "
f"but scale.ndim = {scale.ndim}")
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= dim - 1:
raise ValueError("Degrees of freedom must be greater than the "
"dimension of scale matrix minus 1.")
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError(
"Quantiles must be square if they are two dimensional,"
f" but x.shape = {str(x.shape)}.")
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError(
"Quantiles must be square in the first two dimensions "
f"if they are three dimensional, but x.shape = {str(x.shape)}.")
elif x.ndim > 3:
raise ValueError(f"Quantiles must be at most two-dimensional with an "
f"additional dimension for multiple components, "
f"but x.ndim = {x.ndim}")
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
f' be {(dim, dim)}, got {x.shape[0:2]}.')
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
f' Got size = {tuple(size)}.')
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""Log of the Wishart probability density function.
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.empty(x.shape[-1])
scale_inv_x = np.empty(x.shape)
tr_scale_inv_x = np.empty(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:, :, i])
scale_inv_x[:, :, i] = scipy.linalg.cho_solve((C, True), x[:, :, i])
tr_scale_inv_x[i] = scale_inv_x[:, :, i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""Mean of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""Mode of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""Variance of the Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = (np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) +
shape[::-1]).T)
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None, None, None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""Draw random samples from a Wishart distribution.
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (``dim``, ``dim``), where
``dim`` is the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""Compute Cholesky decomposition and determine (log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
|
wishart_gen
|
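As the notes in the Wishart docstring state, a one-dimensional unit scale collapses W_1(ν, 1) to χ²(ν); a small sketch checking that with the public `scipy.stats` objects (the density comparison mirrors the docstring example above):

```python
import numpy as np
from scipy.stats import wishart, chi2

# Mean of W_1(nu, 1) is df * scale, which equals the chi-square mean df.
print(wishart.mean(df=3, scale=1))  # 3.0
print(chi2.mean(3))                 # 3.0

# The densities agree as well, as in the docstring example above.
x = np.linspace(1e-5, 8, 100)
print(np.allclose(wishart.pdf(x, df=3, scale=1), chi2.pdf(x, 3)))  # True
```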
python
|
mlflow__mlflow
|
mlflow/types/chat.py
|
{ "start": 3338, "end": 3424 }
|
class ____(BaseModel):
type: AllowedType | list[AllowedType] | None = None
|
ParamType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/design-skiplist.py
|
{ "start": 407, "end": 2897 }
|
class ____(object):
P_NUMERATOR, P_DENOMINATOR = 1, 2 # P = 1/4 in redis implementation
MAX_LEVEL = 32 # enough for 2^32 elements
def __init__(self):
self.__head = SkipNode()
self.__len = 0
def search(self, target):
"""
:type target: int
:rtype: bool
"""
return True if self.__find(target, self.__find_prev_nodes(target)) else False
def add(self, num):
"""
:type num: int
:rtype: None
"""
node = SkipNode(self.__random_level(), num)
if len(self.__head.nexts) < len(node.nexts):
self.__head.nexts.extend([None]*(len(node.nexts)-len(self.__head.nexts)))
prevs = self.__find_prev_nodes(num)
for i in xrange(len(node.nexts)):
node.nexts[i] = prevs[i].nexts[i]
prevs[i].nexts[i] = node
self.__len += 1
def erase(self, num):
"""
:type num: int
:rtype: bool
"""
prevs = self.__find_prev_nodes(num)
curr = self.__find(num, prevs)
if not curr:
return False
self.__len -= 1
for i in reversed(xrange(len(curr.nexts))):
prevs[i].nexts[i] = curr.nexts[i]
if not self.__head.nexts[i]:
self.__head.nexts.pop()
return True
def __find(self, num, prevs):
if prevs:
candidate = prevs[0].nexts[0]
if candidate and candidate.num == num:
return candidate
return None
def __find_prev_nodes(self, num):
prevs = [None]*len(self.__head.nexts)
curr = self.__head
for i in reversed(xrange(len(self.__head.nexts))):
while curr.nexts[i] and curr.nexts[i].num < num:
curr = curr.nexts[i]
prevs[i] = curr
return prevs
def __random_level(self):
level = 1
while random.randint(1, Skiplist.P_DENOMINATOR) <= Skiplist.P_NUMERATOR and \
level < Skiplist.MAX_LEVEL:
level += 1
return level
def __len__(self):
return self.__len
def __str__(self):
result = []
for i in reversed(xrange(len(self.__head.nexts))):
result.append([])
curr = self.__head.nexts[i]
while curr:
result[-1].append(str(curr.num))
curr = curr.nexts[i]
return "\n".join(map(lambda x: "->".join(x), result))
|
Skiplist
|
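The Skiplist above depends on a `SkipNode` helper (and on `random`/`xrange`) defined elsewhere in the file; a plausible minimal `SkipNode`, inferred from how `nexts` and `num` are accessed, plus a usage sketch in the same Python 2 style:

```python
import random  # used by __random_level above

class SkipNode(object):
    # Assumed layout: one forward pointer per level plus the stored value.
    def __init__(self, level=1, num=None):
        self.num = num
        self.nexts = [None] * level

# Usage sketch (LeetCode 1206 style):
#   sl = Skiplist()
#   sl.add(1); sl.add(2); sl.add(3)
#   sl.search(0)   # False
#   sl.add(4)
#   sl.search(1)   # True
#   sl.erase(0)    # False
#   sl.erase(1)    # True
#   sl.search(1)   # False
```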
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
|
{ "start": 992, "end": 1538 }
|
class ____(torch.nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())
self.net3 = nn.Sequential(nn.Linear(32, 64), nn.ReLU())
self.net4 = nn.Sequential(nn.ReLU(), nn.Linear(64, 8))
def forward(self, x):
return self.net4(self.net3(self.net2(self.net1(x))))
def get_input(self):
return torch.rand(8, 8, device=device_type.type)
|
TestDummyModel
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/utils/kubernetes.py
|
{ "start": 2805, "end": 2986 }
|
class ____(BaseModel):
model_config = {
"extra": "allow",
"json_schema_extra": {"$ref": create_definition_ref("io.k8s.api.core.v1.Container")},
}
|
InitContainer
|
python
|
google__pytype
|
pytype/tools/traces/traces_test.py
|
{ "start": 8401, "end": 9452 }
|
class ____(MatchAstTestCase):
def test_num(self):
matches = self._get_traces("v = 42", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", 42, ("int",))])
def test_str(self):
matches = self._get_traces("v = 'hello'", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])
def test_unicode(self):
matches = self._get_traces("v = u'hello'", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", "hello", ("str",))])
def test_bytes(self):
matches = self._get_traces("v = b'hello'", ast.Constant)
self.assertTracesEqual(
matches, [((1, 4), "LOAD_CONST", b"hello", ("bytes",))])
def test_bool(self):
matches = self._get_traces("v = True", ast.Constant)
self.assertTracesEqual(matches, [((1, 4), "LOAD_CONST", True, ("bool",))])
def test_ellipsis(self):
matches = self._get_traces("v = ...", ast.Constant)
self.assertTracesEqual(
matches, [((1, 4), "LOAD_CONST", Ellipsis, ("ellipsis",))])
|
MatchConstantTest
|
python
|
ray-project__ray
|
python/ray/train/v2/api/result.py
|
{ "start": 735, "end": 4859 }
|
class ____(ResultV1):
checkpoint: Optional[Checkpoint]
error: Optional[TrainingFailedError]
best_checkpoints: Optional[List[Tuple[Checkpoint, Dict[str, Any]]]] = None
@PublicAPI(stability="alpha")
def get_best_checkpoint(
self, metric: str, mode: str
) -> Optional["ray.train.Checkpoint"]:
return super().get_best_checkpoint(metric, mode)
@classmethod
def from_path(
cls,
path: Union[str, os.PathLike],
storage_filesystem: Optional[pyarrow.fs.FileSystem] = None,
) -> "Result":
"""Restore a training result from a previously saved training run path.
Args:
path: Path to the run output directory
storage_filesystem: Optional filesystem to use for accessing the path
Returns:
Result object with restored checkpoints and metrics
"""
fs, fs_path = get_fs_and_path(str(path), storage_filesystem)
# Validate that the experiment directory exists
if not _exists_at_fs_path(fs, fs_path):
raise RuntimeError(f"Experiment folder {fs_path} doesn't exist.")
# Remove trailing slashes to handle paths correctly
# os.path.basename() returns empty string for paths with trailing slashes
fs_path = fs_path.rstrip("/")
storage_path, experiment_dir_name = os.path.dirname(fs_path), os.path.basename(
fs_path
)
storage_context = StorageContext(
storage_path=storage_path,
experiment_dir_name=experiment_dir_name,
storage_filesystem=fs,
)
# Validate that the checkpoint manager snapshot file exists
if not _exists_at_fs_path(
storage_context.storage_filesystem,
storage_context.checkpoint_manager_snapshot_path,
):
raise RuntimeError(
f"Failed to restore the Result object: "
f"{CHECKPOINT_MANAGER_SNAPSHOT_FILENAME} doesn't exist in the "
f"experiment folder. Make sure that this is an output directory created by a Ray Train run."
)
checkpoint_manager = CheckpointManager(
storage_context=storage_context,
checkpoint_config=CheckpointConfig(),
)
# When we build a Result object from checkpoints, the error is not loaded.
return cls._from_checkpoint_manager(
checkpoint_manager=checkpoint_manager,
storage_context=storage_context,
)
@classmethod
def _from_checkpoint_manager(
cls,
checkpoint_manager: CheckpointManager,
storage_context: StorageContext,
error: Optional[TrainingFailedError] = None,
) -> "Result":
"""Create a Result object from a CheckpointManager."""
latest_checkpoint_result = checkpoint_manager.latest_checkpoint_result
if latest_checkpoint_result:
latest_metrics = latest_checkpoint_result.metrics
latest_checkpoint = latest_checkpoint_result.checkpoint
else:
latest_metrics = None
latest_checkpoint = None
best_checkpoints = [
(r.checkpoint, r.metrics)
for r in checkpoint_manager.best_checkpoint_results
]
# Provide the history of metrics attached to checkpoints as a dataframe.
metrics_dataframe = None
if best_checkpoints:
metrics_dataframe = pd.DataFrame([m for _, m in best_checkpoints])
return Result(
metrics=latest_metrics,
checkpoint=latest_checkpoint,
error=error,
path=storage_context.experiment_fs_path,
best_checkpoints=best_checkpoints,
metrics_dataframe=metrics_dataframe,
_storage_filesystem=storage_context.storage_filesystem,
)
@property
@Deprecated
def config(self) -> Optional[Dict[str, Any]]:
raise DeprecationWarning(
"The `config` property for a `ray.train.Result` is deprecated, "
"since it is only relevant in the context of Ray Tune."
)
|
Result
|
python
|
pytorch__pytorch
|
test/inductor/test_lookup_table.py
|
{ "start": 1547, "end": 2822 }
|
class ____(MMKernelInputs):
"""Mock MMKernelInputs that subclasses the real class and uses real tensors"""
def __init__(
self,
tensors: list[torch.Tensor],
scalars: Optional[dict[str, Union[float, int]]] = None,
mat1_idx: int = -2,
mat2_idx: int = -1,
):
"""Initialize with real tensors, creating mock nodes for the base class"""
mock_nodes = [MockTensorNode(t) for t in tensors]
super().__init__(mock_nodes, scalars, mat1_idx=mat1_idx, mat2_idx=mat2_idx)
self.tensors = tensors # Keep reference to original tensors
def shapes_hinted(self) -> tuple[tuple[int, ...], ...]:
"""Delegate to symbolic since real tensors already have int shapes"""
return self.shapes_symbolic()
def strides_hinted(self) -> tuple[tuple[int, ...], ...]:
"""Delegate to symbolic since real tensors already have int strides"""
return self.strides_symbolic() # pyre-ignore
def mnk_hinted(self) -> tuple[int, int, int]:
"""Delegate to symbolic since real tensors already have int dimensions"""
return self.mnk_symbolic() # pyre-ignore
@property
def device_type(self) -> Optional[str]:
return self.tensors[0].device.type
|
MockMMKernelInputs
|
python
|
apache__airflow
|
task-sdk/tests/task_sdk/definitions/test_callback.py
|
{ "start": 1548, "end": 6717 }
|
class ____:
@pytest.mark.parametrize(
("subclass", "callable"),
[
pytest.param(AsyncCallback, empty_async_callback_for_deadline_tests, id="async"),
pytest.param(SyncCallback, empty_sync_callback_for_deadline_tests, id="sync"),
],
)
def test_init_error_reserved_kwarg(self, subclass, callable):
with pytest.raises(ValueError, match="context is a reserved kwarg for this class"):
subclass(callable, {"context": None})
@pytest.mark.parametrize(
("callback_callable", "expected_path"),
[
pytest.param(
empty_sync_callback_for_deadline_tests,
qualname(empty_sync_callback_for_deadline_tests),
id="valid_sync_callable",
),
pytest.param(
empty_async_callback_for_deadline_tests,
qualname(empty_async_callback_for_deadline_tests),
id="valid_async_callable",
),
pytest.param(TEST_CALLBACK_PATH, TEST_CALLBACK_PATH, id="valid_path_string"),
pytest.param(lambda x: x, None, id="lambda_function"),
pytest.param(TEST_CALLBACK_PATH + " ", TEST_CALLBACK_PATH, id="path_with_whitespace"),
pytest.param(UNIMPORTABLE_DOT_PATH, UNIMPORTABLE_DOT_PATH, id="valid_format_not_importable"),
],
)
def test_get_callback_path_happy_cases(self, callback_callable, expected_path):
path = Callback.get_callback_path(callback_callable)
if expected_path is None:
assert path.endswith("<lambda>")
else:
assert path == expected_path
@pytest.mark.parametrize(
("callback_callable", "error_type"),
[
pytest.param(42, ImportError, id="not_a_string"),
pytest.param("", ImportError, id="empty_string"),
pytest.param("os.path", AttributeError, id="non_callable_module"),
],
)
def test_get_callback_path_error_cases(self, callback_callable, error_type):
expected_message = ""
if error_type is ImportError:
expected_message = "doesn't look like a valid dot path."
elif error_type is AttributeError:
expected_message = "is not callable."
with pytest.raises(error_type, match=expected_message):
Callback.get_callback_path(callback_callable)
@pytest.mark.parametrize(
("callback1_args", "callback2_args", "should_equal"),
[
pytest.param(
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
True,
id="identical",
),
pytest.param(
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(UNIMPORTABLE_DOT_PATH, TEST_CALLBACK_KWARGS),
False,
id="different_path",
),
pytest.param(
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(TEST_CALLBACK_PATH, {"other": "kwargs"}),
False,
id="different_kwargs",
),
pytest.param((TEST_CALLBACK_PATH, None), (TEST_CALLBACK_PATH, None), True, id="both_no_kwargs"),
],
)
def test_callback_equality(self, callback1_args, callback2_args, should_equal):
callback1 = AsyncCallback(*callback1_args)
callback2 = AsyncCallback(*callback2_args)
assert (callback1 == callback2) == should_equal
@pytest.mark.parametrize(
("callback_class", "args1", "args2", "should_be_same_hash"),
[
pytest.param(
AsyncCallback,
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
True,
id="async_identical",
),
pytest.param(
SyncCallback,
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
True,
id="sync_identical",
),
pytest.param(
AsyncCallback,
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(UNIMPORTABLE_DOT_PATH, TEST_CALLBACK_KWARGS),
False,
id="async_different_path",
),
pytest.param(
SyncCallback,
(TEST_CALLBACK_PATH, TEST_CALLBACK_KWARGS),
(TEST_CALLBACK_PATH, {"other": "kwargs"}),
False,
id="sync_different_kwargs",
),
pytest.param(
AsyncCallback,
(TEST_CALLBACK_PATH, None),
(TEST_CALLBACK_PATH, None),
True,
id="async_no_kwargs",
),
],
)
def test_callback_hash_and_set_behavior(self, callback_class, args1, args2, should_be_same_hash):
callback1 = callback_class(*args1)
callback2 = callback_class(*args2)
assert (hash(callback1) == hash(callback2)) == should_be_same_hash
|
TestCallback
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocolModule2.py
|
{ "start": 413, "end": 545 }
|
class ____(Protocol):
var_1: str
# This should generate an error because var_1 has the
# wrong type.
v2: P2 = protocolModule1
|
P2
|
python
|
realpython__materials
|
python-import/namespace_package/third_party/serializers/json.py
|
{ "start": 49, "end": 391 }
|
class ____:
def __init__(self):
self._current_object = None
def start_object(self, object_name, object_id):
self._current_object = dict(id=object_id)
def add_property(self, name, value):
self._current_object[name] = value
def __str__(self):
return json.dumps(self._current_object)
|
JsonSerializer
|
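A short usage sketch for the serializer above; it assumes the module-level `import json` from the original file, and the object name and property are made-up illustration values:

```python
import json  # module-level dependency of JsonSerializer

serializer = JsonSerializer()
serializer.start_object("song", "1")
serializer.add_property("title", "Example")
print(serializer)  # {"id": "1", "title": "Example"}
```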
python
|
scikit-image__scikit-image
|
src/skimage/transform/_thin_plate_splines.py
|
{ "start": 165, "end": 8124 }
|
class ____:
"""Thin-plate spline transformation.
Given two matching sets of points, source and destination, this class
estimates the thin-plate spline (TPS) transformation which transforms
each point in source into its destination counterpart.
Attributes
----------
src : (N, 2) array_like
Coordinates of control points in source image.
References
----------
.. [1] Bookstein, Fred L. "Principal warps: Thin-plate splines and the
decomposition of deformations," IEEE Transactions on pattern analysis
and machine intelligence 11.6 (1989): 567–585.
DOI:`10.1109/34.24792`
https://user.engineering.uiowa.edu/~aip/papers/bookstein-89.pdf
Examples
--------
>>> import skimage as ski
Define source and destination control points such that they simulate
rotating by 90 degrees and generate a meshgrid from them:
>>> src = np.array([[0, 0], [0, 5], [5, 5], [5, 0]])
>>> dst = np.array([[5, 0], [0, 0], [0, 5], [5, 5]])
Estimate the transformation:
>>> tps = ski.transform.ThinPlateSplineTransform.from_estimate(src, dst)
Applying the transformation to `src` approximates `dst`:
>>> np.round(tps(src), 4) # doctest: +FLOAT_CMP
array([[5., 0.],
[0., 0.],
[0., 5.],
[5., 5.]])
Create a meshgrid to apply the transformation to:
>>> grid = np.meshgrid(np.arange(5), np.arange(5))
>>> grid[1]
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]])
>>> coords = np.vstack([grid[0].ravel(), grid[1].ravel()]).T
>>> transformed = tps(coords)
>>> np.round(transformed[:, 1]).reshape(5, 5).astype(int)
array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
The estimation can fail - for example, if all the input or output points
are the same. If this happens, you will get a transform that is not
"truthy" - meaning that ``bool(tform)`` is ``False``:
>>> if tps:
... print("Estimation succeeded.")
Estimation succeeded.
Not so for a degenerate transform with identical points.
>>> bad_src = np.ones((4, 2))
>>> bad_tps = ski.transform.ThinPlateSplineTransform.from_estimate(
... bad_src, dst)
>>> if not bad_tps:
... print("Estimation failed.")
Estimation failed.
Trying to use this failed estimation transform result will give a suitable
error:
>>> bad_tps.params # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
FailedEstimationAccessError: No attribute "params" for failed estimation ...
"""
def __init__(self):
self._estimated = False
self._spline_mappings = None
self.src = None
def __call__(self, coords):
"""Estimate the transformation from a set of corresponding points.
Parameters
----------
coords : (N, 2) array_like
x, y coordinates to transform
Returns
-------
transformed_coords: (N, D) array
Destination coordinates
"""
if self._spline_mappings is None:
msg = (
"Transformation is undefined, define it by calling `estimate` "
"before applying it"
)
raise ValueError(msg)
coords = np.array(coords)
if coords.ndim != 2 or coords.shape[1] != 2:
msg = "Input `coords` must have shape (N, 2)"
raise ValueError(msg)
radial_dist = self._radial_distance(coords)
transformed_coords = self._spline_function(coords, radial_dist)
return transformed_coords
@property
def inverse(self):
raise NotImplementedError("Not supported")
@classmethod
def from_estimate(cls, src, dst) -> Self | FailedEstimation:
"""Estimate optimal spline mappings between source and destination points.
Parameters
----------
src : (N, 2) array_like
Control points at source coordinates.
dst : (N, 2) array_like
Control points at destination coordinates.
Returns
-------
tform : Self or ``FailedEstimation``
An instance of the transformation if the estimation succeeded.
Otherwise, we return a special ``FailedEstimation`` object to
signal a failed estimation. Testing the truth value of the failed
estimation object will return ``False``. E.g.
.. code-block:: python
tform = ThinPlateSplineTransform.from_estimate(...)
if not tform:
raise RuntimeError(f"Failed estimation: {tf}")
Notes
-----
The number N of source and destination points must match.
"""
tf = cls()
msg = tf._estimate(src, dst)
return tf if msg is None else FailedEstimation(f'{cls.__name__}: {msg}')
def _estimate(self, src, dst):
"""Try to estimate and return reason if estimation fails."""
check_nD(src, 2, arg_name="src")
check_nD(dst, 2, arg_name="dst")
if src.shape[0] < 3 or dst.shape[0] < 3:
msg = "Need at least 3 points in in `src` and `dst`"
raise ValueError(msg)
if src.shape != dst.shape:
msg = f"Shape of `src` and `dst` didn't match, {src.shape} != {dst.shape}"
raise ValueError(msg)
self.src = src
n, d = src.shape
dist = distance_matrix(src, src)
K = self._radial_basis_kernel(dist)
P = np.hstack([np.ones((n, 1)), src])
n_plus_3 = n + 3
L = np.zeros((n_plus_3, n_plus_3), dtype=np.float32)
L[:n, :n] = K
L[:n, -3:] = P
L[-3:, :n] = P.T
V = np.vstack([dst, np.zeros((d + 1, d))])
try:
self._spline_mappings = np.linalg.solve(L, V)
except np.linalg.LinAlgError:
return 'Unable to solve for spline mappings'
return None
def _radial_distance(self, coords):
"""Compute the radial distance between input points and source points."""
dists = distance_matrix(coords, self.src)
return self._radial_basis_kernel(dists)
def _spline_function(self, coords, radial_dist):
"""Estimate the spline function in X and Y directions."""
n = self.src.shape[0]
w = self._spline_mappings[:n]
a = self._spline_mappings[n:]
transformed_coords = a[0] + np.dot(coords, a[1:]) + np.dot(radial_dist, w)
return transformed_coords
@staticmethod
def _radial_basis_kernel(r):
"""Compute the radial basis function for thin-plate splines.
Parameters
----------
r : (4, N) ndarray
Input array representing the Euclidean distance between each pair of
two collections of control points.
Returns
-------
U : (4, N) ndarray
Calculated kernel function U.
"""
_small = 1e-8  # Small value to avoid log(0)
r_sq = r**2
U = np.where(r == 0.0, 0.0, r_sq * np.log(r_sq + _small))
return U
@_deprecate_estimate
def estimate(self, src, dst):
"""Estimate optimal spline mappings between source and destination points.
Parameters
----------
src : (N, 2) array_like
Control points at source coordinates.
dst : (N, 2) array_like
Control points at destination coordinates.
Returns
-------
success: bool
True indicates that the estimation was successful.
Notes
-----
The number N of source and destination points must match.
"""
return self._estimate(src, dst) is None
|
ThinPlateSplineTransform
|
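`_radial_basis_kernel` above implements the thin-plate spline basis U(r) = r² log r², zero at r = 0 and with a small offset guarding log(0); a standalone sketch of just that kernel, reusing the same guard value:

```python
import numpy as np

def tps_kernel(r, eps=1e-8):
    # U(r) = r^2 * log(r^2); exactly zero at r == 0, as in the class above.
    r = np.asarray(r, dtype=float)
    r_sq = r**2
    return np.where(r == 0.0, 0.0, r_sq * np.log(r_sq + eps))

print(tps_kernel([0.0, 1.0, 2.0]))  # [0.0, ~0.0, ~5.545]
```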
python
|
walkccc__LeetCode
|
solutions/22. Generate Parentheses/22.py
|
{ "start": 0, "end": 368 }
|
class ____:
def generateParenthesis(self, n):
ans = []
def dfs(l: int, r: int, s: list[str]) -> None:
if l == 0 and r == 0:
ans.append(''.join(s))
if l > 0:
s.append('(')
dfs(l - 1, r, s)
s.pop()
if l < r:
s.append(')')
dfs(l, r - 1, s)
s.pop()
dfs(n, n, [])
return ans
|
Solution
|
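A quick usage sketch for the backtracking above: `l` and `r` track the remaining opening and closing parentheses, and a ')' is only appended while `l < r`, so every prefix stays balanced:

```python
print(Solution().generateParenthesis(3))
# ['((()))', '(()())', '(())()', '()(())', '()()()']
```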
python
|
doocs__leetcode
|
solution/1700-1799/1762.Buildings With an Ocean View/Solution.py
|
{ "start": 0, "end": 279 }
|
class ____:
def findBuildings(self, heights: List[int]) -> List[int]:
ans = []
mx = 0
for i in range(len(heights) - 1, -1, -1):
if heights[i] > mx:
ans.append(i)
mx = heights[i]
return ans[::-1]
|
Solution
|
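A usage sketch for the single pass above: scanning from the right while keeping the running maximum `mx` is O(n) and keeps exactly the indices that see over everything to their right (the original file imports `List` from `typing` for the annotation):

```python
print(Solution().findBuildings([4, 2, 3, 1]))  # [0, 2, 3]
```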
python
|
scrapy__scrapy
|
tests/mockserver/http_resources.py
|
{ "start": 4190, "end": 4651 }
|
class ____(LeafResource):
def render_GET(self, request):
request.startedWriting = 1
self.deferRequest(request, 0, self._delayedRender, request)
return NOT_DONE_YET
render_POST = render_GET
def _delayedRender(self, request):
raw = getarg(request, b"raw", b"HTTP 1.1 200 OK\n")
request.startedWriting = 1
request.write(raw)
request.channel.transport.loseConnection()
request.finish()
|
Raw
|
python
|
gevent__gevent
|
src/greentest/3.14/test_urllib2.py
|
{ "start": 17172, "end": 17556 }
|
class ____(urllib.request.HTTPHandler):
# Very simple mock HTTP handler with no special behavior other than using a mock HTTP connection
def __init__(self, debuglevel=None):
super(MockHTTPHandler, self).__init__(debuglevel=debuglevel)
self.httpconn = MockHTTPClass()
def http_open(self, req):
return self.do_open(self.httpconn, req)
|
MockHTTPHandler
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{ "start": 336038, "end": 340277 }
|
class ____(Request):
"""
Publish tasks
:param ids: Entities to move
:type ids: Sequence[str]
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
:param force: If not true, call fails if the task status is not 'stopped'
:type force: bool
:param publish_model: Indicates that the task output model (if exists) should
be published. Optional, the default value is True.
:type publish_model: bool
"""
_service = "tasks"
_action = "publish_many"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'stopped'",
"type": "boolean",
},
"ids": {
"description": "Entities to move",
"items": {"type": "string"},
"type": "array",
},
"publish_model": {
"description": "Indicates that the task output model (if exists) should be published. Optional, the default value is True.",
"type": "boolean",
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self,
ids: List[str],
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
force: Optional[bool] = False,
publish_model: Optional[bool] = None,
**kwargs: Any
) -> None:
super(PublishManyRequest, self).__init__(**kwargs)
self.ids = ids
self.status_reason = status_reason
self.status_message = status_message
self.force = force
self.publish_model = publish_model
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("publish_model")
def publish_model(self) -> Optional[bool]:
return self._property_publish_model
@publish_model.setter
def publish_model(self, value: Optional[bool]) -> None:
if value is None:
self._property_publish_model = None
return
self.assert_isinstance(value, "publish_model", (bool,))
self._property_publish_model = value
|
PublishManyRequest
|
python
|
apache__airflow
|
providers/opensearch/src/airflow/providers/opensearch/operators/opensearch.py
|
{ "start": 4389, "end": 5724 }
|
class ____(BaseOperator):
"""
Create a new index on an OpenSearch cluster with a given index name.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpenSearchCreateIndexOperator`
:param index_name: The name of the index to be created.
:param index_body: A dictionary that defines index settings
:param opensearch_conn_id: opensearch connection to use
"""
def __init__(
self,
*,
index_name: str,
index_body: dict[str, Any],
opensearch_conn_id: str = "opensearch_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.index_name = index_name
self.index_body = index_body
self.opensearch_conn_id = opensearch_conn_id
@cached_property
def hook(self) -> OpenSearchHook:
"""Get an instance of an OpenSearchHook."""
return OpenSearchHook(open_search_conn_id=self.opensearch_conn_id, log_query=False)
def execute(self, context: Context) -> Any:
"""Create an index on an OpenSearch cluster."""
try:
self.hook.client.indices.create(index=self.index_name, body=self.index_body)
except OpenSearchException as e:
raise AirflowException(e)
|
OpenSearchCreateIndexOperator
|
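A hypothetical instantiation of the operator above inside a DAG; `task_id` is the standard `BaseOperator` argument passed through `**kwargs`, and the index name and body are illustration values only:

```python
create_index = OpenSearchCreateIndexOperator(
    task_id="create_example_index",
    index_name="example-index",                # hypothetical index name
    index_body={"settings": {"index": {"number_of_shards": 1}}},
    opensearch_conn_id="opensearch_default",   # the operator's default connection id
)
```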
python
|
pyinstaller__pyinstaller
|
bootloader/waflib/Tools/cs.py
|
{ "start": 4175, "end": 4439 }
|
class ____(Task.Task):
color = 'YELLOW'
inst_to = None
def runnable_status(self):
return Task.SKIP_ME
@conf
def read_csshlib(self, name, paths=[]):
return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib')
|
fake_csshlib
|
python
|
huggingface__transformers
|
src/transformers/models/modernbert/modeling_modernbert.py
|
{ "start": 22530, "end": 24600 }
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: ModernBertConfig, layer_id: Optional[int] = None):
super().__init__()
self.config = config
if layer_id == 0:
self.attn_norm = nn.Identity()
else:
self.attn_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.attn = ModernBertAttention(config=config, layer_id=layer_id)
self.mlp_norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, bias=config.norm_bias)
self.mlp = ModernBertMLP(config)
self.attention_type = config.layer_types[layer_id]
@torch.compile(dynamic=True)
def compiled_mlp(self, hidden_states: torch.Tensor) -> torch.Tensor:
return self.mlp(self.mlp_norm(hidden_states))
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
sliding_window_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
cu_seqlens: Optional[torch.Tensor] = None,
max_seqlen: Optional[int] = None,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> torch.Tensor:
attn_outputs = self.attn(
self.attn_norm(hidden_states),
attention_mask=attention_mask,
sliding_window_mask=sliding_window_mask,
position_ids=position_ids,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = hidden_states + attn_outputs[0]
mlp_output = (
self.compiled_mlp(hidden_states)
if self.config.reference_compile
else self.mlp(self.mlp_norm(hidden_states))
)
hidden_states = hidden_states + mlp_output
return (hidden_states,) + attn_outputs[1:] # add attentions if outputted
@auto_docstring
|
ModernBertEncoderLayer
|
python
|
astropy__astropy
|
astropy/modeling/projections.py
|
{ "start": 16099, "end": 16407 }
|
class ____(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
|
Sky2Pix_ZenithalEquidistant
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/math_ops/banded_triangular_solve_op_test.py
|
{ "start": 960, "end": 8924 }
|
class ____(test.TestCase):
def _verifySolveAllWays(self, x, y, dtypes, batch_dims=None):
for lower in (False,):
for adjoint in (False, True):
for use_placeholder in True, False:
self._verifySolve(
x,
y,
lower=lower,
adjoint=adjoint,
batch_dims=batch_dims,
use_placeholder=use_placeholder,
dtypes=dtypes)
def _verifySolveAllWaysReal(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.float32, np.float64), batch_dims)
def _verifySolveAllWaysComplex(self, x, y, batch_dims=None):
self._verifySolveAllWays(x, y, (np.complex64, np.complex128), batch_dims)
def _verifySolve(self,
x,
y,
lower=True,
adjoint=False,
batch_dims=None,
use_placeholder=False,
dtypes=(np.float32, np.float64)):
for np_type in dtypes:
a = x.astype(np_type)
b = y.astype(np_type)
# Now we need to convert a to a dense triangular matrix.
def make_diags(diags, lower=True):
n = len(diags[0])
a = np.zeros(n * n, dtype=diags.dtype)
if lower:
for i, diag in enumerate(diags):
a[n * i:n * n:n + 1] = diag[i:]
else:
diags_flip = np.flip(diags, 0)
for i, diag in enumerate(diags_flip):
a[i:(n - i) * n:n + 1] = diag[:(n - i)]
return a.reshape(n, n)
# For numpy.solve we have to explicitly zero out the strictly
# upper or lower triangle.
if a.size > 0:
a_np = make_diags(a, lower=lower)
else:
a_np = a
if adjoint:
a_np = np.conj(np.transpose(a_np))
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
with self.cached_session():
a_tf = a
b_tf = b
if use_placeholder:
a_tf = array_ops.placeholder_with_default(a_tf, shape=None)
b_tf = array_ops.placeholder_with_default(b_tf, shape=None)
tf_ans = linalg_ops.banded_triangular_solve(
a_tf, b_tf, lower=lower, adjoint=adjoint)
tf_val = self.evaluate(tf_ans)
np_ans = np.linalg.solve(a_np, b)
self.assertEqual(np_ans.shape, tf_val.shape)
self.assertAllClose(np_ans, tf_val)
@test_util.run_deprecated_v1
def testSolve(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1]])
rhs0 = np.array([[1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrix with 2 bands, single right-hand side.
# Corresponds to the lower triangular
# [[1., 0.], [3., 4.]]
# and upper triangular
# [[2., 1.], [0., 3.]]
matrix = np.array([[1., 4.], [2., 3.]])
rhs0 = np.array([[1.], [1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
# 2x2 matrix with 2 bands, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]])
self._verifySolveAllWaysReal(matrix, rhs1)
    # 4x4 matrix with 2 bands, 3 right-hand sides.
# Corresponds to the lower triangular
# [[1., 0., 0., 0.],
# [-1., 2., 0., 0.],
# [0., -2., 3., 0.],
# [0., 0., -3., 4.]]
# and upper triangular
# [[1., 1., 0., 0.],
# [0., -1., 2., 0.],
# [0., 0., -2., 3.],
# [0., 0., 0., -3.]]
matrix = np.array([[1., 2., 3., 4.], [1., -1., -2., -3.]])
rhs0 = np.array([[1., 0., 1.], [0., 1., 1.], [-1., 2., 1.], [0., -1., -1.]])
self._verifySolveAllWaysReal(matrix, rhs0)
def testSolveBandSizeSmaller(self):
rhs0 = np.random.randn(6, 4)
# 6 x 6 matrix with 2 bands. Ensure all non-zero entries.
matrix = 2. * np.random.uniform(size=[3, 6]) + 1.
self._verifySolveAllWaysReal(matrix, rhs0)
# 6 x 6 matrix with 3 bands. Ensure all non-zero entries.
matrix = 2. * np.random.uniform(size=[3, 6]) + 1.
self._verifySolveAllWaysReal(matrix, rhs0)
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="ROCm does not support BLAS operations for complex types")
@test_util.run_deprecated_v1
def testSolveComplex(self):
# 1x1 matrix, single rhs.
matrix = np.array([[0.1 + 1j * 0.1]])
rhs0 = np.array([[1. + 1j]])
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrix with 2 bands, single right-hand side.
# Corresponds to
# [[1. + 1j, 0.], [4 + 1j, 2 + 1j]]
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs0 = np.array([[1.], [1.]]).astype(np.complex64)
rhs0 += 1j * rhs0
self._verifySolveAllWaysComplex(matrix, rhs0)
# 2x2 matrix with 2 bands, 3 right-hand sides.
rhs1 = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs1 += 1j * rhs1
self._verifySolveAllWaysComplex(matrix, rhs1)
@test_util.run_deprecated_v1
def testSolveBatch(self):
matrix = np.array([[1., 2.], [3., 4.]])
rhs = np.array([[1., 0., 1.], [0., 1., 1.]])
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2])
matrix = np.array([[1., 2., 3., 4.], [-1., -2., -3., -4.],
[-1., 1., 2., 3.]])
rhs = np.array([[-1., 2.], [1., 1.], [0., 1.], [2., 3.]])
# Batch of 2x3x4x4 matrices with 3 bands, 2x3x4x2 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x4x4 matrices with 3 bands, 3x2x4x2 right-hand sides.
self._verifySolveAllWaysReal(matrix, rhs, batch_dims=[3, 2])
@test.disable_with_predicate(
pred=test.is_built_with_rocm,
skip_message="ROCm does not support BLAS operations for complex types")
@test_util.run_deprecated_v1
def testSolveBatchComplex(self):
matrix = np.array([[1., 2.], [3., 4.]]).astype(np.complex64)
matrix += 1j * matrix
rhs = np.array([[1., 0., 1.], [0., 1., 1.]]).astype(np.complex64)
rhs += 1j * rhs
# Batch of 2x3x2x2 matrices, 2x3x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[2, 3])
# Batch of 3x2x2x2 matrices, 3x2x2x3 right-hand sides.
self._verifySolveAllWaysComplex(matrix, rhs, batch_dims=[3, 2])
@test_util.run_deprecated_v1
def testWrongDimensions(self):
# The matrix should have the same number of rows as the
# right-hand sides.
matrix = np.array([[1., 1.], [1., 1.]])
rhs = np.array([[1., 0.]])
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs, batch_dims=[2, 3])
# Number of bands exceeds the dimension of the matrix.
matrix = np.ones((6, 4))
rhs = np.ones((4, 2))
with self.cached_session():
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs)
with self.assertRaises(ValueError):
self._verifySolve(matrix, rhs, batch_dims=[2, 3])
@test_util.run_deprecated_v1
@test_util.disable_xla("XLA cannot throw assertion errors during a kernel.")
def testNotInvertible(self):
# The input should be invertible.
# The matrix is singular because it has a zero on the diagonal.
# FIXME(rmlarsen): The GPU kernel does not check for singularity.
singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
with self.cached_session():
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix)
with self.assertRaisesOpError("Input matrix is not invertible."):
self._verifySolve(singular_matrix, singular_matrix, batch_dims=[2, 3])
if __name__ == "__main__":
test.main()
|
BandedTriangularSolveOpTest
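An illustrative sketch (mine, not part of the TensorFlow test above): it expands the banded storage the test uses into a dense lower-triangular matrix and cross-checks one solve with plain NumPy, assuming band k of the (num_bands, n) array holds the k-th sub-diagonal with band 0 as the main diagonal.
import numpy as np
def bands_to_lower_dense(bands: np.ndarray) -> np.ndarray:
    # bands[k, i] (for i >= k) is the entry at row i, column i - k
    num_bands, n = bands.shape
    dense = np.zeros((n, n), dtype=bands.dtype)
    for k in range(num_bands):
        for i in range(k, n):
            dense[i, i - k] = bands[k, i]
    return dense
# Same 2x2 two-band example as in testSolve above.
bands = np.array([[1., 4.], [2., 3.]])
dense = bands_to_lower_dense(bands)
assert dense.tolist() == [[1., 0.], [3., 4.]]
rhs = np.array([[1.], [1.]])
np.testing.assert_allclose(dense @ np.linalg.solve(dense, rhs), rhs)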
|
python
|
openai__openai-python
|
src/openai/types/evals/create_eval_completions_run_data_source.py
|
{
"start": 1746,
"end": 1913
}
|
class ____(BaseModel):
id: str
"""The identifier of the file."""
type: Literal["file_id"]
"""The type of jsonl source. Always `file_id`."""
|
SourceFileID
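A hedged sketch of the same field shape with plain pydantic; the model name FileIDSketch and the sample id are mine, only the two fields mirror the snippet above.
from typing import Literal
from pydantic import BaseModel
class FileIDSketch(BaseModel):
    id: str
    type: Literal["file_id"]
src = FileIDSketch(id="file-abc123", type="file_id")
assert src.type == "file_id"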
|
python
|
ansible__ansible
|
test/units/module_utils/datatag/test_datatag.py
|
{
"start": 30535,
"end": 32713
}
|
class ____(AnsibleTaggedObject):
"""A surrogate test type that allows empty tags."""
__slots__ = ('_ansible_tags_mapping', '_value')
_empty_tags_as_native: t.ClassVar[bool] = False
_value: str
def __init__(self, value: str):
self._ansible_tags_mapping = _EMPTY_INTERNAL_TAGS_MAPPING
def test_helper_untag():
"""Validate the behavior of the `AnsibleTagHelper.untag` method."""
value = AnsibleTagHelper.tag("value", tags=[ExampleSingletonTag(), ExampleTagWithContent(content_str="blah")])
assert len(AnsibleTagHelper.tag_types(value)) == 2
less_est = AnsibleTagHelper.untag(value, ExampleSingletonTag)
assert AnsibleTagHelper.tag_types(less_est) == {ExampleTagWithContent}
no_tags_explicit = AnsibleTagHelper.untag(value, ExampleSingletonTag, ExampleTagWithContent)
assert type(no_tags_explicit) is str # pylint: disable=unidiomatic-typecheck
no_tags_implicit = AnsibleTagHelper.untag(value)
assert type(no_tags_implicit) is str # pylint: disable=unidiomatic-typecheck
untagged_value = "not a tagged value"
assert AnsibleTagHelper.untag(untagged_value) is untagged_value
tagged_empty_tags_ok_value = ExampleSingletonTag().tag(NonNativeTaggedType("blah"))
untagged_empty_tags_ok_value = AnsibleTagHelper.untag(tagged_empty_tags_ok_value)
assert type(untagged_empty_tags_ok_value) is NonNativeTaggedType # pylint: disable=unidiomatic-typecheck
assert not AnsibleTagHelper.tags(untagged_empty_tags_ok_value)
def test_serializable_dataclass_with_tuple() -> None:
"""Validate that dataclass deserialization converts inbound lists for tuple-typed fields."""
@dataclasses.dataclass(**_tag_dataclass_kwargs)
class HasTuple(AnsibleSerializableDataclass):
data: t.Tuple[str, ...]
assert HasTuple._from_dict(dict(data=["abc", "def"])) == HasTuple(data=("abc", "def"))
def test_tag_copy_non_propagation() -> None:
value = ExampleTagThatPreventsPropagation().tag("hello")
copied_value = AnsibleTagHelper.tag_copy(value, "copy")
assert type(copied_value) is str # pylint: disable=unidiomatic-typecheck
assert copied_value == "copy"
|
NonNativeTaggedType
|
python
|
doocs__leetcode
|
solution/0700-0799/0707.Design Linked List/Solution.py
|
{
"start": 0,
"end": 1214
}
|
class ____:
def __init__(self):
self.dummy = ListNode()
self.cnt = 0
def get(self, index: int) -> int:
if index < 0 or index >= self.cnt:
return -1
cur = self.dummy.next
for _ in range(index):
cur = cur.next
return cur.val
def addAtHead(self, val: int) -> None:
self.addAtIndex(0, val)
def addAtTail(self, val: int) -> None:
self.addAtIndex(self.cnt, val)
def addAtIndex(self, index: int, val: int) -> None:
if index > self.cnt:
return
pre = self.dummy
for _ in range(index):
pre = pre.next
pre.next = ListNode(val, pre.next)
self.cnt += 1
def deleteAtIndex(self, index: int) -> None:
if index >= self.cnt:
return
pre = self.dummy
for _ in range(index):
pre = pre.next
t = pre.next
pre.next = t.next
t.next = None
self.cnt -= 1
# Your MyLinkedList object will be instantiated and called as such:
# obj = MyLinkedList()
# param_1 = obj.get(index)
# obj.addAtHead(val)
# obj.addAtTail(val)
# obj.addAtIndex(index,val)
# obj.deleteAtIndex(index)
|
MyLinkedList
|
python
|
TheAlgorithms__Python
|
graphs/bidirectional_a_star.py
|
{
"start": 526,
"end": 1661
}
|
class ____:
"""
>>> k = Node(0, 0, 4, 3, 0, None)
>>> k.calculate_heuristic()
5.0
>>> n = Node(1, 4, 3, 4, 2, None)
>>> n.calculate_heuristic()
2.0
>>> l = [k, n]
>>> n == l[0]
False
>>> l.sort()
>>> n == l[0]
True
"""
def __init__(
self,
pos_x: int,
pos_y: int,
goal_x: int,
goal_y: int,
g_cost: int,
parent: Node | None,
) -> None:
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.h_cost = self.calculate_heuristic()
self.f_cost = self.g_cost + self.h_cost
def calculate_heuristic(self) -> float:
"""
Heuristic for the A*
"""
dy = self.pos_x - self.goal_x
dx = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(dx) + abs(dy)
else:
return sqrt(dy**2 + dx**2)
def __lt__(self, other: Node) -> bool:
return self.f_cost < other.f_cost
|
Node
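A self-contained sketch of the two heuristics the Node class above chooses between, assuming HEURISTIC == 1 selects Manhattan distance and any other value selects Euclidean distance (the doctest values imply the Euclidean branch is the default).
from math import sqrt
def manhattan(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> float:
    return abs(pos_x - goal_x) + abs(pos_y - goal_y)
def euclidean(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> float:
    return sqrt((pos_x - goal_x) ** 2 + (pos_y - goal_y) ** 2)
# Matches the doctest above: a node at (0, 0) with goal (4, 3) has Euclidean
# heuristic 5.0; the Manhattan variant of the same pair would be 7.
assert euclidean(0, 0, 4, 3) == 5.0
assert manhattan(0, 0, 4, 3) == 7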
|
python
|
scrapy__scrapy
|
tests/AsyncCrawlerProcess/caching_hostname_resolver.py
|
{
"start": 75,
"end": 920
}
|
class ____(scrapy.Spider):
"""
Finishes in a finite amount of time (does not hang indefinitely in the DNS resolution)
"""
name = "caching_hostname_resolver_spider"
async def start(self):
yield scrapy.Request(self.url)
def parse(self, response):
for _ in range(10):
yield scrapy.Request(
response.url, dont_filter=True, callback=self.ignore_response
)
def ignore_response(self, response):
self.logger.info(repr(response.ip_address))
if __name__ == "__main__":
process = AsyncCrawlerProcess(
settings={
"RETRY_ENABLED": False,
"DNS_RESOLVER": "scrapy.resolver.CachingHostnameResolver",
}
)
process.crawl(CachingHostnameResolverSpider, url=sys.argv[1])
process.start()
|
CachingHostnameResolverSpider
|
python
|
pydantic__pydantic
|
tests/test_pickle.py
|
{
"start": 4773,
"end": 5021
}
|
class ____:
a: int
b: float
def dataclass_factory() -> type:
@pydantic.dataclasses.dataclass
class NonImportableDataclass:
a: int
b: float
return NonImportableDataclass
@dataclasses.dataclass
|
ImportableDataclass
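A small sketch of the property this pickle test relies on: a dataclass defined at module scope round-trips through pickle, whereas a class created inside a function (like dataclass_factory's NonImportableDataclass) cannot be located by pickle. Plain dataclasses are used here as a stand-in for the pydantic wrapper.
import dataclasses
import pickle
@dataclasses.dataclass
class Point:  # module-level, hence locatable by pickle
    a: int
    b: float
assert pickle.loads(pickle.dumps(Point(1, 2.0))) == Point(1, 2.0)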
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py
|
{
"start": 7688,
"end": 7961
}
|
class ____(graphene.Union):
class Meta:
types = (
GrapheneDefaultPartitionStatuses,
GrapheneMultiPartitionStatuses,
GrapheneTimePartitionStatuses,
)
name = "AssetPartitionStatuses"
|
GrapheneAssetPartitionStatuses
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/bucketize_op_test.py
|
{
"start": 1049,
"end": 2955
}
|
class ____(xla_test.XLATestCase):
def testInt(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(expected_out,
sess.run(op, {p: [-5, 0, 2, 3, 5, 8, 10, 11, 12]}))
def testFloat(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(
expected_out,
sess.run(op, {p: [-5., 0., 2., 3., 5., 8., 10., 11., 12.]}))
def test2DInput(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
self.assertAllEqual(
expected_out, sess.run(op,
{p: [[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]}))
@test_util.disable_mlir_bridge("Error handling")
def testInvalidBoundariesOrder(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
sess.run(op, {p: [-5, 0]})
def testBoundariesNotList(self):
with self.session():
with self.assertRaisesRegex(TypeError, "Expected list.*"):
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
math_ops._bucketize(p, boundaries=0)
if __name__ == "__main__":
test.main()
|
BucketizationOpTest
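As a reference check (my addition, not part of the XLA test): NumPy's digitize reproduces the bucket indices the test expects for boundaries [0, 3, 8, 11].
import numpy as np
values = np.array([-5, 0, 2, 3, 5, 8, 10, 11, 12])
buckets = np.digitize(values, bins=[0, 3, 8, 11])  # right=False by default
assert buckets.tolist() == [0, 1, 1, 2, 2, 3, 3, 4, 4]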
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/events.py
|
{
"start": 112486,
"end": 114714
}
|
class ____(Response):
"""
Response of events.vector_metrics_iter_histogram endpoint.
:param images:
:type images: Sequence[dict]
"""
_service = "events"
_action = "vector_metrics_iter_histogram"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {"images": {"items": {"type": "object"}, "type": ["array", "null"]}},
"type": "object",
}
def __init__(self, images: Optional[List[dict]] = None, **kwargs: Any) -> None:
super(VectorMetricsIterHistogramResponse, self).__init__(**kwargs)
self.images = images
@schema_property("images")
def images(self) -> Optional[List[dict]]:
return self._property_images
@images.setter
def images(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_images = None
return
self.assert_isinstance(value, "images", (list, tuple))
self.assert_isinstance(value, "images", (dict,), is_array=True)
self._property_images = value
response_mapping = {
AddRequest: AddResponse,
AddBatchRequest: AddBatchResponse,
DeleteForTaskRequest: DeleteForTaskResponse,
DebugImagesRequest: DebugImagesResponse,
GetDebugImageSampleRequest: GetDebugImageSampleResponse,
NextDebugImageSampleRequest: NextDebugImageSampleResponse,
GetTaskMetricsRequest: GetTaskMetricsResponse,
GetTaskLogRequest: GetTaskLogResponse,
GetTaskEventsRequest: GetTaskEventsResponse,
DownloadTaskLogRequest: DownloadTaskLogResponse,
GetTaskPlotsRequest: GetTaskPlotsResponse,
GetMultiTaskPlotsRequest: GetMultiTaskPlotsResponse,
GetVectorMetricsAndVariantsRequest: GetVectorMetricsAndVariantsResponse,
VectorMetricsIterHistogramRequest: VectorMetricsIterHistogramResponse,
ScalarMetricsIterHistogramRequest: ScalarMetricsIterHistogramResponse,
MultiTaskScalarMetricsIterHistogramRequest: MultiTaskScalarMetricsIterHistogramResponse,
GetTaskLatestScalarValuesRequest: GetTaskLatestScalarValuesResponse,
GetScalarMetricsAndVariantsRequest: GetScalarMetricsAndVariantsResponse,
GetScalarMetricDataRequest: GetScalarMetricDataResponse,
}
|
VectorMetricsIterHistogramResponse
|
python
|
numba__numba
|
numba/core/types/misc.py
|
{
"start": 7229,
"end": 7962
}
|
class ____(Callable, Phantom):
"""
The type of exception classes (not instances).
"""
def __init__(self, exc_class):
assert issubclass(exc_class, BaseException)
name = "%s" % (exc_class.__name__)
self.exc_class = exc_class
super(ExceptionClass, self).__init__(name)
def get_call_type(self, context, args, kws):
return self.get_call_signatures()[0][0]
def get_call_signatures(self):
from numba.core import typing
return_type = ExceptionInstance(self.exc_class)
return [typing.signature(return_type)], False
def get_impl_key(self, sig):
return type(self)
@property
def key(self):
return self.exc_class
|
ExceptionClass
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/auth_manager/avp/facade.py
|
{
"start": 2248,
"end": 2495
}
|
class ____(TypedDict, total=False):
"""Represent the parameters of ``is_authorized`` method in AVP facade."""
method: ExtendedResourceMethod
entity_type: AvpEntities
entity_id: str | None
context: dict | None
|
IsAuthorizedRequest
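A minimal sketch of the total=False TypedDict idiom above, using plain str stand-ins for the provider's ExtendedResourceMethod and AvpEntities types.
from typing import Optional, TypedDict
class RequestSketch(TypedDict, total=False):
    method: str
    entity_type: str
    entity_id: Optional[str]
    context: Optional[dict]
# total=False makes every key optional, so a partial request still type-checks:
req: RequestSketch = {"method": "GET", "entity_type": "CONNECTION"}
assert "entity_id" not in req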
|
python
|
nedbat__coveragepy
|
coverage/types.py
|
{
"start": 1237,
"end": 2231
}
|
class ____(Protocol):
"""A simple value type for recording what to do with a file."""
original_filename: str
canonical_filename: str
source_filename: str | None
trace: bool
reason: str
file_tracer: FileTracer | None
has_dynamic_filename: bool
# When collecting data, we use a dictionary with a few possible shapes. The
# keys are always file names.
# - If measuring line coverage, the values are sets of line numbers.
# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs
# of line numbers).
# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
# line numbers combined into one integer).
TTraceFileData = set[TLineNo] | set[TArc] | set[int]
TTraceData = dict[str, TTraceFileData]
# Functions passed into collectors.
TShouldTraceFn = Callable[[str, FrameType], TFileDisposition]
TCheckIncludeFn = Callable[[str, FrameType], bool]
TShouldStartContextFn = Callable[[FrameType], str | None]
|
TFileDisposition
|
python
|
ray-project__ray
|
rllib/offline/output_writer.py
|
{
"start": 124,
"end": 461
}
|
class ____:
"""Writer API for saving experiences from policy evaluation."""
@PublicAPI
def write(self, sample_batch: SampleBatchType):
"""Saves a batch of experiences.
Args:
sample_batch: SampleBatch or MultiAgentBatch to save.
"""
raise NotImplementedError
@PublicAPI
|
OutputWriter
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 98006,
"end": 98452
}
|
class ____(object):
__slots__ = ('db',)
def __init__(self, db): self.db = db
def __enter__(self):
if self.db.is_closed():
self.db.connect()
def __exit__(self, exc_type, exc_val, exc_tb): self.db.close()
def __call__(self, fn):
@wraps(fn)
def inner(*args, **kwargs):
with ConnectionContext(self.db):
return fn(*args, **kwargs)
return inner
|
ConnectionContext
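A self-contained sketch of the pattern above: one object that serves as both a context manager and a decorator, opening a connection on entry and closing it on exit. FakeDatabase is a hypothetical stand-in, not peewee's Database.
from functools import wraps
class FakeDatabase:
    def __init__(self):
        self.closed = True
    def is_closed(self):
        return self.closed
    def connect(self):
        self.closed = False
    def close(self):
        self.closed = True
class EnsureConnection:
    def __init__(self, db):
        self.db = db
    def __enter__(self):
        if self.db.is_closed():
            self.db.connect()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.db.close()
    def __call__(self, fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            with EnsureConnection(self.db):
                return fn(*args, **kwargs)
        return inner
db = FakeDatabase()
@EnsureConnection(db)
def do_work():
    return db.is_closed()
assert do_work() is False      # connection held open while the body runs
assert db.is_closed() is True  # and closed again afterwards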
|
python
|
ray-project__ray
|
python/ray/data/_internal/logical/interfaces/source_operator.py
|
{
"start": 150,
"end": 471
}
|
class ____(ABC):
"""Mixin for Logical operators that can be logical source nodes.
Subclasses: Read, InputData, FromAbstract.
"""
@abstractmethod
def output_data(self) -> Optional[List["RefBundle"]]:
"""The output data of this operator if already known, or ``None``."""
pass
|
SourceOperator
|
python
|
getsentry__sentry
|
tests/sentry/api/serializers/test_apitoken.py
|
{
"start": 2532,
"end": 3018
}
|
class ____(TestApiTokenSerializer):
def test_field_is_returned(self) -> None:
attrs = self._serializer.get_attrs(item_list=[self._token], user=self._user)
attrs["application"] = None
serialized_object = self._serializer.serialize(
obj=self._token, user=self._user, attrs=attrs
)
assert "tokenLastCharacters" in serialized_object
assert serialized_object["tokenLastCharacters"] == self._token.token[-4:]
|
TestLastTokenCharacters
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/overloadOverride1.py
|
{
"start": 198,
"end": 264
}
|
class ____:
def foo(self, x: int) -> int:
return x
|
Base1
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py
|
{
"start": 5808,
"end": 6131
}
|
class ____:
# This should get flagged, *but* the fix is unsafe,
# since the `__slots__` binding is used by the `__match_args__` definition
__slots__ = ("foo", "bar")
__match_args__ = __slots__
###################################
# These should all not get flagged:
###################################
|
VeryDRY
|
python
|
pytorch__pytorch
|
test/inductor/test_mmdecomp.py
|
{
"start": 3017,
"end": 9775
}
|
class ____(NNTestCase):
_do_cuda_memory_leak_check = GPU_TYPE == "cuda"
_do_cuda_non_default_stream = GPU_TYPE == "cuda"
@unittest.skipIf(not HAS_GPU, "GPU tests require triton")
@parametrize("dtype", [torch.float, torch.bfloat16])
def test_simple_mm(self, device, dtype):
fudge = 10
rtol = default_rtol[dtype] * fudge
atol = default_atol[dtype] * fudge
for t_size in ts_list:
            (a1_0, a1_1, a2_0, a2_1) = t_size
t1 = rand_math_tensor((a1_0, a1_1), dtype=dtype, device=device)
t2 = rand_math_tensor((a2_0, a2_1), dtype=dtype, device=device)
tadd = rand_math_tensor((a1_0, a2_1), dtype=dtype, device=device)
run_comp_nocomp(torch_mm, t1, t2, rtol=rtol, atol=atol)
run_comp_nocomp(torch_addmm, tadd, t1, t2, rtol=rtol, atol=atol)
@unittest.skipIf(not HAS_GPU, "GPU tests require triton")
@parametrize(
"dtype", [torch.float, torch.bfloat16] if SM80OrLater else [torch.float]
)
@parametrize("bs", [1, 2, 4, 10])
def test_batched_mm(self, device, dtype, bs):
fudge = 3
rtol = default_rtol[dtype] * fudge
atol = default_atol[dtype] * fudge
for t_size in ts_list:
            (a1_0, a1_1, a2_0, a2_1) = t_size
t1 = rand_math_tensor((bs, a1_0, a1_1), dtype=dtype, device=device)
t2 = rand_math_tensor((bs, a2_0, a2_1), dtype=dtype, device=device)
tadd = rand_math_tensor((bs, a1_0, a2_1), dtype=dtype, device=device)
run_comp_nocomp(torch_bmm, t1, t2, rtol=rtol, atol=atol)
for alpha in (0, 1, -1, 0.5, -0.5):
for beta in (0, 1, -1, 0.5, -0.5):
run_comp_nocomp(
torch_baddbmm, tadd, t1, t2, alpha, beta, rtol=rtol, atol=atol
)
@unittest.skipIf(not HAS_GPU, "GPU tests require triton")
@config.patch(coordinate_descent_tuning=True)
def test_bmm_batch2_last_dim_size_is_one(self, device):
fudge = 3
rtol = default_rtol[torch.float32] * fudge
atol = default_atol[torch.float32] * fudge
t1 = torch.randn(1, 32, 2, device=device)
t2 = torch.randn(1, 2, 1, device=device)
run_comp_nocomp(torch_bmm, t1, t2, rtol=rtol, atol=atol)
@unittest.skipIf(not HAS_GPU, "GPU tests require triton")
@parametrize("dtype", [torch.float, torch.bfloat16, torch.int])
def test_some(self, device, dtype):
# this Pytorch data type is not fully supported on cuda today
# - unfortunately we can't skipIf because we don't see the actual params in skipIf
if device.startswith(GPU_TYPE) and dtype == torch.int:
return
run_comp_nocomp(
torch_mm,
init_tensor([[1], [2], [3], [4]], dtype=dtype, device=device),
init_tensor([[1, 2, 3, 4]], dtype=dtype, device=device),
)
run_comp_nocomp(
torch_mm,
init_tensor([[1, 2, 3, 4]], dtype=dtype, device=device),
init_tensor([[1], [2], [3], [4]], dtype=dtype, device=device),
)
@unittest.skipIf(not HAS_GPU, "GPU tests require triton")
@parametrize("dtype", [torch.float, torch.bfloat16, torch.int])
@parametrize("bs", [1, 2, 4, 10])
def test_some_batched(self, device, dtype, bs):
# this Pytorch data type is not fully supported on cuda today
# - unfortunately we can't skipIf because we don't see the actual params in skipIf
if device.startswith(GPU_TYPE) and dtype == torch.int:
return
run_comp_nocomp(
torch_bmm,
init_tensor([[[1], [2], [3], [4]]] * bs, dtype=dtype, device=device),
init_tensor([[[1, 2, 3, 4]]] * bs, dtype=dtype, device=device),
)
run_comp_nocomp(
torch_bmm,
init_tensor([[[1, 2, 3, 4]]] * bs, dtype=dtype, device=device),
init_tensor([[[1], [2], [3], [4]]] * bs, dtype=dtype, device=device),
)
@parametrize("dtype", [torch.float, torch.bfloat16])
def test_dynamic_shape_mm(self, device, dtype):
# Test that the mm decomp does not evaluate expressions for dynamic shapes
shape_env = ShapeEnv()
fake_mode = FakeTensorMode(shape_env=shape_env)
# Only test decomp for cpu to match fake tensors from dynamo
if device != "cpu":
return
for t_size in ts_list:
            (a1_0, a1_1, a2_0, a2_1) = t_size
# Create the fake tensors
t1 = create_fake_tensor_with_dynamic_size(
rand_math_tensor((a1_0, a1_1), dtype=dtype, device=device),
fake_mode,
)
t2 = create_fake_tensor_with_dynamic_size(
rand_math_tensor((a2_0, a2_1), dtype=dtype, device=device),
fake_mode,
)
# Save the expression types to check if any symints are evaluated
og_t1_expr_types = [
type(d.node.expr) if type(d) is torch.SymInt else int for d in t1.size()
]
og_t2_expr_types = [
type(d.node.expr) if type(d) is torch.SymInt else int for d in t2.size()
]
r = mm(t1, t2)
# Make sure all symints are not evaluated
new_t1_expr_types = [
type(d.node.expr) if type(d) is torch.SymInt else int for d in t1.size()
]
new_t2_expr_types = [
type(d.node.expr) if type(d) is torch.SymInt else int for d in t2.size()
]
self.assertTrue(
all(
og_t1_expr_types[i] == new_t1_expr_types[i]
for i in range(len(og_t1_expr_types))
)
)
self.assertTrue(
all(
og_t2_expr_types[i] == new_t2_expr_types[i]
for i in range(len(og_t2_expr_types))
)
)
if r is not NotImplemented:
# Check that the output is well formed
self.assertEqual(t1.size(0), r.size(0))
self.assertEqual(t2.size(1), r.size(1))
r_expr_types = [
type(d.node.expr) if type(d) is torch.SymInt else int
for d in r.size()
]
self.assertTrue(r_expr_types[0] == og_t1_expr_types[0])
self.assertTrue(r_expr_types[1] == og_t2_expr_types[1])
device_types = ("cpu", GPU_TYPE)
instantiate_device_type_tests(TestDecomp, globals(), only_for=device_types)
if __name__ == "__main__":
# We don't support torch.compile() on Windows
if not IS_WINDOWS:
run_tests()
|
TestDecomp
|
python
|
huggingface__transformers
|
tests/models/mllama/test_modeling_mllama.py
|
{
"start": 3916,
"end": 4492
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `MllamaForConditionalGeneration`.
"""
all_model_classes = (MllamaForCausalLM,) if is_torch_available() else ()
def setUp(self):
self.model_tester = MllamaText2TextModelTester(self)
self.config_tester = ConfigTester(self, config_class=MllamaTextConfig, has_text_modality=True)
@unittest.skip("Mllama needs a different model prefix to loadd saved checkpoints")
def test_model_base_model_prefix(self):
pass
|
MllamaForCausalLMModelTest
|
python
|
plotly__plotly.py
|
tests/test_core/test_figure_messages/test_on_change.py
|
{
"start": 115,
"end": 8442
}
|
class ____(TestCase):
def setUp(self):
# Construct initial scatter object
self.figure = go.Figure(
data=[
go.Scatter(y=[3, 2, 1], marker={"color": "green"}),
go.Bar(y=[3, 2, 1, 0, -1], marker={"opacity": 0.5}),
],
layout={"xaxis": {"range": [-1, 4]}, "width": 1000},
frames=[go.Frame(layout={"yaxis": {"title": "f1"}})],
)
# on_change validation
# --------------------
def test_raise_if_no_figure(self):
scatt = go.Scatter()
fn = MagicMock()
with pytest.raises(ValueError):
scatt.on_change(fn, "x")
def test_raise_on_frame_hierarchy(self):
fn = MagicMock()
with pytest.raises(ValueError):
self.figure.frames[0].layout.xaxis.on_change(fn, "range")
def test_validate_property_path_nested_1(self):
fn = MagicMock()
with pytest.raises(ValueError):
self.figure.layout.xaxis.on_change(fn, "bogus")
def test_validate_property_path_nested_2(self):
fn = MagicMock()
with pytest.raises(ValueError):
self.figure.layout.on_change(fn, "xaxis.title_font.bogus")
# Python triggered changes
# ------------------------
def test_single_prop_callback_on_assignment(self):
# Install callbacks on 'x', and 'y' property of first trace
fn_x = MagicMock()
fn_y = MagicMock()
self.figure.data[0].on_change(fn_x, "x")
self.figure.data[0].on_change(fn_y, "y")
# Setting x and y on second trace does not trigger callback
self.figure.data[1].x = [1, 2, 3]
self.figure.data[1].y = [1, 2, 3]
self.assertFalse(fn_x.called)
self.assertFalse(fn_y.called)
# Set x on first trace
self.figure.data[0].x = [10, 20, 30]
fn_x.assert_called_once_with(self.figure.data[0], (10, 20, 30))
self.assertFalse(fn_y.called)
# Set y on first trace
self.figure.data[0].y = [11, 22, 33]
fn_y.assert_called_once_with(self.figure.data[0], (11, 22, 33))
def test_multi_prop_callback_on_assignment_trace(self):
# Register callback if either 'x' or 'y' changes on first trace
fn = MagicMock()
self.figure.data[0].on_change(fn, "x", "y")
# Perform assignment on one of the properties
self.figure.data[0].x = [11, 22, 33]
# Check function called once with new value of x and old value of y
fn.assert_called_once_with(self.figure.data[0], (11, 22, 33), (3, 2, 1))
def test_multi_prop_callback_on_assignment_layout(self):
fn_range = MagicMock()
# Register callback if either axis range is changed. Both tuple and
# dot syntax are supported for nested properties
self.figure.layout.on_change(fn_range, ("xaxis", "range"), "yaxis.range")
self.figure.layout.xaxis.range = [-10, 10]
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), None)
def test_multi_prop_callback_on_assignment_layout_nested(self):
fn_title_font = MagicMock()
fn_xaxis = MagicMock()
fn_layout = MagicMock()
# Register callback on change to family property under title_font
self.figure.layout.xaxis.title.font.on_change(fn_title_font, "family")
# Register callback on the range and title_font.family properties
# under xaxis
self.figure.layout.xaxis.on_change(fn_xaxis, "range", "title.font.family")
# Register callback on xaxis object itself
self.figure.layout.on_change(fn_layout, "xaxis")
# Assign a new xaxis range and title.font.family
self.figure.layout.xaxis.title.font.family = "courier"
# Check that all callbacks were executed once
fn_title_font.assert_called_once_with(
self.figure.layout.xaxis.title.font, "courier"
)
fn_xaxis.assert_called_once_with(self.figure.layout.xaxis, (-1, 4), "courier")
fn_layout.assert_called_once_with(
self.figure.layout,
go.layout.XAxis(range=(-1, 4), title={"font": {"family": "courier"}}),
)
def test_prop_callback_nested_arrays(self):
# Initialize updatemenus and buttons
self.figure.layout.updatemenus = [{}, {}, {}]
self.figure.layout.updatemenus[2].buttons = [{}, {}]
self.figure.layout.updatemenus[2].buttons[1].label = "button 1"
self.figure.layout.updatemenus[2].buttons[1].method = "relayout"
# Register method callback
fn_button = MagicMock()
fn_layout = MagicMock()
self.figure.layout.updatemenus[2].buttons[1].on_change(fn_button, "method")
self.figure.layout.on_change(fn_layout, "updatemenus[2].buttons[1].method")
# Update button method
self.figure.layout.updatemenus[2].buttons[1].method = "restyle"
# Check that both callbacks are called once
fn_button.assert_called_once_with(
self.figure.layout.updatemenus[2].buttons[1], "restyle"
)
fn_layout.assert_called_once_with(self.figure.layout, "restyle")
def test_callback_on_update(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range")
self.figure.update({"layout": {"yaxis": {"range": [11, 22]}}})
fn_range.assert_called_once_with(self.figure.layout, (-1, 4), (11, 22))
def test_callback_on_update_single_call(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
self.figure.update(
{"layout": {"xaxis": {"range": [-10, 10]}, "yaxis": {"range": [11, 22]}}}
)
# Even though both properties changed, callback should be called
# only once with the new value of both properties
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)
def test_callback_on_batch_update(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
with self.figure.batch_update():
self.figure.layout.xaxis.range = [-10, 10]
self.figure.layout.width = 500
# Check fn not called before context exits
self.assertFalse(fn_range.called)
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), None, 500)
def test_callback_on_batch_animate(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
with self.figure.batch_animate():
self.figure["layout.xaxis.range"] = [-10, 10]
self.figure[("layout", "yaxis", "range")] = (11, 22)
# Check fn not called before context exits
self.assertFalse(fn_range.called)
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)
def test_callback_on_plotly_relayout(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
self.figure.plotly_relayout(
relayout_data={"xaxis.range": [-10, 10], "yaxis.range": [11, 22]}
)
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)
def test_callback_on_plotly_restyle(self):
# Register callback if either 'x' or 'y' changes on first trace
fn = MagicMock()
self.figure.data[0].on_change(fn, "x", "y")
        # Perform assignment on one of the properties
self.figure.plotly_restyle(
{"x": [[11, 22, 33], [1, 11, 111]]}, trace_indexes=[0, 1]
)
# Check function called once with new value of x and old value of y
fn.assert_called_once_with(self.figure.data[0], (11, 22, 33), (3, 2, 1))
def test_callback_on_plotly_update(self):
fn_range = MagicMock()
self.figure.layout.on_change(fn_range, "xaxis.range", "yaxis.range", "width")
self.figure.plotly_update(
restyle_data={"marker.color": "blue"},
relayout_data={"xaxis.range": [-10, 10], "yaxis.range": [11, 22]},
)
fn_range.assert_called_once_with(self.figure.layout, (-10, 10), (11, 22), 1000)
|
TestOnChangeCallbacks
|
python
|
fluentpython__example-code
|
20-descriptor/descriptorkinds.py
|
{
"start": 4961,
"end": 5231
}
|
class ____: # <1>
"""a.k.a. data descriptor or enforced descriptor"""
def __get__(self, instance, owner):
print_args('get', self, instance, owner) # <2>
def __set__(self, instance, value):
print_args('set', self, instance, value)
|
Overriding
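A self-contained sketch (my own toy classes, not the book's print_args helper) of why this is called an overriding/data descriptor: defining both __get__ and __set__ makes the descriptor win over a same-named entry in the instance __dict__.
class Logged:
    def __get__(self, instance, owner):
        return "from descriptor"
    def __set__(self, instance, value):
        instance.__dict__["_stored"] = value
class Managed:
    attr = Logged()
m = Managed()
m.attr = 42                    # routed through Logged.__set__
m.__dict__["attr"] = "shadow"  # ignored on lookup: data descriptors take precedence
assert m.attr == "from descriptor"
assert m.__dict__["_stored"] == 42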
|
python
|
mlflow__mlflow
|
mlflow/pyfunc/model.py
|
{
"start": 10712,
"end": 12146
}
|
class ____:
"""
A collection of artifacts that a :class:`~PythonModel` can use when performing inference.
:class:`~PythonModelContext` objects are created *implicitly* by the
:func:`save_model() <mlflow.pyfunc.save_model>` and
:func:`log_model() <mlflow.pyfunc.log_model>` persistence methods, using the contents specified
by the ``artifacts`` parameter of these methods.
"""
def __init__(self, artifacts, model_config):
"""
Args:
artifacts: A dictionary of ``<name, artifact_path>`` entries, where ``artifact_path``
is an absolute filesystem path to a given artifact.
model_config: The model configuration to make available to the model at
loading time.
"""
self._artifacts = artifacts
self._model_config = model_config
@property
def artifacts(self):
"""
A dictionary containing ``<name, artifact_path>`` entries, where ``artifact_path`` is an
absolute filesystem path to the artifact.
"""
return self._artifacts
@property
def model_config(self):
"""
A dictionary containing ``<config, value>`` entries, where ``config`` is the name
of the model configuration keys and ``value`` is the value of the given configuration.
"""
return self._model_config
@deprecated("ResponsesAgent", "3.0.0")
|
PythonModelContext
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/vertex_ai/test_generative_model.py
|
{
"start": 2025,
"end": 3186
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("generative_model.GenerativeModelHook"))
def test_execute(self, mock_hook):
prompt = "In 10 words or less, what is Apache Airflow?"
pretrained_model = "textembedding-gecko"
with pytest.warns(AirflowProviderDeprecationWarning):
op = TextEmbeddingModelGetEmbeddingsOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT,
location=GCP_LOCATION,
prompt=prompt,
pretrained_model=pretrained_model,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context={"ti": mock.MagicMock()})
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.text_embedding_model_get_embeddings.assert_called_once_with(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
prompt=prompt,
pretrained_model=pretrained_model,
)
|
TestVertexAITextEmbeddingModelGetEmbeddingsOperator
|
python
|
django-extensions__django-extensions
|
tests/testapp/jobs/weekly/test_weekly_job.py
|
{
"start": 122,
"end": 229
}
|
class ____(WeeklyJob):
help = "My sample weekly job."
def execute(self):
WEEKLY_JOB_MOCK()
|
Job
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/loops/fit_loop.py
|
{
"start": 2370,
"end": 24161
}
|
class ____(_Loop):
"""This loop is the top-level loop where training starts.
It simply counts the epochs and iterates from one to the next by calling ``TrainingEpochLoop.run()`` in its
``advance()`` method.
Example::
# FitLoop
for epoch in range(max_epochs):
# TrainingEpochLoop
for batch_idx, batch in enumerate(train_dataloader):
loss = lightning_module.training_step(batch, batch_idx)
...
# ValidationEpochLoop
for batch_idx, batch in enumerate(val_dataloader):
lightning_module.validation_step(batch, batch_idx)
...
...
...
Args:
min_epochs: The minimum number of epochs
max_epochs: The maximum number of epochs, can be set -1 to turn this limit off
"""
def __init__(
self,
trainer: "pl.Trainer",
min_epochs: Optional[int] = 0,
max_epochs: Optional[int] = None,
) -> None:
super().__init__(trainer)
if isinstance(max_epochs, int) and max_epochs < -1:
# Allow max_epochs to be zero, since this will be handled by fit_loop.done
raise MisconfigurationException(
f"`max_epochs` must be a non-negative integer or -1. You passed in {max_epochs}."
)
self.max_epochs = max_epochs
self.min_epochs = min_epochs
self.epoch_loop = _TrainingEpochLoop(trainer)
self.epoch_progress = _Progress()
self.max_batches: Union[int, float] = float("inf")
self._data_source = _DataLoaderSource(None, "train_dataloader")
self._combined_loader: Optional[CombinedLoader] = None
self._combined_loader_states_to_load: list[dict[str, Any]] = []
self._data_fetcher: Optional[_DataFetcher] = None
self._last_train_dl_reload_epoch = float("-inf")
self._restart_stage = RestartStage.NONE
@property
def total_batch_idx(self) -> int:
"""Returns the current batch index (across epochs)"""
return self.epoch_loop.total_batch_idx
@property
def batch_idx(self) -> int:
"""Returns the current batch index (within this epoch)"""
return self.epoch_loop.batch_idx
@property
def min_steps(self) -> Optional[int]:
"""Returns the minimum number of steps to run."""
return self.epoch_loop.min_steps
@property
def max_steps(self) -> int:
"""Returns the maximum number of steps to run."""
return self.epoch_loop.max_steps
@_Loop.restarting.setter
@override
def restarting(self, restarting: bool) -> None:
# if the last epoch completely finished, we are not actually restarting
values = self.epoch_progress.current.ready, self.epoch_progress.current.started
epoch_unfinished = any(v != self.epoch_progress.current.processed for v in values)
restarting = restarting and epoch_unfinished or self._iteration_based_training()
_Loop.restarting.fset(self, restarting) # call the parent setter
@property
def _skip_backward(self) -> bool:
"""Determines whether the loop will skip backward during automatic optimization."""
return self.epoch_loop.automatic_optimization._skip_backward
@_skip_backward.setter
def _skip_backward(self, value: bool) -> None:
"""Determines whether the loop will skip backward during automatic optimization."""
self.epoch_loop.automatic_optimization._skip_backward = value
@property
def _results(self) -> _ResultCollection:
if self.trainer.training:
return self.epoch_loop._results
if self.trainer.validating:
return self.epoch_loop.val_loop._results
raise RuntimeError("`FitLoop._results` property isn't defined. Accessed outside of scope")
@property
def _can_stop_early(self) -> bool:
met_min_epochs = self.epoch_progress.current.processed >= self.min_epochs if self.min_epochs else True
met_min_steps = self.epoch_loop.global_step >= self.min_steps if self.min_steps else True
return met_min_epochs and met_min_steps
@property
def _should_reload_train_dl(self) -> bool:
"""Check if train dataloader should be reloaded."""
n_epochs = self.trainer.reload_dataloaders_every_n_epochs
return n_epochs and self.trainer.current_epoch - self._last_train_dl_reload_epoch >= n_epochs
@property
def done(self) -> bool:
"""Evaluates when to leave the loop."""
if self.max_batches == 0:
rank_zero_info("`Trainer.fit` stopped: No training batches.")
return True
        # TODO: move step tracking inside the training loop and move part of these conditions inside the training loop
stop_steps = _is_max_limit_reached(self.epoch_loop.global_step, self.max_steps)
if stop_steps:
rank_zero_info(f"`Trainer.fit` stopped: `max_steps={self.max_steps!r}` reached.")
return True
# `processed` is increased before `on_train_epoch_end`, the hook where checkpoints are typically saved.
# we use it here because the checkpoint data won't have `completed` increased yet
assert isinstance(self.max_epochs, int)
stop_epochs = _is_max_limit_reached(self.epoch_progress.current.processed, self.max_epochs)
if stop_epochs:
# in case they are not equal, override so `trainer.current_epoch` has the expected value
self.epoch_progress.current.completed = self.epoch_progress.current.processed
rank_zero_info(f"`Trainer.fit` stopped: `max_epochs={self.max_epochs!r}` reached.")
return True
if self.trainer.should_stop and self._can_stop_early:
rank_zero_debug("`Trainer.fit` stopped: `trainer.should_stop` was set.")
return True
return False
@property
def skip(self) -> bool:
"""Whether we should skip the training and immediately return from the call to :meth:`run`."""
# if `limit_train_batches == 0` then `setup_data` won't set the `self.max_batches` attribute (checked in `done`)
# so we cannot use it solely
return self.done or self.trainer.limit_train_batches == 0
def run(self) -> None:
self.setup_data()
if self.skip:
return
self.reset()
self.on_run_start()
while not self.done:
try:
self.on_advance_start()
self.advance()
self.on_advance_end()
except StopIteration:
break
finally:
self.on_iteration_done()
self._restarting = False
self.on_run_end()
def setup_data(self) -> None:
if self._combined_loader is not None and not self._should_reload_train_dl:
return
trainer = self.trainer
pl_module = trainer.lightning_module
if trainer.limit_train_batches == 0 or not is_overridden("training_step", pl_module):
return
log.debug(f"{self.__class__.__name__}: resetting train dataloader")
source = self._data_source
train_dataloader = _request_dataloader(source)
trainer.strategy.barrier("train_dataloader()")
if not isinstance(train_dataloader, CombinedLoader):
combined_loader = CombinedLoader(train_dataloader, "max_size_cycle")
else:
combined_loader = train_dataloader
if trainer.overfit_batches > 0:
_resolve_overfit_batches(combined_loader, mode=RunningStage.TRAINING)
trainer_fn = TrainerFn.FITTING
stage = RunningStage.TRAINING
dataloaders = []
for dl in combined_loader.flattened:
_check_dataloader_iterable(dl, source, trainer_fn)
dl = _process_dataloader(trainer, trainer_fn, stage, dl)
dataloaders.append(dl)
combined_loader.flattened = dataloaders
self._combined_loader = combined_loader
allow_zero_length = pl_module.allow_zero_length_dataloader_with_multiple_devices
if trainer.datamodule is not None:
allow_zero_length |= trainer.datamodule.allow_zero_length_dataloader_with_multiple_devices
limits = []
for dl in combined_loader.flattened:
# determine number of batches
length = len(dl) if has_len_all_ranks(dl, trainer.strategy, allow_zero_length) else float("inf")
num_batches = _parse_num_batches(stage, length, trainer.limit_train_batches)
limits.append(num_batches)
combined_loader.limits = limits
self._load_combined_loader_states()
self._data_fetcher = _select_data_fetcher(trainer, RunningStage.TRAINING)
self._data_fetcher.setup(combined_loader)
iter(self._data_fetcher) # creates the iterator inside the fetcher
max_batches = sized_len(combined_loader)
self.max_batches = max_batches if max_batches is not None else float("inf")
has_len_all_ranks_ = has_len_all_ranks(combined_loader, trainer.strategy, allow_zero_length)
if self.max_batches == 0:
return
# store epoch of dataloader reset for reload_dataloaders_every_n_epochs
self._last_train_dl_reload_epoch = trainer.current_epoch
# If time-based validation is enabled, disable batch-based scheduling here.
# Use None to clearly signal "no batch-based validation"; wall-time logic will run elsewhere.
if getattr(trainer, "_val_check_time_interval", None) is not None:
trainer.val_check_batch = None
trainer._train_start_time = time.monotonic()
trainer._last_val_time = trainer._train_start_time
elif isinstance(trainer.val_check_interval, int):
trainer.val_check_batch = trainer.val_check_interval
if trainer.val_check_batch > self.max_batches and trainer.check_val_every_n_epoch is not None:
raise ValueError(
f" `val_check_interval` ({trainer.val_check_interval}) must be less than or equal"
f" to the number of the training batches ({self.max_batches})."
" If you want to disable validation set `limit_val_batches` to 0.0 instead."
" If you want to validate based on the total training batches, set `check_val_every_n_epoch=None`."
)
else:
if not has_len_all_ranks_:
if trainer.val_check_interval == 1.0:
trainer.val_check_batch = float("inf")
else:
raise MisconfigurationException(
"When using an IterableDataset for `train_dataloader`,"
" `Trainer(val_check_interval)` must be time based, `1.0` or an int. An int k specifies"
" checking validation every k training batches."
)
else:
trainer.val_check_batch = int(self.max_batches * trainer.val_check_interval)
trainer.val_check_batch = max(1, trainer.val_check_batch)
if trainer.loggers and self.max_batches < trainer.log_every_n_steps and not trainer.fast_dev_run:
rank_zero_warn(
f"The number of training batches ({self.max_batches}) is smaller than the logging interval"
f" Trainer(log_every_n_steps={trainer.log_every_n_steps}). Set a lower value for log_every_n_steps if"
" you want to see logs for the training epoch.",
category=PossibleUserWarning,
)
@property
def restarted_on_epoch_start(self) -> bool:
return self._restart_stage == RestartStage.RESTARTED_ON_EPOCH_START
@property
def restarted_mid_epoch(self) -> bool:
return self._restart_stage == RestartStage.RESTARTED_MID_EPOCH
@property
def restarted_on_epoch_end(self) -> bool:
return self._restart_stage == RestartStage.RESTARTED_ON_EPOCH_END
@property
def resumed_on_epoch_end(self) -> bool:
# This case happens when restarting from last without validation at
# the end of epoch. In this case self.restarting is False.
return self._restart_stage == RestartStage.RESUMED_ON_EPOCH_END
def update_restart_stage(self) -> None:
if (
self.restarting
and self.epoch_progress.total.started == self.epoch_progress.total.ready - 1
and self.epoch_progress.total.processed == self.epoch_progress.total.started
and self.epoch_progress.total.completed == self.epoch_progress.total.processed
):
self._restart_stage = RestartStage.RESTARTED_ON_EPOCH_START
elif (
self.restarting
and self.epoch_progress.total.started == self.epoch_progress.total.ready
and self.epoch_progress.total.processed == self.epoch_progress.total.started - 1
and self.epoch_progress.total.completed == self.epoch_progress.total.processed
):
self._restart_stage = RestartStage.RESTARTED_MID_EPOCH
elif (
self.restarting
and self.epoch_progress.total.started == self.epoch_progress.total.ready
and self.epoch_progress.total.processed == self.epoch_progress.total.started
and self.epoch_progress.total.completed == self.epoch_progress.total.processed - 1
):
self._restart_stage = RestartStage.RESTARTED_ON_EPOCH_END
elif (
self._loaded_from_state_dict
and self.epoch_progress.total.started == self.epoch_progress.total.ready
and self.epoch_progress.total.processed == self.epoch_progress.total.started
and self.epoch_progress.total.completed == self.epoch_progress.total.processed - 1
):
self._restart_stage = RestartStage.RESUMED_ON_EPOCH_END
else:
self._restart_stage = RestartStage.NONE
self.epoch_loop.update_restart_stage()
def reset_restart_stage(self) -> None:
self._restart_stage = RestartStage.NONE
def reset(self) -> None:
"""Resets the internal state of this loop."""
assert self.trainer.model is not None
torch.set_grad_enabled(True)
self.update_restart_stage()
if self.restarted_on_epoch_start:
self.epoch_progress.reset_on_restart()
if self.resumed_on_epoch_end:
# when restarting from last without validation at end of epoch,
# self.restarting is False but it's still resuming
self.epoch_progress.increment_completed()
if (
self.epoch_loop.restarted_on_train_batch_end
and self.restarted_mid_epoch
and self.epoch_loop.batch_progress.is_last_batch
):
self.epoch_progress.increment_processed()
self.epoch_progress.increment_completed()
if (
self.epoch_loop.restarted_on_train_batch_end
and self.epoch_loop.batch_progress.is_last_batch
and not self.restarted_mid_epoch
and not self.epoch_loop.val_loop.batch_progress.is_last_batch
):
self.epoch_progress.increment_completed()
def on_run_start(self) -> None:
"""Calls the ``on_train_start`` hook."""
# update the current_epoch in-case of checkpoint reload
if not self._iteration_based_training():
self.epoch_progress.current.completed = self.epoch_progress.current.processed
trainer = self.trainer
# reload the evaluation dataloaders too for proper display in the progress bar
if self.epoch_loop._should_check_val_epoch() and trainer.val_dataloaders is None:
trainer.validating = True
self.epoch_loop.val_loop.setup_data()
trainer.training = True
# Check for modules in eval mode at training start
self._warn_if_modules_in_eval_mode()
call._call_callback_hooks(trainer, "on_train_start")
call._call_lightning_module_hook(trainer, "on_train_start")
call._call_strategy_hook(trainer, "on_train_start")
def on_advance_start(self) -> None:
"""Prepares the dataloader for training and calls the hook ``on_train_epoch_start``"""
trainer = self.trainer
# might need to setup data again depending on `trainer.reload_dataloaders_every_n_epochs`
self.setup_data()
# update the epoch value for all samplers
assert self._combined_loader is not None
for i, dl in enumerate(self._combined_loader.flattened):
_set_sampler_epoch(dl, self.epoch_progress.current.processed)
if not self.restarted_mid_epoch and not self.restarted_on_epoch_end:
if not self.restarted_on_epoch_start:
self.epoch_progress.increment_ready()
call._call_callback_hooks(trainer, "on_train_epoch_start")
call._call_lightning_module_hook(trainer, "on_train_epoch_start")
self.epoch_progress.increment_started()
def advance(self) -> None:
"""Runs one whole epoch."""
log.debug(f"{type(self).__name__}: advancing loop")
combined_loader = self._combined_loader
assert combined_loader is not None
if combined_loader._mode == "sequential":
raise ValueError(
f'`{type(self).__name__}` does not support the `CombinedLoader(mode="sequential")` mode.'
f" The available modes are: {[m for m in _SUPPORTED_MODES if m != 'sequential']}"
)
with self.trainer.profiler.profile("run_training_epoch"):
assert self._data_fetcher is not None
self.epoch_loop.run(self._data_fetcher)
def on_advance_end(self) -> None:
trainer = self.trainer
# inform logger the batch loop has finished
trainer._logger_connector.epoch_end_reached()
self.epoch_progress.increment_processed()
# call train epoch end hooks
# we always call callback hooks first, but here we need to make an exception for the callbacks that
# monitor a metric, otherwise they wouldn't be able to monitor a key logged in
# `LightningModule.on_train_epoch_end`
call._call_callback_hooks(trainer, "on_train_epoch_end", monitoring_callbacks=False)
call._call_lightning_module_hook(trainer, "on_train_epoch_end")
call._call_callback_hooks(trainer, "on_train_epoch_end", monitoring_callbacks=True)
trainer._logger_connector.on_epoch_end()
if not self.restarting and self.epoch_loop._num_ready_batches_reached():
# since metric-based schedulers require access to metrics and those are not currently saved in the
# checkpoint, the plateau schedulers shouldn't be updated
self.epoch_loop.update_lr_schedulers("epoch", update_plateau_schedulers=not self.restarting)
# we manually decrease here because loggers expect that the same step is used when logging epoch-end metrics
# even when the batch loop has finished
self.epoch_loop._batches_that_stepped -= 1
# log epoch metrics
trainer._logger_connector.update_train_epoch_metrics()
self.epoch_loop._batches_that_stepped += 1
self.epoch_progress.increment_completed()
if trainer.received_sigterm:
raise SIGTERMException
def on_run_end(self) -> None:
"""Calls the ``on_train_end`` hook."""
log.debug(f"{self.__class__.__name__}: train run ended")
trainer = self.trainer
call._call_callback_hooks(trainer, "on_train_end")
call._call_lightning_module_hook(trainer, "on_train_end")
call._call_strategy_hook(trainer, "on_train_end")
def teardown(self) -> None:
if self._data_fetcher is not None:
self._data_fetcher.teardown()
self._data_fetcher = None
self.epoch_loop.teardown()
@override
def on_save_checkpoint(self) -> dict:
state_dict = super().on_save_checkpoint()
if self._combined_loader is not None and (loader_states := self._combined_loader._state_dicts()):
state_dict["combined_loader"] = loader_states
return state_dict
@override
def on_load_checkpoint(self, state_dict: dict) -> None:
self._combined_loader_states_to_load = state_dict.get("combined_loader", [])
super().on_load_checkpoint(state_dict)
def _warn_if_modules_in_eval_mode(self) -> None:
"""Warn if any modules are in eval mode at the start of training."""
model = self.trainer.lightning_module
eval_modules = [name for name, module in model.named_modules() if not module.training]
if eval_modules:
rank_zero_warn(
f"Found {len(eval_modules)} module(s) in eval mode at the start of training."
" This may lead to unexpected behavior during training. If this is intentional,"
" you can ignore this warning.",
category=PossibleUserWarning,
)
def _should_accumulate(self) -> bool:
"""Whether the gradients should be accumulated."""
return self.epoch_loop._should_accumulate()
def _iteration_based_training(self) -> bool:
return self.trainer.max_steps != -1
def _load_combined_loader_states(self) -> None:
if not self.restarting or not self._combined_loader_states_to_load or self._combined_loader is None:
return
self._combined_loader._load_state_dicts(self._combined_loader_states_to_load)
self._combined_loader_states_to_load = [] # release memory
|
_FitLoop
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_x25519.py
|
{
"start": 1402,
"end": 14997
}
|
class ____:
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("asymmetric", "X25519", "rfc7748.txt"),
load_nist_vectors,
),
)
def test_rfc7748(self, vector, backend):
private = binascii.unhexlify(vector["input_scalar"])
public = binascii.unhexlify(vector["input_u"])
shared_key = binascii.unhexlify(vector["output_u"])
private_key = X25519PrivateKey.from_private_bytes(private)
public_key = X25519PublicKey.from_public_bytes(public)
computed_shared_key = private_key.exchange(public_key)
assert computed_shared_key == shared_key
def test_rfc7748_1000_iteration(self, backend):
old_private = private = public = binascii.unhexlify(
b"0900000000000000000000000000000000000000000000000000000000000000"
)
shared_key = binascii.unhexlify(
b"684cf59ba83309552800ef566f2f4d3c1c3887c49360e3875f2eb94d99532c51"
)
private_key = X25519PrivateKey.from_private_bytes(private)
public_key = X25519PublicKey.from_public_bytes(public)
for _ in range(1000):
computed_shared_key = private_key.exchange(public_key)
private_key = X25519PrivateKey.from_private_bytes(
computed_shared_key
)
public_key = X25519PublicKey.from_public_bytes(old_private)
old_private = computed_shared_key
assert computed_shared_key == shared_key
def test_null_shared_key_raises_error(self, backend):
"""
The vector used here is taken from wycheproof's x25519 test vectors
"""
public = binascii.unhexlify(
"5f9c95bca3508c24b1d0b1559c83ef5b04445cc4581c8e86d8224eddd09f1157"
)
private = binascii.unhexlify(
"78f1e8edf14481b389448dac8f59c70b038e7cf92ef2c7eff57a72466e115296"
)
private_key = X25519PrivateKey.from_private_bytes(private)
public_key = X25519PublicKey.from_public_bytes(public)
with pytest.raises(ValueError):
private_key.exchange(public_key)
def test_public_bytes_bad_args(self, backend):
key = X25519PrivateKey.generate().public_key()
with pytest.raises(TypeError):
key.public_bytes(
None, # type: ignore[arg-type]
serialization.PublicFormat.Raw,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.Raw
)
with pytest.raises(TypeError):
key.public_bytes(
serialization.Encoding.DER,
None, # type: ignore[arg-type]
)
with pytest.raises(TypeError):
key.public_bytes(
serialization.Encoding.SMIME,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
# These vectors are also from RFC 7748
# https://tools.ietf.org/html/rfc7748#section-6.1
@pytest.mark.parametrize(
("private_bytes", "public_bytes"),
[
(
binascii.unhexlify(
b"77076d0a7318a57d3c16c17251b26645df4c2f87ebc0992ab177fba"
b"51db92c2a"
),
binascii.unhexlify(
b"8520f0098930a754748b7ddcb43ef75a0dbf3a0d26381af4eba4a98"
b"eaa9b4e6a"
),
),
(
binascii.unhexlify(
b"5dab087e624a8a4b79e17f8b83800ee66f3bb1292618b6fd1c2f8b2"
b"7ff88e0eb"
),
binascii.unhexlify(
b"de9edb7d7b7dc1b4d35b61c2ece435373f8343c85b78674dadfc7e1"
b"46f882b4f"
),
),
],
)
def test_pub_priv_bytes_raw(self, private_bytes, public_bytes, backend):
private_key = X25519PrivateKey.from_private_bytes(private_bytes)
assert (
private_key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
== private_bytes
)
assert private_key.private_bytes_raw() == private_bytes
assert (
private_key.public_key().public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
== public_bytes
)
assert private_key.public_key().public_bytes_raw() == public_bytes
public_key = X25519PublicKey.from_public_bytes(public_bytes)
assert (
public_key.public_bytes(
serialization.Encoding.Raw, serialization.PublicFormat.Raw
)
== public_bytes
)
assert public_key.public_bytes_raw() == public_bytes
def test_generate(self, backend):
key = X25519PrivateKey.generate()
assert key
assert key.public_key()
def test_invalid_type_exchange(self, backend):
key = X25519PrivateKey.generate()
with pytest.raises(TypeError):
key.exchange(object()) # type: ignore[arg-type]
def test_invalid_length_from_public_bytes(self, backend):
with pytest.raises(ValueError):
X25519PublicKey.from_public_bytes(b"a" * 31)
with pytest.raises(ValueError):
X25519PublicKey.from_public_bytes(b"a" * 33)
def test_invalid_length_from_private_bytes(self, backend):
with pytest.raises(ValueError):
X25519PrivateKey.from_private_bytes(b"a" * 31)
with pytest.raises(ValueError):
X25519PrivateKey.from_private_bytes(b"a" * 33)
def test_invalid_private_bytes(self, backend):
key = X25519PrivateKey.generate()
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
None, # type: ignore[arg-type]
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
DummyKeySerializationEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.PKCS8,
DummyKeySerializationEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
with pytest.raises(TypeError):
key.private_bytes(None, None, None) # type: ignore[arg-type]
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.Raw,
None, # type: ignore[arg-type]
None, # type: ignore[arg-type]
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
object(), # type: ignore[arg-type]
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"a" * 1024),
)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.SMIME,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
def test_invalid_public_bytes(self, backend):
key = X25519PrivateKey.generate().public_key()
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.Raw,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.PKCS1
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.Raw
)
@pytest.mark.parametrize(
("encoding", "fmt", "encryption", "passwd", "load_func"),
[
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(b"password"),
b"password",
serialization.load_der_private_key,
),
(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_pem_private_key,
),
(
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
None,
serialization.load_der_private_key,
),
],
)
def test_round_trip_private_serialization(
self, encoding, fmt, encryption, passwd, load_func, backend
):
key = X25519PrivateKey.generate()
serialized = key.private_bytes(encoding, fmt, encryption)
loaded_key = load_func(serialized, passwd, backend)
assert isinstance(loaded_key, X25519PrivateKey)
def test_invalid_public_key_pem(self):
with pytest.raises(ValueError):
serialization.load_pem_public_key(
textwrap.dedent("""
-----BEGIN PUBLIC KEY-----
MCswBQYDK2VuAyIA////////////////////////////////////////////
-----END PUBLIC KEY-----""").encode()
)
def test_buffer_protocol(self, backend):
private_bytes = bytearray(os.urandom(32))
key = X25519PrivateKey.from_private_bytes(private_bytes)
assert (
key.private_bytes(
serialization.Encoding.Raw,
serialization.PrivateFormat.Raw,
serialization.NoEncryption(),
)
== private_bytes
)
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
def test_public_key_equality(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "X25519", "x25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None).public_key()
key2 = serialization.load_der_private_key(key_bytes, None).public_key()
key3 = X25519PrivateKey.generate().public_key()
assert key1 == key2
assert key1 != key3
assert key1 != object()
with pytest.raises(TypeError):
key1 < key2 # type: ignore[operator]
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
def test_public_key_copy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "X25519", "x25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None).public_key()
key2 = copy.copy(key1)
assert key1 == key2
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
def test_public_key_deepcopy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "X25519", "x25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = (
x25519.X25519PublicKey,
serialization.load_der_private_key(key_bytes, None).public_key(),
)
key2 = copy.deepcopy(key1)
assert key1 == key2
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
def test_private_key_copy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "X25519", "x25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None)
key2 = copy.copy(key1)
assert key1 == key2
@pytest.mark.supported(
only_if=lambda backend: backend.x25519_supported(),
skip_message="Requires OpenSSL with X25519 support",
)
def test_private_key_deepcopy(backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "X25519", "x25519-pkcs8.der"),
lambda derfile: derfile.read(),
mode="rb",
)
key1 = serialization.load_der_private_key(key_bytes, None)
key2 = copy.deepcopy(key1)
assert key1 == key2
|
TestX25519Exchange
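A minimal sketch (not part of the dataset row above) of the X25519 exchange API these tests exercise, assuming the `cryptography` package is installed: two freshly generated key pairs derive the same 32-byte shared secret.
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
alice = X25519PrivateKey.generate()
bob = X25519PrivateKey.generate()
# Each side combines its own private key with the peer's public key.
alice_shared = alice.exchange(bob.public_key())
bob_shared = bob.exchange(alice.public_key())
assert alice_shared == bob_shared
assert len(alice_shared) == 32  # X25519 shared secrets are 32 bytes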
|
python
|
protocolbuffers__protobuf
|
upb/cmake/staleness_test.py
|
{
"start": 2041,
"end": 2317
}
|
class ____(unittest.TestCase):
def testFilesMatch(self):
errors = staleness_test_lib.CheckFilesMatch(config)
self.assertFalse(errors, errors)
if len(sys.argv) > 1 and sys.argv[1] == "--fix":
staleness_test_lib.FixFiles(config)
else:
unittest.main()
|
TestFilesMatch
|
python
|
pytorch__pytorch
|
test/distributed/_composable/fsdp/test_fully_shard_compile.py
|
{
"start": 1971,
"end": 3913
}
|
class ____(FSDPTest):
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skip_if_lt_x_gpu(2)
def test_disable_compiling_hooks(self):
self.run_subtests(
{
"skip_fsdp_hooks": [False, True],
},
self._test_disable_compiling_hooks,
)
def _test_disable_compiling_hooks(
self,
skip_fsdp_hooks: bool,
):
torch._dynamo.reset()
trace_rules_check_count = 0
HOOKS_FILE_NAME = "torch/distributed/fsdp/_fully_shard/_fsdp_state.py"
HOOK_WRAPPER_NAME = "fsdp_hook_wrapper"
def patched_trace_rules_check(*args, **kwargs):
nonlocal trace_rules_check_count
f_code = args[0]
if (
hasattr(f_code, "co_filename")
and f_code.co_filename.endswith(HOOKS_FILE_NAME)
and f_code.co_name != HOOK_WRAPPER_NAME
):
trace_rules_check_count += 1
return orig_trace_rules_check(*args, **kwargs)
original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks
orig_trace_rules_check = torch._dynamo.trace_rules.check
torch.distributed.barrier()
torch._dynamo.config.skip_fsdp_hooks = skip_fsdp_hooks
torch._dynamo.trace_rules.check = patched_trace_rules_check
model = MLP(4).to(device_type)
fully_shard(model)
model.compile()
model(torch.randn((4, 4), device=device_type))
torch.distributed.barrier()
torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks
torch._dynamo.trace_rules.check = orig_trace_rules_check
if skip_fsdp_hooks:
self.assertEqual(trace_rules_check_count, 0)
else:
self.assertTrue(trace_rules_check_count > 0)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
|
TestFullyShardCompileCompute
|
python
|
spack__spack
|
lib/spack/spack/vendor/macholib/mach_o.py
|
{
"start": 26096,
"end": 26346
}
|
class ____(Structure):
_fields_ = (("entryoff", p_uint64), ("stacksize", p_uint64))
def describe(self):
s = {}
s["entryoff"] = int(self.entryoff)
s["stacksize"] = int(self.stacksize)
return s
|
entry_point_command
|
python
|
Textualize__textual
|
src/textual/canvas.py
|
{
"start": 2248,
"end": 3309
}
|
class ____(Primitive):
"""A vertical line."""
origin: Offset
length: int
color: Color
line_type: CanvasLineType = "thin"
def render(self, canvas: Canvas) -> None:
x, y = self.origin
if x < 0 or x >= canvas.width:
return
line_type_index = _LINE_TYPE_INDEX[self.line_type]
box = canvas.box
_combine_quads = combine_quads
y_range = canvas.y_range(y, y + self.length)
if y in y_range:
box[y][x] = _combine_quads(box[y][x], (0, 0, line_type_index, 0))
bottom = y + self.length - 1
if bottom in y_range:
box[bottom][x] = _combine_quads(box[bottom][x], (line_type_index, 0, 0, 0))
line_quad = (line_type_index, 0, line_type_index, 0)
for box_y in canvas.y_range(y + 1, y + self.length - 1):
box[box_y][x] = _combine_quads(box[box_y][x], line_quad)
spans = canvas.spans
span = _Span(x, x + 1, self.color)
for y in y_range:
spans[y].append(span)
@dataclass
|
VerticalLine
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/execution_time/context.py
|
{
"start": 17478,
"end": 18488
}
|
class ____(_AssetRefResolutionMixin):
"""Wrapper to access an outlet asset event in template."""
key: BaseAssetUniqueKey
extra: dict[str, JsonValue] = attrs.Factory(dict)
asset_alias_events: list[AssetAliasEvent] = attrs.field(factory=list)
def add(self, asset: Asset | AssetRef, extra: dict[str, JsonValue] | None = None) -> None:
"""Add an AssetEvent to an existing Asset."""
if not isinstance(self.key, AssetAliasUniqueKey):
return
if isinstance(asset, AssetRef):
asset_key, asset_extra = self._resolve_asset_ref(asset)
else:
asset_key = AssetUniqueKey.from_asset(asset)
asset_extra = asset.extra
asset_alias_name = self.key.name
event = AssetAliasEvent(
source_alias_name=asset_alias_name,
dest_asset_key=asset_key,
dest_asset_extra=asset_extra,
extra=extra or {},
)
self.asset_alias_events.append(event)
|
OutletEventAccessor
|
python
|
pytest-dev__pytest
|
src/_pytest/pytester.py
|
{
"start": 6015,
"end": 6509
}
|
class ____:
def __init__(self, request: FixtureRequest) -> None:
self._request = request
def gethookrecorder(self, hook) -> HookRecorder:
hookrecorder = HookRecorder(hook._pm)
self._request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(values: Iterable[str]) -> list[str]:
"""Only return names from iterator values without a leading underscore."""
return [x for x in values if x[0] != "_"]
@final
|
PytestArg
|
python
|
doocs__leetcode
|
solution/0100-0199/0144.Binary Tree Preorder Traversal/Solution.py
|
{
"start": 192,
"end": 498
}
|
class ____:
def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
def dfs(root):
if root is None:
return
ans.append(root.val)
dfs(root.left)
dfs(root.right)
ans = []
dfs(root)
return ans
|
Solution
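As a hedged companion to the recursive solution above, here is a self-contained iterative preorder traversal using an explicit stack; the `TreeNode` class below is an assumption mirroring the usual LeetCode definition, not part of the row.
from typing import Optional
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
def preorder(root: Optional[TreeNode]) -> list[int]:
    ans, stack = [], [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        ans.append(node.val)
        stack.append(node.right)  # push right first so the left subtree is visited first
        stack.append(node.left)
    return ans
assert preorder(TreeNode(1, None, TreeNode(2, TreeNode(3)))) == [1, 2, 3]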
|
python
|
Netflix__metaflow
|
metaflow/plugins/timeout_decorator.py
|
{
"start": 239,
"end": 310
}
|
class ____(MetaflowException):
headline = "@timeout"
|
TimeoutException
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 220523,
"end": 220862
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("count", "state")
count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="count")
state = sgqlc.types.Field(sgqlc.types.non_null(CheckRunState), graphql_name="state")
|
CheckRunStateCount
|
python
|
cython__cython
|
Cython/Compiler/Nodes.py
|
{
"start": 16437,
"end": 17798
}
|
class ____(StatNode):
# include_file string or None
# verbatim_include string or None
# body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
if self.include_file or self.verbatim_include:
# Determine whether include should be late
stats = self.body.stats
if not env.directives['preliminary_late_includes_cy28']:
late = False
elif not stats:
# Special case: empty 'cdef extern' blocks are early
late = False
else:
late = all(isinstance(node, CVarDefNode) for node in stats)
env.add_include_file(self.include_file, self.verbatim_include, late)
def analyse_expressions(self, env):
# Allow C properties, inline methods, etc. also in external types.
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
|
CDefExternNode
|
python
|
python-pillow__Pillow
|
setup.py
|
{
"start": 8117,
"end": 41479
}
|
class ____(build_ext):
class ext_feature:
features = [
"zlib",
"jpeg",
"tiff",
"freetype",
"raqm",
"lcms",
"webp",
"jpeg2000",
"imagequant",
"xcb",
"avif",
]
required = {"jpeg", "zlib"}
vendor: set[str] = set()
def __init__(self) -> None:
self._settings: dict[str, str | bool | None] = {}
for f in self.features:
self.set(f, None)
def require(self, feat: str) -> bool:
return feat in self.required
def get(self, feat: str) -> str | bool | None:
return self._settings[feat]
def set(self, feat: str, value: str | bool | None) -> None:
self._settings[feat] = value
def want(self, feat: str) -> bool:
return self._settings[feat] is None
def want_vendor(self, feat: str) -> bool:
return feat in self.vendor
def __iter__(self) -> Iterator[str]:
yield from self.features
feature = ext_feature()
user_options = (
build_ext.user_options
+ [(f"disable-{x}", None, f"Disable support for {x}") for x in feature]
+ [(f"enable-{x}", None, f"Enable support for {x}") for x in feature]
+ [
(f"vendor-{x}", None, f"Use vendored version of {x}")
for x in ("raqm", "fribidi")
]
+ [
("disable-platform-guessing", None, "Disable platform guessing"),
("debug", None, "Debug logging"),
]
+ [("add-imaging-libs=", None, "Add libs to _imaging build")]
)
@staticmethod
def check_configuration(option: str, value: str) -> bool | None:
return True if value in configuration.get(option, []) else None
def initialize_options(self) -> None:
self.disable_platform_guessing = self.check_configuration(
"platform-guessing", "disable"
)
self.add_imaging_libs = ""
build_ext.initialize_options(self)
for x in self.feature:
setattr(self, f"disable_{x}", self.check_configuration(x, "disable"))
setattr(self, f"enable_{x}", self.check_configuration(x, "enable"))
for x in ("raqm", "fribidi"):
setattr(self, f"vendor_{x}", self.check_configuration(x, "vendor"))
if self.check_configuration("debug", "true"):
self.debug = True
self.parallel = configuration.get("parallel", [None])[-1]
def finalize_options(self) -> None:
build_ext.finalize_options(self)
if self.debug:
global DEBUG
DEBUG = True
if not self.parallel:
# If --parallel (or -j) wasn't specified, we want to reproduce the same
# behavior as before, that is, auto-detect the number of jobs.
self.parallel = None
cpu_count = os.cpu_count()
if cpu_count is not None:
try:
self.parallel = int(os.environ.get("MAX_CONCURRENCY", cpu_count))
except TypeError:
pass
for x in self.feature:
if getattr(self, f"disable_{x}"):
self.feature.set(x, False)
self.feature.required.discard(x)
_dbg("Disabling %s", x)
if getattr(self, f"enable_{x}"):
msg = f"Conflicting options: '-C {x}=enable' and '-C {x}=disable'"
raise ValueError(msg)
if x == "freetype":
_dbg("'-C freetype=disable' implies '-C raqm=disable'")
if getattr(self, "enable_raqm"):
msg = (
"Conflicting options: "
"'-C raqm=enable' and '-C freetype=disable'"
)
raise ValueError(msg)
setattr(self, "disable_raqm", True)
if getattr(self, f"enable_{x}"):
_dbg("Requiring %s", x)
self.feature.required.add(x)
if x == "raqm":
_dbg("'-C raqm=enable' implies '-C freetype=enable'")
self.feature.required.add("freetype")
for x in ("raqm", "fribidi"):
if getattr(self, f"vendor_{x}"):
if getattr(self, "disable_raqm"):
msg = f"Conflicting options: '-C {x}=vendor' and '-C raqm=disable'"
raise ValueError(msg)
if x == "fribidi" and not getattr(self, "vendor_raqm"):
msg = (
f"Conflicting options: '-C {x}=vendor' and not '-C raqm=vendor'"
)
raise ValueError(msg)
_dbg("Using vendored version of %s", x)
self.feature.vendor.add(x)
def _update_extension(
self,
name: str,
libraries: list[str] | list[str | bool | None],
define_macros: list[tuple[str, str | None]] | None = None,
sources: list[str] | None = None,
) -> None:
for extension in self.extensions:
if extension.name == name:
extension.libraries += libraries
if define_macros is not None:
extension.define_macros += define_macros
if sources is not None:
extension.sources += sources
if FUZZING_BUILD:
extension.language = "c++"
extension.extra_link_args = ["--stdlib=libc++"]
break
def _remove_extension(self, name: str) -> None:
for extension in self.extensions:
if extension.name == name:
self.extensions.remove(extension)
break
def get_macos_sdk_path(self) -> str | None:
try:
sdk_path = (
subprocess.check_output(["xcrun", "--show-sdk-path", "--sdk", "macosx"])
.strip()
.decode("latin1")
)
except Exception:
sdk_path = None
if (
not sdk_path
or sdk_path == "/Applications/Xcode.app/Contents/Developer"
"/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk"
):
commandlinetools_sdk_path = (
"/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk"
)
if os.path.exists(commandlinetools_sdk_path):
sdk_path = commandlinetools_sdk_path
return sdk_path
def get_ios_sdk_path(self) -> str:
try:
sdk = sys.implementation._multiarch.split("-")[-1]
_dbg("Using %s SDK", sdk)
return (
subprocess.check_output(["xcrun", "--show-sdk-path", "--sdk", sdk])
.strip()
.decode("latin1")
)
except Exception:
msg = "Unable to identify location of iOS SDK."
raise ValueError(msg)
def build_extensions(self) -> None:
library_dirs: list[str] = []
include_dirs: list[str] = []
pkg_config = None
if _cmd_exists(os.environ.get("PKG_CONFIG", "pkg-config")):
pkg_config = _pkg_config
#
# add configured kits
for root_name, lib_name in {
"AVIF_ROOT": "avif",
"JPEG_ROOT": "libjpeg",
"JPEG2K_ROOT": "libopenjp2",
"TIFF_ROOT": ("libtiff-5", "libtiff-4"),
"ZLIB_ROOT": "zlib",
"FREETYPE_ROOT": "freetype2",
"HARFBUZZ_ROOT": "harfbuzz",
"FRIBIDI_ROOT": "fribidi",
"RAQM_ROOT": "raqm",
"WEBP_ROOT": "libwebp",
"LCMS_ROOT": "lcms2",
"IMAGEQUANT_ROOT": "libimagequant",
}.items():
root = globals()[root_name]
if root is None and root_name in os.environ:
root_prefix = os.environ[root_name]
root = (
os.path.join(root_prefix, "lib"),
os.path.join(root_prefix, "include"),
)
if root is None and pkg_config:
if isinstance(lib_name, str):
_dbg("Looking for `%s` using pkg-config.", lib_name)
root = pkg_config(lib_name)
else:
for lib_name2 in lib_name:
_dbg("Looking for `%s` using pkg-config.", lib_name2)
root = pkg_config(lib_name2)
if root:
break
if isinstance(root, tuple):
lib_root, include_root = root
else:
lib_root = include_root = root
if lib_root is not None:
if not isinstance(lib_root, (tuple, list)):
lib_root = (lib_root,)
for lib_dir in lib_root:
_add_directory(library_dirs, lib_dir)
if include_root is not None:
if not isinstance(include_root, (tuple, list)):
include_root = (include_root,)
for include_dir in include_root:
_add_directory(include_dirs, include_dir)
# respect CFLAGS/CPPFLAGS/LDFLAGS
for k in ("CFLAGS", "CPPFLAGS", "LDFLAGS"):
if k in os.environ:
for match in re.finditer(r"-I([^\s]+)", os.environ[k]):
_add_directory(include_dirs, match.group(1))
for match in re.finditer(r"-L([^\s]+)", os.environ[k]):
_add_directory(library_dirs, match.group(1))
# include, rpath, if set as environment variables:
for k in ("C_INCLUDE_PATH", "CPATH", "INCLUDE"):
if k in os.environ:
for d in os.environ[k].split(os.path.pathsep):
_add_directory(include_dirs, d)
for k in ("LD_RUN_PATH", "LIBRARY_PATH", "LIB"):
if k in os.environ:
for d in os.environ[k].split(os.path.pathsep):
_add_directory(library_dirs, d)
_add_directory(library_dirs, os.path.join(sys.prefix, "lib"))
_add_directory(include_dirs, os.path.join(sys.prefix, "include"))
#
# add platform directories
if self.disable_platform_guessing:
pass
elif sys.platform == "cygwin":
# pythonX.Y.dll.a is in the /usr/lib/pythonX.Y/config directory
self.compiler.shared_lib_extension = ".dll.a"
_add_directory(
library_dirs,
os.path.join(
"/usr/lib", "python{}.{}".format(*sys.version_info), "config"
),
)
elif sys.platform == "darwin":
# attempt to make sure we pick freetype2 over other versions
_add_directory(include_dirs, "/sw/include/freetype2")
_add_directory(include_dirs, "/sw/lib/freetype2/include")
# fink installation directories
_add_directory(library_dirs, "/sw/lib")
_add_directory(include_dirs, "/sw/include")
# darwin ports installation directories
_add_directory(library_dirs, "/opt/local/lib")
_add_directory(include_dirs, "/opt/local/include")
# if Homebrew is installed, use its lib and include directories
try:
prefix = (
subprocess.check_output(["brew", "--prefix"])
.strip()
.decode("latin1")
)
except Exception:
# Homebrew not installed
prefix = None
ft_prefix = None
if prefix:
# add Homebrew's include and lib directories
_add_directory(library_dirs, os.path.join(prefix, "lib"))
_add_directory(include_dirs, os.path.join(prefix, "include"))
_add_directory(
include_dirs, os.path.join(prefix, "opt", "zlib", "include")
)
ft_prefix = os.path.join(prefix, "opt", "freetype")
if ft_prefix and os.path.isdir(ft_prefix):
# freetype might not be linked into Homebrew's prefix
_add_directory(library_dirs, os.path.join(ft_prefix, "lib"))
_add_directory(include_dirs, os.path.join(ft_prefix, "include"))
else:
# fall back to freetype from XQuartz if
# Homebrew's freetype is missing
_add_directory(library_dirs, "/usr/X11/lib")
_add_directory(include_dirs, "/usr/X11/include")
# Add the macOS SDK path.
sdk_path = self.get_macos_sdk_path()
if sdk_path:
_add_directory(library_dirs, os.path.join(sdk_path, "usr", "lib"))
_add_directory(include_dirs, os.path.join(sdk_path, "usr", "include"))
for extension in self.extensions:
extension.extra_compile_args = ["-Wno-nullability-completeness"]
elif sys.platform == "ios":
# Add the iOS SDK path.
sdk_path = self.get_ios_sdk_path()
_add_directory(library_dirs, os.path.join(sdk_path, "usr", "lib"))
_add_directory(include_dirs, os.path.join(sdk_path, "usr", "include"))
for extension in self.extensions:
extension.extra_compile_args = ["-Wno-nullability-completeness"]
elif sys.platform.startswith(("linux", "gnu", "freebsd")):
for dirname in _find_library_dirs_ldconfig():
_add_directory(library_dirs, dirname)
if sys.platform.startswith("linux") and os.environ.get("ANDROID_ROOT"):
# termux support for android.
# system libraries (zlib) are installed in /system/lib
# headers are at $PREFIX/include
# user libs are at $PREFIX/lib
_add_directory(
library_dirs,
os.path.join(
os.environ["ANDROID_ROOT"],
"lib" if struct.calcsize("l") == 4 else "lib64",
),
)
elif sys.platform.startswith("netbsd"):
_add_directory(library_dirs, "/usr/pkg/lib")
_add_directory(include_dirs, "/usr/pkg/include")
elif sys.platform.startswith("sunos5"):
_add_directory(library_dirs, "/opt/local/lib")
_add_directory(include_dirs, "/opt/local/include")
# FIXME: check /opt/stuff directories here?
# standard locations
if not self.disable_platform_guessing:
_add_directory(library_dirs, "/usr/local/lib")
_add_directory(include_dirs, "/usr/local/include")
_add_directory(library_dirs, "/usr/lib")
_add_directory(include_dirs, "/usr/include")
# alpine, at least
_add_directory(library_dirs, "/lib")
if sys.platform == "win32":
# on Windows, look for the OpenJPEG libraries in the location that
# the official installer puts them
program_files = os.environ.get("ProgramFiles", "")
best_version = (0, 0)
best_path = None
for name in os.listdir(program_files):
if name.startswith("OpenJPEG "):
version = tuple(int(x) for x in name[9:].strip().split("."))
if version > best_version:
best_version = version
best_path = os.path.join(program_files, name)
if best_path:
_dbg("Adding %s to search list", best_path)
_add_directory(library_dirs, os.path.join(best_path, "lib"))
_add_directory(include_dirs, os.path.join(best_path, "include"))
#
# insert new dirs *before* default libs, to avoid conflicts
# between Python PYD stub libs and real libraries
self.compiler.library_dirs = library_dirs + self.compiler.library_dirs
self.compiler.include_dirs = include_dirs + self.compiler.include_dirs
#
# look for available libraries
feature = self.feature
if feature.want("zlib"):
_dbg("Looking for zlib")
if _find_include_file(self, "zlib.h"):
if _find_library_file(self, "z"):
feature.set("zlib", "z")
elif sys.platform == "win32" and _find_library_file(self, "zlib"):
feature.set("zlib", "zlib") # alternative name
elif sys.platform == "win32" and _find_library_file(self, "zdll"):
feature.set("zlib", "zdll") # dll import library
if feature.want("jpeg"):
_dbg("Looking for jpeg")
if _find_include_file(self, "jpeglib.h"):
if _find_library_file(self, "jpeg"):
feature.set("jpeg", "jpeg")
elif sys.platform == "win32" and _find_library_file(self, "libjpeg"):
feature.set("jpeg", "libjpeg") # alternative name
feature.set("openjpeg_version", None)
if feature.want("jpeg2000"):
_dbg("Looking for jpeg2000")
best_version: tuple[int, ...] | None = None
best_path = None
# Find the best version
for directory in self.compiler.include_dirs:
_dbg("Checking for openjpeg-#.# in %s", directory)
try:
listdir = os.listdir(directory)
except Exception:
# OSError, FileNotFoundError
continue
for name in listdir:
if name.startswith("openjpeg-") and os.path.isfile(
os.path.join(directory, name, "openjpeg.h")
):
_dbg("Found openjpeg.h in %s/%s", (directory, name))
version = tuple(int(x) for x in name[9:].split("."))
if best_version is None or version > best_version:
best_version = version
best_path = os.path.join(directory, name)
_dbg(
"Best openjpeg version %s so far in %s",
(str(best_version), best_path),
)
if best_version and _find_library_file(self, "openjp2"):
# Add the directory to the include path so we can include
# <openjpeg.h> rather than having to cope with the versioned
# include path
_add_directory(self.compiler.include_dirs, best_path, 0)
feature.set("jpeg2000", "openjp2")
feature.set("openjpeg_version", ".".join(str(x) for x in best_version))
if feature.want("imagequant"):
_dbg("Looking for imagequant")
if _find_include_file(self, "libimagequant.h"):
if _find_library_file(self, "imagequant"):
feature.set("imagequant", "imagequant")
elif _find_library_file(self, "libimagequant"):
feature.set("imagequant", "libimagequant")
if feature.want("tiff"):
_dbg("Looking for tiff")
if _find_include_file(self, "tiff.h"):
if sys.platform in ["win32", "darwin"] and _find_library_file(
self, "libtiff"
):
feature.set("tiff", "libtiff")
elif _find_library_file(self, "tiff"):
feature.set("tiff", "tiff")
if feature.want("freetype"):
_dbg("Looking for freetype")
if _find_library_file(self, "freetype"):
# look for freetype2 include files
freetype_version = 0
for subdir in self.compiler.include_dirs:
_dbg("Checking for include file %s in %s", ("ft2build.h", subdir))
if os.path.isfile(os.path.join(subdir, "ft2build.h")):
_dbg("Found %s in %s", ("ft2build.h", subdir))
freetype_version = 21
subdir = os.path.join(subdir, "freetype2")
break
subdir = os.path.join(subdir, "freetype2")
_dbg("Checking for include file %s in %s", ("ft2build.h", subdir))
if os.path.isfile(os.path.join(subdir, "ft2build.h")):
_dbg("Found %s in %s", ("ft2build.h", subdir))
freetype_version = 21
break
if freetype_version:
feature.set("freetype", "freetype")
if subdir:
_add_directory(self.compiler.include_dirs, subdir, 0)
if feature.get("freetype") and feature.want("raqm"):
if not feature.want_vendor("raqm"): # want system Raqm
_dbg("Looking for Raqm")
if _find_include_file(self, "raqm.h"):
if _find_library_file(self, "raqm"):
feature.set("raqm", "raqm")
elif _find_library_file(self, "libraqm"):
feature.set("raqm", "libraqm")
else: # want to build Raqm from src/thirdparty
_dbg("Looking for HarfBuzz")
feature.set("harfbuzz", None)
hb_dir = _find_include_dir(self, "harfbuzz", "hb.h")
if hb_dir:
if isinstance(hb_dir, str):
_add_directory(self.compiler.include_dirs, hb_dir, 0)
if _find_library_file(self, "harfbuzz"):
feature.set("harfbuzz", "harfbuzz")
if feature.get("harfbuzz"):
if not feature.want_vendor("fribidi"): # want system FriBiDi
_dbg("Looking for FriBiDi")
feature.set("fribidi", None)
fribidi_dir = _find_include_dir(self, "fribidi", "fribidi.h")
if fribidi_dir:
if isinstance(fribidi_dir, str):
_add_directory(
self.compiler.include_dirs, fribidi_dir, 0
)
if _find_library_file(self, "fribidi"):
feature.set("fribidi", "fribidi")
feature.set("raqm", True)
else: # want to build FriBiDi shim from src/thirdparty
feature.set("raqm", True)
if feature.want("lcms"):
_dbg("Looking for lcms")
if _find_include_file(self, "lcms2.h"):
if _find_library_file(self, "lcms2"):
feature.set("lcms", "lcms2")
elif _find_library_file(self, "lcms2_static"):
# alternate Windows name.
feature.set("lcms", "lcms2_static")
if feature.want("webp"):
_dbg("Looking for webp")
if all(
_find_include_file(self, "webp/" + include)
for include in ("encode.h", "decode.h", "mux.h", "demux.h")
):
# In Google's precompiled zip it is called "libwebp"
for prefix in ("", "lib"):
if all(
_find_library_file(self, prefix + library)
for library in ("webp", "webpmux", "webpdemux")
):
feature.set("webp", prefix + "webp")
break
if feature.want("xcb"):
_dbg("Looking for xcb")
if _find_include_file(self, "xcb/xcb.h"):
if _find_library_file(self, "xcb"):
feature.set("xcb", "xcb")
if feature.want("avif"):
_dbg("Looking for avif")
if avif_h := _find_include_file(self, "avif/avif.h"):
with open(avif_h, "rb") as fp:
major_version = int(
fp.read().split(b"#define AVIF_VERSION_MAJOR ")[1].split()[0]
)
if major_version >= 1 and _find_library_file(self, "avif"):
feature.set("avif", "avif")
for f in feature:
if not feature.get(f) and feature.require(f):
if f in ("jpeg", "zlib"):
raise RequiredDependencyException(f)
raise DependencyException(f)
#
# core library
libs: list[str | bool | None] = []
libs.extend(self.add_imaging_libs.split())
defs: list[tuple[str, str | None]] = []
if feature.get("tiff"):
libs.append(feature.get("tiff"))
defs.append(("HAVE_LIBTIFF", None))
if sys.platform == "win32":
# This define needs to be defined if-and-only-if it was defined
# when compiling LibTIFF. LibTIFF doesn't expose it in `tiffconf.h`,
# so we have to guess; by default it is defined in all Windows builds.
# See #4237, #5243, #5359 for more information.
defs.append(("USE_WIN32_FILEIO", None))
elif sys.platform == "ios":
# Ensure transitive dependencies are linked.
libs.append("lzma")
if feature.get("jpeg"):
libs.append(feature.get("jpeg"))
defs.append(("HAVE_LIBJPEG", None))
if feature.get("jpeg2000"):
libs.append(feature.get("jpeg2000"))
defs.append(("HAVE_OPENJPEG", None))
if sys.platform == "win32" and not PLATFORM_MINGW:
defs.append(("OPJ_STATIC", None))
if feature.get("zlib"):
libs.append(feature.get("zlib"))
defs.append(("HAVE_LIBZ", None))
if feature.get("imagequant"):
libs.append(feature.get("imagequant"))
defs.append(("HAVE_LIBIMAGEQUANT", None))
if feature.get("xcb"):
libs.append(feature.get("xcb"))
if sys.platform == "ios":
# Ensure transitive dependencies are linked.
libs.append("Xau")
defs.append(("HAVE_XCB", None))
if sys.platform == "win32":
libs.extend(["kernel32", "user32", "gdi32"])
if struct.unpack("h", b"\0\1")[0] == 1:
defs.append(("WORDS_BIGENDIAN", None))
defs.append(("PILLOW_VERSION", f'"{PILLOW_VERSION}"'))
self._update_extension("PIL._imaging", libs, defs)
#
# additional libraries
if feature.get("freetype"):
srcs = []
libs = ["freetype"]
defs = []
if feature.get("raqm"):
if not feature.want_vendor("raqm"): # using system Raqm
defs.append(("HAVE_RAQM", None))
defs.append(("HAVE_RAQM_SYSTEM", None))
libs.append(feature.get("raqm"))
else: # building Raqm from src/thirdparty
defs.append(("HAVE_RAQM", None))
srcs.append("src/thirdparty/raqm/raqm.c")
libs.append(feature.get("harfbuzz"))
if not feature.want_vendor("fribidi"): # using system FriBiDi
defs.append(("HAVE_FRIBIDI_SYSTEM", None))
libs.append(feature.get("fribidi"))
else: # building FriBiDi shim from src/thirdparty
srcs.append("src/thirdparty/fribidi-shim/fribidi.c")
if sys.platform == "ios":
# Ensure transitive dependencies are linked.
libs.extend(["z", "bz2", "brotlicommon", "brotlidec", "png"])
self._update_extension("PIL._imagingft", libs, defs, srcs)
else:
self._remove_extension("PIL._imagingft")
if feature.get("lcms"):
libs = [feature.get("lcms")]
if sys.platform == "win32":
libs.extend(["user32", "gdi32"])
self._update_extension("PIL._imagingcms", libs)
else:
self._remove_extension("PIL._imagingcms")
webp = feature.get("webp")
if isinstance(webp, str):
libs = [webp, webp + "mux", webp + "demux"]
if sys.platform == "ios":
# Ensure transitive dependencies are linked.
libs.append("sharpyuv")
self._update_extension("PIL._webp", libs)
else:
self._remove_extension("PIL._webp")
if feature.get("avif"):
libs = [feature.get("avif")]
if sys.platform == "win32":
libs.extend(["ntdll", "userenv", "ws2_32", "bcrypt"])
self._update_extension("PIL._avif", libs)
else:
self._remove_extension("PIL._avif")
tk_libs = ["psapi"] if sys.platform in ("win32", "cygwin") else []
self._update_extension("PIL._imagingtk", tk_libs)
build_ext.build_extensions(self)
#
# sanity checks
self.summary_report(feature)
def summary_report(self, feature: ext_feature) -> None:
print("-" * 68)
print("PIL SETUP SUMMARY")
print("-" * 68)
print(f"version Pillow {PILLOW_VERSION}")
version = sys.version.split("[")
print(f"platform {sys.platform} {version[0].strip()}")
for v in version[1:]:
print(f" [{v.strip()}")
print("-" * 68)
raqm_extra_info = ""
if feature.want_vendor("raqm"):
raqm_extra_info += "bundled"
if feature.want_vendor("fribidi"):
raqm_extra_info += ", FriBiDi shim"
options = [
(feature.get("jpeg"), "JPEG"),
(
feature.get("jpeg2000"),
"OPENJPEG (JPEG2000)",
feature.get("openjpeg_version"),
),
(feature.get("zlib"), "ZLIB (PNG/ZIP)"),
(feature.get("imagequant"), "LIBIMAGEQUANT"),
(feature.get("tiff"), "LIBTIFF"),
(feature.get("freetype"), "FREETYPE2"),
(feature.get("raqm"), "RAQM (Text shaping)", raqm_extra_info),
(feature.get("lcms"), "LITTLECMS2"),
(feature.get("webp"), "WEBP"),
(feature.get("xcb"), "XCB (X protocol)"),
(feature.get("avif"), "LIBAVIF"),
]
all = 1
for option in options:
if option[0]:
extra_info = ""
if len(option) >= 3 and option[2]:
extra_info = f" ({option[2]})"
print(f"--- {option[1]} support available{extra_info}")
else:
print(f"*** {option[1]} support not available")
all = 0
print("-" * 68)
if not all:
print("To add a missing option, make sure you have the required")
print("library and headers.")
print(
"See https://pillow.readthedocs.io/en/latest/installation."
"html#building-from-source"
)
print("")
print("To check the build, run the selftest.py script.")
print("")
def debug_build() -> bool:
return hasattr(sys, "gettotalrefcount") or FUZZING_BUILD
libraries: list[tuple[str, _BuildInfo]] = [
("pil_imaging_mode", {"sources": ["src/libImaging/Mode.c"]}),
]
files: list[str | os.PathLike[str]] = ["src/_imaging.c"]
for src_file in _IMAGING:
files.append("src/" + src_file + ".c")
for src_file in _LIB_IMAGING:
files.append(os.path.join("src/libImaging", src_file + ".c"))
ext_modules = [
Extension("PIL._imaging", files),
Extension("PIL._imagingft", ["src/_imagingft.c"]),
Extension("PIL._imagingcms", ["src/_imagingcms.c"]),
Extension("PIL._webp", ["src/_webp.c"]),
Extension("PIL._avif", ["src/_avif.c"]),
Extension("PIL._imagingtk", ["src/_imagingtk.c", "src/Tk/tkImaging.c"]),
Extension("PIL._imagingmath", ["src/_imagingmath.c"]),
Extension("PIL._imagingmorph", ["src/_imagingmorph.c"]),
]
try:
setup(
cmdclass={"build_ext": pil_build_ext},
ext_modules=ext_modules,
libraries=libraries,
zip_safe=not (debug_build() or PLATFORM_MINGW),
)
except RequiredDependencyException as err:
msg = f"""
The headers or library files could not be found for {str(err)},
a required dependency when compiling Pillow from source.
Please see the install instructions at:
https://pillow.readthedocs.io/en/latest/installation/basic-installation.html
"""
sys.stderr.write(msg)
raise RequiredDependencyException(msg)
except DependencyException as err:
msg = f"""
The headers or library files could not be found for {str(err)},
which was requested by the option flag '-C {str(err)}=enable'
"""
sys.stderr.write(msg)
raise DependencyException(msg)
|
pil_build_ext
|
python
|
xlwings__xlwings
|
xlwings/conversion/standard.py
|
{
"start": 2208,
"end": 2964
}
|
class ____:
def __init__(self, options):
self.options = options
def __call__(self, c):
chunksize = self.options.get("chunksize")
if c.range and chunksize:
parts = []
for i in range(math.ceil(c.range.shape[0] / chunksize)):
raw_value = c.range[
i * chunksize : (i * chunksize) + chunksize, :
].raw_value
if isinstance(raw_value[0], (list, tuple)):
parts.extend(raw_value)
else:
# Turn a single row list into a 2d list
parts.extend([raw_value])
c.value = parts
elif c.range:
c.value = c.range.raw_value
|
ReadValueFromRangeStage
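A self-contained sketch of the chunking idea in the stage above, with the xlwings specifics stripped out: rows are read in blocks of `chunksize` and reassembled in order. The helper name is hypothetical and not part of xlwings.
import math
def read_in_chunks(rows: list[list[int]], chunksize: int) -> list[list[int]]:
    parts: list[list[int]] = []
    for i in range(math.ceil(len(rows) / chunksize)):
        # Each slice is one "chunk" of rows, analogous to one raw_value read above.
        parts.extend(rows[i * chunksize : (i + 1) * chunksize])
    return parts
data = [[r, r * 2] for r in range(10)]
assert read_in_chunks(data, chunksize=3) == data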
|
python
|
pypa__hatch
|
src/hatch/utils/fs.py
|
{
"start": 1028,
"end": 4377
}
|
class ____(_PathBase):
@cached_property
def long_id(self) -> str:
from base64 import urlsafe_b64encode
from hashlib import sha256
path = str(self)
if sys.platform == "win32" or sys.platform == "darwin":
path = path.casefold()
digest = sha256(path.encode("utf-8")).digest()
return urlsafe_b64encode(digest).decode("utf-8")
@cached_property
def id(self) -> str:
return self.long_id[:8]
def ensure_dir_exists(self) -> None:
self.mkdir(parents=True, exist_ok=True)
def ensure_parent_dir_exists(self) -> None:
self.parent.mkdir(parents=True, exist_ok=True)
def expand(self) -> Path:
return Path(os.path.expanduser(os.path.expandvars(self)))
def remove(self) -> None:
if self.is_file():
os.remove(self)
elif self.is_dir():
import shutil
shutil.rmtree(self, ignore_errors=False)
def move(self, target):
try:
self.replace(target)
except OSError:
import shutil
shutil.copy2(self, target)
self.unlink()
def wait_for_dir_removed(self, timeout: int = 5) -> None:
import shutil
import time
for _ in range(timeout * 2):
if self.is_dir():
shutil.rmtree(self, ignore_errors=True)
time.sleep(0.5)
else:
return
if self.is_dir():
shutil.rmtree(self, ignore_errors=False)
def write_atomic(self, data: str | bytes, *args: Any, **kwargs: Any) -> None:
from tempfile import mkstemp
fd, path = mkstemp(dir=self.parent)
with os.fdopen(fd, *args, **kwargs) as f:
f.write(data)
f.flush()
disk_sync(fd)
os.replace(path, self)
@contextmanager
def as_cwd(self, *args: Any, **kwargs: Any) -> Generator[Path, None, None]:
origin = os.getcwd()
os.chdir(self)
try:
if args or kwargs:
with EnvVars(*args, **kwargs):
yield self
else:
yield self
finally:
os.chdir(origin)
@contextmanager
def temp_hide(self) -> Generator[Path, None, None]:
import shutil
with temp_directory() as temp_dir:
temp_path = Path(temp_dir, self.name)
with suppress(FileNotFoundError):
shutil.move(str(self), temp_dir / self.name)
try:
yield temp_path
finally:
with suppress(FileNotFoundError):
shutil.move(str(temp_path), self)
if sys.platform == "win32":
@classmethod
def from_uri(cls, path: str) -> Path:
return cls(path.replace("file:///", "", 1))
else:
@classmethod
def from_uri(cls, path: str) -> Path:
return cls(path.replace("file://", "", 1))
@contextmanager
def temp_directory() -> Generator[Path, None, None]:
from tempfile import TemporaryDirectory
with TemporaryDirectory() as d:
yield Path(d).resolve()
@contextmanager
def temp_chdir(env_vars: dict[str, str] | None = None) -> Generator[Path, None, None]:
with temp_directory() as d, d.as_cwd(env_vars=env_vars):
yield d
|
Path
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess12.py
|
{
"start": 155,
"end": 515
}
|
class ____(type):
@overload
def __get__(self: type[T], instance: None, owner: Any) -> type[T]: ...
@overload
def __get__(self: type[T], instance: object, owner: Any) -> T: ...
def __get__(self: type[T], instance: object | None, owner: Any) -> type[T] | T:
if instance is None:
return self
return self()
|
MetaClass
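A runnable sketch of the pattern this sample checks, under the assumption that the point of interest is the metaclass acting as a descriptor: a class attribute whose metaclass defines `__get__` yields the class itself when accessed on the owner class and a fresh instance when accessed on an owner instance. The names below are illustrative only.
class AutoInstance(type):
    def __get__(cls, instance, owner):
        return cls if instance is None else cls()
class Widget(metaclass=AutoInstance):
    pass
class Holder:
    widget = Widget  # the metaclass makes this class attribute behave like a descriptor
assert Holder.widget is Widget              # class access -> the class itself
assert isinstance(Holder().widget, Widget)  # instance access -> a new instance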
|
python
|
PyCQA__pylint
|
pylint/utils/linterstats.py
|
{
"start": 1233,
"end": 1401
}
|
class ____(TypedDict):
"""TypedDict to store counts of undocumented node types."""
function: int
klass: int
method: int
module: int
|
UndocumentedNodes
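A brief, self-contained sketch of how a TypedDict like the one above is used (the `NodeCounts` name is illustrative, not pylint's): values are plain dicts at runtime, and the annotation only constrains keys and value types for static checkers.
from typing import TypedDict
class NodeCounts(TypedDict):
    function: int
    klass: int
    method: int
    module: int
counts: NodeCounts = {"function": 0, "klass": 0, "method": 0, "module": 0}
counts["method"] += 1
assert isinstance(counts, dict)  # TypedDict instances are ordinary dicts at runtime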
|
python
|
spack__spack
|
lib/spack/spack/database.py
|
{
"start": 13355,
"end": 19267
}
|
class ____:
"""Tracks installation failures.
Prefix failure marking takes the form of a byte range lock on the nth
byte of a file for coordinating between concurrent parallel build
processes and a persistent file, named with the full hash and
containing the spec, in a subdirectory of the database to enable
persistence across overlapping but separate related build processes.
The failure lock file lives alongside the install DB.
``n`` is the sys.maxsize-bit prefix of the associated DAG hash to make
the likelihood of collision very low with no cleanup required.
"""
#: root directory of the failure tracker
dir: pathlib.Path
#: File for locking particular concrete spec hashes
locker: SpecLocker
def __init__(self, root_dir: Union[str, pathlib.Path], default_timeout: Optional[float]):
#: Ensure a persistent location for dealing with parallel installation
#: failures (e.g., across near-concurrent processes).
self.dir = pathlib.Path(root_dir) / _DB_DIRNAME / "failures"
self.locker = SpecLocker(failures_lock_path(root_dir), default_timeout=default_timeout)
def _ensure_parent_directories(self) -> None:
"""Ensure that parent directories of the FailureTracker exist.
Accesses the filesystem only once, the first time it's called on a given FailureTracker.
"""
self.dir.mkdir(parents=True, exist_ok=True)
def clear(self, spec: "spack.spec.Spec", force: bool = False) -> None:
"""Removes any persistent and cached failure tracking for the spec.
see :meth:`mark`.
Args:
spec: the spec whose failure indicators are being removed
force: True if the failure information should be cleared when a failure lock
exists for the file, or False if the failure should not be cleared (e.g.,
it may be associated with a concurrent build)
"""
locked = self.lock_taken(spec)
if locked and not force:
tty.msg(f"Retaining failure marking for {spec.name} due to lock")
return
if locked:
tty.warn(f"Removing failure marking despite lock for {spec.name}")
succeeded, lock = self.locker.clear(spec)
if succeeded and lock is not None:
lock.release_write()
if self.persistent_mark(spec):
path = self._path(spec)
tty.debug(f"Removing failure marking for {spec.name}")
try:
path.unlink()
except OSError as err:
tty.warn(
f"Unable to remove failure marking for {spec.name} ({str(path)}): {str(err)}"
)
def clear_all(self) -> None:
"""Force remove install failure tracking files."""
tty.debug("Releasing prefix failure locks")
self.locker.clear_all(
clear_fn=lambda x: x.release_write() if x.is_write_locked() else True
)
tty.debug("Removing prefix failure tracking files")
try:
marks = os.listdir(str(self.dir))
except FileNotFoundError:
return # directory doesn't exist yet
except OSError as exc:
tty.warn(f"Unable to remove failure marking files: {str(exc)}")
return
for fail_mark in marks:
try:
(self.dir / fail_mark).unlink()
except OSError as exc:
tty.warn(f"Unable to remove failure marking file {fail_mark}: {str(exc)}")
def mark(self, spec: "spack.spec.Spec") -> lk.Lock:
"""Marks a spec as failing to install.
Args:
spec: spec that failed to install
"""
self._ensure_parent_directories()
# Dump the spec to the failure file for (manual) debugging purposes
path = self._path(spec)
path.write_text(spec.to_json())
# Also ensure a failure lock is taken to prevent cleanup removal
# of failure status information during a concurrent parallel build.
if not self.locker.has_lock(spec):
try:
mark = self.locker.lock(spec)
mark.acquire_write()
except lk.LockTimeoutError:
# Unlikely that another process failed to install at the same
# time but log it anyway.
tty.debug(f"PID {os.getpid()} failed to mark install failure for {spec.name}")
tty.warn(f"Unable to mark {spec.name} as failed.")
return self.locker.lock(spec)
def has_failed(self, spec: "spack.spec.Spec") -> bool:
"""Return True if the spec is marked as failed."""
# The failure was detected in this process.
if self.locker.has_lock(spec):
return True
# The failure was detected by a concurrent process (e.g., an srun),
# which is expected to be holding a write lock if that is the case.
if self.lock_taken(spec):
return True
# Determine if the spec may have been marked as failed by a separate
# spack build process running concurrently.
return self.persistent_mark(spec)
def lock_taken(self, spec: "spack.spec.Spec") -> bool:
"""Return True if another process has a failure lock on the spec."""
check = self.locker.raw_lock(spec)
return check.is_write_locked()
def persistent_mark(self, spec: "spack.spec.Spec") -> bool:
"""Determine if the spec has a persistent failure marking."""
return self._path(spec).exists()
def _path(self, spec: "spack.spec.Spec") -> pathlib.Path:
"""Return the path to the spec's failure file, which may not exist."""
assert spec.concrete, "concrete spec required for failure path"
return self.dir / f"{spec.name}-{spec.dag_hash()}"
SelectType = Callable[[InstallRecord], bool]
|
FailureTracker
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 991445,
"end": 993569
}
|
class ____(Gradient):
"""
RadialGradient schema wrapper.
Parameters
----------
gradient : Literal['radial']
The type of gradient. Use ``"radial"`` for a radial gradient.
stops : Sequence[dict, :class:`GradientStop`]
An array of gradient stops defining the gradient color sequence.
id : str
r1 : float
The radius length, in normalized [0, 1] coordinates, of the inner circle for the
gradient.
**Default value:** ``0``
r2 : float
The radius length, in normalized [0, 1] coordinates, of the outer circle for the
gradient.
**Default value:** ``0.5``
x1 : float
The x-coordinate, in normalized [0, 1] coordinates, for the center of the inner
circle for the gradient.
**Default value:** ``0.5``
x2 : float
The x-coordinate, in normalized [0, 1] coordinates, for the center of the outer
circle for the gradient.
**Default value:** ``0.5``
y1 : float
The y-coordinate, in normalized [0, 1] coordinates, for the center of the inner
circle for the gradient.
**Default value:** ``0.5``
y2 : float
The y-coordinate, in normalized [0, 1] coordinates, for the center of the outer
circle for the gradient.
**Default value:** ``0.5``
"""
_schema = {"$ref": "#/definitions/RadialGradient"}
def __init__(
self,
gradient: Optional[Literal["radial"]] = Undefined,
stops: Optional[Sequence[SchemaBase | Map]] = Undefined,
id: Optional[str] = Undefined,
r1: Optional[float] = Undefined,
r2: Optional[float] = Undefined,
x1: Optional[float] = Undefined,
x2: Optional[float] = Undefined,
y1: Optional[float] = Undefined,
y2: Optional[float] = Undefined,
**kwds,
):
super().__init__(
gradient=gradient,
stops=stops,
id=id,
r1=r1,
r2=r2,
x1=x1,
x2=x2,
y1=y1,
y2=y2,
**kwds,
)
|
RadialGradient
|
python
|
huggingface__transformers
|
src/transformers/models/align/modeling_align.py
|
{
"start": 29487,
"end": 31179
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([AlignTextLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText
|
AlignTextEncoder
|
python
|
walkccc__LeetCode
|
solutions/75. Sort Colors/75.py
|
{
"start": 0,
"end": 415
}
|
class ____:
def sortColors(self, nums: list[int]) -> None:
zero = -1
one = -1
two = -1
for num in nums:
if num == 0:
two += 1
one += 1
zero += 1
nums[two] = 2
nums[one] = 1
nums[zero] = 0
elif num == 1:
two += 1
one += 1
nums[two] = 2
nums[one] = 1
else:
two += 1
nums[two] = 2
|
Solution
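A hedged, self-contained alternative to the overwrite-based solution above: the classic three-pointer (Dutch national flag) partition, shown only to illustrate the same in-place idea; the function name is illustrative.
def sort_colors(nums: list[int]) -> None:
    low, mid, high = 0, 0, len(nums) - 1
    while mid <= high:
        if nums[mid] == 0:
            nums[low], nums[mid] = nums[mid], nums[low]
            low += 1
            mid += 1
        elif nums[mid] == 1:
            mid += 1
        else:
            nums[mid], nums[high] = nums[high], nums[mid]
            high -= 1
nums = [2, 0, 2, 1, 1, 0]
sort_colors(nums)
assert nums == [0, 0, 1, 1, 2, 2]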
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 253034,
"end": 254168
}
|
class ____(Response):
"""
Response of tasks.make_private endpoint.
:param updated: Number of tasks updated
:type updated: int
"""
_service = "tasks"
_action = "make_private"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
MakePrivateResponse
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/media_file_storage.py
|
{
"start": 892,
"end": 1303
}
|
class ____(Exception):
"""Exception class for errors raised by MediaFileStorage.
When running in "development mode", the full text of these errors
is displayed in the frontend, so errors should be human-readable
(and actionable).
When running in "release mode", errors are redacted on the
frontend; we instead show a generic "Something went wrong!" message.
"""
|
MediaFileStorageError
|
python
|
mlflow__mlflow
|
mlflow/data/schema.py
|
{
"start": 176,
"end": 2650
}
|
class ____:
"""
Represents the schema of a dataset with tensor features and targets.
"""
def __init__(self, features: Schema, targets: Schema = None):
if not isinstance(features, Schema):
raise MlflowException(
f"features must be mlflow.types.Schema, got '{type(features)}'",
INVALID_PARAMETER_VALUE,
)
if targets is not None and not isinstance(targets, Schema):
raise MlflowException(
f"targets must be either None or mlflow.types.Schema, got '{type(features)}'",
INVALID_PARAMETER_VALUE,
)
self.features = features
self.targets = targets
def to_dict(self) -> dict[str, Any]:
"""Serialize into a 'jsonable' dictionary.
Returns:
dictionary representation of the schema's features and targets (if defined).
"""
return {
"mlflow_tensorspec": {
"features": self.features.to_json(),
"targets": self.targets.to_json() if self.targets is not None else None,
},
}
@classmethod
def from_dict(cls, schema_dict: dict[str, Any]):
"""Deserialize from dictionary representation.
Args:
schema_dict: Dictionary representation of model signature. Expected dictionary format:
`{'features': <json string>, 'targets': <json string>" }`
Returns:
TensorDatasetSchema populated with the data from the dictionary.
"""
if "mlflow_tensorspec" not in schema_dict:
raise MlflowException(
"TensorDatasetSchema dictionary is missing expected key 'mlflow_tensorspec'",
INVALID_PARAMETER_VALUE,
)
schema_dict = schema_dict["mlflow_tensorspec"]
features = Schema.from_json(schema_dict["features"])
if "targets" in schema_dict and schema_dict["targets"] is not None:
targets = Schema.from_json(schema_dict["targets"])
return cls(features, targets)
else:
return cls(features)
def __eq__(self, other) -> bool:
return (
isinstance(other, TensorDatasetSchema)
and self.features == other.features
and self.targets == other.targets
)
def __repr__(self) -> str:
return f"features:\n {self.features!r}\ntargets:\n {self.targets!r}\n"
|
TensorDatasetSchema
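A short usage sketch, assuming mlflow and numpy are installed and that the import path follows the row's mlflow/data/schema.py location; the tensor names and shapes are made up for illustration.
import numpy as np
from mlflow.data.schema import TensorDatasetSchema
from mlflow.types.schema import Schema, TensorSpec
features = Schema([TensorSpec(np.dtype("float32"), (-1, 28, 28), name="images")])
targets = Schema([TensorSpec(np.dtype("int64"), (-1,), name="labels")])
schema = TensorDatasetSchema(features=features, targets=targets)
# Round-trip through the dict form implemented above.
assert TensorDatasetSchema.from_dict(schema.to_dict()) == schema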
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 164485,
"end": 167305
}
|
class ____:
def test_describe(self):
assert self.locale.describe("now", only_distance=True) == "հիմա"
assert self.locale.describe("now", only_distance=False) == "հիմա"
def test_meridians_hy(self):
assert self.locale.meridian(7, "A") == "Ամ"
assert self.locale.meridian(18, "A") == "պ.մ."
assert self.locale.meridian(10, "a") == "Ամ"
assert self.locale.meridian(22, "a") == "պ.մ."
def test_format_timeframe(self):
# Second(s)
assert self.locale._format_timeframe("second", -1) == "վայրկյան"
assert self.locale._format_timeframe("second", 1) == "վայրկյան"
assert self.locale._format_timeframe("seconds", -3) == "3 վայրկյան"
assert self.locale._format_timeframe("seconds", 3) == "3 վայրկյան"
assert self.locale._format_timeframe("seconds", 30) == "30 վայրկյան"
# Minute(s)
assert self.locale._format_timeframe("minute", -1) == "րոպե"
assert self.locale._format_timeframe("minute", 1) == "րոպե"
assert self.locale._format_timeframe("minutes", -4) == "4 րոպե"
assert self.locale._format_timeframe("minutes", 4) == "4 րոպե"
assert self.locale._format_timeframe("minutes", 40) == "40 րոպե"
# Hour(s)
assert self.locale._format_timeframe("hour", -1) == "ժամ"
assert self.locale._format_timeframe("hour", 1) == "ժամ"
assert self.locale._format_timeframe("hours", -23) == "23 ժամ"
assert self.locale._format_timeframe("hours", 23) == "23 ժամ"
# Day(s)
assert self.locale._format_timeframe("day", -1) == "օր"
assert self.locale._format_timeframe("day", 1) == "օր"
assert self.locale._format_timeframe("days", -12) == "12 օր"
assert self.locale._format_timeframe("days", 12) == "12 օր"
# Month(s)
assert self.locale._format_timeframe("month", -1) == "ամիս"
assert self.locale._format_timeframe("month", 1) == "ամիս"
assert self.locale._format_timeframe("months", -2) == "2 ամիս"
assert self.locale._format_timeframe("months", 2) == "2 ամիս"
assert self.locale._format_timeframe("months", 11) == "11 ամիս"
# Year(s)
assert self.locale._format_timeframe("year", -1) == "տարին"
assert self.locale._format_timeframe("year", 1) == "տարին"
assert self.locale._format_timeframe("years", -2) == "2 տարին"
assert self.locale._format_timeframe("years", 2) == "2 տարին"
assert self.locale._format_timeframe("years", 12) == "12 տարին"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "շաբաթ"
assert self.locale.day_abbreviation(dt.isoweekday()) == "շաբ."
@pytest.mark.usefixtures("lang_locale")
|
TestArmenianLocale
|
python
|
ray-project__ray
|
python/ray/serve/handle.py
|
{
"start": 20286,
"end": 25264
}
|
class ____(_DeploymentResponseBase):
"""A future-like object wrapping the result of a streaming deployment handle call.
This is returned when using `handle.options(stream=True)` and calling a generator
deployment method.
`DeploymentResponseGenerator` is both a synchronous and asynchronous iterator.
When iterating over results from inside a deployment, `async for` should be used to
avoid blocking the asyncio event loop.
When iterating over results from outside a deployment, use a standard `for` loop.
Example:
.. code-block:: python
from typing import AsyncIterator, Generator
from ray import serve
from ray.serve.handle import DeploymentHandle
@serve.deployment
class Streamer:
def generate_numbers(self, limit: int) -> Generator[int, None, None]:
for i in range(limit):
yield i
@serve.deployment
class Caller:
def __init__(self, handle: DeploymentHandle):
# Set `stream=True` on the handle to enable streaming calls.
self._streaming_handle = handle.options(stream=True)
async def __call__(self, limit: int) -> AsyncIterator[int]:
gen: DeploymentResponseGenerator = (
self._streaming_handle.generate_numbers.remote(limit)
)
# Inside a deployment: use `async for` to enable concurrency.
async for i in gen:
yield i
app = Caller.bind(Streamer.bind())
handle: DeploymentHandle = serve.run(app)
# Outside a deployment: use a standard `for` loop.
gen: DeploymentResponseGenerator = handle.options(stream=True).remote(10)
assert [i for i in gen] == list(range(10))
A `DeploymentResponseGenerator` *cannot* currently be passed to another
`DeploymentHandle` call.
"""
def __await__(self):
raise TypeError(
"`DeploymentResponseGenerator` cannot be awaited directly. Use `async for` "
"or `await response.__anext__()` instead."
)
def __aiter__(self) -> AsyncIterator[Any]:
return self
async def __anext__(self) -> Any:
try:
replica_result = await self._fetch_future_result_async()
return await replica_result.__anext__()
except asyncio.CancelledError:
if self._cancelled:
raise RequestCancelledError(self.request_id) from None
else:
raise asyncio.CancelledError from None
def __iter__(self) -> Iterator[Any]:
return self
def __next__(self) -> Any:
if is_running_in_asyncio_loop():
raise RuntimeError(
"Sync methods should not be called from within an `asyncio` event "
"loop. Use `async for` or `await response.__anext__()` instead."
)
replica_result = self._fetch_future_result_sync()
return replica_result.__next__()
@DeveloperAPI
async def _to_object_ref_gen(self) -> ObjectRefGenerator:
"""Advanced API to convert the generator to a Ray `ObjectRefGenerator`.
This method is `async def` because it will block until the handle call has been
assigned to a replica. If there are many requests in flight and all
replicas' queues are full, this may be a slow operation.
"""
ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1")
if not self._request_metadata._by_reference:
raise OBJ_REF_NOT_SUPPORTED_ERROR
replica_result = await self._fetch_future_result_async()
return replica_result.to_object_ref_gen()
@DeveloperAPI
def _to_object_ref_gen_sync(
self,
_timeout_s: Optional[float] = None,
_allow_running_in_asyncio_loop: bool = False,
) -> ObjectRefGenerator:
"""Advanced API to convert the generator to a Ray `ObjectRefGenerator`.
This method is a *blocking* call because it will block until the handle call has
been assigned to a replica. If there are many requests in flight and all
replicas' queues are full, this may be a slow operation.
From inside a deployment, `_to_object_ref_gen` should be used instead to avoid
blocking the asyncio event loop.
"""
ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED.record("1")
if not self._request_metadata._by_reference:
raise OBJ_REF_NOT_SUPPORTED_ERROR
if not _allow_running_in_asyncio_loop and is_running_in_asyncio_loop():
raise RuntimeError(
"Sync methods should not be called from within an `asyncio` event "
"loop. Use `await response._to_object_ref()` instead."
)
replica_result = self._fetch_future_result_sync(_timeout_s)
return replica_result.to_object_ref_gen()
@PublicAPI(stability="stable")
|
DeploymentResponseGenerator
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_highlight.py
|
{
"start": 20373,
"end": 21409
}
|
class ____(util.MdCase):
"""Test default block language cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences', 'pymdownx.inlinehilite']
extension_configs = {
'pymdownx.highlight': {
'default_lang': 'python'
}
}
def test_default_block(self):
"""Test that default language affects block, but not inline code."""
self.check_markdown(
'''
`import code`
import code
```
import code
```
''',
'''
<p><code>import code</code></p>
<div class="highlight"><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">code</span>
</code></pre></div>
<div class="highlight"><pre><span></span><code><span class="kn">import</span><span class="w"> </span><span class="nn">code</span>
</code></pre></div>
''', # noqa: E501
True
)
|
TestDefaultLang
|
python
|
kamyu104__LeetCode-Solutions
|
Python/unique-paths-iii.py
|
{
"start": 61,
"end": 1341
}
|
class ____(object):
def uniquePathsIII(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def index(grid, r, c):
return 1 << (r*len(grid[0])+c)
def dp(grid, src, dst, todo, lookup):
if src == dst:
return int(todo == 0)
key = (src, todo)
if key in lookup:
return lookup[key]
result = 0
for d in directions:
r, c = src[0]+d[0], src[1]+d[1]
if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and \
grid[r][c] % 2 == 0 and \
todo & index(grid, r, c):
result += dp(grid, (r, c), dst, todo ^ index(grid, r, c), lookup)
lookup[key] = result
return lookup[key]
todo = 0
src, dst = None, None
for r, row in enumerate(grid):
for c, val in enumerate(row):
if val % 2 == 0:
todo |= index(grid, r, c)
if val == 1:
src = (r, c)
elif val == 2:
dst = (r, c)
return dp(grid, src, dst, todo, {})
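# Quick check (illustrative, not part of the original solution): LeetCode 980's
# first example grid has exactly two walks that start at 1, end at 2, and cover
# every non-obstacle square exactly once.
example_grid = [[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 2, -1]]
assert Solution().uniquePathsIII(example_grid) == 2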
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/metaclass9.py
|
{
"start": 1407,
"end": 1512
}
|
class ____(metaclass=Meta2): ...
# This should generate an error because param4 is the wrong type.
|
Class2_4
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1148018,
"end": 1148275
}
|
class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAstime schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"time">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
ScaleInvalidDataShowAstime
|
python
|
automl__auto-sklearn
|
autosklearn/metrics/__init__.py
|
{
"start": 5977,
"end": 23924
}
|
class ____(Scorer):
def __call__(
self,
y_true: np.ndarray,
y_pred: np.ndarray,
*,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
sample_weight: Optional[List[float]] = None,
) -> float:
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
y_true : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
y_pred : array-like, [n_samples x n_classes]
Model predictions
X_data : array-like [n_samples x n_features]
X data used to obtain the predictions: each row x_j corresponds to the input
used to obtain predictions y_j
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
if y_pred.ndim > 1:
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
scorer_kwargs = {} # type: Dict[str, Union[List[float], np.ndarray]]
if sample_weight is not None:
scorer_kwargs["sample_weight"] = sample_weight
if self._needs_X is True:
scorer_kwargs["X_data"] = X_data
return self._sign * self._score_func(
y_true, y_pred, **scorer_kwargs, **self._kwargs
)
def make_scorer(
name: str,
score_func: Callable,
*,
optimum: float = 1.0,
worst_possible_result: float = 0.0,
greater_is_better: bool = True,
needs_proba: bool = False,
needs_threshold: bool = False,
needs_X: bool = False,
**kwargs: Any,
) -> Scorer:
"""Make a scorer from a performance metric or loss function.
Factory inspired by scikit-learn which wraps scikit-learn scoring functions
to be used in auto-sklearn.
Parameters
----------
name: str
Descriptive name of the metric
score_func : callable
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
optimum : int or float, default=1
The best score achievable by the score function, i.e. maximum in case of
scorer function and minimum in case of loss function.
worst_possible_result : int or float, default=0
The worst score achievable by the score function, i.e. minimum in case of
scorer function and maximum in case of loss function.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification.
needs_X : boolean, default=False
Whether score_func requires X in __call__ to compute a metric.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better or set
greater_is_better to False.
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError(
"Set either needs_proba or needs_threshold to True, but not both."
)
cls = None # type: Any
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(
name, score_func, optimum, worst_possible_result, sign, kwargs, needs_X=needs_X
)
# Standard regression scores
mean_absolute_error = make_scorer(
"mean_absolute_error",
sklearn.metrics.mean_absolute_error,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
)
mean_squared_error = make_scorer(
"mean_squared_error",
sklearn.metrics.mean_squared_error,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
squared=True,
)
root_mean_squared_error = make_scorer(
"root_mean_squared_error",
sklearn.metrics.mean_squared_error,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
squared=False,
)
mean_squared_log_error = make_scorer(
"mean_squared_log_error",
sklearn.metrics.mean_squared_log_error,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
)
median_absolute_error = make_scorer(
"median_absolute_error",
sklearn.metrics.median_absolute_error,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
)
r2 = make_scorer("r2", sklearn.metrics.r2_score)
# Standard Classification Scores
accuracy = make_scorer("accuracy", sklearn.metrics.accuracy_score)
balanced_accuracy = make_scorer(
"balanced_accuracy", sklearn.metrics.balanced_accuracy_score
)
# Score functions that need decision values
roc_auc = make_scorer(
"roc_auc",
sklearn.metrics.roc_auc_score,
greater_is_better=True,
needs_threshold=True,
)
average_precision = make_scorer(
"average_precision", sklearn.metrics.average_precision_score, needs_threshold=True
)
# NOTE: zero_division
#
# Specified as the explicit default, see sklearn docs:
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn-metrics-precision-score
precision = make_scorer(
"precision", partial(sklearn.metrics.precision_score, zero_division=0)
)
recall = make_scorer("recall", partial(sklearn.metrics.recall_score, zero_division=0))
f1 = make_scorer("f1", partial(sklearn.metrics.f1_score, zero_division=0))
# Score function for probabilistic classification
log_loss = make_scorer(
"log_loss",
sklearn.metrics.log_loss,
optimum=0,
worst_possible_result=MAXINT,
greater_is_better=False,
needs_proba=True,
)
# TODO what about mathews correlation coefficient etc?
REGRESSION_METRICS = {
scorer.name: scorer
for scorer in [
mean_absolute_error,
mean_squared_error,
root_mean_squared_error,
mean_squared_log_error,
median_absolute_error,
r2,
]
}
CLASSIFICATION_METRICS = {
scorer.name: scorer
for scorer in [accuracy, balanced_accuracy, roc_auc, average_precision, log_loss]
}
# NOTE: zero_division
#
# Specified as the explicit default, see sklearn docs:
# https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn-metrics-precision-score
for (base_name, sklearn_metric), average in product(
[
("precision", sklearn.metrics.precision_score),
("recall", sklearn.metrics.recall_score),
("f1", sklearn.metrics.f1_score),
],
["macro", "micro", "samples", "weighted"],
):
name = f"{base_name}_{average}"
scorer = make_scorer(
name, partial(sklearn_metric, pos_label=None, average=average, zero_division=0)
)
globals()[name] = scorer # Adds scorer to the module scope
CLASSIFICATION_METRICS[name] = scorer
def _validate_metrics(
metrics: Sequence[Scorer],
scoring_functions: Optional[List[Scorer]] = None,
) -> None:
"""
Validate metrics given to Auto-sklearn. Raises an Exception in case of a problem.
metrics: Sequence[Scorer]
A list of objects that host a function to calculate how good the
prediction is according to the solution.
scoring_functions: Optional[List[Scorer]]
A list of metrics to calculate multiple losses
"""
to_score = list(metrics)
if scoring_functions:
to_score.extend(scoring_functions)
if len(metrics) == 0:
raise ValueError("Number of metrics to compute must be greater than zero.")
metric_counter = collections.Counter(to_score)
metric_names_counter = collections.Counter(metric.name for metric in to_score)
if len(metric_counter) != len(metric_names_counter):
raise ValueError(
"Error in metrics passed to Auto-sklearn. A metric name was used "
"multiple times for different metrics!"
)
def calculate_scores(
solution: np.ndarray,
prediction: np.ndarray,
task_type: int,
metrics: Sequence[Scorer],
*,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
scoring_functions: Optional[List[Scorer]] = None,
) -> Dict[str, float]:
"""
Returns the scores (a magnitude that allows casting the
optimization problem as a maximization one) for the
given Auto-Sklearn Scorer objects.
Parameters
----------
solution: np.ndarray
The ground truth of the targets
prediction: np.ndarray
The best estimate from the model, of the given targets
task_type: int
To understand if the problem task is classification
or regression
metrics: Sequence[Scorer]
A list of objects that host a function to calculate how good the
prediction is according to the solution.
X_data : array-like [n_samples x n_features]
X data used to obtain the predictions
scoring_functions: List[Scorer]
A list of metrics to calculate multiple losses
Returns
-------
Dict[str, float]
"""
if task_type not in TASK_TYPES:
raise NotImplementedError(task_type)
_validate_metrics(metrics=metrics, scoring_functions=scoring_functions)
to_score = list(metrics)
if scoring_functions:
to_score.extend(scoring_functions)
score_dict = dict()
if task_type in REGRESSION_TASKS:
for metric_ in to_score:
try:
score_dict[metric_.name] = _compute_single_scorer(
metric=metric_,
prediction=prediction,
solution=solution,
task_type=task_type,
X_data=X_data,
)
except ValueError as e:
print(e, e.args[0])
if (
e.args[0] == "Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values."
):
continue
else:
raise e
else:
for metric_ in to_score:
# TODO maybe annotate metrics to define which cases they can
# handle?
try:
score_dict[metric_.name] = _compute_single_scorer(
metric=metric_,
prediction=prediction,
solution=solution,
task_type=task_type,
X_data=X_data,
)
except ValueError as e:
if e.args[0] == "multiclass format is not supported":
continue
elif (
e.args[0] == "Samplewise metrics are not available "
"outside of multilabel classification."
):
continue
elif (
e.args[0] == "Target is multiclass but "
"average='binary'. Please choose another average "
"setting, one of [None, 'micro', 'macro', 'weighted']."
):
continue
else:
raise e
return score_dict
def calculate_loss(
solution: np.ndarray,
prediction: np.ndarray,
task_type: int,
metric: Scorer,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
) -> float:
"""Calculate the loss with a given metric
Parameters
----------
solution: np.ndarray
The solutions
prediction: np.ndarray
The predictions generated
task_type: int
The task type of the problem
metric: Scorer
The metric to use
X_data: Optional[SUPPORTED_XDATA_TYPES]
X data used to obtain the predictions
"""
losses = calculate_losses(
solution=solution,
prediction=prediction,
task_type=task_type,
metrics=[metric],
X_data=X_data,
)
return losses[metric.name]
def calculate_losses(
solution: np.ndarray,
prediction: np.ndarray,
task_type: int,
metrics: Sequence[Scorer],
*,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
scoring_functions: Optional[List[Scorer]] = None,
) -> Dict[str, float]:
"""
Returns the losses (a magnitude that allows casting the
optimization problem as a minimization one) for the
given Auto-Sklearn Scorer objects.
Parameters
----------
solution: np.ndarray
The ground truth of the targets
prediction: np.ndarray
The best estimate from the model, of the given targets
task_type: int
To understand if the problem task is classification
or regression
metrics: Sequence[Scorer]
A list of objects that host a function to calculate how good the
prediction is according to the solution.
X_data: Optional[SUPPORTED_XDATA_TYPES]
X data used to obtain the predictions
scoring_functions: List[Scorer]
A list of metrics to calculate multiple losses
Returns
-------
Dict[str, float]
A loss function for each of the provided scorer objects
"""
score = calculate_scores(
solution=solution,
prediction=prediction,
X_data=X_data,
task_type=task_type,
metrics=metrics,
scoring_functions=scoring_functions,
)
scoring_functions = scoring_functions if scoring_functions else []
# we expect a dict() object for which we should calculate the loss
loss_dict = dict()
for metric_ in scoring_functions + list(metrics):
# maybe metric argument is not in scoring_functions
# TODO: When metrics are annotated with type_of_target support
# we can remove this check
if metric_.name not in score:
continue
loss_dict[metric_.name] = metric_._optimum - score[metric_.name]
return loss_dict
def compute_single_metric(
metric: Scorer,
prediction: np.ndarray,
solution: np.ndarray,
task_type: int,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
) -> float:
"""
Returns a metric for the given Auto-Sklearn Scorer object.
Its direction is determined by the metric itself.
Parameters
----------
solution: np.ndarray
The ground truth of the targets
prediction: np.ndarray
The best estimate from the model, of the given targets
task_type: int
To understand if the problem task is classification
or regression
metric: Scorer
Object that hosts a function to calculate how good the
prediction is according to the solution.
X_data : array-like [n_samples x n_features]
X data used to obtain the predictions
Returns
-------
float
"""
score = _compute_single_scorer(
solution=solution,
prediction=prediction,
metric=metric,
X_data=X_data,
task_type=task_type,
)
return metric._sign * score
def _compute_single_scorer(
metric: Scorer,
prediction: np.ndarray,
solution: np.ndarray,
task_type: int,
X_data: Optional[SUPPORTED_XDATA_TYPES] = None,
) -> float:
"""
Returns a score (a magnitude that allows casting the
optimization problem as a maximization one) for the
given Auto-Sklearn Scorer object
Parameters
----------
solution: np.ndarray
The ground truth of the targets
prediction: np.ndarray
The best estimate from the model, of the given targets
task_type: int
To understand if the problem task is classification
or regression
metric: Scorer
Object that hosts a function to calculate how good the
prediction is according to the solution.
X_data : array-like [n_samples x n_features]
X data used to obtain the predictions
Returns
-------
float
"""
if metric._needs_X:
if X_data is None:
raise ValueError(
f"Metric {metric.name} needs X_data, but X_data is {X_data}"
)
elif X_data.shape[0] != solution.shape[0]:
raise ValueError(
f"X_data has wrong length. "
f"Should be {solution.shape[0]}, but is {X_data.shape[0]}"
)
if task_type in REGRESSION_TASKS:
# TODO put this into the regression metric itself
cprediction = sanitize_array(prediction)
score = metric(solution, cprediction, X_data=X_data)
else:
score = metric(solution, prediction, X_data=X_data)
return score
if task_type in REGRESSION_TASKS:
# TODO put this into the regression metric itself
cprediction = sanitize_array(prediction)
score = metric(solution, cprediction)
else:
score = metric(solution, prediction)
return score
# Must be at bottom so all metrics are defined
default_metric_for_task: Dict[int, Scorer] = {
BINARY_CLASSIFICATION: CLASSIFICATION_METRICS["accuracy"],
MULTICLASS_CLASSIFICATION: CLASSIFICATION_METRICS["accuracy"],
MULTILABEL_CLASSIFICATION: CLASSIFICATION_METRICS["f1_macro"],
REGRESSION: REGRESSION_METRICS["r2"],
MULTIOUTPUT_REGRESSION: REGRESSION_METRICS["r2"],
}
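# Usage sketch (illustrative, not part of the module): `make_scorer` above can
# wrap a custom error function so it is treated as a loss to minimize. `np` and
# `MAXINT` are the same names already referenced elsewhere in this module.
def _max_abs_error(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    return float(np.max(np.abs(y_true - y_pred)))

max_abs_error_example = make_scorer(
    "max_abs_error",
    _max_abs_error,
    optimum=0,
    worst_possible_result=MAXINT,
    greater_is_better=False,
)
# Calling max_abs_error_example(y_true, y_pred) returns the sign-flipped value
# that the optimizer maximizes internally.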
|
_ThresholdScorer
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/rdf.py
|
{
"start": 505,
"end": 6304
}
|
class ____(RegexLexer):
"""
Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.
.. versionadded:: 2.0
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
# character group definitions ::
PN_CHARS_BASE_GRP = (u'a-zA-Z'
u'\u00c0-\u00d6'
u'\u00d8-\u00f6'
u'\u00f8-\u02ff'
u'\u0370-\u037d'
u'\u037f-\u1fff'
u'\u200c-\u200d'
u'\u2070-\u218f'
u'\u2c00-\u2fef'
u'\u3001-\ud7ff'
u'\uf900-\ufdcf'
u'\ufdf0-\ufffd')
PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
PN_CHARS_GRP = (PN_CHARS_U_GRP +
r'\-' +
r'0-9' +
u'\u00b7' +
u'\u0300-\u036f' +
u'\u203f-\u2040')
HEX_GRP = '0-9A-Fa-f'
PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
# terminal productions ::
PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
PN_CHARS = '[' + PN_CHARS_GRP + ']'
HEX = '[' + HEX_GRP + ']'
PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
'.]*' + PN_CHARS + ')?'
PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
PERCENT = '%' + HEX + HEX
PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
'(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
PN_CHARS_GRP + ':]|' + PLX + '))?')
EXPONENT = r'[eE][+-]?\d+'
# Lexer token definitions ::
tokens = {
'root': [
(r'\s+', Text),
# keywords ::
(r'((?i)select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
r'offset|bindings|load|clear|drop|create|add|move|copy|'
r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
r'using\s+named|using|graph|default|named|all|optional|service|'
r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
(r'(a)\b', Keyword),
# IRIs ::
('(' + IRIREF + ')', Name.Label),
# blank nodes ::
('(' + BLANK_NODE_LABEL + ')', Name.Label),
# # variables ::
('[?$]' + VARNAME, Name.Variable),
# prefixed names ::
(r'(' + PN_PREFIX + r')?(\:)(' + PN_LOCAL + ')?',
bygroups(Name.Namespace, Punctuation, Name.Tag)),
# function names ::
(r'((?i)str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
r'count|sum|min|max|avg|sample|group_concat|separator)\b',
Name.Function),
# boolean literals ::
(r'(true|false)', Keyword.Constant),
# double literals ::
(r'[+\-]?(\d+\.\d*' + EXPONENT + r'|\.?\d+' + EXPONENT + ')', Number.Float),
# decimal literals ::
(r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
# integer literals ::
(r'[+\-]?\d+', Number.Integer),
# operators ::
(r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
# punctuation characters ::
(r'[(){}.;,:^\[\]]', Punctuation),
# line comments ::
(r'#[^\n]*', Comment),
# strings ::
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String.Escape, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'u' + HEX + '{4}', String.Escape, '#pop'),
(r'U' + HEX + '{8}', String.Escape, '#pop'),
(r'.', String.Escape, '#pop'),
],
'end-of-string': [
(r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
bygroups(Operator, Name.Function), '#pop:2'),
(r'\^\^', Operator, '#pop:2'),
default('#pop:2'),
],
}
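# Usage sketch (illustrative, kept as comments because this is a vendored copy
# of pygments and the import root may differ in this tree):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   sparql = 'SELECT ?s WHERE { ?s ?p ?o } LIMIT 10'
#   print(highlight(sparql, SparqlLexer(), TerminalFormatter()))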
|
SparqlLexer
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/projects.py
|
{
"start": 125167,
"end": 128989
}
|
class ____(Request):
"""
Get user and system tags used for the tasks under the specified projects
:param include_system: If set to 'true' then the list of the system tags is also returned.
The default value is 'false'
:type include_system: bool
:param projects: The list of projects under which the tags are searched. If not passed or empty then all the
projects are searched
:type projects: Sequence[str]
:param filter: Filter on entities to collect tags from
:type filter: dict
"""
_service = "projects"
_action = "get_task_tags"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"filter": {
"description": "Filter on entities to collect tags from",
"properties": {
"system_tags": {
"description": "The list of system tag values to filter by. Use 'null' value to specify empty system tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "The list of tag values to filter by. Use 'null' value to specify empty tags. Use '__Snot' value to specify that the following value should be excluded",
"items": {"type": "string"},
"type": "array",
},
},
"type": ["object", "null"],
},
"include_system": {
"default": False,
"description": "If set to 'true' then the list of the system tags is also returned. The default value is 'false'",
"type": ["boolean", "null"],
},
"projects": {
"description": "The list of projects under which the tags are searched. If not passed or empty then all the projects are searched",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
include_system: Optional[bool] = False,
projects: Optional[List[str]] = None,
filter: Optional[dict] = None,
**kwargs: Any
) -> None:
super(GetTaskTagsRequest, self).__init__(**kwargs)
self.include_system = include_system
self.projects = projects
self.filter = filter
@schema_property("include_system")
def include_system(self) -> Optional[bool]:
return self._property_include_system
@include_system.setter
def include_system(self, value: Optional[bool]) -> None:
if value is None:
self._property_include_system = None
return
self.assert_isinstance(value, "include_system", (bool,))
self._property_include_system = value
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
@schema_property("filter")
def filter(self) -> Optional[dict]:
return self._property_filter
@filter.setter
def filter(self, value: Optional[dict]) -> None:
if value is None:
self._property_filter = None
return
self.assert_isinstance(value, "filter", (dict,))
self._property_filter = value
|
GetTaskTagsRequest
|
python
|
catalyst-team__catalyst
|
catalyst/loggers/neptune.py
|
{
"start": 957,
"end": 9880
}
|
class ____(ILogger):
"""Neptune logger for parameters, metrics, images and other artifacts (videos, audio,
model checkpoints, etc.).
Neptune documentation:
https://docs.neptune.ai/integrations-and-supported-tools/model-training/catalyst
When the logger is created, link to the run in Neptune will be printed to stdout.
It looks like this:
https://ui.neptune.ai/common/catalyst-integration/e/CATALYST-1379
To start with Neptune please check
`Neptune getting-started docs <http://docs.neptune.ai/getting-started/installation>`_
because you will need an ``api_token`` and a project to log your Catalyst runs to.
.. note::
You can use public api_token ``ANONYMOUS`` and set project to
``common/catalyst-integration`` for testing without registration.
Args:
base_namespace: Optional, ``str``, root namespace within Neptune's run.
Default is "experiment".
api_token: Optional, ``str``. Your Neptune API token. Read more about it in the
`Neptune docs <http://docs.neptune.ai/getting-started/installation>`_.
project: Optional, ``str``. Name of the project to log runs to.
It looks like this: "my_workspace/my_project".
run: Optional, pass Neptune run object if you want to continue logging
to the existing run (resume run).
Read more about it
`here <https://docs.neptune.ai/how-to-guides/neptune-api/resume-run>`_.
log_batch_metrics: boolean flag to log batch metrics
(default: SETTINGS.log_batch_metrics or False).
log_epoch_metrics: boolean flag to log epoch metrics
(default: SETTINGS.log_epoch_metrics or True).
neptune_run_kwargs: Optional, additional keyword arguments
to be passed directly to the
`neptune.init() <https://docs.neptune.ai/api-reference/neptune#init>`_
function.
Python API examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...
loggers={
"neptune": dl.NeptuneLogger(
project="my_workspace/my_project",
tags=["pretraining", "retina"],
)
}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"neptune": dl.NeptuneLogger(
project="my_workspace/my_project"
)
}
# ...
runner = CustomRunner().run()
"""
def __init__(
self,
base_namespace=None,
api_token=None,
project=None,
run=None,
log_batch_metrics: bool = SETTINGS.log_batch_metrics,
log_epoch_metrics: bool = SETTINGS.log_epoch_metrics,
**neptune_run_kwargs,
):
super().__init__(
log_batch_metrics=log_batch_metrics, log_epoch_metrics=log_epoch_metrics
)
if base_namespace is None:
self.base_namespace = "experiment"
else:
self.base_namespace = base_namespace
self._api_token = api_token
self._project = project
self._neptune_run_kwargs = neptune_run_kwargs
if run is None:
self.run = neptune.init(
project=self._project,
api_token=self._api_token,
**self._neptune_run_kwargs,
)
else:
self.run = run
try:
import catalyst.__version__ as version
self.run["source_code/integrations/neptune-catalyst"] = version
except (ImportError, NameError, AttributeError):
pass
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return self.run
def _log_metrics(self, metrics: Dict[str, float], neptune_path: str, step: int):
for key, value in metrics.items():
self.run[f"{neptune_path}/{key}"].log(value=float(value), step=step)
def _log_image(self, image: np.ndarray, neptune_path: str):
self.run[neptune_path].log(neptune.types.File.as_image(image))
def _log_artifact(self, artifact: object, path_to_artifact: str, neptune_path: str):
if artifact is not None:
self.run[neptune_path].upload(neptune.types.File.as_pickle(artifact))
elif path_to_artifact is not None:
self.run[neptune_path].upload(path_to_artifact)
def log_artifact(
self,
tag: str,
runner: "IRunner",
artifact: object = None,
path_to_artifact: str = None,
scope: str = None,
) -> None:
"""Logs arbitrary file (audio, video, csv, etc.) to Neptune."""
if artifact is not None and path_to_artifact is not None:
raise ValueError("artifact and path_to_artifact are mutually exclusive")
if scope == "batch":
neptune_path = "/".join(
[
self.base_namespace,
"_artifacts",
f"epoch-{runner.epoch_step:04d}",
f"loader-{runner.loader_key}",
f"batch-{runner.batch_step:04d}",
tag,
]
)
elif scope == "loader":
neptune_path = "/".join(
[
self.base_namespace,
"_artifacts",
f"epoch-{runner.epoch_step:04d}",
f"loader-{runner.loader_key}",
tag,
]
)
elif scope == "epoch":
neptune_path = "/".join(
[
self.base_namespace,
"_artifacts",
f"epoch-{runner.epoch_step:04d}",
tag,
]
)
elif scope == "experiment" or scope is None:
neptune_path = "/".join([self.base_namespace, "_artifacts", tag])
self._log_artifact(artifact, path_to_artifact, neptune_path)
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to Neptune for current scope on current step."""
if scope == "batch" or scope == "loader":
log_path = "/".join(
[
self.base_namespace,
"_images",
f"epoch-{runner.epoch_step:04d}",
f"loader-{runner.loader_key}",
tag,
]
)
elif scope == "epoch":
log_path = "/".join(
[self.base_namespace, "_images", f"epoch-{runner.epoch_step:04d}", tag]
)
elif scope == "experiment" or scope is None:
log_path = "/".join([self.base_namespace, "_images", tag])
self._log_image(image, log_path)
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs hyper-parameters to Neptune."""
self.run[f"{self.base_namespace}/hparams"] = hparams
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs batch, epoch and loader metrics to Neptune."""
if scope == "batch" and self.log_batch_metrics:
neptune_path = "/".join([self.base_namespace, runner.loader_key, scope])
self._log_metrics(
metrics=metrics, neptune_path=neptune_path, step=runner.sample_step
)
elif scope == "loader" and self.log_epoch_metrics:
neptune_path = "/".join([self.base_namespace, runner.loader_key, scope])
self._log_metrics(
metrics=_prepare_metrics(metrics),
neptune_path=neptune_path,
step=runner.epoch_step,
)
elif scope == "epoch" and self.log_epoch_metrics:
loader_key = "_epoch_"
prepared_metrics = _prepare_metrics(metrics[loader_key])
neptune_path = "/".join([self.base_namespace, scope])
if prepared_metrics:
self._log_metrics(
metrics=prepared_metrics,
neptune_path=neptune_path,
step=runner.epoch_step,
)
elif scope == "experiment" or scope is None:
self._log_metrics(metrics=metrics, neptune_path=self.base_namespace, step=0)
def flush_log(self) -> None:
"""Flushes the loggers."""
pass
def close_log(self, scope: str = None) -> None:
"""Closes the loggers."""
self.run.wait()
__all__ = ["NeptuneLogger"]
|
NeptuneLogger
|
python
|
pypa__pipenv
|
pipenv/vendor/plette/models/base.py
|
{
"start": 2354,
"end": 2972
}
|
class ____(DataModelCollection):
"""A sequence of data views.
Each entry is an instance of `item_class`.
"""
@classmethod
def validate(cls, data):
for d in data:
cls.item_class.validate(d)
def __iter__(self):
return (self.item_class(d) for d in self._data)
def __getitem__(self, key):
if isinstance(key, slice):
return type(self)(self._data[key])
return super().__getitem__(key)
def append(self, value):
if isinstance(value, DataModel):
value = value._data
self._data.append(value)
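# Sketch (illustrative; `Package` and `PackageCollection` are hypothetical names,
# not necessarily the real plette models): a concrete sequence only has to point
# `item_class` at a DataModel subclass, after which iteration yields typed
# wrappers and slicing returns a new sequence of the same type.
#
#   class PackageCollection(DataModelSequence):
#       item_class = Package
#
#   coll = PackageCollection([{"name": "requests", "version": "*"}])
#   first = next(iter(coll))   # -> Package instance
#   head = coll[:1]            # -> PackageCollection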
|
DataModelSequence
|
python
|
getsentry__sentry
|
src/sentry/integrations/vercel/integration.py
|
{
"start": 7550,
"end": 15338
}
|
class ____(IntegrationInstallation):
@property
def metadata(self):
return self.model.metadata
def get_dynamic_display_information(self):
qs = urlencode({"category": "source code management"})
source_code_link = absolute_uri(f"/settings/{self.organization.slug}/integrations/?{qs}")
return {
"configure_integration": {
"instructions": [
install_source_code_integration.format(source_code_link),
]
}
}
def get_client(self):
access_token = self.metadata["access_token"]
if self.metadata["installation_type"] == "team":
return VercelClient(access_token, self.model.external_id)
return VercelClient(access_token)
def get_configuration_id(self):
# XXX(meredith): The "configurations" in the metadata is no longer
# needed since Vercel restricted installation on their end to be
# once per user/team. Eventually we should be able to just use
# `self.metadata["installation_id"]`
if not self.metadata.get("configurations"):
return self.metadata["installation_id"]
# note this could return a different integration if the user has multiple
# installations with the same organization
for configuration_id, data in self.metadata["configurations"].items():
if data["organization_id"] == self.organization_id:
return configuration_id
logger.error(
"could not find matching org",
extra={"organization_id": self.organization_id, "integration_id": self.model.id},
)
return None
def get_slug(self):
client = self.get_client()
if self.metadata["installation_type"] == "team":
team = client.get_team()
return team["slug"]
else:
user = client.get_user()
return user["username"]
def get_organization_config(self):
vercel_client = self.get_client()
# TODO: add try/catch if we get API failure
slug = self.get_slug()
base_url = f"https://vercel.com/{slug}"
vercel_projects = [
{"value": p["id"], "label": p["name"], "url": "{}/{}".format(base_url, p["name"])}
for p in vercel_client.get_projects()
]
sentry_projects = [
{"id": proj.id, "platform": proj.platform, "name": proj.name, "slug": proj.slug}
for proj in sorted(self.organization.projects, key=(lambda proj: proj.slug))
if proj.status == ObjectStatus.ACTIVE
]
fields = [
{
"name": "project_mappings",
"type": "project_mapper",
"mappedDropdown": {
"items": vercel_projects,
"placeholder": _("Vercel project..."),
},
"sentryProjects": sentry_projects,
"nextButton": {
"allowedDomain": "https://vercel.com",
"description": _(
"Link your Sentry projects to complete your installation on Vercel"
),
"text": _("Complete on Vercel"),
},
"iconType": "vercel",
}
]
return fields
def update_organization_config(self, data):
# data = {"project_mappings": [[sentry_project_id, vercel_project_id]]}
vercel_client = self.get_client()
config = self.org_integration.config
try:
new_mappings = data["project_mappings"]
except KeyError:
raise ValidationError("Failed to update configuration.")
old_mappings = config.get("project_mappings") or []
sentry_projects = {proj.id: proj for proj in self.organization.projects}
for mapping in new_mappings:
# skip any mappings that already exist
if mapping in old_mappings:
continue
[sentry_project_id, vercel_project_id] = mapping
sentry_project = sentry_projects[sentry_project_id]
project_key = project_key_service.get_default_project_key(
organization_id=self.organization_id, project_id=sentry_project_id
)
if not project_key:
raise ValidationError(
{"project_mappings": ["You must have an enabled DSN to continue!"]}
)
vercel_project = vercel_client.get_project(vercel_project_id)
sentry_auth_token = SentryAppInstallationToken.objects.get_token(
sentry_project.organization_id,
"vercel",
)
env_var_map = (
VercelEnvVarMapBuilder()
.with_organization(self.organization)
.with_project(sentry_project)
.with_project_key(project_key)
.with_auth_token(sentry_auth_token)
.with_framework(vercel_project.get("framework"))
.build()
)
for env_var, details in env_var_map.items():
# We are logging a message because we potentially have a weird bug where auth tokens disappear from vercel
if env_var == "SENTRY_AUTH_TOKEN" and details["value"] is None:
sentry_sdk.capture_message(
"Setting SENTRY_AUTH_TOKEN env var with None value in Vercel integration"
)
self.create_env_var(
vercel_client,
vercel_project_id,
env_var,
details["value"],
details["type"],
details["target"],
)
config.update(data)
org_integration = integration_service.update_organization_integration(
org_integration_id=self.org_integration.id,
config=config,
)
if org_integration is not None:
self.org_integration = org_integration
def create_env_var(self, client, vercel_project_id, key, value, type, target):
data = {
"key": key,
"value": value,
"target": target,
"type": type,
}
try:
return client.create_env_variable(vercel_project_id, data)
except ApiError as e:
if e.json and e.json.get("error", {}).get("code") == "ENV_ALREADY_EXISTS":
try:
return self.update_env_variable(client, vercel_project_id, data)
except ApiError as e:
error_message = (
e.json.get("error", {}).get("message")
if e.json
else f"Could not update environment variable {key}."
)
raise ValidationError({"project_mappings": [error_message]})
raise
def update_env_variable(self, client, vercel_project_id, data):
envs = client.get_env_vars(vercel_project_id)["envs"]
env_var_ids = [env_var["id"] for env_var in envs if env_var["key"] == data["key"]]
if env_var_ids:
return client.update_env_variable(vercel_project_id, env_var_ids[0], data)
key = data["key"]
raise IntegrationError(
f"Could not update environment variable {key} in Vercel project {vercel_project_id}."
)
def uninstall(self):
client = self.get_client()
try:
client.uninstall(self.get_configuration_id())
except ApiError as error:
if error.code == 403:
pass
else:
raise
|
VercelIntegration
|
python
|
apache__airflow
|
airflow-core/src/airflow/configuration.py
|
{
"start": 2432,
"end": 6042
}
|
class ____:
"""
Holds modifications to be applied when writing out the config.
:param rename: Mapping from (old_section, old_option) to (new_section, new_option)
:param remove: Set of (section, option) to remove
:param default_updates: Mapping from (section, option) to new default value
"""
def __init__(self) -> None:
self.rename: dict[tuple[str, str], tuple[str, str]] = {}
self.remove: set[tuple[str, str]] = set()
self.default_updates: dict[tuple[str, str], str] = {}
def add_rename(self, old_section: str, old_option: str, new_section: str, new_option: str) -> None:
self.rename[(old_section, old_option)] = (new_section, new_option)
def add_remove(self, section: str, option: str) -> None:
self.remove.add((section, option))
def add_default_update(self, section: str, option: str, new_default: str) -> None:
self.default_updates[(section, option)] = new_default
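# Usage sketch (illustrative; the option names are only examples of the kind of
# migration this records):
#
#   mods = ConfigModifications()
#   mods.add_rename("core", "sql_alchemy_conn", "database", "sql_alchemy_conn")
#   mods.add_default_update("logging", "logging_level", "INFO")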
def _parse_sqlite_version(s: str) -> tuple[int, ...]:
match = _SQLITE3_VERSION_PATTERN.match(s)
if match is None:
return ()
return tuple(int(p) for p in match.group("version").split("."))
@overload
def expand_env_var(env_var: None) -> None: ...
@overload
def expand_env_var(env_var: str) -> str: ...
def expand_env_var(env_var: str | None) -> str | None:
"""
Expand (potentially nested) env vars.
Repeatedly apply `expandvars` and `expanduser` until
interpolation stops having any effect.
"""
if not env_var or not isinstance(env_var, str):
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
env_var = interpolated
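# Worked example (illustrative): with BAR="~/dags" and FOO="$BAR" in the
# environment, expand_env_var("$FOO") expands to "$BAR" on the first pass and to
# an absolute path such as "/home/airflow/dags" (expandvars then expanduser) on
# the second; a further pass changes nothing, so that value is returned.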
def run_command(command: str) -> str:
"""Run command and returns stdout."""
process = subprocess.Popen(
shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
output, stderr = (stream.decode(sys.getdefaultencoding(), "ignore") for stream in process.communicate())
if process.returncode != 0:
raise AirflowConfigException(
f"Cannot execute {command}. Error code is: {process.returncode}. "
f"Output: {output}, Stderr: {stderr}"
)
return output
def _default_config_file_path(file_name: str) -> str:
templates_dir = os.path.join(os.path.dirname(__file__), "config_templates")
return os.path.join(templates_dir, file_name)
def retrieve_configuration_description(
include_airflow: bool = True,
include_providers: bool = True,
selected_provider: str | None = None,
) -> dict[str, dict[str, Any]]:
"""
Read Airflow configuration description from YAML file.
:param include_airflow: Include Airflow configs
:param include_providers: Include provider configs
:param selected_provider: If specified, include selected provider only
:return: Python dictionary containing configs & their info
"""
base_configuration_description: dict[str, dict[str, Any]] = {}
if include_airflow:
with open(_default_config_file_path("config.yml")) as config_file:
base_configuration_description.update(yaml.safe_load(config_file))
if include_providers:
from airflow.providers_manager import ProvidersManager
for provider, config in ProvidersManager().provider_configs:
if not selected_provider or provider == selected_provider:
base_configuration_description.update(config)
return base_configuration_description
|
ConfigModifications
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/dacite/exceptions.py
|
{
"start": 2028,
"end": 2275
}
|
class ____(DaciteError):
def __init__(self, message: str) -> None:
super().__init__()
self.message = message
def __str__(self) -> str:
return f"can not resolve forward reference: {self.message}"
|
ForwardReferenceError
|
python
|
optuna__optuna
|
optuna/exceptions.py
|
{
"start": 1765,
"end": 1943
}
|
class ____(OptunaError):
"""Exception for storage operation.
This error is raised when an operation fails in the backend DB of the storage.
"""
pass
|
StorageInternalError
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B024.py
|
{
"start": 1338,
"end": 1426
}
|
class ____(metaclass=abc.ABCMeta): # error
def method(self):
foo()
|
abc_Base_2
|
python
|
fluentpython__example-code-2e
|
15-more-types/cafeteria/cafeteria.py
|
{
"start": 436,
"end": 476
}
|
class ____:
"""Any garbage."""
|
Garbage
|
python
|
MongoEngine__mongoengine
|
mongoengine/fields.py
|
{
"start": 79589,
"end": 79972
}
|
class ____(GeoJsonBaseField):
"""A GeoJSON field storing a list of Points.
The data is represented as:
.. code-block:: js
{'type' : 'MultiPoint' ,
'coordinates' : [[x1, y1], [x2, y2]]}
You can either pass a dict with the full information or a list
to set the value.
Requires mongodb >= 2.6
"""
_type = "MultiPoint"
|
MultiPointField
|
python
|
huggingface__transformers
|
src/transformers/models/mask2former/modeling_mask2former.py
|
{
"start": 69140,
"end": 75447
}
|
class ____(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and
keys (as explained in the DETR paper).
"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
key_value_states: Optional[torch.Tensor] = None,
key_value_position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
hidden_states = hidden_states.permute(1, 0, 2) if hidden_states is not None else None
position_embeddings = position_embeddings.permute(1, 0, 2) if position_embeddings is not None else None
key_value_states = key_value_states.permute(1, 0, 2) if key_value_states is not None else None
key_value_position_embeddings = (
key_value_position_embeddings.permute(1, 0, 2) if key_value_position_embeddings is not None else None
)
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
batch_size, target_len, embed_dim = hidden_states.size()
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states_original = hidden_states
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
# add key-value position embeddings to the key value states
if key_value_position_embeddings is not None:
key_value_states_original = key_value_states
key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, batch_size)
value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, batch_size)
value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
source_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (batch_size * self.num_heads, target_len, source_len):
raise ValueError(
f"Attention mask should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is"
f" {attention_mask.size()}"
)
attn_weights += attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights has to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len)
attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(batch_size * self.num_heads, target_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, target_len, embed_dim)
attn_output = self.out_proj(attn_output).permute(1, 0, 2)
return attn_output, attn_weights_reshaped
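# Shape sketch (illustrative, inferred from the permutes at the entry and exit
# of forward(), which suggest sequence-first tensors; not an official example):
#
#   attn = Mask2FormerAttention(embed_dim=256, num_heads=8)
#   x = torch.randn(100, 2, 256)   # (sequence_length, batch_size, embed_dim)
#   out, weights = attn(hidden_states=x, output_attentions=True)
#   # out: (100, 2, 256); weights: (2, 8, 100, 100)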
|
Mask2FormerAttention
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-s3/source_s3/v4/zip_reader.py
|
{
"start": 11282,
"end": 15390
}
|
class ____:
"""
A custom reader class that provides buffered reading capabilities on a decompressed stream.
Supports reading lines, reading chunks, and iterating over the content.
"""
def __init__(self, decompressed_stream: DecompressedStream, encoding: Optional[str] = None, buffer_size: int = BUFFER_SIZE_DEFAULT):
"""
Initialize a ZipContentReader.
:param decompressed_stream: A DecompressedStream object.
:param encoding: Encoding to decode the bytes. If None, bytes are returned.
:param buffer_size: Size of the buffer for reading data.
"""
self.raw = decompressed_stream
self.encoding = encoding
self.buffer_size = buffer_size
self.buffer = bytearray()
self._closed = False
def __iter__(self):
"""
Make the class iterable.
"""
return self
def __next__(self) -> Union[str, bytes]:
"""
Iterate over the lines in the reader.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def readline(self, limit: int = -1) -> Union[str, bytes]:
"""
Read a single line from the stream.
"""
if limit != -1:
raise NotImplementedError("Limits other than -1 not implemented yet")
line = ""
while True:
char = self.read(1)
if not char:
break
line += char
if char in ["\n", "\r"]:
# Handling different types of newlines
next_char = self.read(1)
if char == "\r" and next_char == "\n":
line += next_char
else:
self.buffer = next_char.encode(self.encoding) + self.buffer
break
return line
def read(self, size: int = -1) -> Union[str, bytes]:
"""
Read a specified number of bytes/characters from the reader.
"""
while len(self.buffer) < size:
chunk = self.raw.read(self.buffer_size)
if not chunk:
break
self.buffer += chunk
data = self.buffer[:size]
self.buffer = self.buffer[size:]
try:
return data.decode(self.encoding) if self.encoding else bytes(data)
except UnicodeDecodeError:
if self.encoding == "utf_8_sig":
# utf_8_sig considers `\xef\xbb\xbf` a single character, so calling
# `bytearray(b'\xef').decode("utf_8_sig")` will raise an exception.
number_of_bytes_to_add = size - 1
if data.endswith(bytearray(b"\xef")):
number_of_bytes_to_add += 2
elif data.endswith(bytearray(b"\xbb")):
number_of_bytes_to_add += 1
data = data + self.buffer[:number_of_bytes_to_add]
self.buffer = self.buffer[number_of_bytes_to_add:]
return data.decode(self.encoding) if self.encoding else bytes(data)
raise
def seek(self, offset: int, whence: int = io.SEEK_SET) -> int:
"""
Seek to a specific position in the decompressed stream.
"""
self.buffer = bytearray()
return self.raw.seek(offset, whence)
def close(self):
"""
Close the reader and underlying decompressed stream.
"""
self._closed = True
self.raw.close()
def tell(self) -> int:
"""
Return the current position in the decompressed stream.
"""
return self.raw.tell()
@property
def closed(self) -> bool:
"""
Check if the reader is closed.
"""
return self._closed
def __enter__(self) -> "ZipContentReader":
"""Enter the runtime context for the reader."""
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
"""Exit the runtime context for the reader and ensure resources are closed."""
self.close()
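# Usage sketch (illustrative; building the DecompressedStream is omitted because
# its constructor is defined elsewhere in this module, and `process` is a
# hypothetical callback):
#
#   with ZipContentReader(decompressed_stream, encoding="utf-8") as reader:
#       header = reader.readline()   # handles "\n", "\r" and "\r\n" endings
#       for line in reader:
#           process(line)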
|
ZipContentReader
|
python
|
apache__airflow
|
airflow-core/tests/unit/utils/test_deprecation_tools.py
|
{
"start": 1692,
"end": 10633
}
|
class ____:
"""Tests for the getattr_with_deprecation function."""
def test_getattr_with_deprecation_specific_class(self):
"""Test deprecated import for a specific class."""
imports = {"OldClass": "new.module.NewClass"}
# Mock the new module and class
mock_module = mock.MagicMock()
mock_new_class = mock.MagicMock()
mock_module.NewClass = mock_new_class
with mock.patch("airflow.utils.deprecation_tools.importlib.import_module", return_value=mock_module):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name="OldClass",
)
assert result == mock_new_class
assert len(w) == 1
assert issubclass(w[0].category, DeprecatedImportWarning)
assert "old.module.OldClass" in str(w[0].message)
assert "new.module.NewClass" in str(w[0].message)
def test_getattr_with_deprecation_wildcard(self):
"""Test deprecated import using wildcard pattern."""
imports = {"*": "new.module"}
# Mock the new module and attribute
mock_module = mock.MagicMock()
mock_attribute = mock.MagicMock()
mock_module.SomeAttribute = mock_attribute
with mock.patch("airflow.utils.deprecation_tools.importlib.import_module", return_value=mock_module):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name="SomeAttribute",
)
assert result == mock_attribute
assert len(w) == 1
assert issubclass(w[0].category, DeprecatedImportWarning)
assert "old.module.SomeAttribute" in str(w[0].message)
assert "new.module.SomeAttribute" in str(w[0].message)
def test_getattr_with_deprecation_wildcard_with_override(self):
"""Test wildcard pattern with override deprecated classes."""
imports = {"*": "new.module"}
override_deprecated_classes = {"SomeAttribute": "override.module.OverrideClass"}
# Mock the new module and attribute
mock_module = mock.MagicMock()
mock_attribute = mock.MagicMock()
mock_module.SomeAttribute = mock_attribute
with mock.patch("airflow.utils.deprecation_tools.importlib.import_module", return_value=mock_module):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes=override_deprecated_classes,
extra_message="",
name="SomeAttribute",
)
assert result == mock_attribute
assert len(w) == 1
assert issubclass(w[0].category, DeprecatedImportWarning)
assert "old.module.SomeAttribute" in str(w[0].message)
assert "override.module.OverrideClass" in str(w[0].message)
def test_getattr_with_deprecation_specific_class_priority(self):
"""Test that specific class mapping takes priority over wildcard."""
imports = {"SpecificClass": "specific.module.SpecificClass", "*": "wildcard.module"}
# Mock the specific module and class
mock_module = mock.MagicMock()
mock_specific_class = mock.MagicMock()
mock_module.SpecificClass = mock_specific_class
with mock.patch("airflow.utils.deprecation_tools.importlib.import_module", return_value=mock_module):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
result = getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name="SpecificClass",
)
assert result == mock_specific_class
assert len(w) == 1
assert issubclass(w[0].category, DeprecatedImportWarning)
assert "old.module.SpecificClass" in str(w[0].message)
assert "specific.module.SpecificClass" in str(w[0].message)
def test_getattr_with_deprecation_attribute_not_found(self):
"""Test AttributeError when attribute not found."""
imports = {"ExistingClass": "new.module.ExistingClass"}
with pytest.raises(AttributeError, match=r"has no attribute.*NonExistentClass"):
getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name="NonExistentClass",
)
def test_getattr_with_deprecation_import_error(self):
"""Test ImportError when target module cannot be imported."""
imports = {"*": "nonexistent.module"}
with mock.patch(
"airflow.utils.deprecation_tools.importlib.import_module",
side_effect=ImportError("Module not found"),
):
with pytest.raises(ImportError, match="Could not import"):
getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name="SomeAttribute",
)
def test_getattr_with_deprecation_with_extra_message(self):
"""Test that extra message is included in warning."""
imports = {"*": "new.module"}
extra_message = "This is an extra message"
# Mock the new module and attribute
mock_module = mock.MagicMock()
mock_attribute = mock.MagicMock()
mock_module.SomeAttribute = mock_attribute
with mock.patch("airflow.utils.deprecation_tools.importlib.import_module", return_value=mock_module):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message=extra_message,
name="SomeAttribute",
)
assert len(w) == 1
assert extra_message in str(w[0].message)
@pytest.mark.parametrize("dunder_attribute", ["__path__", "__file__"])
def test_getattr_with_deprecation_wildcard_skips_dunder_attributes(self, dunder_attribute):
"""Test that wildcard pattern skips Python special attributes."""
imports = {"*": "new.module"}
# Special attributes should raise AttributeError, not be redirected
with pytest.raises(AttributeError, match=rf"has no attribute.*{re.escape(dunder_attribute)}"):
getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name=dunder_attribute,
)
@pytest.mark.parametrize("non_dunder_attr", ["__version", "__author", "_private", "public"])
def test_getattr_with_deprecation_wildcard_allows_non_dunder_attributes(self, non_dunder_attr):
"""Test that wildcard pattern allows non-dunder attributes (including single underscore prefixed)."""
imports = {"*": "unittest.mock"}
# These should be redirected through wildcard pattern
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with contextlib.suppress(ImportError, AttributeError):
# Expected - the target module might not have the attribute
# The important thing is that it tried to redirect (didn't raise AttributeError immediately)
getattr_with_deprecation(
imports=imports,
module="old.module",
override_deprecated_classes={},
extra_message="",
name=non_dunder_attr,
)
# Should have generated a deprecation warning
assert len(w) == 1
assert "deprecated" in str(w[0].message).lower()
|
TestGetAttrWithDeprecation
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/ext/asyncio/scoping.py
|
{
"start": 3323,
"end": 54154
}
|
class ____(Generic[_AS]):
"""Provides scoped management of :class:`.AsyncSession` objects.
See the section :ref:`asyncio_scoped_session` for usage details.
.. versionadded:: 1.4.19
"""
_support_async = True
session_factory: async_sessionmaker[_AS]
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.AsyncSession` is needed."""
registry: ScopedRegistry[_AS]
def __init__(
self,
session_factory: async_sessionmaker[_AS],
scopefunc: Callable[[], Any],
):
"""Construct a new :class:`_asyncio.async_scoped_session`.
:param session_factory: a factory to create new :class:`_asyncio.AsyncSession`
instances. This is usually, but not necessarily, an instance
of :class:`_asyncio.async_sessionmaker`.
:param scopefunc: function which defines
the current scope. A function such as ``asyncio.current_task``
may be useful here.
""" # noqa: E501
self.session_factory = session_factory
self.registry = ScopedRegistry(session_factory, scopefunc)
@property
def _proxied(self) -> _AS:
return self.registry()
def __call__(self, **kw: Any) -> _AS:
r"""Return the current :class:`.AsyncSession`, creating it
using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
:attr:`.scoped_session.session_factory` callable, if an existing
:class:`.AsyncSession` is not present. If the
:class:`.AsyncSession` is present
and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified."
)
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
else:
sess = self.registry()
if not self._support_async and sess._is_asyncio:
warn_deprecated(
"Using `scoped_session` with asyncio is deprecated and "
"will raise an error in a future version. "
"Please use `async_scoped_session` instead.",
"1.4.23",
)
return sess
def configure(self, **kwargs: Any) -> None:
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn(
"At least one scoped session is already present. "
" configure() can not affect sessions that have "
"already been created."
)
self.session_factory.configure(**kwargs)
async def remove(self) -> None:
"""Dispose of the current :class:`.AsyncSession`, if present.
        Unlike the ``remove()`` method of ``scoped_session``, this method uses
        ``await`` to wait for the ``close()`` method of the ``AsyncSession``.
"""
if self.registry.has():
await self.registry().close()
self.registry.clear()
# START PROXY METHODS async_scoped_session
# code within this block is **programmatically,
# statically generated** by tools/generate_proxy_methods.py
def __contains__(self, instance: object) -> bool:
r"""Return True if the instance is associated with this session.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
The instance may be pending or persistent within the Session for a
result of True.
""" # noqa: E501
return self._proxied.__contains__(instance)
def __iter__(self) -> Iterator[object]:
r"""Iterate over all pending or persistent instances within this
Session.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
""" # noqa: E501
return self._proxied.__iter__()
async def aclose(self) -> None:
r"""A synonym for :meth:`_asyncio.AsyncSession.close`.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
The :meth:`_asyncio.AsyncSession.aclose` name is specifically
to support the Python standard library ``@contextlib.aclosing``
context manager function.
.. versionadded:: 2.0.20
""" # noqa: E501
return await self._proxied.aclose()
def add(self, instance: object, *, _warn: bool = True) -> None:
r"""Place an object into this :class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
Objects that are in the :term:`transient` state when passed to the
:meth:`_orm.Session.add` method will move to the
:term:`pending` state, until the next flush, at which point they
will move to the :term:`persistent` state.
Objects that are in the :term:`detached` state when passed to the
:meth:`_orm.Session.add` method will move to the :term:`persistent`
state directly.
If the transaction used by the :class:`_orm.Session` is rolled back,
objects which were transient when they were passed to
:meth:`_orm.Session.add` will be moved back to the
:term:`transient` state, and will no longer be present within this
:class:`_orm.Session`.
.. seealso::
:meth:`_orm.Session.add_all`
:ref:`session_adding` - at :ref:`session_basics`
""" # noqa: E501
return self._proxied.add(instance, _warn=_warn)
def add_all(self, instances: Iterable[object]) -> None:
r"""Add the given collection of instances to this :class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
See the documentation for :meth:`_orm.Session.add` for a general
behavioral description.
.. seealso::
:meth:`_orm.Session.add`
:ref:`session_adding` - at :ref:`session_basics`
""" # noqa: E501
return self._proxied.add_all(instances)
def begin(self) -> AsyncSessionTransaction:
r"""Return an :class:`_asyncio.AsyncSessionTransaction` object.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
The underlying :class:`_orm.Session` will perform the
"begin" action when the :class:`_asyncio.AsyncSessionTransaction`
object is entered::
async with async_session.begin():
... # ORM transaction is begun
Note that database IO will not normally occur when the session-level
transaction is begun, as database transactions begin on an
on-demand basis. However, the begin block is async to accommodate
for a :meth:`_orm.SessionEvents.after_transaction_create`
event hook that may perform IO.
For a general description of ORM begin, see
:meth:`_orm.Session.begin`.
""" # noqa: E501
return self._proxied.begin()
def begin_nested(self) -> AsyncSessionTransaction:
r"""Return an :class:`_asyncio.AsyncSessionTransaction` object
which will begin a "nested" transaction, e.g. SAVEPOINT.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
Behavior is the same as that of :meth:`_asyncio.AsyncSession.begin`.
For a general description of ORM begin nested, see
:meth:`_orm.Session.begin_nested`.
.. seealso::
:ref:`aiosqlite_serializable` - special workarounds required
with the SQLite asyncio driver in order for SAVEPOINT to work
correctly.
""" # noqa: E501
return self._proxied.begin_nested()
async def close(self) -> None:
r"""Close out the transactional resources and ORM objects used by this
:class:`_asyncio.AsyncSession`.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.close` - main documentation for
"close"
:ref:`session_closing` - detail on the semantics of
:meth:`_asyncio.AsyncSession.close` and
:meth:`_asyncio.AsyncSession.reset`.
""" # noqa: E501
return await self._proxied.close()
async def reset(self) -> None:
r"""Close out the transactional resources and ORM objects used by this
:class:`_orm.Session`, resetting the session to its initial state.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. versionadded:: 2.0.22
.. seealso::
:meth:`_orm.Session.reset` - main documentation for
"reset"
:ref:`session_closing` - detail on the semantics of
:meth:`_asyncio.AsyncSession.close` and
:meth:`_asyncio.AsyncSession.reset`.
""" # noqa: E501
return await self._proxied.reset()
async def commit(self) -> None:
r"""Commit the current transaction in progress.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.commit` - main documentation for
"commit"
""" # noqa: E501
return await self._proxied.commit()
async def connection(
self,
bind_arguments: Optional[_BindArguments] = None,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
**kw: Any,
) -> AsyncConnection:
r"""Return a :class:`_asyncio.AsyncConnection` object corresponding to
this :class:`.Session` object's transactional state.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
This method may also be used to establish execution options for the
database connection used by the current transaction.
.. versionadded:: 1.4.24 Added \**kw arguments which are passed
through to the underlying :meth:`_orm.Session.connection` method.
.. seealso::
:meth:`_orm.Session.connection` - main documentation for
"connection"
""" # noqa: E501
return await self._proxied.connection(
bind_arguments=bind_arguments,
execution_options=execution_options,
**kw,
)
async def delete(self, instance: object) -> None:
r"""Mark an instance as deleted.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
The database delete operation occurs upon ``flush()``.
As this operation may need to cascade along unloaded relationships,
it is awaitable to allow for those queries to take place.
.. seealso::
:meth:`_orm.Session.delete` - main documentation for delete
""" # noqa: E501
return await self._proxied.delete(instance)
async def delete_all(self, instances: Iterable[object]) -> None:
r"""Calls :meth:`.AsyncSession.delete` on multiple instances.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.delete_all` - main documentation for delete_all
""" # noqa: E501
return await self._proxied.delete_all(instances)
@overload
async def execute(
self,
statement: TypedReturnsRows[Unpack[_Ts]],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Unpack[_Ts]]: ...
@overload
async def execute(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Unpack[TupleAny]]: ...
async def execute(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Result[Unpack[TupleAny]]:
r"""Execute a statement and return a buffered
:class:`_engine.Result` object.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.execute` - main documentation for execute
""" # noqa: E501
return await self._proxied.execute(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
def expire(
self, instance: object, attribute_names: Optional[Iterable[str]] = None
) -> None:
r"""Expire the attributes on an instance.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
:meth:`_orm.Query.populate_existing`
""" # noqa: E501
return self._proxied.expire(instance, attribute_names=attribute_names)
def expire_all(self) -> None:
r"""Expires all persistent instances within this Session.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
When any attributes on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` is not usually needed,
assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
:meth:`_orm.Query.populate_existing`
""" # noqa: E501
return self._proxied.expire_all()
def expunge(self, instance: object) -> None:
r"""Remove the `instance` from this ``Session``.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
""" # noqa: E501
return self._proxied.expunge(instance)
def expunge_all(self) -> None:
r"""Remove all object instances from this ``Session``.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
""" # noqa: E501
return self._proxied.expunge_all()
async def flush(self, objects: Optional[Sequence[Any]] = None) -> None:
r"""Flush all the object changes to the database.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.flush` - main documentation for flush
""" # noqa: E501
return await self._proxied.flush(objects=objects)
def get_bind(
self,
mapper: Optional[_EntityBindKey[_O]] = None,
clause: Optional[ClauseElement] = None,
bind: Optional[_SessionBind] = None,
**kw: Any,
) -> Union[Engine, Connection]:
r"""Return a "bind" to which the synchronous proxied :class:`_orm.Session`
is bound.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
Unlike the :meth:`_orm.Session.get_bind` method, this method is
currently **not** used by this :class:`.AsyncSession` in any way
in order to resolve engines for requests.
.. note::
This method proxies directly to the :meth:`_orm.Session.get_bind`
method, however is currently **not** useful as an override target,
in contrast to that of the :meth:`_orm.Session.get_bind` method.
The example below illustrates how to implement custom
:meth:`_orm.Session.get_bind` schemes that work with
:class:`.AsyncSession` and :class:`.AsyncEngine`.
The pattern introduced at :ref:`session_custom_partitioning`
illustrates how to apply a custom bind-lookup scheme to a
:class:`_orm.Session` given a set of :class:`_engine.Engine` objects.
To apply a corresponding :meth:`_orm.Session.get_bind` implementation
for use with a :class:`.AsyncSession` and :class:`.AsyncEngine`
objects, continue to subclass :class:`_orm.Session` and apply it to
:class:`.AsyncSession` using
:paramref:`.AsyncSession.sync_session_class`. The inner method must
continue to return :class:`_engine.Engine` instances, which can be
acquired from a :class:`_asyncio.AsyncEngine` using the
:attr:`_asyncio.AsyncEngine.sync_engine` attribute::
# using example from "Custom Vertical Partitioning"
import random
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.ext.asyncio import create_async_engine
from sqlalchemy.ext.asyncio import async_sessionmaker
from sqlalchemy.orm import Session
# construct async engines w/ async drivers
engines = {
"leader": create_async_engine("sqlite+aiosqlite:///leader.db"),
"other": create_async_engine("sqlite+aiosqlite:///other.db"),
"follower1": create_async_engine("sqlite+aiosqlite:///follower1.db"),
"follower2": create_async_engine("sqlite+aiosqlite:///follower2.db"),
}
class RoutingSession(Session):
def get_bind(self, mapper=None, clause=None, **kw):
# within get_bind(), return sync engines
if mapper and issubclass(mapper.class_, MyOtherClass):
return engines["other"].sync_engine
elif self._flushing or isinstance(clause, (Update, Delete)):
return engines["leader"].sync_engine
else:
return engines[
random.choice(["follower1", "follower2"])
].sync_engine
# apply to AsyncSession using sync_session_class
AsyncSessionMaker = async_sessionmaker(sync_session_class=RoutingSession)
The :meth:`_orm.Session.get_bind` method is called in a non-asyncio,
implicitly non-blocking context in the same manner as ORM event hooks
and functions that are invoked via :meth:`.AsyncSession.run_sync`, so
routines that wish to run SQL commands inside of
:meth:`_orm.Session.get_bind` can continue to do so using
blocking-style code, which will be translated to implicitly async calls
at the point of invoking IO on the database drivers.
""" # noqa: E501
return self._proxied.get_bind(
mapper=mapper, clause=clause, bind=bind, **kw
)
def is_modified(
self, instance: object, include_collections: bool = True
) -> bool:
r"""Return ``True`` if the given instance has locally
modified attributes.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously flushed or committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
""" # noqa: E501
return self._proxied.is_modified(
instance, include_collections=include_collections
)
async def invalidate(self) -> None:
r"""Close this Session, using connection invalidation.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
For a complete description, see :meth:`_orm.Session.invalidate`.
""" # noqa: E501
return await self._proxied.invalidate()
async def merge(
self,
instance: _O,
*,
load: bool = True,
options: Optional[Sequence[ORMOption]] = None,
) -> _O:
r"""Copy the state of a given instance into a corresponding instance
within this :class:`_asyncio.AsyncSession`.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.merge` - main documentation for merge
""" # noqa: E501
return await self._proxied.merge(instance, load=load, options=options)
async def merge_all(
self,
instances: Iterable[_O],
*,
load: bool = True,
options: Optional[Sequence[ORMOption]] = None,
) -> Sequence[_O]:
r"""Calls :meth:`.AsyncSession.merge` on multiple instances.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.merge_all` - main documentation for merge_all
""" # noqa: E501
return await self._proxied.merge_all(
instances, load=load, options=options
)
async def refresh(
self,
instance: object,
attribute_names: Optional[Iterable[str]] = None,
with_for_update: ForUpdateParameter = None,
) -> None:
r"""Expire and refresh the attributes on the given instance.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
This is the async version of the :meth:`_orm.Session.refresh` method.
See that method for a complete description of all options.
.. seealso::
:meth:`_orm.Session.refresh` - main documentation for refresh
""" # noqa: E501
return await self._proxied.refresh(
instance,
attribute_names=attribute_names,
with_for_update=with_for_update,
)
async def rollback(self) -> None:
r"""Rollback the current transaction in progress.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.rollback` - main documentation for
"rollback"
""" # noqa: E501
return await self._proxied.rollback()
@overload
async def scalar(
self,
statement: TypedReturnsRows[_T],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Optional[_T]: ...
@overload
async def scalar(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Any: ...
async def scalar(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Any:
r"""Execute a statement and return a scalar result.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.scalar` - main documentation for scalar
""" # noqa: E501
return await self._proxied.scalar(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
@overload
async def scalars(
self,
statement: TypedReturnsRows[_T],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[_T]: ...
@overload
async def scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[Any]: ...
async def scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[Any]:
r"""Execute a statement and return scalar results.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
:return: a :class:`_result.ScalarResult` object
.. versionadded:: 1.4.24 Added :meth:`_asyncio.AsyncSession.scalars`
.. versionadded:: 1.4.26 Added
:meth:`_asyncio.async_scoped_session.scalars`
.. seealso::
:meth:`_orm.Session.scalars` - main documentation for scalars
:meth:`_asyncio.AsyncSession.stream_scalars` - streaming version
""" # noqa: E501
return await self._proxied.scalars(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
async def get(
self,
entity: _EntityBindKey[_O],
ident: _PKIdentityArgument,
*,
options: Optional[Sequence[ORMOption]] = None,
populate_existing: bool = False,
with_for_update: ForUpdateParameter = None,
identity_token: Optional[Any] = None,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
) -> Union[_O, None]:
r"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. seealso::
:meth:`_orm.Session.get` - main documentation for get
""" # noqa: E501
result = await self._proxied.get(
entity,
ident,
options=options,
populate_existing=populate_existing,
with_for_update=with_for_update,
identity_token=identity_token,
execution_options=execution_options,
)
return result
async def get_one(
self,
entity: _EntityBindKey[_O],
ident: _PKIdentityArgument,
*,
options: Optional[Sequence[ORMOption]] = None,
populate_existing: bool = False,
with_for_update: ForUpdateParameter = None,
identity_token: Optional[Any] = None,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
) -> _O:
r"""Return an instance based on the given primary key identifier,
or raise an exception if not found.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
Raises :class:`_exc.NoResultFound` if the query selects no rows.
        .. versionadded:: 2.0.22
.. seealso::
:meth:`_orm.Session.get_one` - main documentation for get_one
""" # noqa: E501
return await self._proxied.get_one(
entity,
ident,
options=options,
populate_existing=populate_existing,
with_for_update=with_for_update,
identity_token=identity_token,
execution_options=execution_options,
)
@overload
async def stream(
self,
statement: TypedReturnsRows[Unpack[_Ts]],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncResult[Unpack[_Ts]]: ...
@overload
async def stream(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncResult[Unpack[TupleAny]]: ...
async def stream(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncResult[Unpack[TupleAny]]:
r"""Execute a statement and return a streaming
:class:`_asyncio.AsyncResult` object.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
""" # noqa: E501
return await self._proxied.stream(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
@overload
async def stream_scalars(
self,
statement: TypedReturnsRows[_T],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncScalarResult[_T]: ...
@overload
async def stream_scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncScalarResult[Any]: ...
async def stream_scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> AsyncScalarResult[Any]:
r"""Execute a statement and return a stream of scalar results.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
:return: an :class:`_asyncio.AsyncScalarResult` object
.. versionadded:: 1.4.24
.. seealso::
:meth:`_orm.Session.scalars` - main documentation for scalars
:meth:`_asyncio.AsyncSession.scalars` - non streaming version
""" # noqa: E501
return await self._proxied.stream_scalars(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
@property
def bind(self) -> Any:
r"""Proxy for the :attr:`_asyncio.AsyncSession.bind` attribute
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
""" # noqa: E501
return self._proxied.bind
@bind.setter
def bind(self, attr: Any) -> None:
self._proxied.bind = attr
@property
def dirty(self) -> Any:
r"""The set of all persistent instances considered dirty.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
""" # noqa: E501
return self._proxied.dirty
@property
def deleted(self) -> Any:
r"""The set of all instances marked as 'deleted' within this ``Session``
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
""" # noqa: E501
return self._proxied.deleted
@property
def new(self) -> Any:
r"""The set of all instances marked as 'new' within this ``Session``.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
""" # noqa: E501
return self._proxied.new
@property
def identity_map(self) -> Any:
r"""Proxy for the :attr:`_orm.Session.identity_map` attribute
on behalf of the :class:`_asyncio.AsyncSession` class.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
""" # noqa: E501
return self._proxied.identity_map
@identity_map.setter
def identity_map(self, attr: Any) -> None:
self._proxied.identity_map = attr
@property
def is_active(self) -> Any:
r"""True if this :class:`.Session` not in "partial rollback" state.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
.. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins
a new transaction immediately, so this attribute will be False
when the :class:`_orm.Session` is first instantiated.
"partial rollback" state typically indicates that the flush process
of the :class:`_orm.Session` has failed, and that the
:meth:`_orm.Session.rollback` method must be emitted in order to
fully roll back the transaction.
If this :class:`_orm.Session` is not in a transaction at all, the
:class:`_orm.Session` will autobegin when it is first used, so in this
case :attr:`_orm.Session.is_active` will return True.
Otherwise, if this :class:`_orm.Session` is within a transaction,
and that transaction has not been rolled back internally, the
:attr:`_orm.Session.is_active` will also return True.
.. seealso::
:ref:`faq_session_rollback`
:meth:`_orm.Session.in_transaction`
""" # noqa: E501
return self._proxied.is_active
@property
def autoflush(self) -> Any:
r"""Proxy for the :attr:`_orm.Session.autoflush` attribute
on behalf of the :class:`_asyncio.AsyncSession` class.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
""" # noqa: E501
return self._proxied.autoflush
@autoflush.setter
def autoflush(self, attr: Any) -> None:
self._proxied.autoflush = attr
@property
def no_autoflush(self) -> Any:
r"""Return a context manager that disables autoflush.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
""" # noqa: E501
return self._proxied.no_autoflush
@property
def info(self) -> Any:
r"""A user-modifiable dictionary.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_asyncio.AsyncSession` class.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
""" # noqa: E501
return self._proxied.info
@property
def execution_options(self) -> Any:
r"""Proxy for the :attr:`_orm.Session.execution_options` attribute
on behalf of the :class:`_asyncio.AsyncSession` class.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class
on behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
""" # noqa: E501
return self._proxied.execution_options
@execution_options.setter
def execution_options(self, attr: Any) -> None:
self._proxied.execution_options = attr
@classmethod
async def close_all(cls) -> None:
r"""Close all :class:`_asyncio.AsyncSession` sessions.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. deprecated:: 2.0 The :meth:`.AsyncSession.close_all` method is deprecated and will be removed in a future release. Please refer to :func:`_asyncio.close_all_sessions`.
""" # noqa: E501
return await AsyncSession.close_all()
@classmethod
def object_session(cls, instance: object) -> Optional[Session]:
r"""Return the :class:`.Session` to which an object belongs.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
This is an alias of :func:`.object_session`.
""" # noqa: E501
return AsyncSession.object_session(instance)
@classmethod
def identity_key(
cls,
class_: Optional[Type[Any]] = None,
ident: Union[Any, Tuple[Any, ...]] = None,
*,
instance: Optional[Any] = None,
row: Optional[Union[Row[Unpack[TupleAny]], RowMapping]] = None,
identity_token: Optional[Any] = None,
) -> _IdentityKeyType[Any]:
r"""Return an identity key.
.. container:: class_bases
Proxied for the :class:`_asyncio.AsyncSession` class on
behalf of the :class:`_asyncio.scoping.async_scoped_session` class.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_asyncio.AsyncSession` class.
This is an alias of :func:`.util.identity_key`.
""" # noqa: E501
return AsyncSession.identity_key(
class_=class_,
ident=ident,
instance=instance,
row=row,
identity_token=identity_token,
)
# END PROXY METHODS async_scoped_session
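# A hedged usage sketch for the scoped-session class above (assuming it is the
# one SQLAlchemy exposes as async_scoped_session): asyncio.current_task scopes
# one AsyncSession per task. The SQLite URL is illustrative and needs the
# aiosqlite driver installed.
import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine


async def _scoped_session_demo() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    TaskSession = async_scoped_session(
        async_sessionmaker(engine, expire_on_commit=False),
        scopefunc=asyncio.current_task,
    )
    session = TaskSession()          # first call creates the task's session
    assert TaskSession() is session  # later calls in the same task reuse it
    await session.execute(text("SELECT 1"))
    await TaskSession.remove()       # close and drop the task-scoped session
    await engine.dispose()


# asyncio.run(_scoped_session_demo())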
|
async_scoped_session
|
python
|
ray-project__ray
|
python/ray/train/_checkpoint.py
|
{
"start": 738,
"end": 1518
}
|
class ____(type):
def __getattr__(self, item):
try:
return super().__getattribute__(item)
except AttributeError as exc:
if item in {
"from_dict",
"to_dict",
"from_bytes",
"to_bytes",
"get_internal_representation",
}:
raise _get_migration_error(item) from exc
elif item in {
"from_uri",
"to_uri",
"uri",
}:
raise _get_uri_error(item) from exc
elif item in {"get_preprocessor", "set_preprocessor"}:
raise _get_preprocessor_error(item) from exc
raise exc
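# Sketch of the pattern above: __getattr__ on a *metaclass* fires when an
# attribute is missing on the class object itself, which is how removed
# classmethods can raise targeted migration errors instead of a bare
# AttributeError. The names below are illustrative, not Ray's real helpers.
class _MigrationMeta(type):
    def __getattr__(cls, item):
        if item in {"from_dict", "to_dict"}:
            raise AttributeError(
                f"`{cls.__name__}.{item}` was removed; see the migration guide."
            )
        raise AttributeError(item)


class _LegacyCheckpoint(metaclass=_MigrationMeta):
    pass


# _LegacyCheckpoint.from_dict -> AttributeError carrying the migration hint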
@PublicAPI(stability="beta")
|
_CheckpointMetaClass
|
python
|
apache__airflow
|
providers/apache/livy/tests/unit/apache/livy/triggers/test_livy.py
|
{
"start": 1142,
"end": 9508
}
|
class ____:
def test_livy_trigger_serialization(self):
"""
        Asserts that the LivyTrigger correctly serializes its arguments
        and classpath.
"""
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=0
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.apache.livy.triggers.livy.LivyTrigger"
assert kwargs == {
"batch_id": 1,
"spark_params": {},
"livy_conn_id": LivyHook.default_conn_name,
"polling_interval": 0,
"extra_options": None,
"extra_headers": None,
"livy_hook_async": None,
"execution_timeout": None,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.triggers.livy.LivyTrigger.poll_for_termination")
async def test_livy_trigger_run_with_no_poll_interval(self, mock_poll_for_termination):
"""
Test if the task ran in the triggerer successfully with poll interval=0.
In the case when polling_interval=0, it should return the batch_id
"""
mock_poll_for_termination.return_value = {"status": "success"}
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=0
)
generator = trigger.run()
actual = await generator.asend(None)
assert (
TriggerEvent(
{"status": "success", "batch_id": 1, "response": "Batch 1 succeeded", "log_lines": None}
)
== actual
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.triggers.livy.LivyTrigger.poll_for_termination")
async def test_livy_trigger_run_with_poll_interval_success(self, mock_poll_for_termination):
"""
Test if the task ran in the triggerer successfully with poll interval>0. In the case when
polling_interval > 0, it should return a success or failure status.
"""
mock_poll_for_termination.return_value = {"status": "success"}
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
generator = trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success"}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.triggers.livy.LivyTrigger.poll_for_termination")
async def test_livy_trigger_run_with_poll_interval_error(self, mock_poll_for_termination):
"""Test if the task in the trigger returned an error when poll_for_termination returned error."""
mock_poll_for_termination.return_value = {"status": "error"}
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
task = [i async for i in trigger.run()]
assert len(task) == 2
assert TriggerEvent({"status": "error"}) in task
@pytest.mark.db_test
@pytest.mark.asyncio
async def test_livy_trigger_run_with_exception(self):
"""Test if the task in the trigger failed with a connection error when no connection is mocked."""
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
task = [i async for i in trigger.run()]
assert len(task) == 1
event = task[0]
assert isinstance(event, TriggerEvent)
assert event.payload.get("status") == "error"
assert event.payload.get("batch_id") == 1
assert "Cannot connect to host livy:8998 ssl:default" in event.payload.get("response")
@pytest.mark.db_test
@pytest.mark.asyncio
async def test_livy_trigger_poll_for_termination_with_client_error(self):
"""
Test if the poll_for_termination() in the trigger failed with a ClientConnectionError
when no connection is mocked.
"""
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
with pytest.raises(ClientConnectionError):
await trigger.poll_for_termination(1)
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.get_batch_state")
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.dump_batch_logs")
async def test_livy_trigger_poll_for_termination_success(
self, mock_dump_batch_logs, mock_get_batch_state
):
"""
Test if the poll_for_termination() in the triggerer returned success response when get_batch_state()
runs successfully.
"""
mock_get_batch_state.return_value = {"batch_state": BatchState.SUCCESS}
mock_dump_batch_logs.return_value = ["mock_log"]
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
task = await trigger.poll_for_termination(1)
assert task == {
"status": "success",
"batch_id": 1,
"response": "Batch 1 succeeded",
"log_lines": ["mock_log"],
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.get_batch_state")
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.dump_batch_logs")
async def test_livy_trigger_poll_for_termination_error(self, mock_dump_batch_logs, mock_get_batch_state):
"""
Test if the poll_for_termination() in the trigger returned error response when get_batch_state()
failed.
"""
mock_get_batch_state.return_value = {"batch_state": BatchState.ERROR}
mock_dump_batch_logs.return_value = ["mock_log"]
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
task = await trigger.poll_for_termination(1)
assert task == {
"status": "error",
"batch_id": 1,
"response": "Batch 1 did not succeed",
"log_lines": ["mock_log"],
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.get_batch_state")
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.dump_batch_logs")
async def test_livy_trigger_poll_for_termination_state(self, mock_dump_batch_logs, mock_get_batch_state):
"""
Test if the poll_for_termination() in the trigger is still polling when get_batch_state() returned
NOT_STARTED.
"""
mock_get_batch_state.return_value = {"batch_state": BatchState.NOT_STARTED}
mock_dump_batch_logs.return_value = ["mock_log"]
trigger = LivyTrigger(
batch_id=1, spark_params={}, livy_conn_id=LivyHook.default_conn_name, polling_interval=30
)
task = asyncio.create_task(trigger.poll_for_termination(1))
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.get_batch_state")
@mock.patch("airflow.providers.apache.livy.hooks.livy.LivyAsyncHook.dump_batch_logs")
async def test_livy_trigger_poll_for_termination_timeout(
self, mock_dump_batch_logs, mock_get_batch_state
):
"""
Test if poll_for_termination() returns timeout response when execution times out.
"""
mock_get_batch_state.return_value = {"batch_state": BatchState.RUNNING}
mock_dump_batch_logs.return_value = ["mock_log"]
trigger = LivyTrigger(
batch_id=1,
spark_params={},
livy_conn_id=LivyHook.default_conn_name,
polling_interval=1,
execution_timeout=timedelta(seconds=0),
)
task = await trigger.poll_for_termination(1)
assert task == {
"status": "timeout",
"batch_id": 1,
"response": "Batch 1 timed out",
"log_lines": ["mock_log"],
}
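# LivyTrigger.run() is an async generator that yields TriggerEvent objects; the
# tests above drive it with pytest-asyncio. Outside a test, a minimal, hedged
# way to pull the first event looks like the sketch below (the trigger
# arguments mirror the ones used throughout these tests and still require a
# reachable Livy server unless poll_for_termination is mocked):
async def _first_trigger_event(trigger):
    async for event in trigger.run():
        return event


# event = asyncio.run(
#     _first_trigger_event(
#         LivyTrigger(
#             batch_id=1,
#             spark_params={},
#             livy_conn_id=LivyHook.default_conn_name,
#             polling_interval=0,
#         )
#     )
# )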
|
TestLivyTrigger
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte_tests/beta/test_translator.py
|
{
"start": 2514,
"end": 3634
}
|
class ____(DagsterAirbyteTranslator):
def get_asset_spec(self, props: AirbyteConnectionTableProps) -> AssetSpec:
default_spec = super().get_asset_spec(props)
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("test_connection"),
).merge_attributes(metadata={"custom": "metadata"})
def test_custom_translator(
fetch_workspace_data_api_mocks: responses.RequestsMock,
resource: Union[AirbyteCloudWorkspace, AirbyteWorkspace],
) -> None:
table_props_data = (
resource.fetch_airbyte_workspace_data().to_airbyte_connection_table_props_data()
)
assert len(table_props_data) == 2
first_table_props = next(iter(table_props_data))
translator = MyCustomTranslator()
asset_spec = translator.get_asset_spec(first_table_props)
assert "custom" in asset_spec.metadata
assert asset_spec.metadata["custom"] == "metadata"
assert asset_spec.key.path == ["test_connection", "test_prefix_test_stream"]
assert has_kind(asset_spec.tags, "airbyte")
assert has_kind(asset_spec.tags, TEST_DESTINATION_TYPE)
|
MyCustomTranslator
|
python
|
walkccc__LeetCode
|
solutions/1883. Minimum Skips to Arrive at Meeting On Time/1883.py
|
{
"start": 0,
"end": 684
}
|
class ____:
def minSkips(self, dist: list[int], speed: int, hoursBefore: int) -> int:
INF = 10**7
EPS = 1e-9
n = len(dist)
# dp[i][j] := the minimum time, where i is the number of roads we traversed
# so far and j is the number of skips we did
dp = [[INF] * (n + 1) for _ in range(n + 1)]
dp[0][0] = 0
for i, d in enumerate(dist, 1):
dp[i][0] = math.ceil(dp[i - 1][0] + d / speed - EPS)
for j in range(1, i + 1):
dp[i][j] = min(dp[i - 1][j - 1] + d / speed,
math.ceil(dp[i - 1][j] + d / speed - EPS))
for j, time in enumerate(dp[-1]):
if time <= hoursBefore:
return j
return -1
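# Hand-checked example, consistent with LeetCode 1883's first sample and
# assuming the class above is the usual `Solution`: dist = [1, 3, 2],
# speed = 4, hoursBefore = 2. With no skips the times are 0.25 -> wait to 1,
# 1.75 -> wait to 2, then 2.5 > 2. Skipping the rest after the first road gives
# 0.25 + 0.75 = 1.0, then 1.5 <= 2, so one skip suffices. Note the ceil() calls
# above require `math` to be imported at module level.
import math  # needed by the ceil() calls in minSkips

assert Solution().minSkips([1, 3, 2], 4, 2) == 1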
|
Solution
|
python
|
tornadoweb__tornado
|
tornado/test/iostream_test.py
|
{
"start": 47291,
"end": 48313
}
|
class ____(TestReadWriteMixin, AsyncTestCase):
@gen.coroutine
def make_iostream_pair(self, **kwargs):
r, w = os.pipe()
return PipeIOStream(r, **kwargs), PipeIOStream(w, **kwargs)
@gen_test
def test_pipe_iostream(self):
rs, ws = yield self.make_iostream_pair()
ws.write(b"hel")
ws.write(b"lo world")
data = yield rs.read_until(b" ")
self.assertEqual(data, b"hello ")
data = yield rs.read_bytes(3)
self.assertEqual(data, b"wor")
ws.close()
data = yield rs.read_until_close()
self.assertEqual(data, b"ld")
rs.close()
@gen_test
def test_pipe_iostream_big_write(self):
rs, ws = yield self.make_iostream_pair()
NUM_BYTES = 1048576
# Write 1MB of data, which should fill the buffer
ws.write(b"1" * NUM_BYTES)
data = yield rs.read_bytes(NUM_BYTES)
self.assertEqual(data, b"1" * NUM_BYTES)
ws.close()
rs.close()
|
TestPipeIOStream
|
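The fixture above pairs the two ends of `os.pipe()` as Tornado streams using the legacy `gen.coroutine` style. The same exchange can be written with native coroutines; a sketch assuming Tornado 5+ on a POSIX platform (where `PipeIOStream` is available and stream futures are awaitable on the asyncio loop), not part of the test suite:

```python
import asyncio
import os

from tornado.iostream import PipeIOStream


async def main():
    # Wrap each end of a POSIX pipe, mirroring make_iostream_pair() above.
    read_fd, write_fd = os.pipe()
    rs, ws = PipeIOStream(read_fd), PipeIOStream(write_fd)
    await ws.write(b"hel")
    await ws.write(b"lo world")
    print(await rs.read_until(b" "))    # b'hello ' -- a read can span both writes
    print(await rs.read_bytes(3))       # b'wor'
    ws.close()
    print(await rs.read_until_close())  # b'ld'
    rs.close()


asyncio.run(main())
```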
python
|
facelessuser__soupsieve
|
tests/test_bs4_cases.py
|
{
"start": 194,
"end": 4810
}
|
class ____(unittest.TestCase):
"""
Original Beautiful soup test html document.
http://bazaar.launchpad.net/~leonardr/beautifulsoup/bs4/view/head:/bs4/tests/test_tree.py, line 1627.
"""
HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>The title</title>
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>
<p>Some text</p>
<p class="onep" id="p1">Some more text</p>
<h2 id="header2">An H2</h2>
<p class="class1 class2 class3" id="pmulti">Another</p>
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
<h2 id="header3">Another H2</h2>
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
<span class="s1">
<a href="#" id="s1a1">span1a1</a>
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
<span class="span2">
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
<custom-dashed-tag class="dashed" id="dash2"/>
<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
<x id="xid">
<z id="zida"/>
<z id="zidab"/>
<z id="zidac"/>
</x>
<y id="yid">
<z id="zidb"/>
</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>
<p lang="fr" id="lang-fr">French</p>
</div>
<div id="footer">
</div>
"""

    def setUp(self):
"""Setup."""
self.soup = BeautifulSoup(self.HTML, 'html.parser')

    def test_parent_nth_of_type_preconditions(self):
"""Test `nth` type preconditions."""
els = sv.select('div > h1', self.soup)
# check that there is a unique selection
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, 'An H1')
# Show that the `h1`'s parent `div#inner` is the first child of type `div` of the grandparent `div#main`.
# so that the selector `div:nth-of-type(1) > h1` should also give `h1`.
h1 = els[0]
div_inner = h1.parent
div_main = div_inner.parent
div_main_children = list(div_main.children)
self.assertEqual(div_main_children[0], '\n')
self.assertEqual(div_main_children[1], div_inner)

    def test_parent_nth_of_type(self):
"""Test parent of `nth` of type."""
els = sv.select('div:nth-of-type(1) > h1', self.soup)
self.assertEqual(len(els), 1)
self.assertEqual(els[0].string, 'An H1')


SIMPLE_XML = """<Envelope><Header>...</Header></Envelope>"""
NAMESPACE_XML = """
<?xml version="1.0"?>
<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:a="http://www.w3.org/2005/08/addressing"
xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
<a:Action s:mustUnderstand="1">http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue</a:Action>
<o:UsernameToken u:Id="uuid-00000043-0000-4000-0000-000000000000">
</s:Envelope>
""".strip()

NAMESPACES = {
'x': "http://www.w3.org/2003/05/soap-envelope",
'y': "http://www.w3.org/2005/08/addressing",
'z': "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
}


@util.requires_lxml
def test_simple_xml():
"""Test basic XML."""
xml = BeautifulSoup(SIMPLE_XML, "xml")
assert xml.select_one("Envelope")
assert xml.select_one("Envelope Header")
assert xml.select_one("Header")
assert not xml.select_one("envelope")
assert not xml.select_one("envelope header")
assert not xml.select_one("header")


@util.requires_lxml
def test_namespace_xml():
"""Test namespace XML."""
xml = BeautifulSoup(NAMESPACE_XML, "xml")
assert xml.select_one("Envelope")
assert xml.select_one("Envelope Action")
assert xml.select_one("Action")
assert not xml.select_one("envelope")
assert not xml.select_one("envelope action")
assert not xml.select_one("action")


@util.requires_lxml
def test_namespace_xml_with_namespace():
"""Test namespace selectors with XML."""
xml = BeautifulSoup(NAMESPACE_XML, "xml")
assert xml.select_one("x|Envelope", namespaces=NAMESPACES)
assert xml.select_one("x|Envelope y|Action", namespaces=NAMESPACES)
assert xml.select_one("y|Action", namespaces=NAMESPACES)
assert not xml.select_one("x|envelope", namespaces=NAMESPACES)
assert not xml.select_one("x|envelope y|action", namespaces=NAMESPACES)
assert not xml.select_one("y|action", namespaces=NAMESPACES)
|
SelectorNthOfTypeBugTest
|
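The precondition test above relies on how `:nth-of-type` counts element siblings. A small self-contained illustration with markup of my own (assuming `beautifulsoup4` and `soupsieve` are installed, as these tests require):

```python
from bs4 import BeautifulSoup
import soupsieve as sv

# `div:nth-of-type(1)` matches any <div> that is the first div among its
# parent's element children, so pairing it with `> h1` keeps only the heading
# inside the first nested div.
html = """
<section>
  <div id="main">
    <div id="inner"><h1>An H1</h1></div>
    <div id="second"><h1>Another H1</h1></div>
  </div>
</section>
"""
soup = BeautifulSoup(html, "html.parser")
print([el["id"] for el in sv.select("div:nth-of-type(1)", soup)])
# ['main', 'inner'] -- each is the first div child of its own parent
print([el.string for el in sv.select("div:nth-of-type(1) > h1", soup)])
# ['An H1']
```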