| language (string, 1 value) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/strings_ops/unicode_transcode_op_test.py
|
{
"start": 1214,
"end": 16861
}
|
class ____(test.TestCase, parameterized.TestCase):
def test_transcode_utf8_simple(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
outputs = string_ops.unicode_transcode(
strings,
input_encoding="ISO-8859-1",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
outputs = string_ops.unicode_transcode(
strings,
input_encoding="US-ASCII",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
def test_transcode_utf16_to_utf8(self):
strings = [b"\x00a\x00b\x20\xAC", b"\xD8\x01\xDC\x37"] # U+10437
expected = [s.decode("UTF-16-BE").encode("UTF-8") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-16",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
def test_transcode_bad_utf8(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=True)
values = self.evaluate(outputs)
self.assertAllEqual(values, b" ")
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00 ")
def test_transcode_bad_utf8_with_some_good(self):
bad_string = b"abc\xffabcdefg"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"abc abcdefg")
def test_transcode_bad_utf8_with_defaults(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00\xef\xbf\xbd")
def test_transcode_bad_utf8_with_space_replacement(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8",
replacement_char=ord(" "))
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00 ")
@test_util.run_deprecated_v1
def test_transcode_bad_utf8_with_strict_errors(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="strict")
with self.assertRaisesOpError(
"Invalid formatting on input string"):
self.evaluate(outputs)
@test_util.run_deprecated_v1
def test_transcode_bad_utf8_start_with_strict_errors(self):
bad_string = b"\xffabcd"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="strict")
with self.assertRaisesOpError(
"Invalid formatting on input string"):
self.evaluate(outputs)
def test_transcode_bad_utf8_with_elision_of_malformatting(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="ignore")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\x00")
def test_transcode_bad_utf8_with_elision_including_control_chars(self):
bad_string = b"\x00\xff"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="ignore",
replace_control_characters=True)
values = self.evaluate(outputs)
self.assertAllEqual(values, b"")
def test_transcode_bad_utf8_termination_with_defaults(self):
bad_string = b"a\xf0"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bad_string, input_encoding="UTF-8", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"a\xef\xbf\xbd") # 0xFFFD
def test_transcode_utf8_with_replacement_char(self):
strings = [b"a\xef\xbf\xbd"]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings, input_encoding="UTF-8", output_encoding="UTF-8",
errors="strict")
values = self.evaluate(outputs)
self.assertAllEqual(values, [b"a\xef\xbf\xbd"])
outputs = string_ops.unicode_transcode(
strings, input_encoding="UTF-8", output_encoding="UTF-8",
errors="replace", replacement_char=ord("?"))
values = self.evaluate(outputs)
self.assertAllEqual(values, [b"a\xef\xbf\xbd"])
def test_transcode_utf8_to_utf16(self):
strings = [b"ab\xe2\x82\xac", b"\xf0\x90\x90\xb7"] # U+10437
expected = [s.decode("UTF-8").encode("UTF-16-BE") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-16-BE",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
def test_transcode_utf32_to_utf8(self):
strings = [
b"\x00\x00\x00a\x00\x00\x00b\x00\x00\x20\xAC", b"\x00\x01\x04\x37"
] # U+10437
expected = [s.decode("UTF-32-BE").encode("UTF-8") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-32",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
def test_transcode_utf8_to_utf32(self):
strings = [b"ab\xe2\x82\xac", b"\xf0\x90\x90\xb7"]
expected = [s.decode("UTF-8").encode("UTF-32-BE") for s in strings]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-32-BE",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, expected)
# Documentation in ICU suggests that getNextUChar may produce a different
# error code if the input sequence contains particular non-coding sequences.
# This test checks that condition.
def test_transcode_ascii_with_shift_chars(self):
strings = [b"\x0e\x0e", b"\x0f\x0f"]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="US-ASCII",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
values = self.evaluate(outputs)
self.assertAllEqual(values, strings)
def test_transcode_utf8_with_bom(self):
bom_string = b"\xef\xbb\xbfabcdefg"
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-8", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\xef\xbb\xbfabcdefg") # BOM preserved
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-8", output_encoding="UTF-16-BE")
values = self.evaluate(outputs)
utf16expected = bom_string.decode("UTF-8").encode("UTF-16-BE")
self.assertAllEqual(values, utf16expected)
def test_transcode_utf16_le_be_with_bom(self):
bom_string = b"\xfe\xff\x00\x61" # Big-endian BOM with 'a' encoded
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-BE", output_encoding="UTF-8")
values = self.evaluate(outputs)
# BOM is preserved in output
self.assertAllEqual(values, b"\xef\xbb\xbfa")
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-LE", output_encoding="UTF-8")
values = self.evaluate(outputs)
# mangled BOM and value from (incorrect) LE encoding
self.assertAllEqual(values, b"\xef\xbf\xbe\xe6\x84\x80")
bom_string = b"\xff\xfe\x61\x00" # Little-endian BOM with 'a' encoded
outputs = string_ops.unicode_transcode(
bom_string, input_encoding="UTF-16-LE", output_encoding="UTF-8")
values = self.evaluate(outputs)
self.assertAllEqual(values, b"\xef\xbb\xbfa")
@parameterized.parameters(
# BOM is stripped if it is used to decide the byte order of the input.
(b"\xfe\xff\x00*", "UTF-16", b"*"),
(b"\xff\xfe*\x00", "UTF-16", b"*"),
# BOM is *not* stripped if it is not used to decide the byte order of
# the input.
(b"\xef\xbb\xbf*", "UTF-8", b"\xef\xbb\xbf*"),
(b"\xfe\xff\x00*", "UTF-16-BE", b"\xef\xbb\xbf*"),
(b"\xff\xfe*\x00", "UTF-16-LE", b"\xef\xbb\xbf*"),
# If the encoding is UTF-16, and no BOM is present, then UTF-16-BE
# is assumed.
(b"\x00*", "UTF-16", b"*"),
# BOM is never stripped from any position other than the beginning of
# the string, for any encoding.
(b"<\xef\xbb\xbf>", "UTF-8", b"<\xef\xbb\xbf>"),
(b"\x00<\xfe\xff\x00>", "UTF-16", b"<\xef\xbb\xbf>"),
(b"\x00<\xfe\xff\x00>", "UTF-16-BE", b"<\xef\xbb\xbf>"),
(b"<\x00\xff\xfe>\x00", "UTF-16-LE", b"<\xef\xbb\xbf>"),
(b"\xfe\xff\x00<\xfe\xff\x00>", "UTF-16", b"<\xef\xbb\xbf>"),
(b"\xff\xfe<\x00\xff\xfe>\x00", "UTF-16", b"<\xef\xbb\xbf>"),
)
@test_util.run_deprecated_v1
def test_bom_handling(self, string, input_encoding, expected):
with self.test_session():
output = string_ops.unicode_transcode(
string, input_encoding=input_encoding, output_encoding="UTF-8")
self.assertAllEqual(output, expected)
@test_util.run_deprecated_v1
def test_invalid_encoding_causes_errors(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="invalid",
output_encoding="UTF-8",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
with self.assertRaisesOpError(
"Could not create converter for input encoding: invalid"):
self.evaluate(outputs)
with self.assertRaisesRegex(ValueError, "Op passed string 'invalid'"):
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="invalid",
errors="replace",
replacement_char=ord(" "),
replace_control_characters=False)
self.evaluate(outputs)
@test_util.run_deprecated_v1
def test_invalid_error_policy_causes_errors(self):
strings = [[b"a", b"abc"], [b"ABC", b"DEF"]]
with self.assertRaisesRegex(
ValueError, "'invalid' not in: \"strict\", \"replace\", \"ignore\"."):
with self.cached_session() as sess:
outputs = string_ops.unicode_transcode(
strings,
input_encoding="UTF-8",
output_encoding="UTF-8",
errors="invalid",
replacement_char=ord(" "),
replace_control_characters=False)
self.evaluate(outputs)
def test_forwarding(self):
with self.cached_session():
# Generate an input that is uniquely consumed by the transcode op.
# This exercises code paths which are optimized for this case
# (e.g., using forwarding).
inp = string_ops.substr(
constant_op.constant([b"AbCdEfG", b"HiJkLmN"], dtypes.string),
pos=0,
len=5)
transcoded = string_ops.unicode_transcode(
inp, input_encoding="UTF-8", output_encoding="UTF-8")
self.assertAllEqual([b"AbCdE", b"HiJkL"], transcoded)
@test_util.run_deprecated_v1
def test_cjk_encodings(self):
strings_ja = [
b"\x5c\x5c", # Yen sign
b"\x8f\x70", # kanji character "waza"
b"\x83\x4f"
] # katakana character "gu"
strings_zh_cn = [b"\xca\xf5"] # simplified "shu4"
strings_zh_tw = [b"\xb3\x4e"] # traditional "shu4"
strings_ko = [b"\xc7\xd1\xb9\xce"] # hangul "hanmin"
expected_ja = [s.decode("shift_jis").encode("UTF-8") for s in strings_ja]
expected_zh_cn = [
s.decode("gb18030").encode("UTF-8") for s in strings_zh_cn
]
expected_zh_tw = [s.decode("big5").encode("UTF-8") for s in strings_zh_tw]
expected_ko = [s.decode("euc_kr").encode("UTF-8") for s in strings_ko]
with self.cached_session() as sess:
outputs_ja = string_ops.unicode_transcode(
strings_ja,
input_encoding="shift_jis",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_zh_cn = string_ops.unicode_transcode(
strings_zh_cn,
input_encoding="gb18030",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_zh_tw = string_ops.unicode_transcode(
strings_zh_tw,
input_encoding="big5",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
outputs_ko = string_ops.unicode_transcode(
strings_ko,
input_encoding="euc_kr",
output_encoding="UTF-8",
replacement_char=ord(" "),
replace_control_characters=False)
result_ja, result_zh_cn, result_zh_tw, result_ko = sess.run(
[outputs_ja, outputs_zh_cn, outputs_zh_tw, outputs_ko])
self.assertAllEqual(result_ja, expected_ja)
self.assertAllEqual(result_zh_cn, expected_zh_cn)
self.assertAllEqual(result_zh_tw, expected_zh_tw)
self.assertAllEqual(result_ko, expected_ko)
if __name__ == "__main__":
test.main()
|
UnicodeTranscodeOpTest
|
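The BOM-handling cases in the parameterized block above mirror behaviour that can be reproduced with Python's standard codecs alone. A minimal standalone sketch (plain Python, not the TensorFlow op, so it only parallels the table of expectations):

```python
# BOM rules, illustrated with the standard library rather than unicode_transcode:
# a BOM is consumed only when the codec uses it to choose the byte order.
assert b"\xfe\xff\x00*".decode("UTF-16") == "*"           # BE BOM picks the order, then is stripped
assert b"\xff\xfe*\x00".decode("UTF-16") == "*"           # LE BOM likewise stripped
assert b"\xfe\xff\x00*".decode("UTF-16-BE") == "\ufeff*"  # explicit byte order: BOM kept as U+FEFF
assert b"\xef\xbb\xbf*".decode("UTF-8") == "\ufeff*"      # UTF-8 never needs a BOM for byte order, so it stays
```

Re-encoding those results as UTF-8 gives exactly the expected byte strings listed in the test parameters.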
python
|
huggingface__transformers
|
src/transformers/models/edgetam/modular_edgetam.py
|
{
"start": 6165,
"end": 6235
}
|
class ____(Sam2VisionEncoderOutput):
pass
|
EdgeTamVisionEncoderOutput
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/exceptions.py
|
{
"start": 13258,
"end": 13934
}
|
class ____(InstallationError):
"""Multiple HashError instances rolled into one for reporting"""
def __init__(self) -> None:
self.errors: List[HashError] = []
def append(self, error: "HashError") -> None:
self.errors.append(error)
def __str__(self) -> str:
lines = []
self.errors.sort(key=lambda e: e.order)
for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
lines.append(cls.head)
lines.extend(e.body() for e in errors_of_cls)
if lines:
return "\n".join(lines)
return ""
def __bool__(self) -> bool:
return bool(self.errors)
|
HashErrors
|
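The `__str__` above sorts before grouping because `itertools.groupby` only merges keys that are already adjacent; a tiny self-contained illustration of that contract:

```python
from itertools import groupby

errors = ["HashMismatch", "HashMissing", "HashMismatch"]
# Unsorted input: groupby starts a new group every time the key changes.
print([k for k, _ in groupby(errors)])          # ['HashMismatch', 'HashMissing', 'HashMismatch']
# Sorted input: each distinct key yields exactly one group, which is what HashErrors relies on.
print([k for k, _ in groupby(sorted(errors))])  # ['HashMismatch', 'HashMissing']
```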
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-firebase-realtime-database/source_firebase_realtime_database/firebase_rtdb.py
|
{
"start": 142,
"end": 1228
}
|
class ____:
def __init__(self, path="", buffer_size=10000):
self._path = path
self._buffer_size = buffer_size
def initialize(self, database_name, google_application_credentials):
database_url = f"https://{database_name}.firebaseio.com"
sa_key = json.loads(google_application_credentials)
cred = credentials.Certificate(sa_key)
firebase_admin.initialize_app(
cred,
{
"databaseURL": database_url,
},
)
self._ref = db.reference(self._path)
def check_connection(self):
self._ref.get(shallow=True)
def fetch_records(self, start_key=None):
if start_key:
return self._ref.order_by_key().start_at(start_key).limit_to_first(self._buffer_size).get()
else:
return self._ref.order_by_key().limit_to_first(self._buffer_size).get()
def extract(self):
return Records(self)
def set_records(self, records):
self._ref.set(records)
def delete_records(self):
self._ref.delete()
|
Client
|
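A hypothetical driver loop for the keyset pagination that `fetch_records` above enables (the `Records` iterator returned by `extract()` is not part of this excerpt, so the helper below is purely illustrative):

```python
def iterate_all(client):
    """Yield every (key, value) pair by paging with fetch_records.

    Illustrative only: start_at() is inclusive, so after the first page the
    overlapping first record of each page is skipped.
    """
    start_key = None
    while True:
        page = client.fetch_records(start_key=start_key)
        if not page:
            return
        keys = list(page)
        for key in (keys if start_key is None else keys[1:]):
            yield key, page[key]
        if len(page) < client._buffer_size:
            return
        start_key = keys[-1]
```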
python
|
plotly__plotly.py
|
plotly/graph_objs/densitymap/colorbar/_title.py
|
{
"start": 233,
"end": 3992
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "densitymap.colorbar"
_path_str = "densitymap.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymap.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.densitymap.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` is "v" and
defaults to "right" when `orientation` is "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` is "v" and defaults to "right" when
`orientation` is "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.densitymap.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` is "v" and defaults to "right" when
`orientation` is "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.densitymap.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymap.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Title
|
python
|
dask__dask
|
dask/dataframe/tseries/resample.py
|
{
"start": 6212,
"end": 6270
}
|
class ____(ResampleReduction):
how = "last"
|
ResampleLast
|
python
|
django__django
|
tests/m2m_through_regress/models.py
|
{
"start": 2296,
"end": 2393
}
|
class ____(Competitor):
person = models.ForeignKey(Person, models.CASCADE)
|
IndividualCompetitor
|
python
|
django__django
|
tests/model_fields/models.py
|
{
"start": 20067,
"end": 20497
}
|
class ____(GeneratedModelVirtualBase):
class Meta:
required_db_features = {
"supports_virtual_generated_columns",
"supports_table_check_constraints",
}
constraints = [
models.CheckConstraint(
condition=models.Q(a__gt=0),
name="Generated model check constraint virtual a > 0",
)
]
|
GeneratedModelCheckConstraintVirtual
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py
|
{
"start": 14052,
"end": 14940
}
|
class ____(Benchmark):
"""
Univariate Problem20 objective function.
This class defines the Univariate Problem20 global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Problem20}}(x) = -[x-\\sin(x)]e^{-x^2}
Bound constraints: :math:`x \\in [-10, 10]`
.. figure:: figures/Problem20.png
:alt: Univariate Problem20 function
:align: center
**Univariate Problem20 function**
*Global optimum*: :math:`f(x)=-0.0634905` for :math:`x = 1.195137`
"""
def __init__(self, dimensions=1):
Benchmark.__init__(self, dimensions)
self._bounds = [(-10, 10)]
self.global_optimum = 1.195137
self.fglob = -0.0634905
def fun(self, x, *args):
self.nfev += 1
x = x[0]
return -(x - sin(x)) * exp(-x ** 2.0)
|
Problem20
|
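As a quick numeric sanity check of the stated optimum (an illustrative snippet, not part of the benchmark suite), evaluating f(x) = -(x - sin(x)) e^(-x^2) at x ≈ 1.195137 does give roughly -0.0634905:

```python
from math import exp, sin

def problem20(x):
    # f(x) = -(x - sin(x)) * exp(-x**2), as in Problem20.fun above
    return -(x - sin(x)) * exp(-x ** 2.0)

value = problem20(1.195137)
print(value)                              # ~ -0.0634905
assert abs(value - (-0.0634905)) < 1e-4
```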
python
|
PrefectHQ__prefect
|
tests/server/models/test_flow_run_states.py
|
{
"start": 10562,
"end": 11382
}
|
class ____:
async def test_read_flow_run_state(self, flow_run, session):
# create a flow run to read
flow_run_state = (
await models.flow_runs.set_flow_run_state(
session=session,
flow_run_id=flow_run.id,
state=Running(),
)
).state
read_flow_run_state = await models.flow_run_states.read_flow_run_state(
session=session, flow_run_state_id=flow_run_state.id
)
assert flow_run_state == read_flow_run_state.as_state()
async def test_read_flow_run_state_returns_none_if_does_not_exist(self, session):
result = await models.flow_run_states.read_flow_run_state(
session=session, flow_run_state_id=uuid4()
)
assert result is None
|
TestReadFlowRunState
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_vendor/truststore/_windows.py
|
{
"start": 3999,
"end": 17891
}
|
class ____(Structure):
_fields_ = (
("cbSize", DWORD),
("hRestrictedRoot", HCERTSTORE),
("hRestrictedTrust", HCERTSTORE),
("hRestrictedOther", HCERTSTORE),
("cAdditionalStore", DWORD),
("rghAdditionalStore", c_void_p),
("dwFlags", DWORD),
("dwUrlRetrievalTimeout", DWORD),
("MaximumCachedCertificates", DWORD),
("CycleDetectionModulus", DWORD),
("hExclusiveRoot", HCERTSTORE),
("hExclusiveTrustedPeople", HCERTSTORE),
("dwExclusiveFlags", DWORD),
)
PCERT_CHAIN_ENGINE_CONFIG = POINTER(CERT_CHAIN_ENGINE_CONFIG)
PHCERTCHAINENGINE = POINTER(HCERTCHAINENGINE)
X509_ASN_ENCODING = 0x00000001
PKCS_7_ASN_ENCODING = 0x00010000
CERT_STORE_PROV_MEMORY = b"Memory"
CERT_STORE_ADD_USE_EXISTING = 2
USAGE_MATCH_TYPE_OR = 1
OID_PKIX_KP_SERVER_AUTH = c_char_p(b"1.3.6.1.5.5.7.3.1")
CERT_CHAIN_REVOCATION_CHECK_END_CERT = 0x10000000
CERT_CHAIN_REVOCATION_CHECK_CHAIN = 0x20000000
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS = 0x00000007
CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG = 0x00000008
CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG = 0x00000010
CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG = 0x00000040
CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG = 0x00000020
CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG = 0x00000080
CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS = 0x00000F00
CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG = 0x00008000
CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG = 0x00004000
AUTHTYPE_SERVER = 2
CERT_CHAIN_POLICY_SSL = 4
FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000
FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200
# Flags to set for SSLContext.verify_mode=CERT_NONE
CERT_CHAIN_POLICY_VERIFY_MODE_NONE_FLAGS = (
CERT_CHAIN_POLICY_IGNORE_ALL_NOT_TIME_VALID_FLAGS
| CERT_CHAIN_POLICY_IGNORE_INVALID_BASIC_CONSTRAINTS_FLAG
| CERT_CHAIN_POLICY_ALLOW_UNKNOWN_CA_FLAG
| CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG
| CERT_CHAIN_POLICY_IGNORE_WRONG_USAGE_FLAG
| CERT_CHAIN_POLICY_IGNORE_INVALID_POLICY_FLAG
| CERT_CHAIN_POLICY_IGNORE_ALL_REV_UNKNOWN_FLAGS
| CERT_CHAIN_POLICY_ALLOW_TESTROOT_FLAG
| CERT_CHAIN_POLICY_TRUST_TESTROOT_FLAG
)
wincrypt = WinDLL("crypt32.dll")
kernel32 = WinDLL("kernel32.dll")
def _handle_win_error(result: bool, _: Any, args: Any) -> Any:
if not result:
# Note, actually raises OSError after calling GetLastError and FormatMessage
raise WinError()
return args
CertCreateCertificateChainEngine = wincrypt.CertCreateCertificateChainEngine
CertCreateCertificateChainEngine.argtypes = (
PCERT_CHAIN_ENGINE_CONFIG,
PHCERTCHAINENGINE,
)
CertCreateCertificateChainEngine.errcheck = _handle_win_error
CertOpenStore = wincrypt.CertOpenStore
CertOpenStore.argtypes = (LPCSTR, DWORD, HCRYPTPROV_LEGACY, DWORD, c_void_p)
CertOpenStore.restype = HCERTSTORE
CertOpenStore.errcheck = _handle_win_error
CertAddEncodedCertificateToStore = wincrypt.CertAddEncodedCertificateToStore
CertAddEncodedCertificateToStore.argtypes = (
HCERTSTORE,
DWORD,
c_char_p,
DWORD,
DWORD,
PCCERT_CONTEXT,
)
CertAddEncodedCertificateToStore.restype = BOOL
CertCreateCertificateContext = wincrypt.CertCreateCertificateContext
CertCreateCertificateContext.argtypes = (DWORD, c_char_p, DWORD)
CertCreateCertificateContext.restype = PCERT_CONTEXT
CertCreateCertificateContext.errcheck = _handle_win_error
CertGetCertificateChain = wincrypt.CertGetCertificateChain
CertGetCertificateChain.argtypes = (
HCERTCHAINENGINE,
PCERT_CONTEXT,
LPFILETIME,
HCERTSTORE,
PCERT_CHAIN_PARA,
DWORD,
c_void_p,
PCCERT_CHAIN_CONTEXT,
)
CertGetCertificateChain.restype = BOOL
CertGetCertificateChain.errcheck = _handle_win_error
CertVerifyCertificateChainPolicy = wincrypt.CertVerifyCertificateChainPolicy
CertVerifyCertificateChainPolicy.argtypes = (
c_ulong,
PCERT_CHAIN_CONTEXT,
PCERT_CHAIN_POLICY_PARA,
PCERT_CHAIN_POLICY_STATUS,
)
CertVerifyCertificateChainPolicy.restype = BOOL
CertCloseStore = wincrypt.CertCloseStore
CertCloseStore.argtypes = (HCERTSTORE, DWORD)
CertCloseStore.restype = BOOL
CertCloseStore.errcheck = _handle_win_error
CertFreeCertificateChain = wincrypt.CertFreeCertificateChain
CertFreeCertificateChain.argtypes = (PCERT_CHAIN_CONTEXT,)
CertFreeCertificateContext = wincrypt.CertFreeCertificateContext
CertFreeCertificateContext.argtypes = (PCERT_CONTEXT,)
CertFreeCertificateChainEngine = wincrypt.CertFreeCertificateChainEngine
CertFreeCertificateChainEngine.argtypes = (HCERTCHAINENGINE,)
FormatMessageW = kernel32.FormatMessageW
FormatMessageW.argtypes = (
DWORD,
LPCVOID,
DWORD,
DWORD,
LPWSTR,
DWORD,
c_void_p,
)
FormatMessageW.restype = DWORD
def _verify_peercerts_impl(
ssl_context: ssl.SSLContext,
cert_chain: list[bytes],
server_hostname: str | None = None,
) -> None:
"""Verify the cert_chain from the server using Windows APIs."""
# If the peer didn't send any certificates then
# we can't do verification. Raise an error.
if not cert_chain:
raise ssl.SSLCertVerificationError("Peer sent no certificates to verify")
pCertContext = None
hIntermediateCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None)
try:
# Add intermediate certs to an in-memory cert store
for cert_bytes in cert_chain[1:]:
CertAddEncodedCertificateToStore(
hIntermediateCertStore,
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
cert_bytes,
len(cert_bytes),
CERT_STORE_ADD_USE_EXISTING,
None,
)
# Cert context for leaf cert
leaf_cert = cert_chain[0]
pCertContext = CertCreateCertificateContext(
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, leaf_cert, len(leaf_cert)
)
# Chain params to match certs for serverAuth extended usage
cert_enhkey_usage = CERT_ENHKEY_USAGE()
cert_enhkey_usage.cUsageIdentifier = 1
cert_enhkey_usage.rgpszUsageIdentifier = (c_char_p * 1)(OID_PKIX_KP_SERVER_AUTH)
cert_usage_match = CERT_USAGE_MATCH()
cert_usage_match.Usage = cert_enhkey_usage
chain_params = CERT_CHAIN_PARA()
chain_params.RequestedUsage = cert_usage_match
chain_params.cbSize = sizeof(chain_params)
pChainPara = pointer(chain_params)
if ssl_context.verify_flags & ssl.VERIFY_CRL_CHECK_CHAIN:
chain_flags = CERT_CHAIN_REVOCATION_CHECK_CHAIN
elif ssl_context.verify_flags & ssl.VERIFY_CRL_CHECK_LEAF:
chain_flags = CERT_CHAIN_REVOCATION_CHECK_END_CERT
else:
chain_flags = 0
try:
# First attempt to verify using the default Windows system trust roots
# (default chain engine).
_get_and_verify_cert_chain(
ssl_context,
None,
hIntermediateCertStore,
pCertContext,
pChainPara,
server_hostname,
chain_flags=chain_flags,
)
except ssl.SSLCertVerificationError as e:
# If that fails but custom CA certs have been added
# to the SSLContext using load_verify_locations,
# try verifying using a custom chain engine
# that trusts the custom CA certs.
custom_ca_certs: list[bytes] | None = ssl_context.get_ca_certs(
binary_form=True
)
if custom_ca_certs:
try:
_verify_using_custom_ca_certs(
ssl_context,
custom_ca_certs,
hIntermediateCertStore,
pCertContext,
pChainPara,
server_hostname,
chain_flags=chain_flags,
)
# Raise the original error, not the new error.
except ssl.SSLCertVerificationError:
raise e from None
else:
raise
finally:
CertCloseStore(hIntermediateCertStore, 0)
if pCertContext:
CertFreeCertificateContext(pCertContext)
def _get_and_verify_cert_chain(
ssl_context: ssl.SSLContext,
hChainEngine: HCERTCHAINENGINE | None,
hIntermediateCertStore: HCERTSTORE,
pPeerCertContext: c_void_p,
pChainPara: PCERT_CHAIN_PARA, # type: ignore[valid-type]
server_hostname: str | None,
chain_flags: int,
) -> None:
ppChainContext = None
try:
# Get cert chain
ppChainContext = pointer(PCERT_CHAIN_CONTEXT())
CertGetCertificateChain(
hChainEngine, # chain engine
pPeerCertContext, # leaf cert context
None, # current system time
hIntermediateCertStore, # additional in-memory cert store
pChainPara, # chain-building parameters
chain_flags,
None, # reserved
ppChainContext, # the resulting chain context
)
pChainContext = ppChainContext.contents
# Verify cert chain
ssl_extra_cert_chain_policy_para = SSL_EXTRA_CERT_CHAIN_POLICY_PARA()
ssl_extra_cert_chain_policy_para.cbSize = sizeof(
ssl_extra_cert_chain_policy_para
)
ssl_extra_cert_chain_policy_para.dwAuthType = AUTHTYPE_SERVER
ssl_extra_cert_chain_policy_para.fdwChecks = 0
if server_hostname:
ssl_extra_cert_chain_policy_para.pwszServerName = c_wchar_p(server_hostname)
chain_policy = CERT_CHAIN_POLICY_PARA()
chain_policy.pvExtraPolicyPara = cast(
pointer(ssl_extra_cert_chain_policy_para), c_void_p
)
if ssl_context.verify_mode == ssl.CERT_NONE:
chain_policy.dwFlags |= CERT_CHAIN_POLICY_VERIFY_MODE_NONE_FLAGS
if not ssl_context.check_hostname:
chain_policy.dwFlags |= CERT_CHAIN_POLICY_IGNORE_INVALID_NAME_FLAG
chain_policy.cbSize = sizeof(chain_policy)
pPolicyPara = pointer(chain_policy)
policy_status = CERT_CHAIN_POLICY_STATUS()
policy_status.cbSize = sizeof(policy_status)
pPolicyStatus = pointer(policy_status)
CertVerifyCertificateChainPolicy(
CERT_CHAIN_POLICY_SSL,
pChainContext,
pPolicyPara,
pPolicyStatus,
)
# Check status
error_code = policy_status.dwError
if error_code:
# Try getting a human readable message for an error code.
error_message_buf = create_unicode_buffer(1024)
error_message_chars = FormatMessageW(
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
None,
error_code,
0,
error_message_buf,
sizeof(error_message_buf),
None,
)
# See if we received a message for the error,
# otherwise we use a generic error with the
# error code and hope that it's search-able.
if error_message_chars <= 0:
error_message = f"Certificate chain policy error {error_code:#x} [{policy_status.lElementIndex}]"
else:
error_message = error_message_buf.value.strip()
err = ssl.SSLCertVerificationError(error_message)
err.verify_message = error_message
err.verify_code = error_code
raise err from None
finally:
if ppChainContext:
CertFreeCertificateChain(ppChainContext.contents)
def _verify_using_custom_ca_certs(
ssl_context: ssl.SSLContext,
custom_ca_certs: list[bytes],
hIntermediateCertStore: HCERTSTORE,
pPeerCertContext: c_void_p,
pChainPara: PCERT_CHAIN_PARA, # type: ignore[valid-type]
server_hostname: str | None,
chain_flags: int,
) -> None:
hChainEngine = None
hRootCertStore = CertOpenStore(CERT_STORE_PROV_MEMORY, 0, None, 0, None)
try:
# Add custom CA certs to an in-memory cert store
for cert_bytes in custom_ca_certs:
CertAddEncodedCertificateToStore(
hRootCertStore,
X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
cert_bytes,
len(cert_bytes),
CERT_STORE_ADD_USE_EXISTING,
None,
)
# Create a custom cert chain engine which exclusively trusts
# certs from our hRootCertStore
cert_chain_engine_config = CERT_CHAIN_ENGINE_CONFIG()
cert_chain_engine_config.cbSize = sizeof(cert_chain_engine_config)
cert_chain_engine_config.hExclusiveRoot = hRootCertStore
pConfig = pointer(cert_chain_engine_config)
phChainEngine = pointer(HCERTCHAINENGINE())
CertCreateCertificateChainEngine(
pConfig,
phChainEngine,
)
hChainEngine = phChainEngine.contents
# Get and verify a cert chain using the custom chain engine
_get_and_verify_cert_chain(
ssl_context,
hChainEngine,
hIntermediateCertStore,
pPeerCertContext,
pChainPara,
server_hostname,
chain_flags,
)
finally:
if hChainEngine:
CertFreeCertificateChainEngine(hChainEngine)
CertCloseStore(hRootCertStore, 0)
@contextlib.contextmanager
def _configure_context(ctx: ssl.SSLContext) -> typing.Iterator[None]:
check_hostname = ctx.check_hostname
verify_mode = ctx.verify_mode
ctx.check_hostname = False
_set_ssl_context_verify_mode(ctx, ssl.CERT_NONE)
try:
yield
finally:
ctx.check_hostname = check_hostname
_set_ssl_context_verify_mode(ctx, verify_mode)
|
CERT_CHAIN_ENGINE_CONFIG
|
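The `cbSize = sizeof(...)` pattern used for `CERT_CHAIN_ENGINE_CONFIG` and the policy structures above is a general Windows convention for versioning structs. A platform-neutral ctypes sketch of the same idea (the structure below is a made-up stand-in, not a real Windows type):

```python
from ctypes import Structure, c_uint32, c_void_p, pointer, sizeof

class DEMO_CONFIG(Structure):
    # Stand-in struct: the first field records the struct's own size, which is
    # how many Windows APIs detect which version of a struct the caller built.
    _fields_ = (
        ("cbSize", c_uint32),
        ("hExclusiveRoot", c_void_p),
    )

cfg = DEMO_CONFIG()
cfg.cbSize = sizeof(cfg)
p_cfg = pointer(cfg)
print(sizeof(cfg), p_cfg.contents.cbSize)
```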
python
|
facebook__pyre-check
|
client/commands/servers.py
|
{
"start": 1092,
"end": 3682
}
|
class ____:
pid: int
version: str
global_root: str
flavor: str
relative_local_root: Optional[str] = None
@staticmethod
def from_json(
input_json: Dict[str, object], flavor: identifiers.PyreFlavor
) -> "RunningServerStatus":
pid = input_json.get("pid", None)
if not isinstance(pid, int):
raise InvalidServerResponse(f"Expect `pid` to be an int but got {pid}")
version = input_json.get("version", None)
if not isinstance(version, str):
raise InvalidServerResponse(
f"Expect `version` to be a string but got {version}"
)
global_root = input_json.get("global_root", None)
if not isinstance(global_root, str):
raise InvalidServerResponse(
f"Expect `global_root` to be a string but got {global_root}"
)
relative_local_root = input_json.get("relative_local_root", None)
if relative_local_root is not None and not isinstance(relative_local_root, str):
raise InvalidServerResponse(
"Expected `relative_local_root` to be a string but got "
f"{relative_local_root}"
)
return RunningServerStatus(
pid=pid,
version=version,
global_root=global_root,
relative_local_root=relative_local_root,
flavor=flavor.value,
)
@staticmethod
def from_server_response(
response: str, flavor: identifiers.PyreFlavor
) -> "RunningServerStatus":
try:
response_json = json.loads(response)
if (
not isinstance(response_json, list)
or len(response_json) < 2
or response_json[0] != "Info"
or not isinstance(response_json[1], dict)
):
message = f"Unexpected JSON response: {response_json}"
raise InvalidServerResponse(message)
return RunningServerStatus.from_json(response_json[1], flavor)
except json.JSONDecodeError as error:
message = f"Cannot parse response as JSON: {error}"
raise InvalidServerResponse(message) from error
def to_json(self) -> Dict[str, object]:
return {
"status": "running",
"pid": self.pid,
"version": self.version,
"global_root": self.global_root,
"relative_local_root": self.relative_local_root,
"flavor": self.flavor,
}
@dataclasses.dataclass(frozen=True)
|
RunningServerStatus
|
python
|
chroma-core__chroma
|
chromadb/utils/read_write_lock.py
|
{
"start": 1239,
"end": 1624
}
|
class ____:
def __init__(self, rwLock: ReadWriteLock):
self.rwLock = rwLock
def __enter__(self) -> None:
self.rwLock.acquire_read()
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.rwLock.release_read()
|
ReadRWLock
|
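A hypothetical usage sketch for the reader-side context manager above, assuming `ReadWriteLock` and the class itself (`ReadRWLock`, per the target column) are importable from the module listed in the path column:

```python
from chromadb.utils.read_write_lock import ReadRWLock, ReadWriteLock

shared_lock = ReadWriteLock()
cache = {"answer": 42}

def read_answer() -> int:
    # acquire_read()/release_read() are handled by __enter__/__exit__;
    # multiple readers may hold the lock at the same time.
    with ReadRWLock(shared_lock):
        return cache["answer"]

print(read_answer())
```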
python
|
django-haystack__django-haystack
|
test_haystack/spatial/test_spatial.py
|
{
"start": 366,
"end": 3602
}
|
class ____(TestCase):
def test_ensure_geometry(self):
from django.contrib.gis.geos import GEOSGeometry, Point
self.assertRaises(
SpatialError, ensure_geometry, [38.97127105172941, -95.23592948913574]
)
ensure_geometry(GEOSGeometry("POLYGON((-95 38, -96 40, -97 42, -95 38))"))
ensure_geometry(GEOSGeometry("POINT(-95.23592948913574 38.97127105172941)"))
ensure_geometry(Point(-95.23592948913574, 38.97127105172941))
def test_ensure_point(self):
from django.contrib.gis.geos import GEOSGeometry, Point
self.assertRaises(
SpatialError, ensure_point, [38.97127105172941, -95.23592948913574]
)
self.assertRaises(
SpatialError,
ensure_point,
GEOSGeometry("POLYGON((-95 38, -96 40, -97 42, -95 38))"),
)
ensure_point(Point(-95.23592948913574, 38.97127105172941))
def test_ensure_wgs84(self):
from django.contrib.gis.geos import GEOSGeometry, Point
self.assertRaises(
SpatialError,
ensure_wgs84,
GEOSGeometry("POLYGON((-95 38, -96 40, -97 42, -95 38))"),
)
orig_pnt = Point(-95.23592948913574, 38.97127105172941)
std_pnt = ensure_wgs84(orig_pnt)
self.assertEqual(orig_pnt.srid, None)
self.assertEqual(std_pnt.srid, 4326)
self.assertEqual(std_pnt.x, -95.23592948913574)
self.assertEqual(std_pnt.y, 38.97127105172941)
orig_pnt = Point(-95.23592948913574, 38.97127105172941)
orig_pnt.srid = 2805
std_pnt = ensure_wgs84(orig_pnt)
self.assertEqual(orig_pnt.srid, 2805)
self.assertEqual(std_pnt.srid, 4326)
# These should be different, since it got transformed.
self.assertNotEqual(std_pnt.x, -95.23592948913574)
self.assertNotEqual(std_pnt.y, 38.97127105172941)
def test_ensure_distance(self):
self.assertRaises(
SpatialError, ensure_distance, [38.97127105172941, -95.23592948913574]
)
ensure_distance(D(mi=5))
def test_generate_bounding_box(self):
from django.contrib.gis.geos import Point
downtown_bottom_left = Point(-95.23947, 38.9637903)
downtown_top_right = Point(-95.23362278938293, 38.973081081164715)
((min_lat, min_lng), (max_lat, max_lng)) = generate_bounding_box(
downtown_bottom_left, downtown_top_right
)
self.assertEqual(min_lat, 38.9637903)
self.assertEqual(min_lng, -95.23947)
self.assertEqual(max_lat, 38.973081081164715)
self.assertEqual(max_lng, -95.23362278938293)
def test_generate_bounding_box_crossing_line_date(self):
from django.contrib.gis.geos import Point
downtown_bottom_left = Point(95.23947, 38.9637903)
downtown_top_right = Point(-95.23362278938293, 38.973081081164715)
((south, west), (north, east)) = generate_bounding_box(
downtown_bottom_left, downtown_top_right
)
self.assertEqual(south, 38.9637903)
self.assertEqual(west, 95.23947)
self.assertEqual(north, 38.973081081164715)
self.assertEqual(east, -95.23362278938293)
|
SpatialUtilitiesTestCase
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-lantern/llama_index/vector_stores/lantern/base.py
|
{
"start": 673,
"end": 3675
}
|
class ____(NamedTuple):
node_id: str # FIXME: verify this type hint
text: str
metadata: dict
similarity: float
_logger = logging.getLogger(__name__)
def get_data_model(
base: Type,
index_name: str,
schema_name: str,
hybrid_search: bool,
text_search_config: str,
cache_okay: bool,
embed_dim: int = 1536,
m: int = 16,
ef_construction: int = 128,
ef: int = 64,
) -> Any:
"""
This part creates a dynamic SQLAlchemy model with a new table.
"""
from sqlalchemy import Column, Computed
from sqlalchemy.dialects.postgresql import (
ARRAY,
BIGINT,
JSON,
REAL,
TSVECTOR,
VARCHAR,
)
from sqlalchemy.schema import Index
from sqlalchemy.types import TypeDecorator
class TSVector(TypeDecorator):
impl = TSVECTOR
cache_ok = cache_okay
tablename = "data_%s" % index_name # dynamic table name
class_name = "Data%s" % index_name # dynamic class name
indexname = "%s_idx" % index_name # dynamic index name
hnsw_indexname = "%s_hnsw_idx" % index_name # dynamic hnsw index name
if hybrid_search:
class HybridAbstractData(base): # type: ignore
__abstract__ = True # this line is necessary
id = Column(BIGINT, primary_key=True, autoincrement=True)
text = Column(VARCHAR, nullable=False)
metadata_ = Column(JSON)
node_id = Column(VARCHAR)
embedding = Column(ARRAY(REAL, embed_dim)) # type: ignore
text_search_tsv = Column( # type: ignore
TSVector(),
Computed(
"to_tsvector('%s', text)" % text_search_config, persisted=True
),
)
model = type(
class_name,
(HybridAbstractData,),
{"__tablename__": tablename, "__table_args__": {"schema": schema_name}},
)
Index(
indexname,
model.text_search_tsv, # type: ignore
postgresql_using="gin",
)
else:
class AbstractData(base): # type: ignore
__abstract__ = True # this line is necessary
id = Column(BIGINT, primary_key=True, autoincrement=True)
text = Column(VARCHAR, nullable=False)
metadata_ = Column(JSON)
node_id = Column(VARCHAR)
embedding = Column(ARRAY(REAL, embed_dim)) # type: ignore
model = type(
class_name,
(AbstractData,),
{"__tablename__": tablename, "__table_args__": {"schema": schema_name}},
)
Index(
hnsw_indexname,
model.embedding, # type: ignore
postgresql_using="lantern_hnsw",
postgresql_with={
"m": m,
"ef_construction": ef_construction,
"ef": ef,
"dim": embed_dim,
},
postgresql_ops={"embedding": "dist_cos_ops"},
)
return model
|
DBEmbeddingRow
|
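The `type(class_name, (AbstractData,), {...})` call above is the standard way to mint a mapped class per index at runtime. A simplified, self-contained sketch of that pattern with plain SQLAlchemy (SQLite instead of Postgres/Lantern, and without the embedding or tsvector columns):

```python
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class AbstractData(Base):
    __abstract__ = True  # no table is emitted for the abstract base itself
    id = Column(Integer, primary_key=True, autoincrement=True)
    text = Column(String, nullable=False)

index_name = "example"
# Build the concrete mapped class at runtime, mirroring the type(...) call above.
DataExample = type(
    "Data%s" % index_name,
    (AbstractData,),
    {"__tablename__": "data_%s" % index_name},
)

engine = create_engine("sqlite://")  # in-memory database, purely for the sketch
Base.metadata.create_all(engine)
print(DataExample.__tablename__)     # data_example
```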
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/action.py
|
{
"start": 121,
"end": 1768
}
|
class ____(QtWidgets.QPushButton):
settableAttributes = {
"title", "tip", "icon", "shortcut", "enabled", "visible"
}
def __init__(self, parameter=None, parent=None):
super().__init__(parent)
if not parameter:
return
parameter.sigNameChanged.connect(self.onNameChange)
parameter.sigOptionsChanged.connect(self.updateOpts)
self.clicked.connect(parameter.activate)
self.updateOpts(parameter, parameter.opts)
def updateOpts(self, param, opts):
# Of the attributes that can be set on a QPushButton, only the text
# and tooltip attributes are different from standard pushbutton names
nameMap = dict(title="text", tip="toolTip")
# Special case: "title" could be none, in which case make it something
# readable by the simple copy-paste logic later
opts = opts.copy()
if "name" in opts:
opts.setdefault("title", opts["name"])
if "title" in opts and opts["title"] is None:
opts["title"] = param.title()
# Another special case: icons should be loaded from data before
# being passed to the button
if "icon" in opts:
opts["icon"] = QtGui.QIcon(opts["icon"])
for attr in self.settableAttributes.intersection(opts):
buttonAttr = nameMap.get(attr, attr)
capitalized = buttonAttr[0].upper() + buttonAttr[1:]
setter = getattr(self, f"set{capitalized}")
setter(opts[attr])
def onNameChange(self, param, name):
self.updateOpts(param, dict(title=param.title()))
|
ParameterControlledButton
|
python
|
pytorch__pytorch
|
torchgen/api/translate.py
|
{
"start": 2562,
"end": 19297
}
|
class ____(RuntimeError):
pass
# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals. You may want to use this function if
# you're generating code for a function like:
#
# void f({args}) {
# g({exprs}); // g is a different API
# }
#
# and you need to generate "exprs".
#
# Typically, a list of Bindings is convenient to get (you usually call something
# like arguments() to get them); but technically you only need less information:
# for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
def translate(
bindings: Sequence[Expr | Binding],
goals: Sequence[NamedCType | Binding],
*,
method: bool = False,
allow_expensive_conversions: bool = False,
) -> list[Expr]:
binding_exprs: list[Expr] = []
for b in bindings:
if isinstance(b, Binding):
binding_exprs.append(
Expr(
expr=b.name,
type=b.nctype,
)
)
else:
binding_exprs.append(b)
goal_ctypes: list[NamedCType] = []
for g in goals:
if isinstance(g, Binding):
goal_ctypes.append(g.nctype)
else:
goal_ctypes.append(g)
# Add all the bindings to the context
ctx: dict[NamedCType, str] = {}
for b in binding_exprs:
ctx[b.type] = b.expr
# While we're at it, do some simple forward inference, looking through
# constructors.
#
# NB: When should you do forward inference versus backward inference?
# The general idea:
#
# - Backward inference WHEN the goal gets smaller
# - Forward inference WHEN the hypothesis gets smaller
#
# This helps ensure termination: backward inference starts with a goal
# and tries to make it simpler and simpler until it's trivial; if the
# goal can grow in size, we blow up to a really huge goal size.
# Similarly, with forward inference we take hypotheses and decompose
# them into simpler hypotheses; if hypotheses could expand in size,
# we also have potential nontermination. (In the code below, forward
# inference is only ever carried out at a single step, but you could
# imagine repeated application of forward inference being profitable.)
#
# A good starting point in the literature for exploring more about proof
# search are these lecture notes
# https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
#
# TODO: My kingdom for a pattern matcher
# https://www.python.org/dev/peps/pep-0634/
#
# TODO: This could get us in recomputation trouble if b.expr is nontrivial.
# Fix this by implementing some sort of sharing so that if multiple
# goals share the same expression, we only compute it once. This seems
# to matter in practice as compiler is often unwilling to CSE nontrivial
# expressions like scalar.to<scalar_t>()
t = b.type
if (
isinstance(t, ConstRefCType)
and isinstance(t.elem, OptionalCType)
and isinstance(t.elem.elem, BaseCType)
and str(t.elem.elem.type) == "at::Tensor"
):
ctx[NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))] = (
f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"
)
if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
ctx[NamedCType(t.name, BaseCType(optionalTensorRefT))] = (
f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"
)
if t.type == ConstRefCType(BaseCType(scalarT)):
ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"
if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
ctx[NamedCType(t.name, BaseCType(optionalScalarRefT))] = (
f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"
)
if t.type == BaseCType(scalar_t):
ctx[NamedCType(t.name, BaseCType(opmath_t))] = (
f"static_cast<opmath_t>({b.expr})"
)
# [Note: IOptTensorListRef]
if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
ctx[NamedCType(t.name, BaseCType(iOptTensorListRefT))] = (
f"at::IOptTensorListRef({b.expr})"
)
# Add implicit bindings if the generated code is inside a Tensor method
if method:
ctx[NamedCType("self", MutRefCType(BaseCType(tensorT)))] = (
"const_cast<Tensor&>(*this)"
)
ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = (
"const_cast<Tensor&>(*this)"
)
# This is better! Byte-for-byte compat
# ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"
def unsat(goal: NamedCType) -> NoReturn:
ctx_desc = "\n".join(
f" {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
)
raise UnsatError(
f"""
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:
{ctx_desc}
This probably means there is a missing rule in the rules of torchgen.api.translate.
Check this module for more information.
"""
)
# A shitty backtracking search implementation. It's shitty because it
# does backtracking via stack (bad idea!) and for the most part tries to
# avoid backtracking. In particular, if
# direct=True, we won't try to do any fancy synthesis, just trivial
# conversions (e.g., "T a" is OK for "const T& a"). So all of the
# existing rules in this function simply try to solve immediately,
# and bail if things don't work out.
def solve(goal: NamedCType, *, direct: bool) -> str:
def direct_solve(goal: NamedCType) -> str:
return solve(goal, direct=True)
if goal in ctx:
# Trivial
return ctx[goal]
# const & is satisfied with mutable &
if isinstance(goal.type, ConstRefCType):
try:
# WARNING: not strictly decreasing; be careful not
# to add a direct conversion that satisfies
# mutable& with const&
return solve(
NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
)
except UnsatError:
pass
# mutable & is satisfied with value
if isinstance(goal.type, MutRefCType):
try:
return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
except UnsatError:
pass
# TODO: These are referentially equal, shouldn't have to do this;
# ensuring we don't use type synonym IntArrayRef in codegen would
# help
if goal.type == ArrayRefCType(BaseCType(longT)):
return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)
if direct:
unsat(goal)
# For now, all of these rules are mutually exclusive.
if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
memory_format = direct_solve(
NamedCType(
SpecialArgName.possibly_redundant_memory_format,
OptionalCType(BaseCType(memoryFormatT)),
)
)
# No need to join "memory_format" and "options" if the target API takes "options" directly.
# Otherwise it will cause the redundant memory_format error.
if options_ctype in goal_ctypes:
return memory_format
try:
options = direct_solve(options_ctype)
return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
except UnsatError:
return memory_format
elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
dtype = direct_solve(
NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
)
pin_memory = direct_solve(
NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
)
device = direct_solve(
NamedCType("device", OptionalCType(BaseCType(deviceT)))
)
layout = direct_solve(
NamedCType("layout", OptionalCType(BaseCType(layoutT)))
)
return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"
elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
try:
options = direct_solve(options_ctype)
return f"c10::optTypeMetaToScalarType({options}.dtype_opt())"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.scalar_type()"
elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
try:
options = direct_solve(options_ctype)
return f"{options}.layout_opt()"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.layout()"
elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
try:
options = direct_solve(options_ctype)
return f"{options}.device_opt()"
except UnsatError:
out_tensor = direct_solve(out_tensor_ctype)
return f"{out_tensor}.device()"
elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
try:
options = direct_solve(options_ctype)
return f"{options}.pinned_memory_opt()"
except UnsatError:
# If we're calling a factory op from its out= variant,
# We don't actually care about the value of pin_memory.
out_tensor = direct_solve(out_tensor_ctype)
return "::std::nullopt"
# We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
elif goal.type == BaseCType(intArrayRefT):
try:
return direct_solve(NamedCType(goal.name, longVec_ctype))
except UnsatError:
# We can also go SymIntArrayRef -> IntArrayRef
symIntArrayRef_type = direct_solve(
NamedCType(goal.name, BaseCType(symIntArrayRefT))
)
return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
elif goal.type == BaseCType(symIntArrayRefT):
try:
r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
return f"c10::fromIntArrayRefSlow({r})"
except UnsatError:
return direct_solve(NamedCType(goal.name, longSymVec_ctype))
elif goal.type == BaseCType(SymIntT):
return direct_solve(NamedCType(goal.name, BaseCType(longT)))
elif goal.type == OptionalCType(BaseCType(SymIntT)):
argname = direct_solve(
NamedCType(goal.name, OptionalCType(BaseCType(longT)))
)
return f"{argname}.has_value() ? ::std::make_optional(c10::SymInt(*{argname})) : ::std::nullopt"
elif goal.type == BaseCType(longT):
symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
return f"{symInt_type}.guard_int(__FILE__, __LINE__)"
elif goal.type == OptionalCType(BaseCType(longT)):
argname = direct_solve(
NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
)
return f"{argname}.has_value() ? ::std::make_optional({argname}->guard_int(__FILE__, __LINE__)) : ::std::nullopt"
elif goal.type == BaseCType(optionalIntArrayRefT):
try:
return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
except UnsatError:
argname = direct_solve(
NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
)
return f"{argname}.has_value() ? ::std::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : ::std::nullopt"
elif goal.type == BaseCType(optionalSymIntArrayRefT):
# TODO: You might also want to solve this from longSymVec_ctype or
# an optional version of it
argname = direct_solve(
NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
)
return f"{argname}.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*{argname})) : ::std::nullopt"
elif goal.type == BaseCType(optionalScalarRefT):
return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
elif goal.type == BaseCType(optionalTensorRefT):
return direct_solve(NamedCType(goal.name, optionalTensor_ctype))
# Note [translation from C++ reference to value types]
# The below cases are all for when we have an argument with a reference type,
# and a corresponding goal with a value type.
# These are needed when we populate the inputs to a lambda capture and we need
# to guarantee the lifetime of each captured argument.
# We guard it with an explicit kwarg because converting to a value type is expensive
# (O(n)) to convert from IntArrayRef to vector<int>),
# so the caller of translate() should be explicit that they need it.
if allow_expensive_conversions:
if goal.type == VectorCType(BaseCType(longT)):
intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
argname = direct_solve(intArrayRef_ctype)
return f"{argname}.vec()"
if goal.type == VectorCType(BaseCType(SymIntT)):
symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
argname = direct_solve(symIntArrayRef_ctype)
return f"{argname}.vec()"
elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
optionalIntArrayRef_ctype = NamedCType(
goal.name, BaseCType(optionalIntArrayRefT)
)
argname = direct_solve(optionalIntArrayRef_ctype)
return f"{argname}.has_value() ? ::std::make_optional({argname}->vec()) : ::std::nullopt"
elif goal.type == OptionalCType(BaseCType(scalarT)):
optionalScalarRef_ctype = NamedCType(
goal.name, BaseCType(optionalScalarRefT)
)
argname = direct_solve(optionalScalarRef_ctype)
return f"{argname}.has_value() ? ::std::make_optional({argname}) : ::std::nullopt"
elif goal.type == OptionalCType(BaseCType(tensorT)):
optionalTensorRef_ctype = NamedCType(
goal.name, BaseCType(optionalTensorRefT)
)
argname = direct_solve(optionalTensorRef_ctype)
return f"{argname}.has_value() ? ::std::make_optional({argname}) : ::std::nullopt"
# Technically, we also need to handle cases of C++ containers holding reference types.
# But there currently aren't any ops that require lambda capture codegen
# With arguments like ::std::vector<IntArrayRef>.
# If that changes, we'll have to add the translation here.
# We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
# We could probably generalize this to non-tensor types too.
if goal.type == MutRefCType(BaseCType(tensorT)):
const_ref_tensor_ctype = NamedCType(
goal.name, ConstRefCType(BaseCType(tensorT))
)
argname = direct_solve(const_ref_tensor_ctype)
return f"const_cast<Tensor&>({argname})"
unsat(goal)
return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
|
UnsatError
|
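A drastically simplified, self-contained sketch of the proof-search idea the comments above describe: a context maps named types to the expressions that produce them, `solve()` either looks a goal up directly or applies a conversion rule, and anything else raises `UnsatError`. This is a toy illustration, not the torchgen API:

```python
class UnsatError(RuntimeError):
    pass

# Context: which expression produces each (name, C++ type) binding.
ctx = {("sizes", "IntArrayRef"): "sizes"}

def solve(goal):
    if goal in ctx:
        return ctx[goal]                      # trivial: goal already in scope
    name, ctype = goal
    if ctype == "std::vector<int64_t>":
        # conversion rule: a vector can be materialised from an IntArrayRef
        return f"{solve((name, 'IntArrayRef'))}.vec()"
    raise UnsatError(f"cannot synthesize {ctype} {name}")

print(solve(("sizes", "std::vector<int64_t>")))   # -> sizes.vec()
```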
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-github/llama_index/readers/github/repository/github_client.py
|
{
"start": 2238,
"end": 2902
}
|
class ____(DataClassJsonMixin):
"""
Dataclass for the response from the Github API's getCommit endpoint.
Attributes:
- tree (Tree): Tree object for the commit.
"""
@dataclass
class Commit(DataClassJsonMixin):
"""Dataclass for the commit object in the commit. (commit.commit)."""
@dataclass
class Tree(DataClassJsonMixin):
"""
Dataclass for the tree object in the commit.
Attributes:
- sha (str): SHA for the commit
"""
sha: str
tree: Tree
commit: Commit
url: str
sha: str
@dataclass
|
GitCommitResponseModel
|
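Because the model above mixes in `DataClassJsonMixin` (from the dataclasses-json package), a commit response can be parsed straight from JSON text. A small sketch with a made-up payload, assuming the blanked-out class is `GitCommitResponseModel` as the target column says:

```python
payload = """
{
  "url": "https://api.github.com/repos/octocat/hello-world/commits/abc123",
  "sha": "abc123",
  "commit": {"tree": {"sha": "def456"}}
}
"""

commit = GitCommitResponseModel.from_json(payload)
print(commit.sha)              # abc123
print(commit.commit.tree.sha)  # def456
```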
python
|
PyCQA__pylint
|
tests/pyreverse/functional/class_diagrams/relationships/comprehensions.py
|
{
"start": 1190,
"end": 1755
}
|
class ____:
"""Comprehensions creating new objects - composition."""
def __init__(self):
# Composition: comprehensions creating new objects
self.components: list[Component] = [Component(f"component_{i}") for i in range(3)]
self.component_dict: dict[int, Component] = {i: Component(f"dict_component_{i}") for i in range(2)}
self.components_set: set[Component] = {Component(f"set_component_{i}") for i in range(2)}
self.lazy_components: Generator[Component] = (Component(f"lazy_{i}") for i in range(2))
|
CompositionContainer
|
python
|
h5py__h5py
|
h5py/_hl/vds.py
|
{
"start": 578,
"end": 2115
}
|
class ____(namedtuple('VDSmap', ('vspace', 'file_name',
'dset_name', 'src_space'))):
'''Defines a region in a virtual dataset mapping to part of a source dataset
'''
vds_support = True
def _convert_space_for_key(space, key):
"""
Converts the space with the given key. Mainly used to allow unlimited
dimensions in virtual space selection.
"""
key = key if isinstance(key, tuple) else (key,)
type_code = space.get_select_type()
# check for unlimited selections in case where selection is regular
# hyperslab, which is the only allowed case for h5s.UNLIMITED to be
# in the selection
if type_code == h5s.SEL_HYPERSLABS and space.is_regular_hyperslab():
rank = space.get_simple_extent_ndims()
nargs = len(key)
idx_offset = 0
start, stride, count, block = space.get_regular_hyperslab()
# iterate through keys. we ignore numeral indices. if we get a
# slice, we check for an h5s.UNLIMITED value as the stop
# if we get an ellipsis, we offset index by (rank - nargs)
for i, sl in enumerate(key):
if isinstance(sl, slice):
if sl.stop == h5s.UNLIMITED:
counts = list(count)
idx = i + idx_offset
counts[idx] = h5s.UNLIMITED
count = tuple(counts)
elif sl is Ellipsis:
idx_offset = rank - nargs
space.select_hyperslab(start, count, stride, block)
|
VDSmap
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sum-of-two-integers.py
|
{
"start": 29,
"end": 2025
}
|
class ____(object):
def getSum(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
bit_length = 32
neg_bit, mask = (1 << bit_length) >> 1, ~(~0 << bit_length)
a = (a | ~mask) if (a & neg_bit) else (a & mask)
b = (b | ~mask) if (b & neg_bit) else (b & mask)
while b:
carry = a & b
a ^= b
a = (a | ~mask) if (a & neg_bit) else (a & mask)
b = carry << 1
b = (b | ~mask) if (b & neg_bit) else (b & mask)
return a
def getSum2(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
# 32 bits integer max
MAX = 0x7FFFFFFF
# 32 bits integer min
MIN = 0x80000000
# mask to get last 32 bits
mask = 0xFFFFFFFF
while b:
# ^ get different bits and & gets double 1s, << moves carry
a, b = (a ^ b) & mask, ((a & b) << 1) & mask
# if a is negative, get a's 32 bits complement positive first
# then get 32-bit positive's Python complement negative
return a if a <= MAX else ~(a ^ mask)
def minus(self, a, b):
b = self.getSum(~b, 1)
return self.getSum(a, b)
def multiply(self, a, b):
isNeg = (a > 0) ^ (b > 0)
x = a if a > 0 else self.getSum(~a, 1)
y = b if b > 0 else self.getSum(~b, 1)
ans = 0
while y:
    if y & 0x01:
        ans = self.getSum(ans, x)
y >>= 1
x <<= 1
return self.getSum(~ans, 1) if isNeg else ans
def divide(self, a, b):
isNeg = (a > 0) ^ (b > 0)
x = a if a > 0 else self.getSum(~a, 1)
y = b if b > 0 else self.getSum(~b, 1)
ans = 0
for i in range(31, -1, -1):
if (x >> i) >= y:
x = self.minus(x, y << i)
ans = self.getSum(ans, 1 << i)
return self.getSum(~ans, 1) if isNeg else ans
|
Solution
|
python
|
huggingface__transformers
|
tests/models/blt/test_modeling_blt.py
|
{
"start": 6375,
"end": 8812
}
|
class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = BltModelTester
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
# used in `test_torch_compile_for_training`
_torch_compile_train_cls = BltForCausalLM if is_torch_available() else None
@pytest.mark.generate
@parameterized.expand([("greedy", 1), ("beam search", 2)])
@unittest.skip(
"Blt requires real token IDs for its hash-based embedding computation, making inputs_embeds generation incompatible with identical outputs"
)
def test_generate_from_inputs_embeds(self, _, num_beams):
pass
@pytest.mark.generate
@unittest.skip(
"Blt requires real token IDs for its hash-based embedding computation, making inputs_embeds generation incompatible with identical outputs"
)
def test_inputs_embeds_matches_input_ids(self):
pass
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
def test_eager_matches_sdpa_inference(
self,
name,
torch_dtype,
padding_side,
use_attention_mask,
output_attentions,
enable_kernels,
):
"We need to relax a bit the `atols` for fp32 here due to the altup projections"
atols = {
("cpu", False, torch.float32): 2e-2, # this was relaxed
("cpu", False, torch.float16): 5e-3,
("cpu", False, torch.bfloat16): 1e-2,
("cpu", True, torch.float32): 2e-2, # this was relaxed
("cpu", True, torch.float16): 5e-3,
("cpu", True, torch.bfloat16): 1e-2,
("cuda", False, torch.float32): 2e-2, # this was relaxed
("cuda", False, torch.bfloat16): 1e-2,
("cuda", False, torch.float16): 5e-3,
("cuda", True, torch.float32): 2e-2, # this was relaxed
("cuda", True, torch.bfloat16): 1e-2,
("cuda", True, torch.float16): 5e-3,
}
_test_eager_matches_sdpa_inference(
self, name, torch_dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=atols
)
@require_torch_accelerator
@slow
def test_sdpa_can_dispatch_on_flash(self):
self.skipTest("BLT always has an attention_mask input")
@require_torch_accelerator
|
BltModelTest
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_select.py
|
{
"start": 17885,
"end": 20554
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
"""tests related to #8285."""
__dialect__ = "default"
def test_c_collection_as_from(self):
stmt = select(parent.c)
# this works because _all_selected_columns expands out
# ClauseList. it does so in the same way that it works for
# Table already. so this is free
eq_(stmt._all_selected_columns, [parent.c.id, parent.c.data])
self.assert_compile(stmt, "SELECT parent.id, parent.data FROM parent")
def test_c_sub_collection_str_stmt(self):
stmt = select(table1.c["myid", "description"])
self.assert_compile(
stmt, "SELECT mytable.myid, mytable.description FROM mytable"
)
subq = stmt.subquery()
self.assert_compile(
select(subq.c[0]).where(subq.c.description == "x"),
"SELECT anon_1.myid FROM (SELECT mytable.myid AS myid, "
"mytable.description AS description FROM mytable) AS anon_1 "
"WHERE anon_1.description = :description_1",
)
def test_c_sub_collection_int_stmt(self):
stmt = select(table1.c[2, 0])
self.assert_compile(
stmt, "SELECT mytable.description, mytable.myid FROM mytable"
)
subq = stmt.subquery()
self.assert_compile(
select(subq.c.myid).where(subq.c[1] == "x"),
"SELECT anon_1.myid FROM (SELECT mytable.description AS "
"description, mytable.myid AS myid FROM mytable) AS anon_1 "
"WHERE anon_1.myid = :myid_1",
)
def test_c_sub_collection_str(self):
coll = table1.c["myid", "description"]
is_(coll.myid, table1.c.myid)
eq_(list(coll), [table1.c.myid, table1.c.description])
def test_c_sub_collection_int(self):
coll = table1.c[2, 0]
is_(coll.myid, table1.c.myid)
eq_(list(coll), [table1.c.description, table1.c.myid])
def test_c_sub_collection_positive_slice(self):
coll = table1.c[0:2]
is_(coll.myid, table1.c.myid)
is_(coll.name, table1.c.name)
eq_(list(coll), [table1.c.myid, table1.c.name])
def test_c_sub_collection_negative_slice(self):
coll = table1.c[-2:]
is_(coll.name, table1.c.name)
is_(coll.description, table1.c.description)
eq_(list(coll), [table1.c.name, table1.c.description])
def test_missing_key(self):
with expect_raises_message(KeyError, "unknown"):
table1.c["myid", "unknown"]
def test_missing_index(self):
with expect_raises_message(IndexError, "5"):
table1.c["myid", 5]
|
ColumnCollectionAsSelectTest
|
python
|
getsentry__sentry
|
tests/sentry/middleware/test_health.py
|
{
"start": 274,
"end": 1926
}
|
class ____(TestCase):
middleware = cached_property(HealthCheck)
@cached_property
def factory(self):
return RequestFactory()
@patch("sentry.status_checks.check_all")
def test_other_url(self, check_all: MagicMock) -> None:
req = self.factory.get("/")
resp = self.middleware.process_request(req)
assert resp is None, resp
assert check_all.call_count == 0
@patch("sentry.status_checks.check_all")
def test_basic_health(self, check_all: MagicMock) -> None:
req = self.factory.get("/_health/")
resp = self.middleware.process_request(req)
assert resp.status_code == 200, resp
assert check_all.call_count == 0
@patch("sentry.status_checks.check_all")
def test_full_health_ok(self, check_all: MagicMock) -> None:
check_all.return_value = {object(): []}
req = self.factory.get("/_health/?full")
resp = self.middleware.process_request(req)
assert resp.status_code == 200, resp
body = json.loads(resp.content)
assert "problems" in body
assert "healthy" in body
assert check_all.call_count == 1
@patch("sentry.status_checks.check_all")
def test_full_health_bad(self, check_all: MagicMock) -> None:
check_all.return_value = {object(): [Problem("the system is down")]}
req = self.factory.get("/_health/?full")
resp = self.middleware.process_request(req)
assert resp.status_code == 500, resp
body = json.loads(resp.content)
assert "problems" in body
assert "healthy" in body
assert check_all.call_count == 1
|
HealthCheckTest
|
python
|
fastai__fastai
|
fastai/layers.py
|
{
"start": 19326,
"end": 22171
}
|
class ____(Module):
"Resnet block from `ni` to `nh` with `stride`"
@delegates(ConvLayer.__init__)
def __init__(self, expansion, ni, nf, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,
sa=False, sym=False, norm_type=NormType.Batch, act_cls=defaults.activation, ndim=2, ks=3,
pool=AvgPool, pool_first=True, **kwargs):
norm2 = (NormType.BatchZero if norm_type==NormType.Batch else
NormType.InstanceZero if norm_type==NormType.Instance else norm_type)
if nh2 is None: nh2 = nf
if nh1 is None: nh1 = nh2
nf,ni = nf*expansion,ni*expansion
k0 = dict(norm_type=norm_type, act_cls=act_cls, ndim=ndim, **kwargs)
k1 = dict(norm_type=norm2, act_cls=None, ndim=ndim, **kwargs)
convpath = [ConvLayer(ni, nh2, ks, stride=stride, groups=ni if dw else groups, **k0),
ConvLayer(nh2, nf, ks, groups=g2, **k1)
] if expansion == 1 else [
ConvLayer(ni, nh1, 1, **k0),
ConvLayer(nh1, nh2, ks, stride=stride, groups=nh1 if dw else groups, **k0),
ConvLayer(nh2, nf, 1, groups=g2, **k1)]
if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))
self.convpath = nn.Sequential(*convpath)
idpath = []
if ni!=nf: idpath.append(ConvLayer(ni, nf, 1, act_cls=None, ndim=ndim, **kwargs))
if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=ndim, ceil_mode=True))
self.idpath = nn.Sequential(*idpath)
self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()
def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))
# %% ../nbs/01_layers.ipynb 133
def SEBlock(expansion, ni, nf, groups=1, reduction=16, stride=1, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh1=nf*2, nh2=nf*expansion, **kwargs)
# %% ../nbs/01_layers.ipynb 134
def SEResNeXtBlock(expansion, ni, nf, groups=32, reduction=16, stride=1, base_width=4, **kwargs):
w = math.floor(nf * (base_width / 64)) * groups
return ResBlock(expansion, ni, nf, stride=stride, groups=groups, reduction=reduction, nh2=w, **kwargs)
# %% ../nbs/01_layers.ipynb 135
def SeparableBlock(expansion, ni, nf, reduction=16, stride=1, base_width=4, **kwargs):
return ResBlock(expansion, ni, nf, stride=stride, reduction=reduction, nh2=nf*2, dw=True, **kwargs)
# %% ../nbs/01_layers.ipynb 138
def _stack_tups(tuples, stack_dim=1):
"Stack tuple of tensors along `stack_dim`"
return tuple(torch.stack([t[i] for t in tuples], dim=stack_dim) for i in range_of(tuples[0]))
# %% ../nbs/01_layers.ipynb 139
|
ResBlock
|
python
|
pytorch__pytorch
|
torch/_inductor/select_algorithm.py
|
{
"start": 59950,
"end": 60046
}
|
class ____(NamedTuple):
code: str
extra: str
events: list[Any]
|
GeneratedCodeCacheEntry
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v/modular_glm4v.py
|
{
"start": 30834,
"end": 32982
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Glm4vTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Glm4vTextAttention(config, layer_idx)
self.mlp = Glm4vTextMLP(config)
self.input_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_self_attn_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_mlp_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.post_self_attn_layernorm(hidden_states)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_mlp_layernorm(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
Glm4vTextDecoderLayer
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_methods.py
|
{
"start": 17852,
"end": 18541
}
|
class ____:
def setup(self):
n = 1 << 20
t = date_range("2015-01-01", freq="s", periods=(n // 64))
xs = np.random.randn(n // 64).round(2)
self.df = DataFrame(
{
"a": np.random.randint(-1 << 8, 1 << 8, n),
"b": np.random.choice(t, n),
"c": np.random.choice(xs, n),
}
)
self.df2 = DataFrame(np.random.randn(1000, 100).astype(str)).T
def time_frame_duplicated(self):
self.df.duplicated()
def time_frame_duplicated_wide(self):
self.df2.duplicated()
def time_frame_duplicated_subset(self):
self.df.duplicated(subset=["a"])
|
Duplicated
|
python
|
pyinstaller__pyinstaller
|
bootloader/waflib/Tools/qt5.py
|
{
"start": 8050,
"end": 20211
}
|
class ____(Task.Task):
color = 'BLUE'
after = 'ts2qm'
def run(self):
txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs])
code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt
self.outputs[0].write(code)
def configure(self):
self.find_qt5_binaries()
self.set_qt5_libs_dir()
self.set_qt5_libs_to_check()
self.set_qt5_defines()
self.find_qt5_libraries()
self.add_qt5_rpath()
self.simplify_qt5_libs()
if not has_xml:
Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!')
if 'COMPILER_CXX' not in self.env:
self.fatal('No CXX compiler defined: did you forget to configure compiler_cxx first?')
frag = '#include <QMap>\nint main(int argc, char **argv) {QMap<int,int> m;return m.keys().size();}\n'
uses = 'QT5CORE'
for flag in [[], '-fPIE', '-fPIC', '-std=c++11', ['-std=c++11', '-fPIE'], ['-std=c++11', '-fPIC']]:
msg = 'See if Qt files compile '
if flag:
msg += 'with %s' % flag
try:
self.check(features='qt5 cxx', use=uses, uselib_store='qt5', cxxflags=flag, fragment=frag, msg=msg)
except self.errors.ConfigurationError:
pass
else:
break
else:
self.fatal('Could not build a simple Qt application')
if Utils.unversioned_sys_platform() == 'freebsd':
frag = '#include <QMap>\nint main(int argc, char **argv) {QMap<int,int> m;return m.keys().size();}\n'
try:
self.check(
features='qt5 cxx cxxprogram',
use=uses,
fragment=frag,
msg='Can we link Qt programs on FreeBSD directly?'
)
except self.errors.ConfigurationError:
self.check(
features='qt5 cxx cxxprogram',
use=uses,
uselib_store='qt5',
libpath='/usr/local/lib',
fragment=frag,
msg='Is /usr/local/lib required?'
)
@conf
def find_qt5_binaries(self):
env = self.env
opt = Options.options
qtdir = getattr(opt, 'qtdir', '')
qtbin = getattr(opt, 'qtbin', '')
paths = []
if qtdir:
qtbin = os.path.join(qtdir, 'bin')
if not qtdir:
qtdir = self.environ.get('QT5_ROOT', '')
qtbin = self.environ.get('QT5_BIN') or os.path.join(qtdir, 'bin')
if qtbin:
paths = [qtbin]
if not qtdir:
paths = self.environ.get('PATH', '').split(os.pathsep)
paths.extend(['/usr/share/qt5/bin', '/usr/local/lib/qt5/bin'])
try:
lst = Utils.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
qtdir = '/usr/local/Trolltech/%s/' % lst[0]
qtbin = os.path.join(qtdir, 'bin')
paths.append(qtbin)
cand = None
prev_ver = ['5', '0', '0']
for qmk in ('qmake-qt5', 'qmake5', 'qmake'):
try:
qmake = self.find_program(qmk, path_list=paths)
except self.errors.ConfigurationError:
pass
else:
try:
version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip()
except self.errors.WafError:
pass
else:
if version:
new_ver = version.split('.')
if new_ver > prev_ver:
cand = qmake
prev_ver = new_ver
if not cand:
try:
self.find_program('qtchooser')
except self.errors.ConfigurationError:
pass
else:
cmd = self.env.QTCHOOSER + ['-qt=5', '-run-tool=qmake']
try:
version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION'])
except self.errors.WafError:
pass
else:
cand = cmd
if cand:
self.env.QMAKE = cand
else:
self.fatal('Could not find qmake for qt5')
self.env.QT_HOST_BINS = qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_HOST_BINS']).strip()
paths.insert(0, qtbin)
def find_bin(lst, var):
if var in env:
return
for f in lst:
try:
ret = self.find_program(f, path_list=paths)
except self.errors.ConfigurationError:
pass
else:
env[var] = ret
break
find_bin(['uic-qt5', 'uic'], 'QT_UIC')
if not env.QT_UIC:
self.fatal('cannot find the uic compiler for qt5')
self.start_msg('Checking for uic version')
uicver = self.cmd_and_log(env.QT_UIC + ['-version'], output=Context.BOTH)
uicver = ''.join(uicver).strip()
uicver = uicver.replace('Qt User Interface Compiler ', '').replace('User Interface Compiler for Qt', '')
self.end_msg(uicver)
if uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1:
self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
find_bin(['moc-qt5', 'moc'], 'QT_MOC')
find_bin(['rcc-qt5', 'rcc'], 'QT_RCC')
find_bin(['lrelease-qt5', 'lrelease'], 'QT_LRELEASE')
find_bin(['lupdate-qt5', 'lupdate'], 'QT_LUPDATE')
env.UIC_ST = '%s -o %s'
env.MOC_ST = '-o'
env.ui_PATTERN = 'ui_%s.h'
env.QT_LRELEASE_FLAGS = ['-silent']
env.MOCCPPPATH_ST = '-I%s'
env.MOCDEFINES_ST = '-D%s'
@conf
def set_qt5_libs_dir(self):
env = self.env
qtlibs = getattr(Options.options, 'qtlibs', None) or self.environ.get('QT5_LIBDIR')
if not qtlibs:
try:
qtlibs = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip()
except Errors.WafError:
qtdir = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip()
qtlibs = os.path.join(qtdir, 'lib')
self.msg('Found the Qt5 libraries in', qtlibs)
env.QTLIBS = qtlibs
@conf
def find_single_qt5_lib(self, name, uselib, qtlibs, qtincludes, force_static):
env = self.env
if force_static:
exts = ('.a', '.lib')
prefix = 'STLIB'
else:
exts = ('.so', '.lib')
prefix = 'LIB'
def lib_names():
for x in exts:
for k in ('', '5') if Utils.is_win32 else ['']:
for p in ('lib', ''):
yield (p, name, k, x)
for tup in lib_names():
k = ''.join(tup)
path = os.path.join(qtlibs, k)
if os.path.exists(path):
if env.DEST_OS == 'win32':
libval = ''.join(tup[:-1])
else:
libval = name
env.append_unique(prefix + '_' + uselib, libval)
env.append_unique('%sPATH_%s' % (prefix, uselib), qtlibs)
env.append_unique('INCLUDES_' + uselib, qtincludes)
env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, name.replace('Qt5', 'Qt')))
return k
return False
@conf
def find_qt5_libraries(self):
env = self.env
qtincludes = self.environ.get('QT5_INCLUDES') or self.cmd_and_log(env.QMAKE +
['-query', 'QT_INSTALL_HEADERS']).strip()
force_static = self.environ.get('QT5_FORCE_STATIC')
try:
if self.environ.get('QT5_XCOMPILE'):
self.fatal('QT5_XCOMPILE Disables pkg-config detection')
self.check_cfg(atleast_pkgconfig_version='0.1')
except self.errors.ConfigurationError:
for i in self.qt5_vars:
uselib = i.upper()
if Utils.unversioned_sys_platform() == 'darwin':
fwk = i.replace('Qt5', 'Qt')
frameworkName = fwk + '.framework'
qtDynamicLib = os.path.join(env.QTLIBS, frameworkName, fwk)
if os.path.exists(qtDynamicLib):
env.append_unique('FRAMEWORK_' + uselib, fwk)
env.append_unique('FRAMEWORKPATH_' + uselib, env.QTLIBS)
self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN')
else:
self.msg('Checking for %s' % i, False, 'YELLOW')
env.append_unique('INCLUDES_' + uselib, os.path.join(env.QTLIBS, frameworkName, 'Headers'))
else:
ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, force_static)
if not force_static and not ret:
ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, True)
self.msg('Checking for %s' % i, ret, 'GREEN' if ret else 'YELLOW')
else:
path = '%s:%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib' % (
self.environ.get('PKG_CONFIG_PATH', ''), env.QTLIBS, env.QTLIBS
)
for i in self.qt5_vars:
self.check_cfg(
package=i, args='--cflags --libs', mandatory=False, force_static=force_static, pkg_config_path=path
)
@conf
def simplify_qt5_libs(self):
env = self.env
def process_lib(vars_, coreval):
for d in vars_:
var = d.upper()
if var == 'QTCORE':
continue
value = env['LIBPATH_' + var]
if value:
core = env[coreval]
accu = []
for lib in value:
if lib in core:
continue
accu.append(lib)
env['LIBPATH_' + var] = accu
process_lib(self.qt5_vars, 'LIBPATH_QTCORE')
@conf
def add_qt5_rpath(self):
env = self.env
if getattr(Options.options, 'want_rpath', False):
def process_rpath(vars_, coreval):
for d in vars_:
var = d.upper()
value = env['LIBPATH_' + var]
if value:
core = env[coreval]
accu = []
for lib in value:
if var != 'QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath=' + lib)
env['RPATH_' + var] = accu
process_rpath(self.qt5_vars, 'LIBPATH_QTCORE')
@conf
def set_qt5_libs_to_check(self):
self.qt5_vars = Utils.to_list(getattr(self, 'qt5_vars', []))
if not self.qt5_vars:
dirlst = Utils.listdir(self.env.QTLIBS)
pat = self.env.cxxshlib_PATTERN
if Utils.is_win32:
pat = pat.replace('.dll', '.lib')
if self.environ.get('QT5_FORCE_STATIC'):
pat = self.env.cxxstlib_PATTERN
if Utils.unversioned_sys_platform() == 'darwin':
pat = r"%s\.framework"
re_qt = re.compile(pat % 'Qt5?(?P<name>.*)' + '$')
for x in dirlst:
m = re_qt.match(x)
if m:
self.qt5_vars.append("Qt5%s" % m.group('name'))
if not self.qt5_vars:
self.fatal('cannot find any Qt5 library (%r)' % self.env.QTLIBS)
qtextralibs = getattr(Options.options, 'qtextralibs', None)
if qtextralibs:
self.qt5_vars.extend(qtextralibs.split(','))
@conf
def set_qt5_defines(self):
if sys.platform != 'win32':
return
for x in self.qt5_vars:
y = x.replace('Qt5', 'Qt')[2:].upper()
self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y)
def options(opt):
opt.add_option(
'--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries'
)
for i in 'qtdir qtbin qtlibs'.split():
opt.add_option('--' + i, type='string', default='', dest=i)
opt.add_option(
'--translate', action='store_true', help='collect translation strings', dest='trans_qt5', default=False
)
opt.add_option(
'--qtextralibs',
type='string',
default='',
dest='qtextralibs',
help='additional qt libraries on the system to add to default ones, comma separated'
)
|
qm2rcc
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorBreakingChanges.py
|
{
"start": 587,
"end": 785
}
|
class ____(BaseModel):
__root__: StreamBreakingChangeScope = Field(
...,
description="A scope that can be used to limit the impact of a breaking change.",
)
|
BreakingChangeScope
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_https.py
|
{
"start": 46672,
"end": 46819
}
|
class ____(BaseTestHTTPS):
tls_protocol_name = "TLSv1.2"
certs = TLSv1_2_CERTS
@pytest.mark.usefixtures("requires_tlsv1_3")
|
TestHTTPS_TLSv1_2
|
python
|
readthedocs__readthedocs.org
|
readthedocs/notifications/messages.py
|
{
"start": 18727,
"end": 19939
}
|
class ____:
def __init__(self):
self.messages = {}
def add(self, messages):
if not isinstance(messages, list):
if not isinstance(messages, Message):
raise ValueError("A message should be instance of Message or a list of Messages.")
messages = [messages]
for message in messages:
if message.id in self.messages:
raise ValueError("A message with the same 'id' is already registered.")
self.messages[message.id] = message
def get(self, message_id, format_values=None):
# Copy to avoid setting format values on the static instance of the
# message inside the registry, set on a per-request instance instead.
message = copy.copy(self.messages.get(message_id))
if message is not None:
# Always include global variables, override with provided values
all_format_values = readthedocs_processor(None)
all_format_values.update(format_values or {})
message.set_format_values(all_format_values)
return message
registry = MessagesRegistry()
registry.add(BUILD_MKDOCS_MESSAGES)
registry.add(BUILD_MESSAGES)
|
MessagesRegistry
|
python
|
huggingface__transformers
|
tests/models/bamba/test_modeling_bamba.py
|
{
"start": 21837,
"end": 27861
}
|
class ____(unittest.TestCase):
model = None
tokenizer = None
# This variable is used to determine which CUDA device are we using for our runners (A10 or T4)
# Depending on the hardware we get different logits / generations
device_properties: DeviceProperties = (None, None, None)
@classmethod
def setUpClass(cls):
model_id = "ibm-fms/Bamba-9B"
cls.model = BambaForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16)
cls.tokenizer = AutoTokenizer.from_pretrained(model_id)
# feels a bit forced to have to do this for the generation test
cls.tokenizer.pad_token_id = cls.model.config.pad_token_id
cls.tokenizer.padding_side = "left"
cls.device_properties = get_device_properties()
def test_simple_generate(self):
# fmt: off
expectations = Expectations(
{
("cuda", 8): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all having a good time.",
("rocm", 9): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are doing well. I am here",
("xpu", 3): "<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
}
)
# fmt: on
self.model.to(torch_device)
input_ids = self.tokenizer("Hey how are you doing on this lovely evening?", return_tensors="pt")[
"input_ids"
].to(torch_device)
out = self.model.generate(input_ids, do_sample=False, max_new_tokens=10)
output_sentence = self.tokenizer.decode(out[0, :])
expected = expectations.get_expectation()
self.assertEqual(output_sentence, expected)
# TODO: there are significant differences in the logits across major cuda versions, which shouldn't exist
if self.device_properties[0] == "cuda" and self.device_properties[1] == 8:
with torch.no_grad():
logits = self.model(input_ids=input_ids, logits_to_keep=40).logits
EXPECTED_LOGITS_NO_GRAD = torch.tensor(
[
149., 142., 146., 142., 143., 144., 142., 145.,
142., 146., 144., 146., 147., 147., 148., 145.,
147., 145., 145., 145., 145., 144., 144., 144.,
144., 145., 147., 146., 144., 144., 148., 147.,
148., 147., 147., 147., 146., 146., 148., 148.
], dtype=torch.bfloat16) # fmt: skip
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1)
@require_deterministic_for_xpu
def test_simple_batched_generate_with_padding(self):
# Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4.
#
# Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s,
# considering differences in hardware processing and potential deviations in generated text.
# fmt: off
EXPECTED_TEXTS = Expectations(
{
("cuda", 7): [],
("cuda", 8): [
"<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are doing well. I am here",
"!!!<|begin_of_text|>I am late! I need to get to work! I have to get to the",
],
("rocm", 9): [
"<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are doing well. I am here",
"!!!<|begin_of_text|>I am late! I need to be at the airport in 20 minutes! I",
],
("xpu", 3): [
"<|begin_of_text|>Hey how are you doing on this lovely evening? I hope you are all doing well. I am",
"!!!<|begin_of_text|>I am late! I need to get to work! I have to get to the",
],
}
)
# fmt: on
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
self.model.to(torch_device)
inputs = self.tokenizer(
["Hey how are you doing on this lovely evening?", "I am late! I need to"],
padding=True,
return_tensors="pt",
).to(torch_device)
out = self.model.generate(**inputs, do_sample=False, max_new_tokens=10)
output_sentences = self.tokenizer.batch_decode(out)
self.assertEqual(output_sentences[0], EXPECTED_TEXT[0])
self.assertEqual(output_sentences[1], EXPECTED_TEXT[1])
# TODO: there are significant differences in the logits across major cuda versions, which shouldn't exist
if self.device_properties[0] == "cuda" and self.device_properties[1] == 8:
with torch.no_grad():
logits = self.model(input_ids=inputs["input_ids"]).logits
EXPECTED_LOGITS_NO_GRAD_0 = torch.tensor(
[
149., 142., 146., 142., 143., 144., 142., 145.,
142., 146., 144., 146., 147., 147., 148., 145.,
147., 145., 145., 145., 145., 144., 144., 144.,
144., 145., 147., 146., 144., 144., 148., 147.,
148., 147., 147., 147., 146., 146., 148., 148.
], dtype=torch.bfloat16) # fmt: skip
EXPECTED_LOGITS_NO_GRAD_1 = torch.tensor(
[
182., 178., 177., 174., 176., 176., 178., 178.,
177., 179., 176., 183., 180., 182., 179., 174.,
178., 176., 176., 175., 175., 175., 174., 173.,
174., 182., 180., 176., 177., 177., 180., 176.,
178., 177., 177., 175., 176., 177., 175., 177.
], dtype=torch.bfloat16) # fmt: skip
torch.testing.assert_close(logits[0, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_0, rtol=1e-3, atol=1)
torch.testing.assert_close(logits[1, -1, :40].cpu(), EXPECTED_LOGITS_NO_GRAD_1, rtol=1e-3, atol=1)
|
BambaModelIntegrationTest
|
python
|
huggingface__transformers
|
src/transformers/models/maskformer/modeling_maskformer.py
|
{
"start": 6082,
"end": 9445
}
|
class ____(ModelOutput):
r"""
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the encoder model (backbone).
pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).
transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Last hidden states (final feature map) of the last stage of the transformer decoder model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder
model at the output of each stage.
pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel
decoder model at the output of each stage.
transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states (also called feature maps) of the
transformer decoder at the output of each stage.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and
`decoder_hidden_states`
"""
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
pixel_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
transformer_decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Class for outputs of [`MaskFormerForInstanceSegmentation`].
This output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or
[`~MaskFormerImageProcessor.post_process_instance_segmentation`] or
[`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. Please see
[`~MaskFormerImageProcessor`] for details regarding usage.
"""
)
|
MaskFormerModelOutput
|
python
|
django__django
|
django/contrib/gis/db/models/sql/conversion.py
|
{
"start": 1366,
"end": 2433
}
|
class ____(models.FloatField):
"Wrapper for Distance values."
def __init__(self, geo_field):
super().__init__()
self.geo_field = geo_field
def get_prep_value(self, value):
if isinstance(value, Distance):
return value
return super().get_prep_value(value)
def get_db_prep_value(self, value, connection, prepared=False):
if not isinstance(value, Distance):
return value
distance_att = connection.ops.get_distance_att_for_field(self.geo_field)
if not distance_att:
raise ValueError(
"Distance measure is supplied, but units are unknown for result."
)
return getattr(value, distance_att)
def from_db_value(self, value, expression, connection):
if value is None:
return
distance_att = connection.ops.get_distance_att_for_field(self.geo_field)
return Distance(**{distance_att: value}) if distance_att else value
def get_internal_type(self):
return "DistanceField"
|
DistanceField
|
python
|
great-expectations__great_expectations
|
great_expectations/expectations/core/expect_column_values_to_match_regex_list.py
|
{
"start": 2425,
"end": 16389
}
|
class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
Matches can be anywhere in the string.
ExpectColumnValuesToMatchRegexList is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
regex_list (list): \
{REGEX_LIST_DESCRIPTION}
match_on (string): \
{MATCH_ON_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)
[ExpectColumnValuesToNotMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)
[ExpectColumnValuesToNotMatchRegexList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex_list)
[ExpectColumnValuesToMatchLikePattern](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern)
[ExpectColumnValuesToMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)
[ExpectColumnValuesToNotMatchLikePattern](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern)
[ExpectColumnValuesToNotMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 "aaa" "bcc"
1 "abb" "bdd"
2 "acc" "abc"
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToMatchRegexList(
column="test2",
regex_list=["^a.*", "^b.*"],
match_on="any"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToMatchRegexList(
column="test",
regex_list=["^a.*", "^b.*"],
match_on="all"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 3,
"unexpected_percent": 100,
"partial_unexpected_list": [
"bcc",
"bdd"
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 100,
"unexpected_percent_nonmissing": 100
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
regex_list: Union[List[str], SuiteParameterDict] = pydantic.Field(
description=REGEX_LIST_DESCRIPTION
)
match_on: Union[Literal["any", "all"], SuiteParameterDict] = pydantic.Field(
default="any", description=MATCH_ON_DESCRIPTION
)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.match_regex_list"
success_keys = (
"regex_list",
"match_on",
"mostly",
)
args_keys = (
"column",
"regex_list",
)
class Config:
title = "Expect column values to match regex list"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValuesToMatchRegexList]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
):
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("regex_list", RendererValueType.ARRAY),
("mostly", RendererValueType.NUMBER),
("match_on", RendererValueType.STRING),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if not params.regex_list or not params.regex_list.value:
values_string = "[ ]"
else:
array_param_name = "regex_list"
param_prefix = "v__"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
values_string: str = cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
if params.match_on and params.match_on.value == "all":
template_str = (
"values must match all of the following regular expressions: " + values_string
)
else:
template_str = (
"values must match any of the following regular expressions: " + values_string
)
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"regex_list",
"mostly",
"match_on",
"row_condition",
"condition_parser",
],
)
if not params.get("regex_list") or len(params.get("regex_list")) == 0:
values_string = "[ ]"
else:
for i, v in enumerate(params["regex_list"]):
params[f"v__{i!s}"] = v
values_string = " ".join([f"$v__{i!s}" for i, v in enumerate(params["regex_list"])])
if params.get("match_on") == "all":
template_str = (
"values must match all of the following regular expressions: " + values_string
)
else:
template_str = (
"values must match any of the following regular expressions: " + values_string
)
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
|
ExpectColumnValuesToMatchRegexList
|
python
|
apache__airflow
|
providers/ftp/tests/unit/ftp/hooks/test_ftp.py
|
{
"start": 4722,
"end": 8409
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
from airflow.models import Connection
create_connection_without_db(
Connection(conn_id="ftp_passive", conn_type="ftp", host="localhost", extra='{"passive": true}')
)
create_connection_without_db(
Connection(conn_id="ftp_active", conn_type="ftp", host="localhost", extra='{"passive": false}')
)
create_connection_without_db(
Connection(
conn_id="ftp_custom_port",
conn_type="ftp",
host="localhost",
port=10000,
extra='{"passive": true}',
)
)
create_connection_without_db(
Connection(
conn_id="ftp_custom_port_and_login",
conn_type="ftp",
host="localhost",
port=10000,
login="user",
password="pass123",
extra='{"passive": true}',
)
)
create_connection_without_db(
Connection(
conn_id="ftp_encoding",
conn_type="ftp",
host="localhost",
extra='{"encoding": "cp1251"}',
)
)
def _test_mode(self, hook_type, connection_id, expected_mode):
hook = hook_type(connection_id)
conn = hook.get_conn()
conn.set_pasv.assert_called_once_with(expected_mode)
@mock.patch("ftplib.FTP")
def test_ftp_passive_mode(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPHook
self._test_mode(FTPHook, "ftp_passive", True)
@mock.patch("ftplib.FTP")
def test_ftp_active_mode(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPHook
self._test_mode(FTPHook, "ftp_active", False)
@mock.patch("ftplib.FTP")
def test_ftp_custom_port(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPHook
hook = FTPHook("ftp_custom_port")
conn = hook.get_conn()
conn.connect.assert_called_once_with("localhost", 10000)
conn.login.assert_not_called()
conn.set_pasv.assert_called_once_with(True)
@mock.patch("ftplib.FTP")
def test_ftp_custom_port_and_login(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPHook
hook = FTPHook("ftp_custom_port_and_login")
conn = hook.get_conn()
conn.connect.assert_called_once_with("localhost", 10000)
conn.login.assert_called_once_with("user", "pass123")
conn.set_pasv.assert_called_once_with(True)
@mock.patch("ftplib.FTP_TLS")
def test_ftps_passive_mode(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPSHook
self._test_mode(FTPSHook, "ftp_passive", True)
@mock.patch("ftplib.FTP_TLS")
def test_ftps_active_mode(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPSHook
self._test_mode(FTPSHook, "ftp_active", False)
@mock.patch("ftplib.FTP")
def test_ftp_encoding_extra(self, mock_ftp):
from airflow.providers.ftp.hooks.ftp import FTPHook
hook = FTPHook("ftp_encoding")
hook.get_conn()
assert mock_ftp.mock_calls[0] == mock.call(encoding="cp1251")
@mock.patch("ftplib.FTP_TLS")
def test_ftps_encoding_extra(self, mock_ftp_tls):
from airflow.providers.ftp.hooks.ftp import FTPSHook
hook = FTPSHook("ftp_encoding")
hook.get_conn()
assert any(call.kwargs.get("encoding") == "cp1251" for call in mock_ftp_tls.mock_calls)
|
TestIntegrationFTPHook
|
python
|
great-expectations__great_expectations
|
tests/core/test_expectation_suite.py
|
{
"start": 31407,
"end": 34070
}
|
class ____:
@pytest.mark.unit
def test_equality_to_unsupported_class_is_false(
self, suite_with_single_expectation: ExpectationSuite
):
"""If we are not comparing to an ExpectationSuite or dict then we should return False."""
class UnsupportedClass:
pass
return_value = suite_with_single_expectation == UnsupportedClass()
assert not return_value
@pytest.mark.unit
def test_expectation_suite_equality_single_expectation_true(
self, suite_with_single_expectation: ExpectationSuite
):
different_but_equivalent_suite = deepcopy(suite_with_single_expectation)
assert suite_with_single_expectation == different_but_equivalent_suite
assert different_but_equivalent_suite == suite_with_single_expectation
assert not suite_with_single_expectation != different_but_equivalent_suite
assert not different_but_equivalent_suite != suite_with_single_expectation
@pytest.mark.parametrize(
"attribute,new_value",
[
pytest.param("name", "different_name"),
pytest.param(
"data_context",
MagicMock(),
marks=pytest.mark.xfail(
strict=True,
raises=AssertionError,
reason="Currently data_context is not considered in ExpectationSuite equality",
),
),
pytest.param("expectations", []),
pytest.param("suite_parameters", {"different": "suite_parameters"}),
pytest.param("meta", {"notes": "Different meta."}),
pytest.param(
"id",
"different_id",
marks=pytest.mark.xfail(
strict=True,
raises=AssertionError,
reason="Currently id is not considered in ExpectationSuite equality",
),
),
],
)
@pytest.mark.unit
def test_expectation_suite_equality_false(
self,
attribute: str,
new_value: Union[str, Dict[str, str]],
suite_with_single_expectation: ExpectationSuite,
):
different_but_equivalent_suite = deepcopy(suite_with_single_expectation)
setattr(different_but_equivalent_suite, attribute, new_value)
assert suite_with_single_expectation != different_but_equivalent_suite
assert different_but_equivalent_suite != suite_with_single_expectation
assert not suite_with_single_expectation == different_but_equivalent_suite
assert not different_but_equivalent_suite == suite_with_single_expectation
|
TestEqDunder
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/markup.py
|
{
"start": 3381,
"end": 10588
}
|
class ____(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
.. versionadded:: 0.7
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language``,
``.. code:: language`` and ``.. code-block:: language``
directives with a lexer for the given language (default:
``True``).
.. versionadded:: 0.8
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
# from docutils.parsers.rst.states
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
|
RstLexer
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/metadata/base.py
|
{
"start": 1341,
"end": 2691
}
|
class ____(Protocol):
@property
def name(self) -> str:
raise NotImplementedError()
@property
def value(self) -> str:
raise NotImplementedError()
@property
def group(self) -> str:
raise NotImplementedError()
def _convert_installed_files_path(
entry: Tuple[str, ...],
info: Tuple[str, ...],
) -> str:
"""Convert a legacy installed-files.txt path into modern RECORD path.
The legacy format stores paths relative to the info directory, while the
modern format stores paths relative to the package root, e.g. the
site-packages directory.
:param entry: Path parts of the installed-files.txt entry.
:param info: Path parts of the egg-info directory relative to package root.
:returns: The converted entry.
For best compatibility with symlinks, this does not use ``abspath()`` or
``Path.resolve()``, but tries to work with path parts:
1. While ``entry`` starts with ``..``, remove the equal amounts of parts
from ``info``; if ``info`` is empty, start appending ``..`` instead.
2. Join the two directly.
"""
while entry and entry[0] == "..":
if not info or info[-1] == "..":
info += ("..",)
else:
info = info[:-1]
entry = entry[1:]
return str(pathlib.Path(*info, *entry))
|
BaseEntryPoint
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 140439,
"end": 160937
}
|
class ____(Request):
"""
Edit task's details.
:param task: ID of the task
:type task: str
:param force: If not true, call fails if the task status is not 'created'
:type force: bool
:param name: Task name Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
:param parent: Parent task id Must be a completed task.
:type parent: str
:param project: Project ID of the project to which this task is assigned Must
exist[ab]
:type project: str
:param output_dest: Output storage id Must be a reference to an existing
storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param script: Script info
:type script: Script
"""
_service = "tasks"
_action = "edit"
_version = "2.9"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"default": "output",
"description": "System defined input/output indication",
"enum": ["input", "output"],
"type": "string",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {"description": "System defined type", "type": "string"},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"docker_cmd": {
"description": "Command for running docker script for the execution of the task",
"type": ["string", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model": {
"description": "Execution input model ID Not applicable for Register (Import) tasks",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_type_enum": {
"enum": [
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
},
"properties": {
"comment": {"description": "Free text comment ", "type": "string"},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": "object",
},
"execution": {
"$ref": "#/definitions/execution",
"description": "Task execution params",
},
"force": {
"default": False,
"description": "If not true, call fails if the task status is not 'created'",
"type": "boolean",
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": "object",
},
"name": {
"description": "Task name Unique within the company.",
"type": "string",
},
"output_dest": {
"description": "Output storage id Must be a reference to an existing storage.",
"type": "string",
},
"parent": {
"description": "Parent task id Must be a completed task.",
"type": "string",
},
"project": {
"description": "Project ID of the project to which this task is assigned Must exist[ab]",
"type": "string",
},
"script": {"$ref": "#/definitions/script", "description": "Script info"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "ID of the task", "type": "string"},
"type": {
"$ref": "#/definitions/task_type_enum",
"description": "Type of task",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
name: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
type: Any = None,
comment: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
output_dest: Optional[str] = None,
execution: Any = None,
hyperparams: Optional[dict] = None,
configuration: Optional[dict] = None,
script: Any = None,
**kwargs: Any
) -> None:
super(EditRequest, self).__init__(**kwargs)
self.task = task
self.force = force
self.name = name
self.tags = tags
self.system_tags = system_tags
self.type = type
self.comment = comment
self.parent = parent
self.project = project
self.output_dest = output_dest
self.execution = execution
self.hyperparams = hyperparams
self.configuration = configuration
self.script = script
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("output_dest")
def output_dest(self) -> Optional[str]:
return self._property_output_dest
@output_dest.setter
def output_dest(self, value: Optional[str]) -> None:
if value is None:
self._property_output_dest = None
return
self.assert_isinstance(value, "output_dest", six.string_types)
self._property_output_dest = value
@schema_property("execution")
def execution(self) -> Any:
return self._property_execution
@execution.setter
def execution(self, value: Any) -> None:
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("hyperparams")
def hyperparams(self) -> Optional[dict]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self) -> Optional[dict]:
return self._property_configuration
@configuration.setter
def configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_configuration = value
@schema_property("script")
def script(self) -> Any:
return self._property_script
@script.setter
def script(self, value: Any) -> None:
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
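# A minimal, hypothetical construction sketch for the request class defined
# above (field values are illustrative; only "task" is required per _schema):
# req = EditRequest(task="<task-id>", name="renamed experiment", force=True)
# req.comment = "edited via the 2.9 tasks.edit endpoint"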
|
EditRequest
|
python
|
huggingface__transformers
|
src/transformers/models/idefics3/modeling_idefics3.py
|
{
"start": 16784,
"end": 17799
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.scale_factor = config.scale_factor
self.modality_projection = Idefics3SimpleMLP(config)
def pixel_shuffle(self, x, scale_factor=2):
bsz, seq, embed_dim = x.size()
height = width = int(seq**0.5)
x = x.view(bsz, height, width, embed_dim)
x = x.view(bsz, height, int(width / scale_factor), embed_dim * scale_factor)
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, int(width / scale_factor), int(height / scale_factor), embed_dim * (scale_factor**2))
x = x.permute(0, 2, 1, 3)
x = x.reshape(bsz, int(seq / (scale_factor**2)), embed_dim * (scale_factor**2))
return x
def forward(self, image_hidden_states):
image_hidden_states = self.pixel_shuffle(image_hidden_states, self.scale_factor)
image_hidden_states = self.modality_projection(image_hidden_states)
return image_hidden_states
@auto_docstring
|
Idefics3Connector
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/aggregate.py
|
{
"start": 499,
"end": 668
}
|
class ____(
_HybridAsync,
_NearImageAsync,
_NearObjectAsync,
_NearTextAsync,
_NearVectorAsync,
_OverAllAsync,
):
pass
|
_AggregateCollectionAsync
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/backfill.py
|
{
"start": 2965,
"end": 3187
}
|
class ____(graphene.ObjectType):
backfill_id = graphene.NonNull(graphene.String)
launched_run_ids = graphene.List(graphene.String)
class Meta:
name = "LaunchBackfillSuccess"
|
GrapheneLaunchBackfillSuccess
|
python
|
kamyu104__LeetCode-Solutions
|
Python/remove-all-adjacent-duplicates-in-string.py
|
{
"start": 29,
"end": 347
}
|
class ____(object):
def removeDuplicates(self, S):
"""
:type S: str
:rtype: str
"""
result = []
for c in S:
if result and result[-1] == c:
result.pop()
else:
result.append(c)
return "".join(result)
|
Solution
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1alpha1_mutation.py
|
{
"start": 383,
"end": 5570
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'apply_configuration': 'V1alpha1ApplyConfiguration',
'json_patch': 'V1alpha1JSONPatch',
'patch_type': 'str'
}
attribute_map = {
'apply_configuration': 'applyConfiguration',
'json_patch': 'jsonPatch',
'patch_type': 'patchType'
}
def __init__(self, apply_configuration=None, json_patch=None, patch_type=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1Mutation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._apply_configuration = None
self._json_patch = None
self._patch_type = None
self.discriminator = None
if apply_configuration is not None:
self.apply_configuration = apply_configuration
if json_patch is not None:
self.json_patch = json_patch
self.patch_type = patch_type
@property
def apply_configuration(self):
"""Gets the apply_configuration of this V1alpha1Mutation. # noqa: E501
:return: The apply_configuration of this V1alpha1Mutation. # noqa: E501
:rtype: V1alpha1ApplyConfiguration
"""
return self._apply_configuration
@apply_configuration.setter
def apply_configuration(self, apply_configuration):
"""Sets the apply_configuration of this V1alpha1Mutation.
:param apply_configuration: The apply_configuration of this V1alpha1Mutation. # noqa: E501
:type: V1alpha1ApplyConfiguration
"""
self._apply_configuration = apply_configuration
@property
def json_patch(self):
"""Gets the json_patch of this V1alpha1Mutation. # noqa: E501
:return: The json_patch of this V1alpha1Mutation. # noqa: E501
:rtype: V1alpha1JSONPatch
"""
return self._json_patch
@json_patch.setter
def json_patch(self, json_patch):
"""Sets the json_patch of this V1alpha1Mutation.
:param json_patch: The json_patch of this V1alpha1Mutation. # noqa: E501
:type: V1alpha1JSONPatch
"""
self._json_patch = json_patch
@property
def patch_type(self):
"""Gets the patch_type of this V1alpha1Mutation. # noqa: E501
patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required. # noqa: E501
:return: The patch_type of this V1alpha1Mutation. # noqa: E501
:rtype: str
"""
return self._patch_type
@patch_type.setter
def patch_type(self, patch_type):
"""Sets the patch_type of this V1alpha1Mutation.
patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required. # noqa: E501
:param patch_type: The patch_type of this V1alpha1Mutation. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and patch_type is None: # noqa: E501
raise ValueError("Invalid value for `patch_type`, must not be `None`") # noqa: E501
self._patch_type = patch_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1Mutation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1Mutation):
return True
return self.to_dict() != other.to_dict()
|
V1alpha1Mutation
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/partitions/snap/snap.py
|
{
"start": 1102,
"end": 2451
}
|
class ____(ABC):
@classmethod
def from_def(cls, partitions_def: "PartitionsDefinition") -> "PartitionsSnap":
from dagster._core.definitions.partitions.definition import (
DynamicPartitionsDefinition,
MultiPartitionsDefinition,
StaticPartitionsDefinition,
TimeWindowPartitionsDefinition,
)
if isinstance(partitions_def, TimeWindowPartitionsDefinition):
return TimeWindowPartitionsSnap.from_def(partitions_def)
elif isinstance(partitions_def, StaticPartitionsDefinition):
return StaticPartitionsSnap.from_def(partitions_def)
elif isinstance(partitions_def, MultiPartitionsDefinition):
return MultiPartitionsSnap.from_def(partitions_def)
elif isinstance(partitions_def, DynamicPartitionsDefinition):
return DynamicPartitionsSnap.from_def(partitions_def)
else:
raise DagsterInvalidDefinitionError(
"Only static, time window, multi-dimensional partitions, and dynamic partitions"
" definitions with a name parameter are currently supported."
)
@abstractmethod
def get_partitions_definition(self) -> "PartitionsDefinition": ...
@whitelist_for_serdes(storage_name="ExternalTimeWindowPartitionsDefinitionData")
@record
|
PartitionsSnap
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 26078,
"end": 26203
}
|
class ____(admin.ModelAdmin):
list_display = ["choice"]
readonly_fields = ["choice"]
fields = ["choice"]
|
ChoiceList
|
python
|
prabhupant__python-ds
|
data_structures/graphs/two_cliques.py
|
{
"start": 667,
"end": 2008
}
|
class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.cgraph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def is_bipartite(self):
colors = [-1] * self.vertices
queue = []
for v in range(self.vertices):
if colors[v] == -1:
colors[v] = 1
queue.append(v)
while queue:
s = queue.pop(0)
for i in self.cgraph[s]:
if colors[i] == -1:
colors[i] = 1 - colors[s]
queue.append(i)
elif colors[i] == colors[s]:
return False
return True
def make_complement(self):
for src, dest in self.graph.items():
for v in range(self.vertices):
if v not in dest and src != v:
self.cgraph[src].append(v)
self.cgraph[v].append(src)
def two_cliques(self):
self.make_complement()
return self.is_bipartite()
g = Graph(5)
g.add_edge(0, 3)
g.add_edge(3, 4)
g.add_edge(0, 1)
g.add_edge(1, 2)
g.add_edge(2, 0)
print(g.two_cliques())
|
Graph
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_device_class_list.py
|
{
"start": 383,
"end": 6934
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1DeviceClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1DeviceClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1DeviceClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1DeviceClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1DeviceClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1DeviceClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1DeviceClassList. # noqa: E501
Items is the list of resource classes. # noqa: E501
:return: The items of this V1DeviceClassList. # noqa: E501
:rtype: list[V1DeviceClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1DeviceClassList.
Items is the list of resource classes. # noqa: E501
:param items: The items of this V1DeviceClassList. # noqa: E501
:type: list[V1DeviceClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1DeviceClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1DeviceClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1DeviceClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1DeviceClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1DeviceClassList. # noqa: E501
:return: The metadata of this V1DeviceClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1DeviceClassList.
:param metadata: The metadata of this V1DeviceClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeviceClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeviceClassList):
return True
return self.to_dict() != other.to_dict()
|
V1DeviceClassList
|
python
|
walkccc__LeetCode
|
solutions/1088. Confusing Number II/1088.py
|
{
"start": 0,
"end": 536
}
|
class ____:
def confusingNumberII(self, n: int) -> int:
digitToRotated = [(0, 0), (1, 1), (6, 9), (8, 8), (9, 6)]
def dfs(num: int, rotatedNum: int, unit: int) -> int:
ans = 0 if num == rotatedNum else 1
# Add one more digit
for digit, rotated in digitToRotated:
if digit == 0 and num == 0:
continue
nextNum = num * 10 + digit
if nextNum > n:
break
ans += dfs(nextNum, rotated * unit + rotatedNum, unit * 10)
return ans
return dfs(0, 0, 1)
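# Illustrative check (LeetCode's sample case, assumed here): for n = 20 the
# confusing numbers are 6, 9, 10, 16, 18 and 19, so the count is 6.
# Solution().confusingNumberII(20) == 6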
|
Solution
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/enumerated.py
|
{
"start": 4044,
"end": 10146
}
|
class ____(_StringType):
"""MySQL SET type."""
__visit_name__ = "SET"
def __init__(self, *values: str, **kw: Any):
"""Construct a SET.
E.g.::
Column("myset", SET("foo", "bar", "baz"))
The list of potential values is required in the case that this
set will be used to generate DDL for a table, or if the
:paramref:`.SET.retrieve_as_bitwise` flag is set to True.
:param values: The range of valid values for this SET. The values
are not quoted, they will be escaped and surrounded by single
quotes when generating the schema.
:param convert_unicode: Same flag as that of
:paramref:`.String.convert_unicode`.
:param collation: same as that of :paramref:`.String.collation`
:param charset: same as that of :paramref:`.VARCHAR.charset`.
:param ascii: same as that of :paramref:`.VARCHAR.ascii`.
:param unicode: same as that of :paramref:`.VARCHAR.unicode`.
:param binary: same as that of :paramref:`.VARCHAR.binary`.
:param retrieve_as_bitwise: if True, the data for the set type will be
persisted and selected using an integer value, where a set is coerced
into a bitwise mask for persistence. MySQL allows this mode which
has the advantage of being able to store values unambiguously,
such as the blank string ``''``. The datatype will appear
as the expression ``col + 0`` in a SELECT statement, so that the
value is coerced into an integer value in result sets.
This flag is required if one wishes
to persist a set that can store the blank string ``''`` as a value.
.. warning::
When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
essential that the list of set values is expressed in the
**exact same order** as exists on the MySQL database.
"""
self.retrieve_as_bitwise = kw.pop("retrieve_as_bitwise", False)
self.values = tuple(values)
if not self.retrieve_as_bitwise and "" in values:
raise exc.ArgumentError(
"Can't use the blank value '' in a SET without "
"setting retrieve_as_bitwise=True"
)
if self.retrieve_as_bitwise:
self._inversed_bitmap: dict[str, int] = {
value: 2**idx for idx, value in enumerate(self.values)
}
self._bitmap: dict[int, str] = {
2**idx: value for idx, value in enumerate(self.values)
}
length = max([len(v) for v in values] + [0])
kw.setdefault("length", length)
super().__init__(**kw)
def column_expression(
self, colexpr: ColumnElement[Any]
) -> ColumnElement[Any]:
if self.retrieve_as_bitwise:
return sql.type_coerce(
sql.type_coerce(colexpr, sqltypes.Integer) + 0, self
)
else:
return colexpr
def result_processor(
self, dialect: Dialect, coltype: Any
) -> Optional[_ResultProcessorType[Any]]:
if self.retrieve_as_bitwise:
def process(value: Union[str, int, None]) -> Optional[set[str]]:
if value is not None:
value = int(value)
return set(util.map_bits(self._bitmap.__getitem__, value))
else:
return None
else:
super_convert = super().result_processor(dialect, coltype)
def process(value: Union[str, set[str], None]) -> Optional[set[str]]: # type: ignore[misc] # noqa: E501
if isinstance(value, str):
# MySQLdb returns a string, let's parse
if super_convert:
value = super_convert(value)
assert value is not None
if TYPE_CHECKING:
assert isinstance(value, str)
return set(re.findall(r"[^,]+", value))
else:
# mysql-connector-python does a naive
# split(",") which throws in an empty string
if value is not None:
value.discard("")
return value
return process
def bind_processor(
self, dialect: Dialect
) -> _BindProcessorType[Union[str, int]]:
super_convert = super().bind_processor(dialect)
if self.retrieve_as_bitwise:
def process(
value: Union[str, int, set[str], None],
) -> Union[str, int, None]:
if value is None:
return None
elif isinstance(value, (int, str)):
if super_convert:
return super_convert(value) # type: ignore[arg-type, no-any-return] # noqa: E501
else:
return value
else:
int_value = 0
for v in value:
int_value |= self._inversed_bitmap[v]
return int_value
else:
def process(
value: Union[str, int, set[str], None],
) -> Union[str, int, None]:
# accept strings and int (actually bitflag) values directly
if value is not None and not isinstance(value, (int, str)):
value = ",".join(value)
if super_convert:
return super_convert(value) # type: ignore
else:
return value
return process
def adapt(self, cls: type, **kw: Any) -> Any:
kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise
return util.constructor_copy(self, cls, *self.values, **kw)
def __repr__(self) -> str:
return util.generic_repr(
self,
to_inspect=[SET, _StringType],
additional_kw=[
("retrieve_as_bitwise", False),
],
)
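# Illustrative sketch of the bitwise mapping documented above (hypothetical
# column definition; mirrors how _inversed_bitmap is built in __init__):
# for SET("foo", "bar", "baz", retrieve_as_bitwise=True), values map to the
# bits foo=1, bar=2, baz=4, so the set {"foo", "baz"} round-trips as 0b101 == 5.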
|
SET
|
python
|
sanic-org__sanic
|
sanic/errorpages.py
|
{
"start": 1245,
"end": 4326
}
|
class ____:
"""Base class that all renderers must inherit from.
This class defines the structure for rendering objects, handling the core functionality that specific renderers may extend.
Attributes:
request (Request): The incoming request object that needs rendering.
exception (Exception): Any exception that occurred and needs to be rendered.
debug (bool): Flag indicating whether to render with debugging information.
Methods:
dumps: A static method that must be overridden by subclasses to define the specific rendering.
Args:
request (Request): The incoming request object that needs rendering.
exception (Exception): Any exception that occurred and needs to be rendered.
debug (bool): Flag indicating whether to render with debugging information.
""" # noqa: E501
dumps = staticmethod(dumps)
def __init__(self, request: Request, exception: Exception, debug: bool):
self.request = request
self.exception = exception
self.debug = debug
@property
def headers(self) -> t.Dict[str, str]:
"""The headers to be used for the response."""
if isinstance(self.exception, SanicException):
return getattr(self.exception, "headers", {})
return {}
@property
def status(self):
"""The status code to be used for the response."""
if isinstance(self.exception, SanicException):
return getattr(self.exception, "status_code", FALLBACK_STATUS)
return FALLBACK_STATUS
@property
def text(self):
"""The text to be used for the response."""
if self.debug or isinstance(self.exception, SanicException):
return str(self.exception)
return FALLBACK_TEXT
@property
def title(self):
"""The title to be used for the response."""
status_text = STATUS_CODES.get(self.status, b"Error Occurred").decode()
return f"{self.status} — {status_text}"
def render(self) -> HTTPResponse:
"""Outputs the exception as a response.
Returns:
HTTPResponse: The response object.
"""
output = (
self.full
if self.debug and not getattr(self.exception, "quiet", False)
else self.minimal
)()
output.status = self.status
output.headers.update(self.headers)
return output
def minimal(self) -> HTTPResponse: # noqa
"""Provide a formatted message that is meant to not show any sensitive data or details.
This is the default fallback for production environments.
Returns:
HTTPResponse: The response object.
""" # noqa: E501
raise NotImplementedError
def full(self) -> HTTPResponse: # noqa
"""Provide a formatted message that has all details and is mean to be used primarily for debugging and non-production environments.
Returns:
HTTPResponse: The response object.
""" # noqa: E501
raise NotImplementedError
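# A minimal, hypothetical subclass sketch (not part of sanic itself): a
# concrete renderer only needs to supply minimal() and full(); render() above
# chooses between them based on debug and the exception's quiet flag.
# class PlainTextRenderer(BaseRenderer):
#     def minimal(self) -> HTTPResponse:
#         return HTTPResponse(self.title, content_type="text/plain")
#     def full(self) -> HTTPResponse:
#         return HTTPResponse(f"{self.title}\n{self.text}", content_type="text/plain")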
|
BaseRenderer
|
python
|
google__jax
|
jax/_src/path.py
|
{
"start": 730,
"end": 2162
}
|
class ____(Protocol):
"""A factory that creates a PurePath."""
def __call__(self, *pathsegments: str | os.PathLike) -> pathlib.Path:
...
Path: PathProtocol
# If etils.epath (aka etils[epath] to pip) is present, we prefer it because it
# can read and write to, e.g., GCS buckets. Otherwise we use the builtin
# pathlib and can only read/write to the local filesystem.
try:
from etils import epath # type: ignore
except ImportError:
logger.debug("etils.epath was not found. Using pathlib for file I/O.")
Path = pathlib.Path
epath_installed = False
else:
logger.debug("etils.epath found. Using etils.epath for file I/O.")
# Ultimately, epath.Path implements pathlib.Path. See:
# https://github.com/google/etils/blob/2083f3d932a88d8a135ef57112cd1f9aff5d559e/etils/epath/abstract_path.py#L47
Path = epath.Path
epath_installed = True
def make_jax_dump_dir(out_dir_path: str) -> pathlib.Path | None:
"""Make a directory or return the undeclared outputs directory if `sponge`."""
if not out_dir_path:
return None
if out_dir_path == "sponge":
out_dir_path = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR", "")
if not out_dir_path:
raise ValueError(
"Got output directory (e.g., via JAX_DUMP_IR_TO) 'sponge' but"
" TEST_UNDECLARED_OUTPUTS_DIR is not defined."
)
out_dir = Path(out_dir_path)
out_dir.mkdir(parents=True, exist_ok=True)
return out_dir
|
PathProtocol
|
python
|
numpy__numpy
|
numpy/_typing/_nbit_base.py
|
{
"start": 228,
"end": 2288
}
|
class ____:
"""
A type representing `numpy.number` precision during static type checking.
Used exclusively for the purpose of static type checking, `NBitBase`
represents the base of a hierarchical set of subclasses.
Each subsequent subclass is herein used for representing a lower level
of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.
.. versionadded:: 1.20
.. deprecated:: 2.3
Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper
bound, instead.
Examples
--------
Below is a typical usage example: `NBitBase` is herein used for annotating
a function that takes a float and integer of arbitrary precision
as arguments and returns a new float of whichever precision is largest
(*e.g.* ``np.float16 + np.int64 -> np.float64``).
.. code-block:: python
>>> from typing import TypeVar, TYPE_CHECKING
>>> import numpy as np
>>> import numpy.typing as npt
>>> S = TypeVar("S", bound=npt.NBitBase)
>>> T = TypeVar("T", bound=npt.NBitBase)
>>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]:
... return a + b
>>> a = np.float16()
>>> b = np.int64()
>>> out = add(a, b)
>>> if TYPE_CHECKING:
... reveal_locals()
... # note: Revealed local types are:
... # note: a: numpy.floating[numpy.typing._16Bit*]
... # note: b: numpy.signedinteger[numpy.typing._64Bit*]
... # note: out: numpy.floating[numpy.typing._64Bit*]
"""
# Deprecated in NumPy 2.3, 2025-05-01
def __init_subclass__(cls) -> None:
allowed_names = {
"NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit"
}
if cls.__name__ not in allowed_names:
raise TypeError('cannot inherit from final class "NBitBase"')
super().__init_subclass__()
@final
@set_module("numpy._typing")
# Silence errors about subclassing a `@final`-decorated class
|
NBitBase
|
python
|
pydata__xarray
|
xarray/tests/test_ufuncs.py
|
{
"start": 6524,
"end": 9472
}
|
class ____:
@pytest.fixture(autouse=True)
def setUp(self):
self.x = xr.DataArray([1, 2, 3])
self.xd = xr.DataArray(DuckArray([1, 2, 3]))
self.xd2 = xr.DataArray(DuckArray2([1, 2, 3]))
self.xt = xr.DataArray(np.datetime64("2021-01-01", "ns"))
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize("name", xu.__all__)
def test_ufuncs(self, name, request):
xu_func = getattr(xu, name)
np_func = getattr(np, name, None)
if np_func is None and np.lib.NumpyVersion(np.__version__) < "2.0.0":
pytest.skip(f"Ufunc {name} is not available in numpy {np.__version__}.")
if name == "isnat":
args = (self.xt,)
elif hasattr(np_func, "nin") and np_func.nin == 2: # type: ignore[union-attr]
args = (self.x, self.x) # type: ignore[assignment]
else:
args = (self.x,)
expected = np_func(*args) # type: ignore[misc]
actual = xu_func(*args)
if name in ["angle", "iscomplex"]:
np.testing.assert_equal(expected, actual.values)
else:
assert_identical(actual, expected)
def test_ufunc_pickle(self):
a = 1.0
cos_pickled = pickle.loads(pickle.dumps(xu.cos))
assert_identical(cos_pickled(a), xu.cos(a))
def test_ufunc_scalar(self):
actual = xu.sin(1)
assert isinstance(actual, float)
def test_ufunc_duck_array_dataarray(self):
actual = xu.sin(self.xd)
assert isinstance(actual.data, DuckArray)
def test_ufunc_duck_array_variable(self):
actual = xu.sin(self.xd.variable)
assert isinstance(actual.data, DuckArray)
def test_ufunc_duck_array_dataset(self):
ds = xr.Dataset({"a": self.xd})
actual = xu.sin(ds)
assert isinstance(actual.a.data, DuckArray)
@requires_dask
def test_ufunc_duck_dask(self):
import dask.array as da
x = xr.DataArray(da.from_array(DuckArray(np.array([1, 2, 3]))))
actual = xu.sin(x)
assert isinstance(actual.data._meta, DuckArray)
@requires_dask
@pytest.mark.xfail(reason="dask ufuncs currently dispatch to numpy")
def test_ufunc_duck_dask_no_array_ufunc(self):
import dask.array as da
# dask ufuncs currently only preserve duck arrays that implement __array_ufunc__
with patch.object(DuckArray, "__array_ufunc__", new=None, create=True):
x = xr.DataArray(da.from_array(DuckArray(np.array([1, 2, 3]))))
actual = xu.sin(x)
assert isinstance(actual.data._meta, DuckArray)
def test_ufunc_mixed_arrays_compatible(self):
actual = xu.add(self.xd, self.x)
assert isinstance(actual.data, DuckArray)
def test_ufunc_mixed_arrays_incompatible(self):
with pytest.raises(ValueError, match=r"Mixed array types"):
xu.add(self.xd, self.xd2)
|
TestXarrayUfuncs
|
python
|
vyperlang__vyper
|
tests/evm_backends/base_env.py
|
{
"start": 863,
"end": 938
}
|
class ____(Exception):
"""Exception raised when a call fails."""
|
EvmError
|
python
|
huggingface__transformers
|
tests/models/qwen2_moe/test_modeling_qwen2_moe.py
|
{
"start": 1338,
"end": 4238
}
|
class ____(CausalLMModelTest, unittest.TestCase):
test_all_params_have_gradient = False
model_tester_class = Qwen2MoeModelTester
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="Qwen2Moe flash attention does not support right padding")
# Ignore copy
def test_load_balancing_loss(self):
r"""
Let's make sure we can actually compute the loss and do a backward on it.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.num_experts = 8
config.expert_interval = 2
config.output_router_logits = True
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = Qwen2MoeForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask)
self.assertEqual(result.router_logits[0].shape, (91, config.num_experts))
torch.testing.assert_close(result.aux_loss.cpu(), torch.tensor(2, dtype=torch.float32), rtol=1e-2, atol=1e-2)
# First, we make sure that adding padding tokens doesn't change the loss
# loss(input_ids, attention_mask=None) == loss(input_ids + padding, attention_mask=attention_mask_with_padding)
pad_length = 1000
# Add padding tokens (assume that pad_token_id=1) to input_ids
padding_block = torch.ones(input_ids.shape[0], pad_length, dtype=torch.int32).to(torch_device)
padded_input_ids = torch.cat((padding_block, input_ids), dim=1) # this is to simulate padding to the left
padded_attention_mask = padded_input_ids.ne(1).to(torch_device)
padded_result = model(padded_input_ids, attention_mask=padded_attention_mask)
torch.testing.assert_close(result.aux_loss.cpu(), padded_result.aux_loss.cpu(), rtol=1e-4, atol=1e-4)
# We make sure that the loss of including padding tokens != the loss without padding tokens
# if attention_mask=None --> we don't exclude padding tokens
include_padding_result = model(padded_input_ids, attention_mask=None)
# This is to mimic torch.testing.assert_not_close
self.assertNotAlmostEqual(include_padding_result.aux_loss.item(), result.aux_loss.item())
@require_torch
|
Qwen2MoeModelTest
|
python
|
scipy__scipy
|
scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
|
{
"start": 8756,
"end": 11014
}
|
class ____:
def __init__(self):
self.eigs = eigsh
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
self.mattypes = [csr_array, aslinearoperator, np.asarray]
self.sigmas_modes = {None: ['normal'],
0.5: ['normal', 'buckling', 'cayley']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
rng = np.random.RandomState(2300)
Ar = generate_matrix(N, hermitian=True,
pos_definite=True,
rng=rng).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True,
rng=rng).astype('f').astype('d')
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True, rng=rng).astype('F').astype('D')
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
complex_=True, rng=rng).astype('F').astype('D')
v0 = rng.random(N)
# standard symmetric problem
SS = DictWithRepr("std-symmetric")
SS['mat'] = Ar
SS['v0'] = v0
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
# general symmetric problem
GS = DictWithRepr("gen-symmetric")
GS['mat'] = Ar
GS['bmat'] = M
GS['v0'] = v0
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
# standard hermitian problem
SH = DictWithRepr("std-hermitian")
SH['mat'] = Ac
SH['v0'] = v0
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
# general hermitian problem
GH = DictWithRepr("gen-hermitian")
GH['mat'] = Ac
GH['bmat'] = M
GH['v0'] = v0
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
# general hermitian problem with hermitian M
GHc = DictWithRepr("gen-hermitian-Mc")
GHc['mat'] = Ac
GHc['bmat'] = Mc
GHc['v0'] = v0
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
self.real_test_cases = [SS, GS]
self.complex_test_cases = [SH, GH, GHc]
|
SymmetricParams
|
python
|
run-llama__llama_index
|
llama-index-integrations/storage/docstore/llama-index-storage-docstore-redis/llama_index/storage/docstore/redis/base.py
|
{
"start": 244,
"end": 1608
}
|
class ____(KVDocumentStore):
"""
Redis Document (Node) store.
A Redis store for Document and Node objects.
Args:
redis_kvstore (RedisKVStore): Redis key-value store
namespace (str): namespace for the docstore
"""
def __init__(
self,
redis_kvstore: RedisKVStore,
namespace: Optional[str] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""Init a RedisDocumentStore."""
super().__init__(redis_kvstore, namespace=namespace, batch_size=batch_size)
# avoid conflicts with redis index store
self._node_collection = f"{self._namespace}/doc"
@classmethod
def from_redis_client(
cls,
redis_client: Any,
namespace: Optional[str] = None,
) -> "RedisDocumentStore":
"""Load a RedisDocumentStore from a Redis Client."""
redis_kvstore = RedisKVStore.from_redis_client(redis_client=redis_client)
return cls(redis_kvstore, namespace)
@classmethod
def from_host_and_port(
cls,
host: str,
port: int,
namespace: Optional[str] = None,
) -> "RedisDocumentStore":
"""Load a RedisDocumentStore from a Redis host and port."""
redis_kvstore = RedisKVStore.from_host_and_port(host, port)
return cls(redis_kvstore, namespace)
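# Minimal usage sketch (hypothetical connection details): documents stored
# through this class end up under the "<namespace>/doc" collection set above.
# store = RedisDocumentStore.from_host_and_port("localhost", 6379, namespace="my_docs")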
|
RedisDocumentStore
|
python
|
giampaolo__psutil
|
tests/test_connections.py
|
{
"start": 20510,
"end": 21136
}
|
class ____(PsutilTestCase):
def test_net_connection_constants(self):
ints = []
strs = []
for name in dir(psutil):
if name.startswith('CONN_'):
num = getattr(psutil, name)
str_ = str(num)
assert str_.isupper(), str_
                assert str_ not in strs
assert num not in ints
ints.append(num)
strs.append(str_)
if SUNOS:
psutil.CONN_IDLE # noqa: B018
psutil.CONN_BOUND # noqa: B018
if WINDOWS:
psutil.CONN_DELETE_TCB # noqa: B018
|
TestMisc
|
python
|
getsentry__sentry
|
src/sentry/plugins/bases/releasetracking.py
|
{
"start": 45,
"end": 236
}
|
class ____(Plugin2):
def get_plugin_type(self) -> str:
return "release-tracking"
def get_release_doc_html(self, hook_url):
raise NotImplementedError
|
ReleaseTrackingPlugin
|
python
|
great-expectations__great_expectations
|
tests/data_context/test_data_context_state_management.py
|
{
"start": 1082,
"end": 1688
}
|
class ____(ExpectationsStore):
def __init__(self) -> None:
self.save_count = 0
super().__init__()
def add(self, key, value, **kwargs):
ret = super().add(key=key, value=value, **kwargs)
self.save_count += 1
return ret
def update(self, key, value, **kwargs):
ret = super().update(key=key, value=value, **kwargs)
self.save_count += 1
return ret
def add_or_update(self, key, value, **kwargs):
ret = super().add_or_update(key=key, value=value, **kwargs)
self.save_count += 1
return ret
|
ExpectationsStoreSpy
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_unitofwork.py
|
{
"start": 17825,
"end": 22448
}
|
class ____(fixtures.MappedTest):
__requires__ = ("foreign_keys",)
@classmethod
def define_tables(cls, metadata):
Table(
"mytable",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
test_needs_fk=True,
)
Table(
"myothertable",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("parent_id", Integer),
Column("data", String(30)),
sa.ForeignKeyConstraint(
["parent_id"], ["mytable.id"], ondelete="CASCADE"
),
test_needs_fk=True,
)
@classmethod
def setup_classes(cls):
class MyClass(cls.Basic):
pass
class MyOtherClass(cls.Basic):
pass
def test_basic(self):
myothertable, MyClass, MyOtherClass, mytable = (
self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable,
)
self.mapper_registry.map_imperatively(MyOtherClass, myothertable)
self.mapper_registry.map_imperatively(
MyClass,
mytable,
properties={
"children": relationship(
MyOtherClass, passive_deletes=True, cascade="all"
)
},
)
with fixture_session() as session:
mc = MyClass()
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
mc.children.append(MyOtherClass())
session.add(mc)
session.flush()
session.expunge_all()
conn = session.connection()
eq_(
conn.scalar(select(func.count("*")).select_from(myothertable)),
4,
)
mc = session.get(MyClass, mc.id)
session.delete(mc)
session.flush()
eq_(conn.scalar(select(func.count("*")).select_from(mytable)), 0)
eq_(
conn.scalar(select(func.count("*")).select_from(myothertable)),
0,
)
@testing.emits_warning(
r".*'passive_deletes' is normally configured on one-to-many"
)
def test_backwards_pd(self):
"""Test that passive_deletes=True disables a delete from an m2o.
This is not the usual usage and it now raises a warning, but test
that it works nonetheless.
"""
myothertable, MyClass, MyOtherClass, mytable = (
self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable,
)
self.mapper_registry.map_imperatively(
MyOtherClass,
myothertable,
properties={
"myclass": relationship(
MyClass, cascade="all, delete", passive_deletes=True
)
},
)
self.mapper_registry.map_imperatively(MyClass, mytable)
session = fixture_session()
mc = MyClass()
mco = MyOtherClass()
mco.myclass = mc
session.add(mco)
session.commit()
eq_(session.scalar(select(func.count("*")).select_from(mytable)), 1)
eq_(
session.scalar(select(func.count("*")).select_from(myothertable)),
1,
)
session.expire(mco, ["myclass"])
session.delete(mco)
session.commit()
# mytable wasn't deleted, is the point.
eq_(session.scalar(select(func.count("*")).select_from(mytable)), 1)
eq_(
session.scalar(select(func.count("*")).select_from(myothertable)),
0,
)
def test_aaa_m2o_no_longer_emits_warning(self):
myothertable, MyClass, MyOtherClass, mytable = (
self.tables.myothertable,
self.classes.MyClass,
self.classes.MyOtherClass,
self.tables.mytable,
)
self.mapper_registry.map_imperatively(
MyOtherClass,
myothertable,
properties={
"myclass": relationship(
MyClass, cascade="all, delete", passive_deletes=True
)
},
)
self.mapper_registry.map_imperatively(MyClass, mytable)
sa.orm.configure_mappers()
|
PassiveDeletesTest
|
python
|
django__django
|
tests/i18n/test_extraction.py
|
{
"start": 46148,
"end": 48398
}
|
class ____(ExtractorTests):
work_subdir = "project_dir"
def test_no_locale_raises(self):
msg = (
"Unable to find a locale path to store translations for file "
"__init__.py. Make sure the 'locale' directory exists in an app "
"or LOCALE_PATHS setting is set."
)
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command("makemessages", locale=[LOCALE], verbosity=0)
# Working files are cleaned up on an error.
self.assertFalse(os.path.exists("./app_no_locale/test.html.py"))
def test_project_locale_paths(self):
self._test_project_locale_paths(os.path.join(self.test_dir, "project_locale"))
def test_project_locale_paths_pathlib(self):
self._test_project_locale_paths(Path(self.test_dir) / "project_locale")
def _test_project_locale_paths(self, locale_path):
"""
* translations for an app containing a locale folder are stored in that
folder
* translations outside of that app are in LOCALE_PATHS[0]
"""
with override_settings(LOCALE_PATHS=[locale_path]):
management.call_command("makemessages", locale=[LOCALE], verbosity=0)
project_de_locale = os.path.join(
self.test_dir, "project_locale", "de", "LC_MESSAGES", "django.po"
)
app_de_locale = os.path.join(
self.test_dir,
"app_with_locale",
"locale",
"de",
"LC_MESSAGES",
"django.po",
)
self.assertTrue(os.path.exists(project_de_locale))
self.assertTrue(os.path.exists(app_de_locale))
with open(project_de_locale) as fp:
po_contents = fp.read()
self.assertMsgId("This app has no locale directory", po_contents)
self.assertMsgId("This is a project-level string", po_contents)
with open(app_de_locale) as fp:
po_contents = fp.read()
self.assertMsgId("This app has a locale directory", po_contents)
@skipUnless(has_xgettext, "xgettext is mandatory for extraction tests")
|
CustomLayoutExtractionTests
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 6792,
"end": 7009
}
|
class ____(PollParentWithManyToMany):
restaurants = models.ManyToManyField(
"Restaurant", related_name="restaurants_poll_child"
)
_history_m2m_fields = [restaurants]
|
PollChildRestaurantWithManyToMany
|
python
|
pytorch__pytorch
|
test/dynamo/test_metrics_context.py
|
{
"start": 153,
"end": 4044
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.metrics = {}
def _on_exit(self, start_ns, end_ns, metrics, exc_type, exc_value):
# Save away the metrics to be validated in the test.
self.metrics = metrics.copy()
def test_context_exists(self):
"""
Setting a value without entering the context should raise.
"""
context = MetricsContext(self._on_exit)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.increment("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.set("m", 1)
with self.assertRaisesRegex(RuntimeError, "outside of a MetricsContext"):
context.update({"m", 1})
def test_nested_context(self):
"""
Only the outermost context should get an on_exit call, and it should
include everything.
"""
context = MetricsContext(self._on_exit)
with context:
with context:
context.set("m1", 1)
self.assertEqual(self.metrics, {})
context.set("m2", 2)
self.assertEqual(self.metrics, {"m1": 1, "m2": 2})
def test_set(self):
"""
Validate various ways to set metrics.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
context.set("m2", 2)
context.update({"m3": 3, "m4": 4})
self.assertEqual(self.metrics, {"m1": 1, "m2": 2, "m3": 3, "m4": 4})
def test_set_disallow_overwrite(self):
"""
Validate set won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.set("m1", 1)
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.set("m1", 2)
self.assertEqual(self.metrics, {"m1": 1})
def test_update_disallow_overwrite(self):
"""
Validate update won't overwrite.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
with self.assertRaisesRegex(RuntimeError, "already been set"):
context.update({"m1": 7, "m3": 3})
def test_update_allow_overwrite(self):
"""
Validate update will overwrite when given param.
"""
with MetricsContext(self._on_exit) as context:
context.update({"m1": 1, "m2": 2})
context.update({"m1": 7, "m3": 3}, overwrite=True)
self.assertEqual(self.metrics, {"m1": 7, "m2": 2, "m3": 3})
def test_add_to_set(self):
"""
Validate add_to_set.
"""
with MetricsContext(self._on_exit) as context:
context.add_to_set("m1", 1)
context.add_to_set("m1", 2)
context.add_to_set("m2", 3)
context.add_to_set("m2", 4)
self.assertEqual(self.metrics, {"m1": {1, 2}, "m2": {3, 4}})
self.assertTrue(isinstance(self.metrics["m1"], set))
self.assertTrue(isinstance(self.metrics["m2"], set))
def test_set_key_value(self):
with MetricsContext(self._on_exit) as context:
context.set_key_value("feature_usage", "k", True)
# Overrides allowed
context.set_key_value("feature_usage", "k2", True)
context.set_key_value("feature_usage", "k2", False)
self.assertEqual(self.metrics, {"feature_usage": {"k": True, "k2": False}})
def test_top_n(self):
top_n = TopN(3)
for k, v in (("seven", 7), ("four", 4), ("five", 5), ("six", 6), ("eight", 8)):
top_n.add(k, v)
self.assertEqual(len(top_n), 3)
print(list(top_n))
self.assertEqual(list(top_n), [("eight", 8), ("seven", 7), ("six", 6)])
if __name__ == "__main__":
run_tests()
|
TestMetricsContext
|
python
|
Pylons__pyramid
|
src/pyramid/config/actions.py
|
{
"start": 10966,
"end": 18584
}
|
class ____:
def __init__(self):
# keep a set of resolved discriminators to test against to ensure
# that a new action does not conflict with something already executed
self.resolved_ainfos = {}
# actions left over from a previous iteration
self.remaining_actions = []
# after executing an action we memoize its order to avoid any new
# actions sending us backward
self.min_order = None
# unique tracks the index of the action so we need it to increase
# monotonically across invocations to resolveConflicts
self.start = 0
# this function is licensed under the ZPL (stolen from Zope)
def resolveConflicts(actions, state=None):
"""Resolve conflicting actions
Given an actions list, identify and try to resolve conflicting actions.
Actions conflict if they have the same non-None discriminator.
Conflicting actions can be resolved if the include path of one of
the actions is a prefix of the includepaths of the other
conflicting actions and is unequal to the include paths in the
other conflicting actions.
Actions are resolved on a per-order basis because some discriminators
cannot be computed until earlier actions have executed. An action in an
earlier order may execute successfully only to find out later that it was
overridden by another action with a smaller include path. This will result
in a conflict as there is no way to revert the original action.
``state`` may be an instance of ``ConflictResolverState`` that
can be used to resume execution and resolve the new actions against the
list of executed actions from a previous call.
"""
if state is None:
state = ConflictResolverState()
# pick up where we left off last time, but track the new actions as well
state.remaining_actions.extend(normalize_actions(actions))
actions = state.remaining_actions
def orderandpos(v):
n, v = v
return (v['order'] or 0, n)
def orderonly(v):
n, v = v
return v['order'] or 0
sactions = sorted(enumerate(actions, start=state.start), key=orderandpos)
for order, actiongroup in itertools.groupby(sactions, orderonly):
# "order" is an integer grouping. Actions in a lower order will be
# executed before actions in a higher order. All of the actions in
# one grouping will be executed (its callable, if any will be called)
# before any of the actions in the next.
output = []
unique = {}
# error out if we went backward in order
if state.min_order is not None and order < state.min_order:
r = [
'Actions were added to order={} after execution had moved '
'on to order={}. Conflicting actions: '.format(
order, state.min_order
)
]
for i, action in actiongroup:
for line in str(action['info']).rstrip().split('\n'):
r.append(" " + line)
raise ConfigurationError('\n'.join(r))
for i, action in actiongroup:
# Within an order, actions are executed sequentially based on
# original action ordering ("i").
# "ainfo" is a tuple of (i, action) where "i" is an integer
# expressing the relative position of this action in the action
# list being resolved, and "action" is an action dictionary. The
# purpose of an ainfo is to associate an "i" with a particular
# action; "i" exists for sorting after conflict resolution.
ainfo = (i, action)
# wait to defer discriminators until we are on their order because
# the discriminator may depend on state from a previous order
discriminator = undefer(action['discriminator'])
action['discriminator'] = discriminator
if discriminator is None:
# The discriminator is None, so this action can never conflict.
# We can add it directly to the result.
output.append(ainfo)
continue
L = unique.setdefault(discriminator, [])
L.append(ainfo)
# Check for conflicts
conflicts = {}
for discriminator, ainfos in unique.items():
# We use (includepath, i) as a sort key because we need to
# sort the actions by the paths so that the shortest path with a
# given prefix comes first. The "first" action is the one with the
# shortest include path. We break sorting ties using "i".
def bypath(ainfo):
path, i = ainfo[1]['includepath'], ainfo[0]
return path, order, i
ainfos.sort(key=bypath)
ainfo, rest = ainfos[0], ainfos[1:]
_, action = ainfo
# ensure this new action does not conflict with a previously
# resolved action from an earlier order / invocation
prev_ainfo = state.resolved_ainfos.get(discriminator)
if prev_ainfo is not None:
_, paction = prev_ainfo
basepath, baseinfo = paction['includepath'], paction['info']
includepath = action['includepath']
# if the new action conflicts with the resolved action then
# note the conflict, otherwise drop the action as it's
# effectively overridden by the previous action
if (
includepath[: len(basepath)] != basepath
or includepath == basepath
):
L = conflicts.setdefault(discriminator, [baseinfo])
L.append(action['info'])
else:
output.append(ainfo)
basepath, baseinfo = action['includepath'], action['info']
for _, action in rest:
includepath = action['includepath']
# Test whether path is a prefix of opath
if (
includepath[: len(basepath)] != basepath
or includepath == basepath # not a prefix
):
L = conflicts.setdefault(discriminator, [baseinfo])
L.append(action['info'])
if conflicts:
raise ConfigurationConflictError(conflicts)
# sort resolved actions by "i" and yield them one by one
for i, action in sorted(output, key=operator.itemgetter(0)):
# do not memoize the order until we resolve an action inside it
state.min_order = action['order']
state.start = i + 1
state.remaining_actions.remove(action)
state.resolved_ainfos[action['discriminator']] = (i, action)
yield action
def normalize_actions(actions):
"""Convert old-style tuple actions to new-style dicts."""
result = []
for v in actions:
if not isinstance(v, dict):
v = expand_action_tuple(*v)
result.append(v)
return result
def expand_action_tuple(
discriminator,
callable=None,
args=(),
kw=None,
includepath=(),
info=None,
order=0,
introspectables=(),
):
if kw is None:
kw = {}
return dict(
discriminator=discriminator,
callable=callable,
args=args,
kw=kw,
includepath=includepath,
info=info,
order=order,
introspectables=introspectables,
)
@implementer(IActionInfo)
|
ConflictResolverState
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/math_ops_test.py
|
{
"start": 1859,
"end": 6757
}
|
class ____(test_util.TensorFlowTestCase):
def testReduceAllDims(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
y_tf = self.evaluate(math_ops.reduce_sum(x))
self.assertEqual(y_tf, 21)
def testReduceExtendType(self):
in_f32 = np.random.randn(1000, 1000).astype(np.float32)
in_bf16 = math_ops.cast(in_f32, dtypes.bfloat16)
out_f32 = self.evaluate(math_ops.reduce_sum(in_f32))
out_bf16 = self.evaluate(math_ops.reduce_sum(in_bf16))
expected = math_ops.cast(out_f32, dtypes.bfloat16)
self.assertAllClose(out_bf16, expected, 1e-3)
def testCountNonzero(self):
# simple case
x = np.array([[0, -2, 0], [4, 0, 0]], dtype=np.int32)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 2)
# boolean input
x = math_ops.not_equal(x, 0)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 2)
# would overflow if int8 were used for internal calculations
x = 2 * np.ones(512, dtype=np.int8)
self.assertEqual(self.evaluate(math_ops.count_nonzero(x)), 512)
def testReduceExplicitAxes(self):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
with test_util.device(use_gpu=True):
for axis in (0, -2):
self.assertAllEqual(
self.evaluate(math_ops.reduce_sum(x, axis=axis)), [5, 7, 9])
for axis in (1, -1):
self.assertAllEqual(
self.evaluate(math_ops.reduce_sum(x, axis=axis)), [6, 15])
for axis in (None, (0, 1), (1, 0), (-1, 0), (0, -1), (-2, 1), (1, -2),
(-1, -2), (-2, -1)):
self.assertEqual(self.evaluate(math_ops.reduce_sum(x, axis=axis)), 21)
def testReduceInvalidAxis(self):
if context.executing_eagerly():
# The shape check runs at graph construction time. In eager mode the
# check is skipped, so a result is returned even for an invalid shape.
return
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
axis = np.array([[0], [1]])
with self.assertRaisesRegex(ValueError, "must be at most rank 1"):
math_ops.reduce_sum(x, axis)
def testReduceVar(self):
x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
self.assertAllClose(self.evaluate(math_ops.reduce_variance(x)), 0)
self.assertAllClose(
self.evaluate(math_ops.reduce_variance(x, axis=0)), [0, 0, 0])
x = [[1, 2, 1, 1], [1, 1, 0, 1]]
with self.assertRaisesRegex(TypeError, "must be either real or complex"):
math_ops.reduce_variance(x)
x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
self.assertEqual(self.evaluate(math_ops.reduce_variance(x)), 0.25)
x_np = np.array(x)
self.assertEqual(np.var(x_np), 0.25)
self.assertEqual(self.evaluate(math_ops.reduce_variance(x_np)), 0.25)
x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
[]])
self.assertAllClose(math_ops.reduce_variance(x, axis=0), [0., 16., 1., 0.])
def testReduceVarComplex(self):
# Ensure that complex values are handled to be consistent with numpy
complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
(np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
(np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
for y, dtype in complex_ys:
y_result = math_ops.reduce_variance(y)
self.assertEqual(np.var(y), 1.0)
self.assertEqual(self.evaluate(y_result), 1.0)
self.assertEqual(y_result.dtype, dtype)
def testReduceStd(self):
x = np.array([[0, 0, 0], [0, 0, 0]], "float32")
self.assertAllClose(self.evaluate(math_ops.reduce_std(x)), 0)
self.assertAllClose(
self.evaluate(math_ops.reduce_std(x, axis=0)), [0, 0, 0])
x = [[1, 2, 1, 1], [1, 1, 0, 1]]
with self.assertRaisesRegex(TypeError, "must be either real or complex"):
math_ops.reduce_std(x)
x = [[1., 2., 1., 1.], [1., 1., 0., 1.]]
self.assertEqual(self.evaluate(math_ops.reduce_std(x)), 0.5)
x_np = np.array(x)
self.assertEqual(np.std(x_np), 0.5)
self.assertEqual(self.evaluate(math_ops.reduce_std(x_np)), 0.5)
x = ragged_factory_ops.constant([[5., 1., 4., 1.], [], [5., 9., 2.], [5.],
[]])
self.assertAllClose(math_ops.reduce_std(x, axis=0), [0., 4., 1., 0.])
def testReduceStdComplex(self):
# Ensure that complex values are handled to be consistent with numpy
complex_ys = [([0 - 1j, 0 + 1j], dtypes.float64),
(np.array([0 - 1j, 0 + 1j], "complex64"), dtypes.float32),
(np.array([0 - 1j, 0 + 1j], "complex128"), dtypes.float64)]
for y, dtype in complex_ys:
y_result = math_ops.reduce_std(y)
self.assertEqual(np.std(y), 1.0)
self.assertEqual(self.evaluate(y_result), 1.0)
self.assertEqual(y_result.dtype, dtype)
@test_util.run_all_in_graph_and_eager_modes
|
ReduceTest
|
python
|
explosion__spaCy
|
spacy/lang/ko/__init__.py
|
{
"start": 3327,
"end": 4181
}
|
class ____(Language):
lang = "ko"
Defaults = KoreanDefaults
def try_mecab_import() -> None:
try:
from natto import MeCab
return MeCab
except ImportError:
raise ImportError(
'The Korean tokenizer ("spacy.ko.KoreanTokenizer") requires '
"[mecab-ko](https://bitbucket.org/eunjeon/mecab-ko/src/master/README.md), "
"[mecab-ko-dic](https://bitbucket.org/eunjeon/mecab-ko-dic), "
"and [natto-py](https://github.com/buruzaemon/natto-py)"
) from None
def check_spaces(text, tokens):
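# Yields, for each token, whether it is followed by whitespace in the
# original text; the final token always yields False.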
prev_end = -1
start = 0
for token in tokens:
idx = text.find(token, start)
if prev_end > 0:
yield prev_end != idx
prev_end = idx + len(token)
start = prev_end
if start > 0:
yield False
__all__ = ["Korean"]
|
Korean
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 114816,
"end": 123659
}
|
class ____(
torch._dynamo.test_case.TestCaseWithNestedGraphBreaks, LoggingTestCase
):
@make_logging_test(recompiles=True)
def test_vmap_grad_guard_ok(self, records):
vmap = torch.vmap
grad = torch.func.grad
def g(x):
return vmap(grad(torch.sin))(x)
@torch.compile(backend="eager")
def fn(x):
return vmap(g)(x)
x = torch.randn(4, 5)
y = fn(x)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(x.cos(), y)
# Calling the same function again won't have any effect on guards
fn(x)
self.assertEqual(len(records), 0)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_grad_guard_fail(self, records):
grad = torch.func.grad
@torch.compile(backend="eager")
def fn(x):
return grad(torch.sin)(x.sum())
x = torch.randn([])
fn(x)
self.assertEqual(len(records), 0)
# calling again should not invalidate the graph
fn(x)
self.assertEqual(len(records), 0)
# call grad should retrigger compilation
x = torch.randn(3)
grad(fn)(x)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([])""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_dual_level_guard(self, records):
fwAD = torch.autograd.forward_ad
@torch.compile(backend="eager", fullgraph=True)
def fn(foo, tangent):
with fwAD.dual_level():
dual = fwAD.make_dual(foo, tangent[1:])
return dual
foo = torch.rand(2)
tangent = torch.rand(3)
fn(foo, tangent)
self.assertEqual(len(records), 0)
# calling again should not invalidate the graph
fn(foo, tangent)
self.assertEqual(len(records), 0)
# assertRaises is only here because Nested forward mode AD is not supported
with self.assertRaises(torch._dynamo.exc.InternalTorchDynamoError):
with fwAD.dual_level():
fn(foo, tangent)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "forward_ad")
self.assertIn(
"""torch.autograd.forward_ad._current_level == -1""",
munge_exc(record.getMessage()),
)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_jvp_guard_fail(self, records):
jvp = torch.func.jvp
vmap = torch.func.vmap
@torch.compile(backend="eager")
def fn(x):
return jvp(torch.sin, (x,), (x,))
x = torch.randn(3, 4)
fn(x)
self.assertEqual(len(records), 0)
# calling again should not invalidate the graph
fn(x)
self.assertEqual(len(records), 0)
# call jvp should retrigger compilation
x = torch.randn(3, 4, 5)
jvp(vmap(fn), (x,), (x,))
self.assertGreater(len(records), 0)
if self.hasRecord(records, "pyfunctorch"):
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([])""",
munge_exc(record.getMessage()),
)
elif self.hasRecord(records, "forward_ad"):
record = self.getRecord(records, "forward_ad")
self.assertIn(
"""torch.autograd.forward_ad._current_level == -1""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_vmap_guard_ok(self, records):
@torch.compile(backend="eager")
def fn(x):
return torch.vmap(lambda x: x.sin())(x)
x = torch.randn(3, 3, 4, 5)
y = fn(x)
# sanity check
self.assertEqual(len(records), 0)
self.assertEqual(x.sin(), y)
# Calling the same function again won't have any effect on guards
z = fn(x)
self.assertEqual(len(records), 0)
self.assertEqual(x.sin(), z)
# calling with a different object will also not affect guards
w = fn(z)
self.assertEqual(len(records), 0)
self.assertEqual(z.sin(), w)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_vmap_guard_fail_different_state(self, records):
@torch.compile(backend="eager")
def fn(x):
return torch.vmap(lambda x: x.sin())(x)
x = torch.zeros(3, 4)
y = torch.vmap(fn, randomness="same")(x)
self.assertEqual(x.sin(), y)
self.assertEqual(len(records), 0)
# calling vmap with a different randomness setting should retrigger compilation
y = torch.vmap(fn, randomness="different")(x)
self.assertEqual(x.sin(), y)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([('Vmap', 1, 'same')])""",
record.getMessage(),
)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_vmap_guard_fail(self, records):
@torch.compile(backend="eager")
def fn(x):
return torch.vmap(lambda x: x.sin())(x)
x = torch.zeros(3, 3, 4, 5)
y = torch.vmap(fn)(x)
self.assertEqual(x.sin(), y)
self.assertEqual(len(records), 0)
# call vmap(vmap(fn))(x) should retrigger compilation as
# _functorch.current_level() is not the same
x = torch.zeros(3, 3, 3, 4, 5)
y = torch.vmap(torch.vmap(fn))(x)
self.assertEqual(x.sin(), y)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([('Vmap', 1, 'error')])""",
record.getMessage(),
)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_vmap_grad_vmap_guard_fail(self, records):
vmap = torch.vmap
grad = torch.func.grad
def g(x):
y = vmap(torch.sin, randomness="same")(x)
return y.sum(0)
@torch.compile(backend="eager")
def fn(x):
return grad(g)(x)
x = torch.randn(3, 3)
y = vmap(fn, randomness="error")(x)
self.assertEqual(x.cos(), y)
# previous FX graph should be invalidated
x = torch.randn(3, 3, 4)
y = vmap(vmap(fn, randomness="different"))(x)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([('Vmap', 1, 'error')])""",
munge_exc(record.getMessage()),
)
@xfailIfTorchDynamo
@make_logging_test(recompiles=True)
def test_vmap_recompile_different_states(self, records):
@torch.compile(backend="eager")
def fn(x):
return torch.vmap(lambda x: x.sin())(x)
x = torch.zeros(3, 3, 4, 5)
torch.vmap(fn, randomness="same")(x)
self.assertEqual(len(records), 0) # sanity check
torch.vmap(fn, randomness="different")(x)
self.assertGreater(len(records), 0)
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([('Vmap', 1, 'same')])""",
munge_exc(record.getMessage()),
)
@make_logging_test(guards=True)
def test_emit_functorch_guard_if_active(self, records):
@torch.compile(backend="eager")
def fn(x):
return torch.sin(x)
x = torch.randn(3, 4)
_ = fn(x)
self.assertFalse(self.hasRecord(records, "pyfunctorch")) # sanity check
_ = torch.vmap(fn)(x)
self.assertTrue(self.hasRecord(records, "pyfunctorch"))
record = self.getRecord(records, "pyfunctorch")
self.assertIn(
"""torch._functorch.pyfunctorch.compare_functorch_state([('Vmap', 1, 'error')])""",
munge_exc(record.getMessage()),
)
@make_logging_test(recompiles=True)
def test_linearize_recompiles(self, records):
@torch.compile(backend="eager")
def fn(x):
out, jvp_fn = torch.func.linearize(torch.sin, x)
return out, jvp_fn(x)
x = torch.randn(2, 3)
fn(x)
self.assertEqual(len(records), 0)
z = torch.randn(2, 3)
fn(z)
self.assertEqual(len(records), 0)
y = torch.randn(3, 4)
fn(y)
self.assertGreater(len(records), 0)
|
HigherOrderOpVmapGuardTests
|
python
|
realpython__materials
|
python-textual/horizontal_scroll.py
|
{
"start": 129,
"end": 513
}
|
class ____(App):
def compose(self):
with HorizontalScroll():
for i in range(NUM_BOXES):
static = Static(f"Static {i + 1}")
static.styles.border = ("solid", "green")
static.styles.width = "10%"
yield static
if __name__ == "__main__":
app = HorizontalScrollApp()
app.run()
|
HorizontalScrollApp
|
python
|
getsentry__sentry
|
tests/apidocs/endpoints/projects/test_service_hook_details.py
|
{
"start": 136,
"end": 1098
}
|
class ____(APIDocsTestCase):
def setUp(self) -> None:
hook = self.create_service_hook(project=self.project, events=("event.created",))
self.url = reverse(
"sentry-api-0-project-service-hook-details",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"hook_id": hook.guid,
},
)
self.login_as(user=self.user)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_put(self) -> None:
data = {"url": "https://example.com/other-sentry-hook", "events": ["event.created"]}
response = self.client.put(self.url, data)
request = RequestFactory().put(self.url, data)
self.validate_schema(request, response)
|
ProjectServiceHookDetailsDocs
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 29957,
"end": 31063
}
|
class ____(TypeDefinition):
__slots__ = ('loc', 'name', 'values', 'directives',)
_fields = ('name', 'values',)
def __init__(self, name, values, loc=None, directives=None):
self.loc = loc
self.name = name
self.values = values
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, EnumTypeDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.values == other.values and
self.directives == other.directives
)
)
def __repr__(self):
return ('EnumTypeDefinition('
'name={self.name!r}'
', values={self.values!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.values,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
|
EnumTypeDefinition
|
python
|
mlflow__mlflow
|
mlflow/types/chat.py
|
{
"start": 219,
"end": 939
}
|
class ____(BaseModel):
"""
Represents an image URL.
Attributes:
url: Either a URL of an image or base64 encoded data.
https://platform.openai.com/docs/guides/vision?lang=curl#uploading-base64-encoded-images
detail: The level of resolution for the image when the model receives it.
For example, when set to "low", the model will see an image resized to
512x512 pixels, which consumes fewer tokens. In OpenAI, this is optional
and defaults to "auto".
https://platform.openai.com/docs/guides/vision?lang=curl#low-or-high-fidelity-image-understanding
"""
url: str
detail: Literal["auto", "low", "high"] | None = None
|
ImageUrl
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/load_test.py
|
{
"start": 123547,
"end": 124018
}
|
class ____(module.Module):
def __init__(self, rows, cols):
super().__init__()
self.rows = rows
self.cols = cols
self.table = None
def __call__(self, x):
with ops.device("/cpu:0"):
self.table = variables.Variable(
constant_op.constant(1.0, shape=[self.rows, self.cols])
)
x = math_ops.matmul(self.table, x)
x = math_ops.reduce_sum(x, axis=0)
return x
@parameterized.named_parameters(*_test_params())
|
_TestModel
|
python
|
networkx__networkx
|
benchmarks/benchmarks/benchmark_algorithms.py
|
{
"start": 7060,
"end": 7881
}
|
class ____:
"""Benchmark for shortest path algorithms on various weighted graphs."""
timeout = 120
_seed = 42
param_names = ["graph"]
_graphs = _make_weighted_benchmark_graphs(_seed)
params = list(_graphs)
def setup(self, graph):
f, args, kwargs = self._graphs[graph]
self.G = f(*args, **kwargs)
self.nodes = sorted(self.G)
def time_weighted_single_source_dijkstra(self, graph):
source = self.nodes[0]
target = self.nodes[-1]
try:
_ = nx.single_source_dijkstra(self.G, source, target)
except nx.NetworkXNoPath:
pass
def time_shortest_path(self, graph):
source = self.nodes[0]
target = self.nodes[-1]
nx.shortest_path(self.G, source, target, weight="weight")
|
WeightedGraphBenchmark
|
python
|
numba__numba
|
numba/tests/test_parallel_backend.py
|
{
"start": 5593,
"end": 8866
}
|
class ____(TestCase):
"""
Base class for testing the parallel backends
"""
all_impls = [
jit_runner(nopython=True),
jit_runner(nopython=True, cache=True),
jit_runner(nopython=True, nogil=True),
linalg_runner(nopython=True),
linalg_runner(nopython=True, nogil=True),
vectorize_runner(nopython=True),
vectorize_runner(nopython=True, target='parallel'),
vectorize_runner(nopython=True, target='parallel', cache=True),
guvectorize_runner(nopython=True),
guvectorize_runner(nopython=True, target='parallel'),
guvectorize_runner(nopython=True, target='parallel', cache=True),
]
if not _parfors_unsupported:
parfor_impls = [
jit_runner(nopython=True, parallel=True),
jit_runner(nopython=True, parallel=True, cache=True),
linalg_runner(nopython=True, parallel=True),
linalg_runner(nopython=True, parallel=True, cache=True),
]
all_impls.extend(parfor_impls)
if config.NUMBA_NUM_THREADS < 2:
# Not enough cores
masks = []
else:
masks = [1, 2]
mask_impls = []
for impl in all_impls:
for mask in masks:
mask_impls.append(mask_runner(impl, mask))
parallelism = ['threading', 'random']
parallelism.append('multiprocessing_spawn')
if _HAVE_OS_FORK:
parallelism.append('multiprocessing_fork')
parallelism.append('multiprocessing_forkserver')
runners = {
'concurrent_jit': [
jit_runner(nopython=True, parallel=(not _parfors_unsupported)),
],
'concurrent_vectorize': [
vectorize_runner(nopython=True, target='parallel'),
],
'concurrent_guvectorize': [
guvectorize_runner(nopython=True, target='parallel'),
],
'concurrent_mix_use': all_impls,
'concurrent_mix_use_masks': mask_impls,
}
safe_backends = {'omp', 'tbb'}
def run_compile(self, fnlist, parallelism='threading'):
self._cache_dir = temp_directory(self.__class__.__name__)
with override_config('CACHE_DIR', self._cache_dir):
if parallelism == 'threading':
thread_impl(fnlist)
elif parallelism == 'multiprocessing_fork':
fork_proc_impl(fnlist)
elif parallelism == 'multiprocessing_forkserver':
forkserver_proc_impl(fnlist)
elif parallelism == 'multiprocessing_spawn':
spawn_proc_impl(fnlist)
elif parallelism == 'multiprocessing_default':
default_proc_impl(fnlist)
elif parallelism == 'random':
ps = [thread_impl, spawn_proc_impl]
if _HAVE_OS_FORK:
ps.append(fork_proc_impl)
ps.append(forkserver_proc_impl)
random.shuffle(ps)
for impl in ps:
impl(fnlist)
else:
raise ValueError(
'Unknown parallelism supplied %s' % parallelism)
_specific_backends = config.THREADING_LAYER in ('omp', 'tbb', 'workqueue')
@unittest.skipUnless(_specific_backends, "Threading layer not explicit")
|
TestParallelBackendBase
|
python
|
numba__numba
|
numba/core/typeinfer.py
|
{
"start": 15480,
"end": 16214
}
|
class ____(object):
def __init__(self, target, pair, loc):
self.target = target
self.pair = pair
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of pair-first at {loc}",
loc=self.loc):
typevars = typeinfer.typevars
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
assert (isinstance(tp.first_type, types.UndefinedFunctionType)
or tp.first_type.is_precise())
typeinfer.add_type(self.target, tp.first_type, loc=self.loc)
|
PairFirstConstraint
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/isinstance6.py
|
{
"start": 532,
"end": 591
}
|
class ____(Protocol):
def other(self) -> None: ...
|
Proto2
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 588086,
"end": 588503
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("about", "name", "url")
about = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="about")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
|
RepositoryContactLink
|
python
|
huggingface__transformers
|
src/transformers/models/hgnet_v2/modular_hgnet_v2.py
|
{
"start": 21188,
"end": 24392
}
|
class ____(HGNetV2PreTrainedModel):
def __init__(self, config: HGNetV2Config):
super().__init__(config)
self.num_labels = config.num_labels
self.embedder = HGNetV2Embeddings(config)
self.encoder = HGNetV2Encoder(config)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.flatten = nn.Flatten()
self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
# classification head
self.classifier = nn.ModuleList([self.avg_pool, self.flatten])
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> import requests
>>> from transformers import HGNetV2ForImageClassification, AutoImageProcessor
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = HGNetV2ForImageClassification.from_pretrained("ustc-community/hgnet-v2")
>>> processor = AutoImageProcessor.from_pretrained("ustc-community/hgnet-v2")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
for layer in self.classifier:
last_hidden_state = layer(last_hidden_state)
logits = self.fc(last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["HGNetV2Config", "HGNetV2Backbone", "HGNetV2PreTrainedModel", "HGNetV2ForImageClassification"]
|
HGNetV2ForImageClassification
|
python
|
ray-project__ray
|
release/ray_release/tests/test_cluster_manager.py
|
{
"start": 30477,
"end": 31671
}
|
class ____(unittest.TestCase):
def setUp(self) -> None:
self.sdk = get_anyscale_sdk()
self.cluster_compute = TEST_CLUSTER_COMPUTE
self.cluster_manager = FullClusterManager(
project_id=UNIT_TEST_PROJECT_ID,
sdk=self.sdk,
test_name=f"unit_test__{self.__class__.__name__}__endToEnd",
)
def tearDown(self) -> None:
self.cluster_manager.terminate_cluster()
self.cluster_manager.delete_configs()
def testSessionEndToEnd(self):
self.cluster_manager.set_cluster_env(self.cluster_env)
self.cluster_manager.set_cluster_compute(self.cluster_compute)
self.cluster_manager.build_configs(timeout=1200)
# Reset, so that we fetch them again and test that code path
self.cluster_manager.cluster_compute_id = None
self.cluster_manager.cluster_env_id = None
self.cluster_manager.cluster_env_build_id = None
self.cluster_manager.build_configs(timeout=1200)
# Start cluster
self.cluster_manager.start_cluster(timeout=1200)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
LiveSessionManagerTest
|
python
|
google__jax
|
jax/_src/sharding_impls.py
|
{
"start": 18265,
"end": 19169
}
|
class ____:
"""A hardware axis context for parallel computations that use the GSPMD partitioner.
This includes the mesh that will later be used to execute this computation,
as well as a set of mesh axes that are currently lowered in the MANUAL
sharding mode.
"""
mesh: mesh_lib.Mesh
manual_axes: frozenset[MeshAxisName] = frozenset()
@property
def axis_env(self):
# All collectives that touch axis_env should remember to set use_global_device_ids
# when this context is enabled!
return self.unsafe_axis_env
@property
def unsafe_axis_env(self):
return AxisEnv(
nreps=self.mesh.size,
names=self.mesh.axis_names,
sizes=tuple(self.mesh.shape.values()))
def extend_manual(self, axes: frozenset[MeshAxisName]) -> SPMDAxisContext:
return SPMDAxisContext(self.mesh, self.manual_axes | axes)
@dataclasses.dataclass(frozen=True)
|
SPMDAxisContext
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/categorical_encoding/__init__.py
|
{
"start": 949,
"end": 4429
}
|
class ____(AutoSklearnChoice):
@classmethod
def get_components(cls: BaseEstimator) -> Dict[str, BaseEstimator]:
components: Dict[str, BaseEstimator] = OrderedDict()
components.update(_ohes)
components.update(additional_components.components)
return components
def get_hyperparameter_search_space(
self,
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
default: Optional[str] = None,
include: Optional[Dict[str, str]] = None,
exclude: Optional[Dict[str, str]] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
if dataset_properties is None:
dataset_properties = {}
# Compile a list of legal preprocessors for this problem
available_preprocessors = self.get_available_components(
dataset_properties=dataset_properties, include=include, exclude=exclude
)
if len(available_preprocessors) == 0:
raise ValueError(
"No ohe hot encoders found, please add any one hot encoder "
"component."
)
if default is None:
defaults = ["one_hot_encoding", "no_encoding"]
for default_ in defaults:
if default_ in available_preprocessors:
default = default_
break
preprocessor = CategoricalHyperparameter(
"__choice__", list(available_preprocessors.keys()), default_value=default
)
cs.add_hyperparameter(preprocessor)
for name in available_preprocessors:
preprocessor_configuration_space = available_preprocessors[
name
].get_hyperparameter_search_space(dataset_properties=dataset_properties)
parent_hyperparameter = {"parent": preprocessor, "value": name}
cs.add_configuration_space(
name,
preprocessor_configuration_space,
parent_hyperparameter=parent_hyperparameter,
)
self.configuration_space = cs
self.dataset_properties = dataset_properties
return cs
def set_hyperparameters(
self,
feat_type: FEAT_TYPE_TYPE,
configuration: Configuration,
init_params: Optional[Dict[str, Any]] = None,
) -> "OHEChoice":
new_params = {}
params = configuration.get_dictionary()
choice = params["__choice__"]
del params["__choice__"]
for param, value in params.items():
param = param.replace(choice, "").replace(":", "")
new_params[param] = value
if init_params is not None:
for param, value in init_params.items():
# These next two lines are different than in the base class -
# they allow removing the categorical feature indicator array
# in order to not pass it to the no encoding
if choice not in param:
continue
param = param.replace(choice, "").replace(":", "")
new_params[param] = value
new_params["random_state"] = self.random_state
self.new_params = new_params
self.choice = self.get_components()[choice](**new_params)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
return self.choice.transform(X)
|
OHEChoice
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/depend/imphook.py
|
{
"start": 938,
"end": 9732
}
|
class ____(dict):
"""
Cache of lazily loadable hook script objects.
This cache is implemented as a `dict` subclass mapping from the fully-qualified names of all modules with at
least one hook script to lists of `ModuleHook` instances encapsulating these scripts. As a `dict` subclass,
all cached module names and hook scripts are accessible via standard dictionary operations.
Attributes
----------
module_graph : ModuleGraph
Current module graph.
_hook_module_name_prefix : str
String prefixing the names of all in-memory modules lazily loaded from cached hook scripts. See also the
`hook_module_name_prefix` parameter passed to the `ModuleHook.__init__()` method.
"""
_cache_id_next = 0
"""
0-based identifier unique to the next `ModuleHookCache` to be instantiated.
This identifier is incremented on each instantiation of a new `ModuleHookCache` to isolate in-memory modules of
lazily loaded hook scripts in that cache to the same cache-specific namespace, preventing edge-case collisions
with existing in-memory modules in other caches.
"""
def __init__(self, module_graph, hook_dirs):
"""
Cache all hook scripts in the passed directories.
**Order of caching is significant** with respect to hooks for the same module, as the values of this
dictionary are lists. Hooks for the same module will be run in the order in which they are cached. Previously
cached hooks are always preserved rather than overridden.
By default, official hooks are cached _before_ user-defined hooks. For modules with both official and
user-defined hooks, this implies that the former take priority over and hence will be loaded _before_ the
latter.
Parameters
----------
module_graph : ModuleGraph
Current module graph.
hook_dirs : list
List of the absolute or relative paths of all directories containing **hook scripts** (i.e.,
Python scripts with filenames matching `hook-{module_name}.py`, where `{module_name}` is the module
hooked by that script) to be cached.
"""
super().__init__()
# To avoid circular references and hence increased memory consumption, a weak rather than strong reference is
# stored to the passed graph. Since this graph is guaranteed to live longer than this cache,
# this is guaranteed to be safe.
self.module_graph = weakref.proxy(module_graph)
# String unique to this cache prefixing the names of all in-memory modules lazily loaded from cached hook
# scripts, privatized for safety.
self._hook_module_name_prefix = '__PyInstaller_hooks_{}_'.format(ModuleHookCache._cache_id_next)
ModuleHookCache._cache_id_next += 1
# Cache all hook scripts in the passed directories.
self._cache_hook_dirs(hook_dirs)
def _cache_hook_dirs(self, hook_dirs):
"""
Cache all hook scripts in the passed directories.
Parameters
----------
hook_dirs : list
List of the absolute or relative paths of all directories containing hook scripts to be cached.
"""
for hook_dir, default_priority in hook_dirs:
# Canonicalize this directory's path and validate its existence.
hook_dir = os.path.abspath(hook_dir)
if not os.path.isdir(hook_dir):
raise FileNotFoundError('Hook directory "{}" not found.'.format(hook_dir))
# For each hook script in this directory...
hook_filenames = glob.glob(os.path.join(hook_dir, 'hook-*.py'))
for hook_filename in hook_filenames:
# Fully-qualified name of this hook's corresponding module, constructed by removing the "hook-" prefix
# and ".py" suffix.
module_name = os.path.basename(hook_filename)[5:-3]
# Lazily loadable hook object.
module_hook = ModuleHook(
module_graph=self.module_graph,
module_name=module_name,
hook_filename=hook_filename,
hook_module_name_prefix=self._hook_module_name_prefix,
default_priority=default_priority,
)
# Add this hook to this module's list of hooks.
module_hooks = self.setdefault(module_name, [])
module_hooks.append(module_hook)
# Post-processing: we allow only one instance of hook per module. Currently, the priority order is defined
# implicitly, via order of hook directories, so the first hook in the list has the highest priority.
for module_name in self.keys():
hooks = self[module_name]
if len(hooks) == 1:
self[module_name] = hooks[0]
else:
# Order by priority value, in descending order.
sorted_hooks = sorted(hooks, key=lambda hook: hook.priority, reverse=True)
self[module_name] = sorted_hooks[0]
def remove_modules(self, *module_names):
"""
Remove the passed modules and all hook scripts cached for these modules from this cache.
Parameters
----------
module_names : list
List of all fully-qualified module names to be removed.
"""
for module_name in module_names:
# Unload this module's hook script modules from memory. Since these are top-level pure-Python modules cached
# only in the "sys.modules" dictionary, popping these modules from this dictionary suffices to garbage
# collect them.
module_hook = self.pop(module_name, None) # Remove our reference, if available.
if module_hook is not None:
sys.modules.pop(module_hook.hook_module_name, None)
def _module_collection_mode_sanitizer(value):
if isinstance(value, dict):
# Hook set a dictionary; use it as-is
return value
elif isinstance(value, str):
# Hook set a mode string; convert to a dictionary and assign the string to `None` (= the hooked module).
return {None: value}
raise ValueError(f"Invalid module collection mode setting value: {value!r}")
def _bindepend_symlink_suppression_sanitizer(value):
if isinstance(value, (list, set)):
# Hook set a list or a set; use it as-is
return set(value)
elif isinstance(value, str):
# Hook set a string; create a set with single element.
return set([value])
raise ValueError(f"Invalid value for bindepend_symlink_suppression: {value!r}")
# Dictionary mapping the names of magic attributes required by the "ModuleHook" class to 2-tuples "(default_type,
# sanitizer_func)", where:
#
# * "default_type" is the type to which that attribute will be initialized when that hook is lazily loaded.
# * "sanitizer_func" is the callable sanitizing the original value of that attribute defined by that hook into a
# safer value consumable by "ModuleHook" callers if any or "None" if the original value requires no sanitization.
#
# To avoid subtleties in the ModuleHook.__getattr__() method, this dictionary is declared as a module rather than a
# class attribute. If declared as a class attribute and then undefined (...for whatever reason), attempting to access
# this attribute from that method would produce infinite recursion.
_MAGIC_MODULE_HOOK_ATTRS = {
# Collections in which order is insignificant. This includes:
#
# * "datas", sanitized from hook-style 2-tuple lists defined by hooks into TOC-style 2-tuple sets consumable by
# "ModuleHook" callers.
# * "binaries", sanitized in the same way.
'datas': (set, format_binaries_and_datas),
'binaries': (set, format_binaries_and_datas),
'excludedimports': (set, None),
# Collections in which order is significant. This includes:
#
# * "hiddenimports", as order of importation is significant. On module importation, hook scripts are loaded and hook
# functions declared by these scripts are called. As these scripts and functions can have side effects dependent
# on module importation order, module importation itself can have side effects dependent on this order!
'hiddenimports': (list, None),
# Flags
'warn_on_missing_hiddenimports': (lambda: True, bool),
# Package/module collection mode dictionary.
'module_collection_mode': (dict, _module_collection_mode_sanitizer),
# Path patterns for suppression of symbolic links created by binary dependency analysis.
'bindepend_symlink_suppression': (set, _bindepend_symlink_suppression_sanitizer),
}
|
ModuleHookCache
|
python
|
tornadoweb__tornado
|
tornado/test/websocket_test.py
|
{
"start": 26886,
"end": 27539
}
|
class ____(WebSocketBaseTestCase):
def get_app(self):
class PingHandler(TestWebSocketHandler):
def on_pong(self, data):
self.write_message("got pong")
return Application(
[("/", PingHandler)],
websocket_ping_interval=0.01,
websocket_ping_timeout=0,
)
@gen_test
def test_server_ping(self):
ws = yield self.ws_connect("/")
for i in range(3):
response = yield ws.read_message()
self.assertEqual(response, "got pong")
# TODO: test that the connection gets closed if ping responses stop.
|
ServerPeriodicPingTest
|
python
|
doocs__leetcode
|
solution/2800-2899/2867.Count Valid Paths in a Tree/Solution.py
|
{
"start": 0,
"end": 750
}
|
class ____:
def __init__(self, n):
self.p = list(range(n))
self.size = [1] * n
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, a, b):
pa, pb = self.find(a), self.find(b)
if pa == pb:
return False
if self.size[pa] > self.size[pb]:
self.p[pb] = pa
self.size[pa] += self.size[pb]
else:
self.p[pa] = pb
self.size[pb] += self.size[pa]
return True
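# Sieve of Eratosthenes: precompute primality for every value up to mx.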
mx = 10**5 + 10
prime = [True] * (mx + 1)
prime[0] = prime[1] = False
for i in range(2, mx + 1):
if prime[i]:
for j in range(i * i, mx + 1, i):
prime[j] = False
|
UnionFind
|
python
|
scipy__scipy
|
benchmarks/benchmarks/stats.py
|
{
"start": 28817,
"end": 29185
}
|
class ____(Benchmark):
param_names = ["size", "d"]
params = [
[10_000, 100_000, 1_000_000],
[1, 100]
]
def setup(self, size, d):
self.rng = np.random.default_rng(2475928)
n = size // d
self.x = self.rng.uniform(size=(d, n))
def time_quantile(self, size, d):
stats.quantile(self.x, 0.5, axis=1)
|
Quantile
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0085_subscribe_old_webhooks_to_events.py
|
{
"start": 698,
"end": 918
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0084_create_webhook_events"),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migration
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_extendedattr.py
|
{
"start": 17131,
"end": 18598
}
|
class ____(_ExtBase, fixtures.ORMTest):
def test_standard(self):
class A:
pass
register_class(A)
eq_(type(manager_of_class(A)), instrumentation.ClassManager)
def test_nativeext_interfaceexact(self):
class A:
__sa_instrumentation_manager__ = (
instrumentation.InstrumentationManager
)
register_class(A)
ne_(
type(attributes.opt_manager_of_class(A)),
instrumentation.ClassManager,
)
def test_nativeext_submanager(self):
class Mine(instrumentation.ClassManager):
pass
class A:
__sa_instrumentation_manager__ = Mine
register_class(A)
eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_greedy(self):
class Mine(instrumentation.ClassManager):
pass
class A:
pass
def find(cls):
return Mine
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_pass(self):
class A:
pass
def find(cls):
return None
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
eq_(type(manager_of_class(A)), instrumentation.ClassManager)
|
FinderTest
|
python
|
PrefectHQ__prefect
|
src/prefect/server/database/orm_models.py
|
{
"start": 52805,
"end": 53202
}
|
class ____(BaseORMConfiguration):
"""SQLite specific orm configuration"""
@property
def versions_dir(self) -> Path:
"""Directory containing migrations"""
import prefect.server.database
return (
Path(prefect.server.database.__file__).parent
/ "_migrations"
/ "versions"
/ "sqlite"
)
|
AioSqliteORMConfiguration
|
python
|
PyCQA__pylint
|
tests/functional/r/regression/regression_4680.py
|
{
"start": 223,
"end": 311
}
|
class ____(metaclass=foo.sob.Metaclass):
pass
assert foo.sub.value is None
|
FailedThree
|
python
|
docker__docker-py
|
tests/integration/api_client_test.py
|
{
"start": 912,
"end": 1569
}
|
class ____(unittest.TestCase):
def setUp(self):
self.timeout = 0.5
self.client = docker.api.APIClient(
version=docker.constants.MINIMUM_DOCKER_API_VERSION,
base_url='http://192.168.10.2:4243',
timeout=self.timeout
)
def test_timeout(self):
start = time.time()
res = None
# This call isn't supposed to complete, and it should fail fast.
try:
res = self.client.inspect_container('id')
except Exception:
pass
end = time.time()
assert res is None
assert end - start < 2 * self.timeout
|
ConnectionTimeoutTest
|
python
|
coleifer__peewee
|
peewee.py
|
{
"start": 185872,
"end": 187159
}
|
class ____(Field):
_unresolved = set()
def __init__(self, rel_model_name, **kwargs):
self.field_kwargs = kwargs
self.rel_model_name = rel_model_name.lower()
DeferredForeignKey._unresolved.add(self)
super(DeferredForeignKey, self).__init__(
column_name=kwargs.get('column_name'),
null=kwargs.get('null'),
primary_key=kwargs.get('primary_key'))
__hash__ = object.__hash__
def __deepcopy__(self, memo=None):
return DeferredForeignKey(self.rel_model_name, **self.field_kwargs)
def set_model(self, rel_model):
field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs)
if field.primary_key:
# NOTE: this calls add_field() under-the-hood.
self.model._meta.set_primary_key(self.name, field)
else:
self.model._meta.add_field(self.name, field)
@staticmethod
def resolve(model_cls):
unresolved = sorted(DeferredForeignKey._unresolved,
key=operator.attrgetter('_order'))
for dr in unresolved:
if dr.rel_model_name == model_cls.__name__.lower():
dr.set_model(model_cls)
DeferredForeignKey._unresolved.discard(dr)
|
DeferredForeignKey
|
python
|
numba__numba
|
numba/tests/test_array_exprs.py
|
{
"start": 19182,
"end": 20934
}
|
class ____(MemoryLeakMixin, unittest.TestCase):
# same as above, but the Optional resolves to None and TypeError's
def test_optional_scalar_type_exception_on_none(self):
self.disable_leak_check()
@njit
def arr_expr(x, y):
return x + y
@njit
def do_call(x, y):
if y > 0:
z = None
else:
z = y
return arr_expr(x, z)
args = (np.arange(5), 1.0)
# check result
with self.assertRaises(TypeError) as raises:
do_call(*args)
self.assertIn("expected float64, got None", str(raises.exception))
# check type
s = arr_expr.signatures
oty = s[0][1]
self.assertTrue(isinstance(oty, types.Optional))
self.assertTrue(isinstance(oty.type, types.Float))
def test_optional_array_type_exception_on_none(self):
self.disable_leak_check()
@njit
def arr_expr(x, y):
return x + y
@njit
def do_call(x, y):
if y[0] > 0:
z = None
else:
z = y
return arr_expr(x, z)
args = (np.arange(5), np.arange(1., 5.))
# check result
with self.assertRaises(TypeError) as raises:
do_call(*args)
excstr = str(raises.exception)
self.assertIn("expected array(float64,", excstr)
self.assertIn("got None", excstr)
# check type
s = arr_expr.signatures
oty = s[0][1]
self.assertTrue(isinstance(oty, types.Optional))
self.assertTrue(isinstance(oty.type, types.Array))
self.assertTrue(isinstance(oty.type.dtype, types.Float))
|
TestOptionalsExceptions
|
python
|
huggingface__transformers
|
src/transformers/models/superglue/modeling_superglue.py
|
{
"start": 20137,
"end": 32583
}
|
class ____(SuperGluePreTrainedModel):
"""SuperGlue feature matching middle-end
Given two sets of keypoints and locations, we determine the
correspondences by:
1. Keypoint Encoding (normalization + visual feature and location fusion)
2. Graph Neural Network with multiple self and cross-attention layers
3. Final projection layer
4. Optimal Transport Layer (a differentiable Hungarian matching algorithm)
5. Thresholding matrix based on mutual exclusivity and a match_threshold
The correspondence ids use -1 to indicate non-matching points.
Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew
Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural
Networks. In CVPR, 2020. https://huggingface.co/papers/1911.11763
"""
def __init__(self, config: SuperGlueConfig) -> None:
super().__init__(config)
self.keypoint_detector = AutoModelForKeypointDetection.from_config(config.keypoint_detector_config)
self.keypoint_encoder = SuperGlueKeypointEncoder(config)
self.gnn = SuperGlueAttentionalGNN(config)
self.final_projection = SuperGlueFinalProjection(config)
bin_score = torch.nn.Parameter(torch.tensor(1.0))
self.register_parameter("bin_score", bin_score)
self.post_init()
def _match_image_pair(
self,
keypoints: torch.Tensor,
descriptors: torch.Tensor,
scores: torch.Tensor,
height: int,
width: int,
mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> tuple[torch.Tensor, torch.Tensor, tuple, tuple]:
"""
Perform keypoint matching between two images.
Args:
keypoints (`torch.Tensor` of shape `(batch_size, 2, num_keypoints, 2)`):
Keypoints detected in the pair of image.
descriptors (`torch.Tensor` of shape `(batch_size, 2, descriptor_dim, num_keypoints)`):
Descriptors of the keypoints detected in the image pair.
scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Confidence scores of the keypoints detected in the image pair.
height (`int`): Image height.
width (`int`): Image width.
mask (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`, *optional*):
Mask indicating which values in the keypoints, matches and matching_scores tensors are keypoint matching
information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors. Default to `config.output_attentions`.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. Default to `config.output_hidden_states`.
Returns:
matches (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
For each image pair, for each keypoint in image0, the index of the keypoint in image1 that was matched
with. And for each keypoint in image1, the index of the keypoint in image0 that was matched with.
matching_scores (`torch.Tensor` of shape `(batch_size, 2, num_keypoints)`):
Scores of predicted matches for each image pair
all_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(1, 2, num_keypoints,
num_channels)`.
all_attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(1, 2, num_heads, num_keypoints,
num_keypoints)`.
"""
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if keypoints.shape[2] == 0: # no keypoints
shape = keypoints.shape[:-1]
return (
keypoints.new_full(shape, -1, dtype=torch.int),
keypoints.new_zeros(shape),
all_hidden_states,
all_attentions,
)
batch_size, _, num_keypoints, _ = keypoints.shape
# (batch_size, 2, num_keypoints, 2) -> (batch_size * 2, num_keypoints, 2)
keypoints = keypoints.reshape(batch_size * 2, num_keypoints, 2)
descriptors = descriptors.reshape(batch_size * 2, num_keypoints, self.config.hidden_size)
scores = scores.reshape(batch_size * 2, num_keypoints)
mask = mask.reshape(batch_size * 2, num_keypoints) if mask is not None else None
# Keypoint normalization
keypoints = normalize_keypoints(keypoints, height, width)
encoded_keypoints = self.keypoint_encoder(keypoints, scores, output_hidden_states=output_hidden_states)
last_hidden_state = encoded_keypoints[0]
# Keypoint MLP encoder.
descriptors = descriptors + last_hidden_state
if mask is not None:
input_shape = descriptors.size()
extended_attention_mask = self.get_extended_attention_mask(mask, input_shape)
else:
extended_attention_mask = torch.ones((batch_size, num_keypoints), device=keypoints.device)
# Multi-layer Transformer network.
gnn_outputs = self.gnn(
descriptors,
mask=extended_attention_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
)
descriptors = gnn_outputs[0]
# Final MLP projection.
projected_descriptors = self.final_projection(descriptors)
# (batch_size * 2, num_keypoints, descriptor_dim) -> (batch_size, 2, num_keypoints, descriptor_dim)
final_descriptors = projected_descriptors.reshape(batch_size, 2, num_keypoints, self.config.hidden_size)
final_descriptors0 = final_descriptors[:, 0]
final_descriptors1 = final_descriptors[:, 1]
# Compute matching descriptor distance.
scores = final_descriptors0 @ final_descriptors1.transpose(1, 2)
scores = scores / self.config.hidden_size**0.5
if mask is not None:
mask = mask.reshape(batch_size, 2, num_keypoints)
mask0 = mask[:, 0].unsqueeze(2)
mask1 = mask[:, 1].unsqueeze(1)
mask = torch.logical_and(mask0, mask1)
scores = scores.masked_fill(mask == 0, torch.finfo(scores.dtype).min)
# Run the optimal transport.
scores = log_optimal_transport(scores, self.bin_score, iterations=self.config.sinkhorn_iterations)
# Get the matches with score above "match_threshold".
max0 = scores[:, :-1, :-1].max(2)
max1 = scores[:, :-1, :-1].max(1)
indices0 = max0.indices
indices1 = max1.indices
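# Mutual check: keep a match only if keypoint i's best match j also has i as its best match.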
mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0)
mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1)
zero = scores.new_tensor(0)
matching_scores0 = torch.where(mutual0, max0.values.exp(), zero)
matching_scores0 = torch.where(matching_scores0 > self.config.matching_threshold, matching_scores0, zero)
matching_scores1 = torch.where(mutual1, matching_scores0.gather(1, indices1), zero)
valid0 = mutual0 & (matching_scores0 > zero)
valid1 = mutual1 & valid0.gather(1, indices1)
matches0 = torch.where(valid0, indices0, indices0.new_tensor(-1))
matches1 = torch.where(valid1, indices1, indices1.new_tensor(-1))
matches = torch.cat([matches0, matches1], dim=1).reshape(batch_size, 2, -1)
matching_scores = torch.cat([matching_scores0, matching_scores1], dim=1).reshape(batch_size, 2, -1)
if output_hidden_states:
all_hidden_states = all_hidden_states + encoded_keypoints[1]
all_hidden_states = all_hidden_states + gnn_outputs[1]
all_hidden_states = all_hidden_states + (projected_descriptors,)
all_hidden_states = tuple(
x.reshape(batch_size, 2, num_keypoints, -1).transpose(-1, -2) for x in all_hidden_states
)
if output_attentions:
all_attentions = all_attentions + gnn_outputs[2]
all_attentions = tuple(x.reshape(batch_size, 2, -1, num_keypoints, num_keypoints) for x in all_attentions)
return (
matches,
matching_scores,
all_hidden_states,
all_attentions,
)
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SuperGlueKeypointMatchingOutput]:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoModel
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_78916675_4568141288.jpg?raw=true"
>>> image1 = Image.open(requests.get(url, stream=True).raw)
>>> url = "https://github.com/magicleap/SuperGluePretrainedNetwork/blob/master/assets/phototourism_sample_images/london_bridge_19481797_2295892421.jpg?raw=true"
>>> image2 = Image.open(requests.get(url, stream=True).raw)
>>> images = [image1, image2]
>>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor")
>>> model = AutoModel.from_pretrained("magic-leap-community/superglue_outdoor")
>>> with torch.no_grad():
        ...     inputs = processor(images, return_tensors="pt")
        ...     outputs = model(**inputs)
```"""
loss = None
if labels is not None:
raise ValueError("SuperGlue is not trainable, no labels should be provided.")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values.ndim != 5 or pixel_values.size(1) != 2:
raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)")
batch_size, _, channels, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * 2, channels, height, width)
keypoint_detections = self.keypoint_detector(pixel_values)
keypoints, scores, descriptors, mask = keypoint_detections[:4]
keypoints = keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values)
scores = scores.reshape(batch_size, 2, -1).to(pixel_values)
descriptors = descriptors.reshape(batch_size, 2, -1, self.config.hidden_size).to(pixel_values)
mask = mask.reshape(batch_size, 2, -1)
absolute_keypoints = keypoints.clone()
absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width
absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height
matches, matching_scores, hidden_states, attentions = self._match_image_pair(
absolute_keypoints,
descriptors,
scores,
height,
width,
mask=mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if not return_dict:
return tuple(
v
for v in [loss, matches, matching_scores, keypoints, mask, hidden_states, attentions]
if v is not None
)
return SuperGlueKeypointMatchingOutput(
loss=loss,
matches=matches,
matching_scores=matching_scores,
keypoints=keypoints,
mask=mask,
hidden_states=hidden_states,
attentions=attentions,
)
__all__ = ["SuperGluePreTrainedModel", "SuperGlueForKeypointMatching"]
|
SuperGlueForKeypointMatching
|
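The matching block in the sample above (mutual nearest neighbours filtered by `matching_threshold`) is compact and easy to misread. Below is a minimal, self-contained sketch of just that step, assuming a plain similarity matrix rather than the full SuperGlue pipeline; the function name, threshold value, and tensor shapes are illustrative only and not part of the original code.

import torch

def mutual_nearest_neighbors(scores: torch.Tensor, threshold: float = 0.2):
    # scores: (batch, num_kpts0, num_kpts1), e.g. exp() of log-optimal-transport scores.
    max0 = scores.max(2)  # best candidate in image 1 for every keypoint of image 0
    max1 = scores.max(1)  # best candidate in image 0 for every keypoint of image 1
    indices0, indices1 = max0.indices, max1.indices
    # Keep a pair (i, j) only when i -> j and j -> i agree (the mutual check).
    arange0 = torch.arange(scores.shape[1], device=scores.device)[None]
    mutual0 = arange0 == indices1.gather(1, indices0)
    zero = scores.new_tensor(0)
    matching_scores0 = torch.where(mutual0, max0.values, zero)
    matching_scores0 = torch.where(matching_scores0 > threshold, matching_scores0, zero)
    valid0 = mutual0 & (matching_scores0 > zero)
    matches0 = torch.where(valid0, indices0, indices0.new_tensor(-1))  # -1 marks "no match"
    return matches0, matching_scores0

scores = torch.softmax(torch.randn(1, 5, 5), dim=2)
matches0, matching_scores0 = mutual_nearest_neighbors(scores)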
python
|
plotly__plotly.py
|
_plotly_utils/basevalidators.py
|
{
"start": 19758,
"end": 20531
}
|
class ____(BaseValidator):
"""
"boolean": {
"description": "A boolean (true/false) value.",
"requiredOpts": [],
"otherOpts": [
"dflt"
]
},
"""
def __init__(self, plotly_name, parent_name, **kwargs):
super(BooleanValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
def description(self):
return """\
The '{plotly_name}' property must be specified as a bool
(either True, or False)""".format(plotly_name=self.plotly_name)
def validate_coerce(self, v):
if is_none_or_typed_array_spec(v):
pass
elif not isinstance(v, bool):
self.raise_invalid_val(v)
return v
|
BooleanValidator
|
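A short usage sketch for the validator above; the plotly_name/parent_name values are invented for illustration, and the expected exception type assumes BaseValidator.raise_invalid_val raises ValueError, as it does in recent plotly releases.

v = BooleanValidator(plotly_name="visible", parent_name="scatter")
assert v.validate_coerce(True) is True   # bools pass through unchanged
assert v.validate_coerce(None) is None   # None is treated as "unset" and passed through
try:
    v.validate_coerce("yes")             # any non-bool value is rejected
except ValueError:
    pass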
python
|
walkccc__LeetCode
|
solutions/61. Rotate List/61.py
|
{
"start": 0,
"end": 414
}
|
class ____:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
if not head or not head.next or k == 0:
return head
tail = head
length = 1
while tail.next:
tail = tail.next
length += 1
tail.next = head # Circle the list.
t = length - k % length
for _ in range(t):
tail = tail.next
newHead = tail.next
tail.next = None
return newHead
|
Solution
|
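A quick worked example of the rotation above. LeetCode normally supplies ListNode, so a minimal definition is included here only to make the snippet self-contained; the Solution class name is taken from the sample above.

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

# Build 1 -> 2 -> 3 -> 4 -> 5 and rotate right by k = 2.
head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
node = Solution().rotateRight(head, 2)
values = []
while node:
    values.append(node.val)
    node = node.next
print(values)  # [4, 5, 1, 2, 3]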
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_non_resource_policy_rule.py
|
{
"start": 383,
"end": 5783
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'non_resource_ur_ls': 'list[str]',
'verbs': 'list[str]'
}
attribute_map = {
'non_resource_ur_ls': 'nonResourceURLs',
'verbs': 'verbs'
}
def __init__(self, non_resource_ur_ls=None, verbs=None, local_vars_configuration=None): # noqa: E501
"""V1NonResourcePolicyRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._non_resource_ur_ls = None
self._verbs = None
self.discriminator = None
self.non_resource_ur_ls = non_resource_ur_ls
self.verbs = verbs
@property
def non_resource_ur_ls(self):
"""Gets the non_resource_ur_ls of this V1NonResourcePolicyRule. # noqa: E501
`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: - \"/healthz\" is legal - \"/hea*\" is illegal - \"/hea\" is legal but matches nothing - \"/hea/*\" also matches nothing - \"/healthz/*\" matches all per-component health checks. \"*\" matches all non-resource urls. if it is present, it must be the only entry. Required. # noqa: E501
:return: The non_resource_ur_ls of this V1NonResourcePolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._non_resource_ur_ls
@non_resource_ur_ls.setter
def non_resource_ur_ls(self, non_resource_ur_ls):
"""Sets the non_resource_ur_ls of this V1NonResourcePolicyRule.
`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example: - \"/healthz\" is legal - \"/hea*\" is illegal - \"/hea\" is legal but matches nothing - \"/hea/*\" also matches nothing - \"/healthz/*\" matches all per-component health checks. \"*\" matches all non-resource urls. if it is present, it must be the only entry. Required. # noqa: E501
:param non_resource_ur_ls: The non_resource_ur_ls of this V1NonResourcePolicyRule. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and non_resource_ur_ls is None: # noqa: E501
raise ValueError("Invalid value for `non_resource_ur_ls`, must not be `None`") # noqa: E501
self._non_resource_ur_ls = non_resource_ur_ls
@property
def verbs(self):
"""Gets the verbs of this V1NonResourcePolicyRule. # noqa: E501
`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required. # noqa: E501
:return: The verbs of this V1NonResourcePolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""Sets the verbs of this V1NonResourcePolicyRule.
`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required. # noqa: E501
:param verbs: The verbs of this V1NonResourcePolicyRule. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
self._verbs = verbs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NonResourcePolicyRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NonResourcePolicyRule):
return True
return self.to_dict() != other.to_dict()
|
V1NonResourcePolicyRule
|
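A small construction sketch for the model above, assuming the kubernetes Python client is installed; the URL and verb values are illustrative and follow the constraints spelled out in the property docstrings (non-empty lists, with "*" allowed only as the sole entry).

from kubernetes.client import V1NonResourcePolicyRule

rule = V1NonResourcePolicyRule(
    non_resource_ur_ls=["/healthz", "/healthz/*"],  # URL prefixes; must not be empty
    verbs=["get"],                                  # "*" would have to be the only entry
)
print(rule.to_dict())
# {'non_resource_ur_ls': ['/healthz', '/healthz/*'], 'verbs': ['get']}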