| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6–201) | class_span (dict) | source (stringlengths 21–2.38M) | target (stringlengths 1–96) |
|---|---|---|---|---|---|
python
|
sympy__sympy
|
sympy/stats/symbolic_probability.py
|
{
"start": 15062,
"end": 19536
}
|
class ____(Expr):
"""
Symbolic expression for the covariance.
Examples
========
>>> from sympy.stats import Covariance
>>> from sympy.stats import Normal
>>> X = Normal("X", 3, 2)
>>> Y = Normal("Y", 0, 1)
>>> Z = Normal("Z", 0, 1)
>>> W = Normal("W", 0, 1)
>>> cexpr = Covariance(X, Y)
>>> cexpr
Covariance(X, Y)
Evaluate the covariance; since `X` and `Y` are independent,
the result is zero:
>>> cexpr.evaluate_integral()
0
Rewrite the covariance expression in terms of expectations:
>>> from sympy.stats import Expectation
>>> cexpr.rewrite(Expectation)
Expectation(X*Y) - Expectation(X)*Expectation(Y)
In order to expand the argument, use ``expand()``:
>>> from sympy.abc import a, b, c, d
>>> Covariance(a*X + b*Y, c*Z + d*W)
Covariance(a*X + b*Y, c*Z + d*W)
>>> Covariance(a*X + b*Y, c*Z + d*W).expand()
a*c*Covariance(X, Z) + a*d*Covariance(W, X) + b*c*Covariance(Y, Z) + b*d*Covariance(W, Y)
This class is aware of some properties of the covariance:
>>> Covariance(X, X).expand()
Variance(X)
>>> Covariance(a*X, b*Y).expand()
a*b*Covariance(X, Y)
"""
def __new__(cls, arg1, arg2, condition=None, **kwargs):
arg1 = _sympify(arg1)
arg2 = _sympify(arg2)
if arg1.is_Matrix or arg2.is_Matrix:
from sympy.stats.symbolic_multivariate_probability import CrossCovarianceMatrix
return CrossCovarianceMatrix(arg1, arg2, condition)
if kwargs.pop('evaluate', global_parameters.evaluate):
arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
if condition is None:
obj = Expr.__new__(cls, arg1, arg2)
else:
condition = _sympify(condition)
obj = Expr.__new__(cls, arg1, arg2, condition)
obj._condition = condition
return obj
def _eval_is_commutative(self):
return self.args[0].is_commutative
def expand(self, **hints):
arg1 = self.args[0]
arg2 = self.args[1]
condition = self._condition
if arg1 == arg2:
return Variance(arg1, condition).expand()
if not is_random(arg1):
return S.Zero
if not is_random(arg2):
return S.Zero
arg1, arg2 = sorted([arg1, arg2], key=default_sort_key)
if isinstance(arg1, RandomSymbol) and isinstance(arg2, RandomSymbol):
return Covariance(arg1, arg2, condition)
coeff_rv_list1 = self._expand_single_argument(arg1.expand())
coeff_rv_list2 = self._expand_single_argument(arg2.expand())
addends = [a*b*Covariance(*sorted([r1, r2], key=default_sort_key), condition=condition)
for (a, r1) in coeff_rv_list1 for (b, r2) in coeff_rv_list2]
return Add.fromiter(addends)
@classmethod
def _expand_single_argument(cls, expr):
# return (coefficient, random_symbol) pairs:
if isinstance(expr, RandomSymbol):
return [(S.One, expr)]
elif isinstance(expr, Add):
outval = []
for a in expr.args:
if isinstance(a, Mul):
outval.append(cls._get_mul_nonrv_rv_tuple(a))
elif is_random(a):
outval.append((S.One, a))
return outval
elif isinstance(expr, Mul):
return [cls._get_mul_nonrv_rv_tuple(expr)]
elif is_random(expr):
return [(S.One, expr)]
@classmethod
def _get_mul_nonrv_rv_tuple(cls, m):
rv = []
nonrv = []
for a in m.args:
if is_random(a):
rv.append(a)
else:
nonrv.append(a)
return (Mul.fromiter(nonrv), Mul.fromiter(rv))
def _eval_rewrite_as_Expectation(self, arg1, arg2, condition=None, **kwargs):
e1 = Expectation(arg1*arg2, condition)
e2 = Expectation(arg1, condition)*Expectation(arg2, condition)
return e1 - e2
def _eval_rewrite_as_Probability(self, arg1, arg2, condition=None, **kwargs):
return self.rewrite(Expectation).rewrite(Probability)
def _eval_rewrite_as_Integral(self, arg1, arg2, condition=None, **kwargs):
return covariance(self.args[0], self.args[1], self._condition, evaluate=False)
_eval_rewrite_as_Sum = _eval_rewrite_as_Integral
def evaluate_integral(self):
return self.rewrite(Integral).doit()
|
Covariance
|
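The identities in the Covariance docstring above can be checked directly; a minimal sketch, assuming a recent SymPy:

from sympy.abc import a, b
from sympy.stats import Covariance, Expectation, Normal

X = Normal("X", 3, 2)
Y = Normal("Y", 0, 1)

print(Covariance(X, X).expand())              # Variance(X)
print(Covariance(a*X, b*Y).expand())          # a*b*Covariance(X, Y)
print(Covariance(X, Y).rewrite(Expectation))  # Expectation(X*Y) - Expectation(X)*Expectation(Y)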
python
|
facelessuser__soupsieve
|
tests/test_level4/test_attribute.py
|
{
"start": 96,
"end": 2480
}
|
class ____(util.TestCase):
"""Test attribute selectors."""
MARKUP = """
<div>
<p type="TEST" id="0" class="somewordshere">Some text <span id="1"> in a paragraph</span>.</p>
<a type="test" id="2" href="http://google.com">Link</a>
<span id="3" class="herewords">Direct child</span>
<pre id="pre" class="wordshere">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
def test_attribute_forced_case_insensitive(self):
"""Test attribute value case insensitivity."""
self.assert_selector(
self.MARKUP,
"[class*=WORDS]",
[],
flags=util.HTML
)
self.assert_selector(
self.MARKUP,
"[class*=WORDS i]",
["0", "3", "pre"],
flags=util.HTML
)
def test_attribute_case_insensitive_flag_spacing(self):
"""Test attribute value case insensitivity spacing."""
self.assert_selector(
self.MARKUP,
"[class*=WORDSi]",
[],
flags=util.HTML
)
self.assert_selector(
self.MARKUP,
"[class*='WORDS'i]",
["0", "3", "pre"],
flags=util.HTML
)
def test_attribute_forced_case_insensitive_xml(self):
"""Test that attribute value case insensitivity can be forced in XML."""
self.assert_selector(
self.MARKUP,
'[type="test" i]',
['0', '2'],
flags=util.XML
)
def test_attribute_forced_case_insensitive_xhtml(self):
"""Test that attribute value case insensitivity can be forced in XHTML."""
self.assert_selector(
self.wrap_xhtml(self.MARKUP),
'[type="test" i]',
['0', '2'],
flags=util.XML
)
def test_attribute_forced_case_needs_value(self):
"""Test attribute value case insensitivity requires a value."""
self.assert_raises('[id i]', SelectorSyntaxError)
def test_attribute_type_case_sensitive(self):
"""Type is treated as case insensitive in HTML, so test that we can force the opposite."""
self.assert_selector(
self.MARKUP,
'[type="test" s]',
['2'],
flags=util.HTML
)
|
TestAttribute
|
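The forced case-insensitivity flag exercised above can be reproduced outside the test harness; a minimal sketch, assuming `beautifulsoup4` and `soupsieve` are installed:

import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup('<p class="somewordshere">text</p>', 'html.parser')
print(sv.select('[class*=WORDS]', soup))    # [] -- attribute values match case-sensitively by default
print(sv.select('[class*=WORDS i]', soup))  # [<p ...>] -- the 'i' flag forces a case-insensitive match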
python
|
ray-project__ray
|
release/ray_release/tests/test_buildkite.py
|
{
"start": 1238,
"end": 1649
}
|
class ____:
return_dict = {}
def __getattribute__(self, item):
return_dict = object.__getattribute__(self, "return_dict")
if item in return_dict:
mocked = return_dict[item]
if isinstance(mocked, Callable):
return mocked()
else:
return lambda *a, **kw: mocked
return object.__getattribute__(self, item)
|
MockReturn
|
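In the MockReturn pattern above, entries in `return_dict` shadow attribute access: callables are invoked on lookup, while plain values come back wrapped in a lambda. A hypothetical usage sketch (the class itself needs `from typing import Callable` in scope):

mock = MockReturn()
mock.return_dict["cluster_id"] = "abc-123"          # hypothetical keys, for illustration only
mock.return_dict["timestamp"] = lambda: 1700000000

print(mock.timestamp)      # 1700000000 -- the callable is invoked on attribute access
print(mock.cluster_id())   # 'abc-123'  -- plain values are returned as a lambda to call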
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py
|
{
"start": 598,
"end": 800
}
|
class ____(Generic[T, SupportsRichComparisonT]):
var: T
compare: SupportsRichComparisonT
# typing.AnyStr is a common external type variable, so treat it specially as a
# known TypeVar
|
ExternalType
|
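UP046 flags generic classes declared via `Generic[...]`; under PEP 695 (Python 3.12+) the fixture above could be written with inline type parameters instead. A sketch of the post-fix shape (in a real fix, any bound on `SupportsRichComparisonT` would be carried into the bracket syntax):

class ExternalType[T, SupportsRichComparisonT]:
    var: T
    compare: SupportsRichComparisonT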
python
|
run-llama__llama_index
|
llama-index-core/tests/agent/workflow/test_code_act_agent.py
|
{
"start": 684,
"end": 5594
}
|
class ____:
events: list[Event]
context: Context
def mock_context(workflow: Workflow) -> MockContext:
ctx = Context(workflow)
events = []
def write_event_to_stream(event):
events.append(event)
ctx.write_event_to_stream = write_event_to_stream
return MockContext(events=events, context=ctx)
@pytest.fixture()
def mock_llm():
# Create a mock that inherits from FunctionCallingLLM
class MockFunctionCallingLLM(FunctionCallingLLM):
get_tool_calls_from_response: Any = MagicMock(return_value=[])
def __init__(self) -> None:
super().__init__()
self._responses = []
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
is_function_calling_model=True,
)
async def astream_chat(self, *args, **kwargs):
# Return an async generator that yields each response
async def gen():
for response in self._responses:
yield response
return gen()
async def achat(self, *args, **kwargs):
pass
def chat(self, *args, **kwargs):
pass
def stream_chat(self, *args, **kwargs):
pass
def complete(self, *args, **kwargs):
pass
async def acomplete(self, *args, **kwargs):
pass
def stream_complete(self, *args, **kwargs):
pass
async def astream_complete(self, *args, **kwargs):
pass
def _prepare_chat_with_tools(self, *args, **kwargs):
return {}
return MockFunctionCallingLLM()
@pytest.fixture()
def mock_code_execute_fn():
return lambda code: "Code executed"
@pytest.fixture()
def mock_memory():
memory = AsyncMock(spec=BaseMemory)
memory.aput = AsyncMock()
return memory
@pytest.mark.asyncio
async def test_code_act_agent_basic_execution(
mock_llm, mock_code_execute_fn, mock_memory
):
# Setup mock response
mock_response = ChatResponse(
message=ChatMessage(
role="assistant",
content="Let me calculate that for you.\n<execute>\nprint('Hello World')\n</execute>",
),
delta="Let me calculate that for you.\n<execute>\nprint('Hello World')\n</execute>",
)
mock_llm._responses = [mock_response] # Set the responses to be yielded
# Create agent
agent = CodeActAgent(
code_execute_fn=mock_code_execute_fn,
llm=mock_llm,
)
# Create context
mock_ctx = mock_context(agent)
# Take step
output = await agent.take_step(
ctx=mock_ctx.context,
llm_input=[ChatMessage(role="user", content="Say hello")],
tools=[],
memory=mock_memory,
)
# Verify output
assert isinstance(output, AgentOutput)
assert len(output.tool_calls) == 1
assert output.tool_calls[0].tool_name == "execute"
assert "print('Hello World')" in output.tool_calls[0].tool_kwargs["code"]
@pytest.mark.asyncio
async def test_code_act_agent_tool_handling(
mock_llm, mock_code_execute_fn, mock_memory
):
# Setup mock response
mock_response = ChatResponse(
message=ChatMessage(
role="assistant",
content="Let me calculate that for you.\n<execute>\nresult = 2 + 2\nprint(result)\n</execute>",
),
delta="Let me calculate that for you.\n<execute>\nresult = 2 + 2\nprint(result)\n</execute>",
)
mock_llm._responses = [mock_response] # Set the responses to be yielded
# Create agent
agent = CodeActAgent(
code_execute_fn=mock_code_execute_fn,
llm=mock_llm,
)
# Create context
mock_ctx = mock_context(agent)
# Take step
output = await agent.take_step(
ctx=mock_ctx.context,
llm_input=[ChatMessage(role="user", content="What is 2 + 2?")],
tools=[],
memory=mock_memory,
)
# Handle tool results
tool_results = [
ToolCallResult(
tool_id=output.tool_calls[0].tool_id,
tool_name="execute",
tool_kwargs={"code": "result = 2 + 2\nprint(result)\n"},
tool_output=ToolOutput(
content="4", tool_name="execute", raw_input={}, raw_output={}
),
return_direct=False,
)
]
await agent.handle_tool_call_results(mock_ctx.context, tool_results, mock_memory)
# Verify scratchpad was updated
scratchpad = await mock_ctx.context.store.get("scratchpad")
assert len(scratchpad) == 2 # User message and assistant response
assert "4" in scratchpad[1].content # Verify the result was added to scratchpad
# Finalize
final_output = await agent.finalize(mock_ctx.context, output, mock_memory)
assert isinstance(final_output, AgentOutput)
assert mock_memory.aput_messages.called # Verify memory was updated
|
MockContext
|
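The `mock_context` helper above relies on instance-level monkey-patching: replacing a bound method with a closure that records its arguments. A self-contained sketch of that capture pattern:

class Stream:
    def write_event_to_stream(self, event):
        ...  # real implementation elsewhere

events = []
stream = Stream()
stream.write_event_to_stream = events.append  # shadow the method on this one instance
stream.write_event_to_stream("started")
stream.write_event_to_stream("finished")
assert events == ["started", "finished"]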
python
|
python-pillow__Pillow
|
Tests/test_pyarrow.py
|
{
"start": 4605,
"end": 8122
}
|
class ____(NamedTuple):
dtype: pyarrow.DataType
# Strictly speaking, elt should be a pixel or pixel component, so
# list[uint8][4], float, int, uint32, uint8, etc. But more
# correctly, it should be exactly the dtype from the line above.
elt: Any
elts_per_pixel: int
UINT_ARR = DataShape(
dtype=fl_uint8_4_type,
elt=[1, 2, 3, 4], # array of 4 uint8 per pixel
elts_per_pixel=1, # only one array per pixel
)
UINT = DataShape(
dtype=pyarrow.uint8(),
elt=3, # one uint8,
elts_per_pixel=4, # but repeated 4x per pixel
)
UINT32 = DataShape(
dtype=pyarrow.uint32(),
elt=0xABCDEF45, # one packed int that doesn't fit in an int32 (> 0x80000000)
elts_per_pixel=1, # one per pixel
)
INT32 = DataShape(
dtype=pyarrow.int32(),
elt=0x12CDEF45, # one packed int
elts_per_pixel=1, # one per pixel
)
@pytest.mark.parametrize(
"mode, data_tp, mask",
(
("L", DataShape(pyarrow.uint8(), 3, 1), None),
("I", DataShape(pyarrow.int32(), 1 << 24, 1), None),
("F", DataShape(pyarrow.float32(), 3.14159, 1), None),
("LA", UINT_ARR, [0, 3]),
("LA", UINT, [0, 3]),
("RGB", UINT_ARR, [0, 1, 2]),
("RGBA", UINT_ARR, None),
("CMYK", UINT_ARR, None),
("YCbCr", UINT_ARR, [0, 1, 2]),
("HSV", UINT_ARR, [0, 1, 2]),
("RGB", UINT, [0, 1, 2]),
("RGBA", UINT, None),
("CMYK", UINT, None),
("YCbCr", UINT, [0, 1, 2]),
("HSV", UINT, [0, 1, 2]),
),
)
def test_fromarray(mode: str, data_tp: DataShape, mask: list[int] | None) -> None:
(dtype, elt, elts_per_pixel) = data_tp
ct_pixels = TEST_IMAGE_SIZE[0] * TEST_IMAGE_SIZE[1]
arr = pyarrow.array([elt] * (ct_pixels * elts_per_pixel), type=dtype)
img = Image.fromarrow(arr, mode, TEST_IMAGE_SIZE)
_test_img_equals_pyarray(img, arr, mask, elts_per_pixel)
@pytest.mark.parametrize(
"mode, data_tp, mask",
(
("LA", UINT32, [0, 3]),
("RGB", UINT32, [0, 1, 2]),
("RGBA", UINT32, None),
("CMYK", UINT32, None),
("YCbCr", UINT32, [0, 1, 2]),
("HSV", UINT32, [0, 1, 2]),
("LA", INT32, [0, 3]),
("RGB", INT32, [0, 1, 2]),
("RGBA", INT32, None),
("CMYK", INT32, None),
("YCbCr", INT32, [0, 1, 2]),
("HSV", INT32, [0, 1, 2]),
),
)
def test_from_int32array(mode: str, data_tp: DataShape, mask: list[int] | None) -> None:
(dtype, elt, elts_per_pixel) = data_tp
ct_pixels = TEST_IMAGE_SIZE[0] * TEST_IMAGE_SIZE[1]
arr = pyarrow.array([elt] * (ct_pixels * elts_per_pixel), type=dtype)
img = Image.fromarrow(arr, mode, TEST_IMAGE_SIZE)
_test_img_equals_int32_pyarray(img, arr, mask, elts_per_pixel)
@pytest.mark.parametrize(
"mode, metadata",
(
("LA", ["L", "X", "X", "A"]),
("RGB", ["R", "G", "B", "X"]),
("RGBX", ["R", "G", "B", "X"]),
("RGBA", ["R", "G", "B", "A"]),
("CMYK", ["C", "M", "Y", "K"]),
("YCbCr", ["Y", "Cb", "Cr", "X"]),
("HSV", ["H", "S", "V", "X"]),
),
)
def test_image_metadata(mode: str, metadata: list[str]) -> None:
img = hopper(mode)
arr = pyarrow.array(img) # type: ignore[call-overload]
assert arr.type.field(0).metadata
assert arr.type.field(0).metadata[b"image"]
parsed_metadata = json.loads(arr.type.field(0).metadata[b"image"].decode("utf8"))
assert "bands" in parsed_metadata
assert parsed_metadata["bands"] == metadata
|
DataShape
|
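The shapes above feed `Image.fromarrow`, as the tests show; a minimal round-trip sketch, assuming a Pillow build with Arrow support and `pyarrow` installed (the 4x4 size is arbitrary):

import pyarrow
from PIL import Image

size = (4, 4)
n = size[0] * size[1] * 4                    # 4 uint8 elements per RGBA pixel
arr = pyarrow.array([128] * n, type=pyarrow.uint8())
img = Image.fromarrow(arr, "RGBA", size)
print(img.mode, img.size)                    # RGBA (4, 4)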
python
|
bokeh__bokeh
|
tests/unit/bokeh/embed/test_util__embed.py
|
{
"start": 17487,
"end": 20410
}
|
class ____:
def test_passing_model(self) -> None:
p1 = SomeModel()
d = Document()
d.add_root(p1)
docs_json, render_items = beu.standalone_docs_json_and_render_items([p1])
doc = next(iter(docs_json.values()))
assert doc['title'] == "Bokeh Application"
assert doc['version'] == __version__
assert len(doc['roots']) == 1
assert doc['roots'] == [ObjectRefRep(type="object", name="test_util__embed.SomeModel", id=p1.id)]
assert len(render_items) == 1
def test_passing_doc(self) -> None:
p1 = SomeModel()
d = Document()
d.add_root(p1)
docs_json, render_items = beu.standalone_docs_json_and_render_items([d])
doc = next(iter(docs_json.values()))
assert doc['title'] == "Bokeh Application"
assert doc['version'] == __version__
assert len(doc['roots']) == 1
assert doc['roots'] == [ObjectRefRep(type="object", name="test_util__embed.SomeModel", id=p1.id)]
assert len(render_items) == 1
def test_exception_for_missing_doc(self) -> None:
p1 = SomeModel()
with pytest.raises(ValueError) as e:
beu.standalone_docs_json_and_render_items([p1])
assert str(e.value) == "A Bokeh Model must be part of a Document to render as standalone content"
def test_log_warning_if_python_property_callback(self, caplog: pytest.LogCaptureFixture) -> None:
d = Document()
m1 = EmbedTestUtilModel()
c1 = _GoodPropertyCallback()
d.add_root(m1)
m1.on_change('name', c1)
assert len(m1._callbacks) != 0
with caplog.at_level(logging.WARN):
beu.standalone_docs_json_and_render_items(m1)
assert len(caplog.records) == 1
assert caplog.text != ''
def test_log_warning_if_python_event_callback(self, caplog: pytest.LogCaptureFixture) -> None:
d = Document()
m1 = EmbedTestUtilModel()
c1 = _GoodEventCallback()
d.add_root(m1)
m1.on_event(Tap, c1)
assert len(m1._event_callbacks) != 0
with caplog.at_level(logging.WARN):
beu.standalone_docs_json_and_render_items(m1)
assert len(caplog.records) == 1
assert caplog.text != ''
def test_suppress_warnings(self, caplog: pytest.LogCaptureFixture) -> None:
d = Document()
m1 = EmbedTestUtilModel()
c1 = _GoodPropertyCallback()
c2 = _GoodEventCallback()
d.add_root(m1)
m1.on_change('name', c1)
assert len(m1._callbacks) != 0
m1.on_event(Tap, c2)
assert len(m1._event_callbacks) != 0
with caplog.at_level(logging.WARN):
beu.standalone_docs_json_and_render_items(m1, suppress_callback_warning=True)
assert len(caplog.records) == 0
assert caplog.text == ''
|
Test_standalone_docs_json_and_render_items
|
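A minimal sketch of the function under test, outside the test harness (assuming bokeh is installed; `Div` stands in for any `Model`):

from bokeh.document import Document
from bokeh.embed.util import standalone_docs_json_and_render_items
from bokeh.models import Div

model = Div(text="hello")
doc = Document()
doc.add_root(model)  # a Model must belong to a Document, per the ValueError test above

docs_json, render_items = standalone_docs_json_and_render_items([model])
print(list(docs_json), len(render_items))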
python
|
ray-project__ray
|
python/ray/serve/tests/test_deploy_app_2.py
|
{
"start": 1402,
"end": 30633
}
|
class ____:
def get_deploy_config(self, model_within_logging_config: bool = False):
if model_within_logging_config:
path = "ray.serve.tests.test_config_files.logging_config_test.model2"
else:
path = "ray.serve.tests.test_config_files.logging_config_test.model"
return {
"applications": [
{
"name": "app1",
"route_prefix": "/app1",
"import_path": path,
},
],
}
@pytest.mark.parametrize("encoding_type", ["TEXT", "JSON"])
def test_deploy_app_with_application_logging_config(
self, serve_instance, encoding_type: str
):
"""Deploy application with application logging config"""
client = serve_instance
config_dict = self.get_deploy_config()
config_dict["applications"][0]["logging_config"] = {
"encoding": encoding_type,
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.post("http://localhost:8000/app1").json()
replica_id = resp["replica"].split("#")[-1]
if encoding_type == "JSON":
expected_log_regex = [f'"replica": "{replica_id}", ']
else:
expected_log_regex = [f".*{replica_id}.*"]
check_log_file(resp["log_file"], expected_log_regex)
@pytest.mark.parametrize("encoding_type", ["TEXT", "JSON"])
def test_deploy_app_with_deployment_logging_config(
self, serve_instance, encoding_type: str
):
"""Deploy application with deployment logging config inside the yaml."""
client = serve_instance
config_dict = self.get_deploy_config()
config_dict["applications"][0]["deployments"] = [
{
"name": "Model",
"logging_config": {
"encoding": encoding_type,
},
},
]
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.post("http://localhost:8000/app1").json()
replica_id = resp["replica"].split("#")[-1]
if encoding_type == "JSON":
expected_log_regex = [f'"replica": "{replica_id}", ']
else:
expected_log_regex = [f".*{replica_id}.*"]
check_log_file(resp["log_file"], expected_log_regex)
def test_deployment_logging_config_in_code(self, serve_instance):
"""Deploy application with deployment logging config inside the code"""
client = serve_instance
config_dict = self.get_deploy_config(model_within_logging_config=True)
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.post("http://localhost:8000/app1").json()
check_log_file(resp["log_file"], [".*this_is_debug_info.*"])
def test_overwritting_logging_config(self, serve_instance):
"""Overwrite the default logging config with application logging config"""
client = serve_instance
config_dict = self.get_deploy_config()
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
def get_replica_info_format(replica_id: ReplicaID) -> str:
app_name = replica_id.deployment_id.app_name
deployment_name = replica_id.deployment_id.name
return f"{app_name}_{deployment_name} {replica_id.unique_id}"
# By default, log level is "INFO"
r = httpx.post("http://localhost:8000/app1")
r.raise_for_status()
request_id = r.headers["X-Request-Id"]
replica_id = ReplicaID.from_full_id_str(r.json()["replica"])
# Make sure 'model_debug_level' log content does not exist.
with pytest.raises(AssertionError):
check_log_file(r.json()["log_file"], [".*this_is_debug_info.*"])
# Check the log formatting.
check_log_file(
r.json()["log_file"],
f" {get_replica_info_format(replica_id)} {request_id} ",
)
# Set log level to "DEBUG"
config_dict["applications"][0]["logging_config"] = {
"log_level": "DEBUG",
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
and httpx.post("http://localhost:8000/app1").json()["log_level"]
== logging.DEBUG,
)
r = httpx.post("http://localhost:8000/app1")
r.raise_for_status()
request_id = r.headers["X-Request-Id"]
replica_id = ReplicaID.from_full_id_str(r.json()["replica"])
check_log_file(
r.json()["log_file"],
[
# Check for DEBUG-level log statement.
".*this_is_debug_info.*",
# Check that the log formatting has remained the same.
f" {get_replica_info_format(replica_id)} {request_id} ",
],
)
def test_not_overwritting_logging_config_in_yaml(self, serve_instance):
"""Deployment logging config in yaml should not be overwritten
by application logging config.
"""
client = serve_instance
config_dict = self.get_deploy_config()
config_dict["applications"][0]["deployments"] = [
{
"name": "Model",
"logging_config": {
"log_level": "DEBUG",
},
},
]
config_dict["applications"][0]["logging_config"] = {
"log_level": "INFO",
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.post("http://localhost:8000/app1").json()
check_log_file(resp["log_file"], [".*this_is_debug_info.*"])
def test_not_overwritting_logging_config_in_code(self, serve_instance):
"""Deployment logging config in code should not be overwritten
by application logging config.
"""
client = serve_instance
config_dict = self.get_deploy_config(model_within_logging_config=True)
config_dict["applications"][0]["logging_config"] = {
"log_level": "INFO",
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.post("http://localhost:8000/app1").json()
check_log_file(resp["log_file"], [".*this_is_debug_info.*"])
def test_logs_dir(self, serve_instance):
client = serve_instance
config_dict = self.get_deploy_config()
config_dict["applications"][0]["logging_config"] = {
"log_level": "DEBUG",
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.get("http://127.0.0.1:8000/app1").json()
# Construct a new path
# "/tmp/ray/session_xxx/logs/serve/new_dir"
paths = resp["log_file"].split("/")
paths[-1] = "new_dir"
new_log_dir = "/".join(paths)
config_dict["applications"][0]["logging_config"] = {
"log_level": "DEBUG",
"logs_dir": new_log_dir,
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
and "new_dir" in httpx.get("http://127.0.0.1:8000/app1").json()["log_file"]
)
resp = httpx.get("http://127.0.0.1:8000/app1").json()
# log content should be redirected to new file
check_log_file(resp["log_file"], [".*this_is_debug_info.*"])
@pytest.mark.parametrize("enable_access_log", [True, False])
def test_access_log(self, serve_instance, enable_access_log: bool):
client = serve_instance
config_dict = self.get_deploy_config()
config_dict["applications"][0]["logging_config"] = {
"enable_access_log": enable_access_log,
}
config = ServeDeploySchema.parse_obj(config_dict)
client.deploy_apps(config)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").status_code == 200
)
resp = httpx.get("http://127.0.0.1:8000/app1")
assert resp.status_code == 200
resp = resp.json()
if enable_access_log:
check_log_file(resp["log_file"], [".*this_is_access_log.*"])
else:
with pytest.raises(AssertionError):
check_log_file(resp["log_file"], [".*this_is_access_log.*"])
def test_deploy_with_no_applications(serve_instance):
"""Deploy an empty list of applications, serve should just be started."""
client = serve_instance
config = ServeDeploySchema.parse_obj({"applications": []})
client.deploy_apps(config)
def serve_running():
ServeInstanceDetails.parse_obj(
ray.get(client._controller.get_serve_instance_details.remote())
)
actors = list_actors(
filters=[
("ray_namespace", "=", SERVE_NAMESPACE),
("state", "=", "ALIVE"),
]
)
actor_names = [actor["class_name"] for actor in actors]
has_proxy = any("Proxy" in name for name in actor_names)
return "ServeController" in actor_names and has_proxy
wait_for_condition(serve_running)
def test_deployments_not_listed_in_config(serve_instance):
"""Apply a config without the app's deployments listed. The deployments should
not redeploy.
"""
client = serve_instance
config = {
"applications": [{"import_path": "ray.serve.tests.test_config_files.pid.node"}]
}
client.deploy_apps(ServeDeploySchema(**config), _blocking=True)
check_running()
pid1, _ = httpx.get("http://localhost:8000/").json()
# Redeploy the same config (with no deployments listed)
client.deploy_apps(ServeDeploySchema(**config))
wait_for_condition(check_running, timeout=15)
# It should be the same replica actor
pids = []
for _ in range(4):
pids.append(httpx.get("http://localhost:8000/").json()[0])
assert all(pid == pid1 for pid in pids)
@pytest.mark.parametrize("rebuild", [True, False])
def test_redeploy_old_config_after_failed_deployment(serve_instance, rebuild):
"""
1. Deploy application which succeeds.
2. Redeploy application with an import path that fails.
3. Redeploy the exact same config from step 1.
Verify that step 3 succeeds and the application returns to running state.
"""
client = serve_instance
app_config = {
"name": "default",
"import_path": "ray.serve.tests.test_config_files.world.DagNode",
}
client.deploy_apps(ServeDeploySchema(**{"applications": [app_config]}))
def check_application_running():
status = serve.status().applications["default"]
assert status.status == "RUNNING"
assert httpx.post("http://localhost:8000/").text == "wonderful world"
return True
wait_for_condition(check_application_running)
# Change config so that redeploy will error
new_app_config = copy(app_config)
if rebuild:
# New import path will cause an error upon importing app
new_app_config[
"import_path"
] = "ray.serve.tests.test_config_files.import_error.app"
err_msg = "ZeroDivisionError"
else:
# Set config for a nonexistent deployment
new_app_config["deployments"] = [{"name": "nonexistent", "num_replicas": 1}]
err_msg = "Deployment 'nonexistent' does not exist."
client.deploy_apps(ServeDeploySchema(**{"applications": [new_app_config]}))
def check_deploy_failed(message):
status = serve.status().applications["default"]
assert status.status == "DEPLOY_FAILED"
assert message in status.message
return True
wait_for_condition(check_deploy_failed, message=err_msg)
# Redeploy old config
client.deploy_apps(ServeDeploySchema(**{"applications": [app_config]}))
wait_for_condition(check_application_running)
def test_deploy_does_not_affect_dynamic_apps(serve_instance):
"""
Deploy a set of apps via the declarative API (REST API) and then a dynamic
app via the imperative API (`serve.run`).
Check that applying a new config via the declarative API does not affect
the app deployed using the imperative API.
"""
client = serve_instance
config = ServeDeploySchema(
applications=[
ServeApplicationSchema(
name="declarative-app-1",
route_prefix="/app-1",
import_path="ray.serve.tests.test_config_files.world.DagNode",
),
],
)
client.deploy_apps(config, _blocking=True)
check_running(app_name="declarative-app-1")
url = get_application_url(app_name="declarative-app-1")
assert httpx.post(url).text == "wonderful world"
# Now `serve.run` a dynamic app.
@serve.deployment
class D:
def __call__(self, *args) -> str:
return "Hello!"
serve.run(D.bind(), name="dynamic-app", route_prefix="/dynamic")
wait_for_condition(check_running, app_name="dynamic-app")
url = get_application_url(app_name="dynamic-app")
assert httpx.post(url).text == "Hello!"
# Add a new app via declarative API.
# Existing declarative app and dynamic app should not be affected.
config.applications.append(
ServeApplicationSchema(
name="declarative-app-2",
route_prefix="/app-2",
import_path="ray.serve.tests.test_config_files.world.DagNode",
),
)
client.deploy_apps(config, _blocking=True)
check_running(app_name="declarative-app-2")
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "wonderful world"
url = get_application_url(app_name="declarative-app-1")
assert httpx.post(url).text == "wonderful world"
url = get_application_url(app_name="dynamic-app")
assert httpx.post(url).text == "Hello!"
# Delete one of the apps via declarative API.
# Other declarative app and dynamic app should not be affected.
config.applications.pop(0)
client.deploy_apps(config)
wait_for_condition(check_running, app_name="declarative-app-2")
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "wonderful world"
url = get_application_url(app_name="dynamic-app")
assert httpx.post(url).text == "Hello!"
wait_for_condition(lambda: "declarative-app-1" not in serve.status().applications)
# Now overwrite the declarative app with a dynamic app with the same name.
# On subsequent declarative apply, that app should not be affected.
serve.run(D.bind(), name="declarative-app-2", route_prefix="/app-2")
wait_for_condition(check_running, app_name="declarative-app-2")
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "Hello!"
config.applications = [
ServeApplicationSchema(
name="declarative-app-1",
route_prefix="/app-1",
import_path="ray.serve.tests.test_config_files.world.DagNode",
),
]
client.deploy_apps(config, _blocking=True)
check_running(app_name="declarative-app-1")
url = get_application_url(app_name="declarative-app-1")
assert httpx.post(url).text == "wonderful world"
wait_for_condition(check_running, app_name="dynamic-app")
url = get_application_url(app_name="dynamic-app")
assert httpx.post(url).text == "Hello!"
wait_for_condition(check_running, app_name="declarative-app-2")
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "Hello!"
# Verify that the controller does not delete the dynamic apps on recovery.
ray.kill(client._controller, no_restart=False)
wait_for_condition(check_running, app_name="declarative-app-1")
# It takes some time for the target groups to be ready after controller recovery.
# So we make sure the target groups are ready before obtaining the URL.
wait_for_condition(
check_target_groups_ready, client=client, app_name="declarative-app-1"
)
url = get_application_url(app_name="declarative-app-1")
assert httpx.post(url).text == "wonderful world"
wait_for_condition(check_running, app_name="dynamic-app")
wait_for_condition(check_target_groups_ready, client=client, app_name="dynamic-app")
url = get_application_url(app_name="dynamic-app")
assert httpx.post(url).text == "Hello!"
wait_for_condition(check_running, app_name="declarative-app-2")
wait_for_condition(
check_target_groups_ready, client=client, app_name="declarative-app-2"
)
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "Hello!"
# Now overwrite the dynamic app with a declarative one and check that it gets
# deleted upon another apply that doesn't include it.
config.applications = [
ServeApplicationSchema(
name="declarative-app-2",
route_prefix="/app-2",
import_path="ray.serve.tests.test_config_files.world.DagNode",
),
]
client.deploy_apps(config, _blocking=True)
check_running(app_name="declarative-app-2")
url = get_application_url(app_name="declarative-app-2")
assert httpx.post(url).text == "wonderful world"
config.applications = []
client.deploy_apps(config)
wait_for_condition(lambda: "declarative-app-2" not in serve.status().applications)
def test_change_route_prefix(serve_instance):
# Deploy application with route prefix /old
client = serve_instance
app_config = {
"name": "default",
"route_prefix": "/old",
"import_path": "ray.serve.tests.test_config_files.pid.node",
}
client.deploy_apps(
ServeDeploySchema(**{"applications": [app_config]}), _blocking=True
)
check_running()
url = get_application_url()
pid1 = httpx.get(url).json()[0]
# Redeploy application with route prefix /new.
app_config["route_prefix"] = "/new"
client.deploy_apps(ServeDeploySchema(**{"applications": [app_config]}))
wait_for_condition(check_running)
# Check that the old route is gone and the response from the new route
# has the same PID (replica wasn't restarted).
def check_switched():
# Old route should be gone
url = get_application_url(exclude_route_prefix=True)
resp = httpx.get(f"{url}/old")
assert "Path '/old' not found." in resp.text
# Response from new route should be same PID
url = get_application_url(exclude_route_prefix=True)
pid2 = httpx.get(f"{url}/new").json()[0]
assert pid2 == pid1
return True
wait_for_condition(check_switched)
def test_num_replicas_auto_api(serve_instance):
"""Test setting only `num_replicas="auto"`."""
client = serve_instance
config_template = {
"import_path": "ray.serve.tests.test_config_files.pid.node",
"deployments": [{"name": "f", "num_replicas": "auto"}],
}
client.deploy_apps(ServeDeploySchema.parse_obj({"applications": [config_template]}))
wait_for_condition(check_running, timeout=15)
print("Application is RUNNING.")
check_num_replicas_eq("f", 1)
app_details = client.get_serve_details()["applications"][SERVE_DEFAULT_APP_NAME]
deployment_config = app_details["deployments"]["f"]["deployment_config"]
assert "num_replicas" not in deployment_config
assert deployment_config["max_ongoing_requests"] == 5
assert deployment_config["autoscaling_config"] == {
# Set by `num_replicas="auto"`
"target_ongoing_requests": 2.0,
"min_replicas": 1,
"max_replicas": 100,
# Untouched defaults
"look_back_period_s": 30.0,
"metrics_interval_s": 10.0,
"upscale_delay_s": 30.0,
"downscale_delay_s": 600.0,
"downscale_to_zero_delay_s": None,
"upscale_smoothing_factor": None,
"downscale_smoothing_factor": None,
"upscaling_factor": None,
"downscaling_factor": None,
"smoothing_factor": 1.0,
"initial_replicas": None,
"aggregation_function": "mean",
"policy": {
"policy_function": "ray.serve.autoscaling_policy:default_autoscaling_policy"
},
}
def test_num_replicas_auto_basic(serve_instance):
"""Test `num_replicas="auto"` and the default values are used in autoscaling."""
client = serve_instance
signal = SignalActor.options(name="signal123").remote()
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_signal.app",
"deployments": [
{
"name": "A",
"num_replicas": "auto",
"autoscaling_config": {
"look_back_period_s": 2.0,
"metrics_interval_s": 1.0,
"upscale_delay_s": 1.0,
},
"graceful_shutdown_timeout_s": 1,
}
],
}
print(time.ctime(), "Deploying pid application.")
client.deploy_apps(ServeDeploySchema.parse_obj({"applications": [config_template]}))
wait_for_condition(check_running, timeout=15)
print(time.ctime(), "Application is RUNNING.")
check_num_replicas_eq("A", 1)
app_details = client.get_serve_details()["applications"][SERVE_DEFAULT_APP_NAME]
deployment_config = app_details["deployments"]["A"]["deployment_config"]
# Set by `num_replicas="auto"`
assert "num_replicas" not in deployment_config
assert deployment_config["max_ongoing_requests"] == 5
assert deployment_config["autoscaling_config"] == {
# Set by `num_replicas="auto"`
"target_ongoing_requests": 2.0,
"min_replicas": 1,
"max_replicas": 100,
# Overridden by `autoscaling_config`
"look_back_period_s": 2.0,
"metrics_interval_s": 1.0,
"upscale_delay_s": 1.0,
# Untouched defaults
"downscale_delay_s": 600.0,
"downscale_to_zero_delay_s": None,
"upscale_smoothing_factor": None,
"downscale_smoothing_factor": None,
"upscaling_factor": None,
"downscaling_factor": None,
"smoothing_factor": 1.0,
"initial_replicas": None,
"aggregation_function": "mean",
"policy": {
"policy_function": "ray.serve.autoscaling_policy:default_autoscaling_policy"
},
}
h = serve.get_app_handle(SERVE_DEFAULT_APP_NAME)
for i in range(3):
[h.remote() for _ in range(2)]
def check_num_waiters(target: int):
assert ray.get(signal.cur_num_waiters.remote()) == target
return True
wait_for_condition(check_num_waiters, target=2 * (i + 1))
print(time.time(), f"Number of waiters on signal reached {2*(i+1)}.")
wait_for_condition(check_num_replicas_eq, name="A", target=i + 1)
print(time.time(), f"Confirmed number of replicas are at {i+1}.")
signal.send.remote()
def test_deploy_one_app_failed(serve_instance):
"""Deploy two applications with separate runtime envs."""
client = serve_instance
world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
fail_import_path = "ray.serve.tests.test_config_files.fail.node"
config_template = {
"applications": [
{
"name": "app1",
"route_prefix": "/app1",
"import_path": world_import_path,
},
{
"name": "app2",
"route_prefix": "/app2",
"import_path": fail_import_path,
},
],
}
client.deploy_apps(ServeDeploySchema(**config_template))
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").text == "wonderful world"
)
wait_for_condition(
lambda: serve.status().applications["app1"].status == ApplicationStatus.RUNNING
and serve.status().applications["app2"].status
== ApplicationStatus.DEPLOY_FAILED
)
# Ensure the request doesn't hang and actually returns a 503 error.
# The timeout is there to prevent the test from hanging and blocking
# the test suite if it does fail.
r = httpx.post("http://localhost:8000/app2", timeout=10)
assert r.status_code == 503 and "unavailable" in r.text.lower()
def test_deploy_with_route_prefix_conflict(serve_instance):
world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
pizza_import_path = "ray.serve.tests.test_config_files.pizza.serve_dag"
client = serve_instance
test_config = {
"applications": [
{
"name": "app1",
"route_prefix": "/app1",
"import_path": world_import_path,
},
{
"name": "app2",
"route_prefix": "/app2",
"import_path": pizza_import_path,
},
],
}
client.deploy_apps(ServeDeploySchema(**test_config))
wait_for_condition(
lambda: httpx.get("http://localhost:8000/app1").text == "wonderful world"
)
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app2", json=["ADD", 2]).text
== "4 pizzas please!"
)
# Buffer time
time.sleep(1)
test_config["applications"][1] = {
"name": "app3",
"route_prefix": "/app2",
"import_path": world_import_path,
}
client.deploy_apps(ServeDeploySchema(**test_config))
def check():
serve_details = ServeInstanceDetails(
**ray.get(client._controller.get_serve_instance_details.remote())
)
app1_running = (
"app1" in serve_details.applications
and serve_details.applications["app1"].status == "RUNNING"
)
app3_running = (
"app3" in serve_details.applications
and serve_details.applications["app3"].status == "RUNNING"
)
app2_gone = "app2" not in serve_details.applications
return app1_running and app3_running and app2_gone
wait_for_condition(check)
# app1 and app3 should be up and running
wait_for_condition(
lambda: httpx.get("http://localhost:8000/app1").text == "wonderful world"
)
wait_for_condition(
lambda: httpx.get("http://localhost:8000/app2").text == "wonderful world"
)
def test_update_config_graceful_shutdown_timeout(serve_instance):
"""Check that replicas stay alive when graceful_shutdown_timeout_s is updated"""
client = serve_instance
config_template = {
"import_path": "ray.serve.tests.test_config_files.pid.node",
"deployments": [{"name": "f", "graceful_shutdown_timeout_s": 1000}],
}
# Deploy first time
client.deploy_apps(ServeDeploySchema.parse_obj({"applications": [config_template]}))
wait_for_condition(check_running, timeout=15)
handle = serve.get_app_handle(SERVE_DEFAULT_APP_NAME)
# Start off with signal ready, and send query
handle.send.remote().result()
pid1 = handle.remote().result()[0]
print("PID of replica after first deployment:", pid1)
# Redeploy with shutdown timeout set to 5 seconds
config_template["deployments"][0]["graceful_shutdown_timeout_s"] = 5
client.deploy_apps(ServeDeploySchema.parse_obj({"applications": [config_template]}))
wait_for_condition(check_running, timeout=15)
pid2 = handle.remote().result()[0]
assert pid1 == pid2
print("PID of replica after redeployment:", pid2)
# Send blocking query
handle.send.remote(clear=True)
handle.remote()
# Try to delete deployment, should be blocked until the timeout at 5 seconds
client.delete_apps([SERVE_DEFAULT_APP_NAME], blocking=False)
# Replica should be dead within the 10 second timeout, which means
# graceful_shutdown_timeout_s was successfully updated via a lightweight
# (in-place) update
wait_for_condition(partial(check_deployments_dead, [DeploymentID(name="f")]))
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
|
TestDeploywithLoggingConfig
|
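For reference, these tests all deploy the multi-application Serve config validated by `ServeDeploySchema`; a minimal sketch of its shape (the import path is hypothetical):

from ray.serve.schema import ServeDeploySchema

config = ServeDeploySchema.parse_obj(
    {
        "applications": [
            {
                "name": "app1",
                "route_prefix": "/app1",
                "import_path": "my_module.app",  # hypothetical
                "logging_config": {"encoding": "JSON", "log_level": "DEBUG"},
            }
        ]
    }
)
# client.deploy_apps(config) applies it, as in the tests above.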
python
|
Farama-Foundation__Gymnasium
|
gymnasium/wrappers/transform_observation.py
|
{
"start": 19556,
"end": 21959
}
|
class ____(
TransformObservation[WrapperObsType, ActType, ObsType],
gym.utils.RecordConstructorArgs,
):
"""Modifies the dtype of an observation array to a specified dtype.
Note:
This is only compatible with :class:`Box`, :class:`Discrete`, :class:`MultiDiscrete` and :class:`MultiBinary` observation spaces
A vector version of this wrapper exists: :class:`gymnasium.wrappers.vector.DtypeObservation`.
Change logs:
* v1.0.0 - Initially added
"""
def __init__(self, env: gym.Env[ObsType, ActType], dtype: Any):
"""Constructor for Dtype observation wrapper.
Args:
env: The environment to wrap
dtype: The new dtype of the observation
"""
assert isinstance(
env.observation_space,
(spaces.Box, spaces.Discrete, spaces.MultiDiscrete, spaces.MultiBinary),
)
self.dtype = dtype
if isinstance(env.observation_space, spaces.Box):
new_observation_space = spaces.Box(
low=env.observation_space.low,
high=env.observation_space.high,
shape=env.observation_space.shape,
dtype=self.dtype,
)
elif isinstance(env.observation_space, spaces.Discrete):
new_observation_space = spaces.Box(
low=env.observation_space.start,
high=env.observation_space.start + env.observation_space.n,
shape=(),
dtype=self.dtype,
)
elif isinstance(env.observation_space, spaces.MultiDiscrete):
new_observation_space = spaces.MultiDiscrete(
env.observation_space.nvec, dtype=dtype
)
elif isinstance(env.observation_space, spaces.MultiBinary):
new_observation_space = spaces.Box(
low=0,
high=1,
shape=env.observation_space.shape,
dtype=self.dtype,
)
else:
raise TypeError(
"DtypeObservation is only compatible with value / array-based observations."
)
gym.utils.RecordConstructorArgs.__init__(self, dtype=dtype)
TransformObservation.__init__(
self,
env=env,
func=lambda obs: dtype(obs),
observation_space=new_observation_space,
)
|
DtypeObservation
|
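A minimal usage sketch of the wrapper above, assuming `gymnasium` with the classic-control extra installed:

import numpy as np
import gymnasium as gym
from gymnasium.wrappers import DtypeObservation

env = gym.make("CartPole-v1")                 # Box(float32) observations
env = DtypeObservation(env, dtype=np.float64)
obs, _ = env.reset(seed=0)
print(obs.dtype)                              # float64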
python
|
davidhalter__parso
|
parso/python/tree.py
|
{
"start": 10701,
"end": 12577
}
|
class ____(Scope):
"""
The top scope, which is always a module.
Depending on the underlying parser this may be a full module or just a part
of a module.
"""
__slots__ = ('_used_names',)
type = 'file_input'
def __init__(self, children):
super().__init__(children)
self._used_names = None
def _iter_future_import_names(self):
"""
:return: A list of future import names.
:rtype: list of str
"""
# In Python it's not allowed to use future imports after the first
# actual (non-future) statement. However, this is not a linter; just
# return all future imports. If people want to scan for issues they
# should use the API.
for imp in self.iter_imports():
if imp.type == 'import_from' and imp.level == 0:
for path in imp.get_paths():
names = [name.value for name in path]
if len(names) == 2 and names[0] == '__future__':
yield names[1]
def get_used_names(self):
"""
Returns all the :class:`Name` leafs that exist in this module. This
includes both definitions and references of names.
"""
if self._used_names is None:
# Don't directly use self._used_names to eliminate a lookup.
dct = {}
def recurse(node):
try:
children = node.children
except AttributeError:
if node.type == 'name':
arr = dct.setdefault(node.value, [])
arr.append(node)
else:
for child in children:
recurse(child)
recurse(self)
self._used_names = UsedNamesMapping(dct)
return self._used_names
|
Module
|
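A minimal sketch of `get_used_names()` on the module node, assuming parso is installed:

import parso

module = parso.parse("x = 1\ny = x + 1\n")
used = module.get_used_names()
print(sorted(used))     # ['x', 'y']
print(len(used["x"]))   # 2: one definition plus one reference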
python
|
huggingface__transformers
|
src/transformers/models/blip/modeling_blip_text.py
|
{
"start": 21720,
"end": 33016
}
|
class ____(BlipTextPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder, the model needs to be
initialized with the `is_decoder` argument set to `True`; an `encoder_hidden_states` is then expected as an input
to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BlipTextEmbeddings(config)
self.encoder = BlipTextEncoder(config)
self.pooler = BlipTextPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def get_extended_attention_mask(
self, attention_mask: Tensor, input_shape: tuple[int], device: device, is_decoder: bool
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
device (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
is_decoder: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
r"""
encoder_hidden_states (`torch.FloatTensor`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`Cache`, *optional*):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
batch_size, seq_length = input_shape
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = inputs_embeds.device
elif encoder_embeds is not None:
input_shape = encoder_embeds.size()[:-1]
batch_size, seq_length = input_shape
device = encoder_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds")
past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length)).to(device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device, is_decoder
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
if encoder_embeds is None:
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
else:
embedding_output = encoder_embeds
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
# Adapted from https://github.com/salesforce/BLIP/blob/main/models/med.py#L811
|
BlipTextModel
|
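The additive-mask arithmetic in `get_extended_attention_mask` above can be seen in isolation; a minimal sketch:

import torch

attention_mask = torch.tensor([[1.0, 1.0, 0.0]])   # last position is padding
extended = attention_mask[:, None, None, :]        # -> [batch, 1, 1, seq], broadcastable over heads
extended = (1.0 - extended) * -10000.0
print(extended)  # tensor([[[[-0., -0., -10000.]]]]) -- 0.0 keeps a token, -10000.0 masks it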
python
|
pytorch__pytorch
|
torch/utils/_python_dispatch.py
|
{
"start": 1591,
"end": 14843
}
|
class ____:
"""
A ``TorchDispatchMode`` allows you to override the meaning of all
``__torch_dispatch__`` overrideable functions within a dynamic scope,
without having to actually create a tensor subclass or manually
monkey-patch functions in the PyTorch API. Some common situations
where you should use a mode:
* You want to override the meaning of factory functions, or other
functions that do not otherwise take a tensor as an argument
(these cannot be overridden with tensor subclasses).
* You want to override the behavior of all functions without needing
to wrap your inputs in tensor subclasses; e.g., if you are just
interested in logging intermediate computations.
* You want to control the order of execution of various tensor
subclasses explicitly, rather than implicitly via the return of
``NotImplemented``.
Independent subclasses of :class:`TorchDispatchMode` are compositional:
modes can be pushed onto a stack using ``with MyMode():``.
When you call functions in the PyTorch API inside your
``__torch_dispatch__`` implementation, by default, they will forward on to
the next mode on the mode stack. If you want to recursively call back into
your current ``__torch_dispatch__`` implementation, either explicitly
invoke ``self.__torch_dispatch__(...)``, or use the context manager
``self`` to make the PyTorch API self-referential (beware of infinite
loops, in this case!)
"""
# - When False, custom torch dispatch mode will error out explicitly when a hop
# is called under the mode.
# - When True, custom torch dispatch mode's __torch_dispatch__ will be triggered.
# Mode authors can implement how the mode interacts with higher order operators.
supports_higher_order_operators = False
def __init__(self, _dispatch_key=None) -> None:
if _dispatch_key is not None:
if not isinstance(_dispatch_key, torch._C.DispatchKey):
raise AssertionError("_dispatch_key must be a torch._C.DispatchKey")
self.__dict__["_dispatch_key"] = _dispatch_key
self.old_dispatch_mode_flags: deque[bool] = deque()
self.old_non_infra_dispatch_mode_flags: deque[bool] = deque()
self.old_without_ignore_compile_internals_dispatch_mode_flags: deque[bool] = (
deque()
)
def _lazy_init_old_dispatch_mode_flags(self) -> None:
if not hasattr(self, "old_dispatch_mode_flags"):
self.old_dispatch_mode_flags: deque[bool] = deque() # type: ignore[no-redef]
if not hasattr(self, "old_non_infra_dispatch_mode_flags"):
self.old_non_infra_dispatch_mode_flags: deque[bool] = deque() # type: ignore[no-redef]
if not hasattr(
self, "old_without_ignore_compile_internals_dispatch_mode_flags"
):
self.old_without_ignore_compile_internals_dispatch_mode_flags: deque[ # type: ignore[no-redef]
bool
] = deque()
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
raise NotImplementedError
def __enter__(self):
global _is_in_torch_dispatch_mode
global _is_in_non_infra_torch_dispatch_mode
global _is_in_any_mode_without_ignore_compile_internals
# Previously, there wasn't any state in this class' constructor;
# super() calls were added to existing modes, but for any new modes
# this will replicate the previous behavior of not strictly needing
# to call super().__init__()
self._lazy_init_old_dispatch_mode_flags()
self.old_dispatch_mode_flags.append(_is_in_torch_dispatch_mode)
_is_in_torch_dispatch_mode = True
self.old_non_infra_dispatch_mode_flags.append(
_is_in_non_infra_torch_dispatch_mode
)
_is_in_non_infra_torch_dispatch_mode = (
_is_in_non_infra_torch_dispatch_mode or not self.is_infra_mode()
)
self.old_without_ignore_compile_internals_dispatch_mode_flags.append(
_is_in_any_mode_without_ignore_compile_internals
)
_is_in_any_mode_without_ignore_compile_internals = (
_is_in_any_mode_without_ignore_compile_internals
or not self.ignore_compile_internals()
)
set_is_in_mode_without_ignore_compile_internals(
_is_in_any_mode_without_ignore_compile_internals
)
_push_mode(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
mb_dk_or_mode_key = self.__dict__.get("_dispatch_key", None)
if mb_dk_or_mode_key is None:
# Today, mode keys are not used at all in the per-dispatch-key-mode logic (for pre-dispatch)
# We should probably revisit this.
mb_dk_or_mode_key = self.__dict__.get("_mode_key", None)
global _is_in_torch_dispatch_mode
_is_in_torch_dispatch_mode = self.old_dispatch_mode_flags.pop()
global _is_in_non_infra_torch_dispatch_mode
_is_in_non_infra_torch_dispatch_mode = (
self.old_non_infra_dispatch_mode_flags.pop()
)
global _is_in_any_mode_without_ignore_compile_internals
_is_in_any_mode_without_ignore_compile_internals = (
self.old_without_ignore_compile_internals_dispatch_mode_flags.pop()
)
set_is_in_mode_without_ignore_compile_internals(
_is_in_any_mode_without_ignore_compile_internals
)
_pop_mode(mb_dk_or_mode_key)
@classmethod
def push(cls, *args, **kwargs):
warnings.warn(
"`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`",
stacklevel=2,
)
instance = cls(*args, **kwargs)
return instance
@classmethod
def is_infra_mode(cls) -> bool:
return False
@classmethod
def ignore_compile_internals(cls) -> bool:
"""Ignore operators that are compiled via torch.compile.
If ``True``, then this TorchDispatchMode ignores operators that
are optimized by :func:`torch.compile`. Mechanically, this involves
turning off the TorchDispatchMode throughout the whole compilation process,
and turning it back on for the runtime of the compiled artifact(s).
For example,
@torch.compile
def f(x):
return x.sin().cos()
with LoggingMode():
f(x)
The above example will not log anything if
``LoggingMode.ignore_compile_internals()`` is True.
torch.compile will fuse sin() and cos() into a single operation
and this TorchDispatchMode will not be passed sin and cos.
If ``False`` (default), :func:`torch.compile` will respect
the eager semantics of passing this TorchDispatchMode all
operators that would have run during eager execution.
The way this will usually happen is that :func:`torch.compile`
will just fallback to eager-mode PyTorch.
"""
if cls.is_infra_mode():
return True
return False
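# Illustrative sketch (not part of this module): the usual way to use
# TorchDispatchMode is to subclass it and override ``__torch_dispatch__``.
# ``LoggingMode`` is a hypothetical name; the sketch assumes ``torch`` is
# importable.
#
#     class LoggingMode(TorchDispatchMode):
#         def __torch_dispatch__(self, func, types, args=(), kwargs=None):
#             kwargs = kwargs or {}
#             print(f"dispatch: {func}")
#             # Forward on to the next mode on the stack (or the real kernel).
#             return func(*args, **kwargs)
#
#     with LoggingMode():
#         torch.ones(2) + torch.ones(2)  # logs the aten ops as they dispatch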
def _get_current_dispatch_mode() -> TorchDispatchMode | None:
"""
Return the top user mode on the stack (the next one that would be
executed) if there are any.
"""
stack_len = _len_torch_dispatch_stack()
if stack_len > 0:
return _get_dispatch_stack_at(stack_len - 1)
return None
def _detect_infra_mode(key):
if key not in (
torch._C._TorchDispatchModeKey.FUNCTIONAL,
torch._C._TorchDispatchModeKey.PROXY,
):
raise AssertionError(
f"key must be either FUNCTIONAL ({torch._C._TorchDispatchModeKey.FUNCTIONAL}) \
or PROXY ({torch._C._TorchDispatchModeKey.PROXY}) _TorchDispatchModeKey, \
got {key}"
)
from torch._ops import _get_dispatch_mode_pre_dispatch
pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
post_dispatch_mode = torch._C._get_dispatch_mode(key)
if pre_dispatch_mode is not None and post_dispatch_mode is not None:
raise AssertionError(
"At most one of pre_dispatch_mode and post_dispatch_mode may be active"
)
if pre_dispatch_mode is None:
return post_dispatch_mode
return pre_dispatch_mode
def _unset_infra_mode(key):
from torch._ops import _get_dispatch_mode_pre_dispatch, unset_mode_pre_dispatch
pre_dispatch_mode = _get_dispatch_mode_pre_dispatch(key)
post_dispatch_mode = torch._C._get_dispatch_mode(key)
if pre_dispatch_mode and post_dispatch_mode:
raise AssertionError(
"Can't have active infra mode on both pre and post dispatch mode stack"
)
if pre_dispatch_mode:
mode = unset_mode_pre_dispatch(key)
return mode
if post_dispatch_mode:
return torch._C._unset_dispatch_mode(key)
@contextlib.contextmanager
def _disable_infra_mode(key):
if key not in (
torch._C._TorchDispatchModeKey.FUNCTIONAL,
torch._C._TorchDispatchModeKey.PROXY,
):
raise AssertionError(
"key must be either FUNCTIONAL or PROXY _TorchDispatchModeKey"
)
mode_unset = _unset_infra_mode(key)
try:
yield mode_unset
finally:
if mode_unset is not None:
_push_mode(mode_unset)
def _get_current_dispatch_mode_stack() -> list[TorchDispatchMode]:
"""
Returns the current stack of dispatch modes, with the most recent
(i.e., the one that will be processed first) at the end of the
list (standard stack convention).
"""
stack_len = _len_torch_dispatch_stack()
return [_get_dispatch_stack_at(i) for i in range(stack_len)]
def _push_mode(mode: TorchDispatchMode) -> None:
k = mode._dispatch_key if hasattr(mode, "_dispatch_key") else None
if k is not None and k != torch._C.DispatchKey.PreDispatch:
raise AssertionError(
"mode._dispatch_key must be None or DispatchKey.PreDispatch"
)
if k is None:
_push_on_torch_dispatch_stack(mode)
return
from torch._ops import _set_mode_pre_dispatch, get_cached_ops
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
# Clear the cache of every op that has been used so far, for this particular key.
ks = torch._C._functionality_to_backend_keys(k)
for op in get_cached_ops():
for key in ks:
op._uncache_dispatch(key)
_set_mode_pre_dispatch(mode)
def _pop_mode(k: DispatchKey | torch._C._TorchDispatchModeKey | None = None):
if k == torch._C.DispatchKey.PreDispatch: # type: ignore[attr-defined]
from torch._ops import _pop_mode_from_pre_dispatch
return _pop_mode_from_pre_dispatch()
if k is None or isinstance(k, torch._C._TorchDispatchModeKey):
return _pop_torch_dispatch_stack(k)
@contextlib.contextmanager
def _pop_mode_temporarily(k: DispatchKey | None = None):
old = _pop_mode(k)
try:
yield old
finally:
_push_mode(old)
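# Usage sketch (illustrative): temporarily remove the innermost mode so that
# ops issued inside the block bypass it; the mode is restored on exit.
#
#     with _pop_mode_temporarily() as popped:
#         # `popped` is the mode that was removed; ops here dispatch to the
#         # next mode down the stack (or to regular kernels).
#         ...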
@contextlib.contextmanager
def _disable_current_modes():
from torch._ops import (
_len_torch_dispatch_stack_pre_dispatch,
_pop_mode_from_pre_dispatch,
)
from torch._subclasses.functional_tensor import FunctionalTensorMode
from torch._subclasses.schema_check_mode import SchemaCheckMode
from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
mode_len_pre_dispatch = _len_torch_dispatch_stack_pre_dispatch()
old_pre_dispatch_modes = [
_pop_mode_from_pre_dispatch() for _ in range(mode_len_pre_dispatch)
]
has_proxy_mode_in_pre_dispatch = False
has_functional_mode_in_pre_dispatch = False
has_schema_check_mode_in_pre_dispatch = False
for i in old_pre_dispatch_modes:
if isinstance(i, ProxyTorchDispatchMode):
has_proxy_mode_in_pre_dispatch = True
if isinstance(i, FunctionalTensorMode):
has_functional_mode_in_pre_dispatch = True
if isinstance(i, SchemaCheckMode):
has_schema_check_mode_in_pre_dispatch = True
mode_len = _len_torch_dispatch_stack()
old_modes = [_pop_mode() for _ in range(mode_len)]
for old in old_modes:
if (
isinstance(old, FunctionalTensorMode)
and has_functional_mode_in_pre_dispatch
):
raise AssertionError(
"Can't have FunctionalMode available both in PreDispatch and Python Key"
)
if isinstance(old, ProxyTorchDispatchMode) and has_proxy_mode_in_pre_dispatch:
raise AssertionError(
"Can't have ProxyTorchDispatchMode available both in PreDispatch and Python Key"
)
if isinstance(old, SchemaCheckMode) and has_schema_check_mode_in_pre_dispatch:
raise AssertionError(
"Can't have SchemaCheckMode available both in PreDispatch and Python Key"
)
# Manually disable proxy and fake modes, if any are active
try:
yield old_pre_dispatch_modes + old_modes
finally:
for mode in reversed(old_modes):
_push_mode(mode)
for mode in reversed(old_pre_dispatch_modes):
_push_mode(mode)
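# Usage sketch (illustrative): run a computation with every dispatch mode
# (both pre-dispatch and Python-key) temporarily disabled, e.g. to inspect
# plain tensor values while tracing.
#
#     with _disable_current_modes() as saved_modes:
#         # `saved_modes` lists the popped modes, pre-dispatch ones first;
#         # they are pushed back in reverse order on exit.
#         ...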
|
TorchDispatchMode
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-lancedb/llama_index/vector_stores/lancedb/base.py
|
{
"start": 2646,
"end": 20847
}
|
class ____(BasePydanticVectorStore):
"""
The LanceDB Vector Store.
Stores text and embeddings in LanceDB. The vector store will open an existing
LanceDB dataset or create the dataset if it does not exist.
Args:
uri (str, required): Location where LanceDB will store its files.
table_name (str, optional): The table name where the embeddings will be stored.
Defaults to "vectors".
vector_column_name (str, optional): The vector column name in the table if different from default.
Defaults to "vector", in keeping with lancedb convention.
nprobes (int, optional): The number of probes used.
A higher number makes search more accurate but also slower.
Defaults to 20.
refine_factor: (int, optional): Refine the results by reading extra elements
and re-ranking them in memory.
Defaults to None
text_key (str, optional): The key in the table that contains the text.
Defaults to "text".
doc_id_key (str, optional): The key in the table that contains the document id.
Defaults to "doc_id".
connection (Any, optional): The connection to use for LanceDB.
Defaults to None.
table (Any, optional): The table to use for LanceDB.
Defaults to None.
api_key (str, optional): The API key to use LanceDB cloud.
Defaults to None. You can also set the `LANCE_API_KEY` environment variable.
region (str, optional): The region to use for your LanceDB cloud db.
Defaults to None.
mode (str, optional): The mode to use for LanceDB.
Defaults to "overwrite".
query_type (str, optional): The type of query to use for LanceDB.
Defaults to "vector".
reranker (Any, optional): The reranker to use for LanceDB.
Defaults to None.
overfetch_factor (int, optional): The factor by which to fetch more results.
Defaults to 1.
Raises:
ImportError: Unable to import `lancedb`.
Returns:
LanceDBVectorStore: VectorStore that supports creating LanceDB datasets and
querying it.
Examples:
`pip install llama-index-vector-stores-lancedb`
```python
from llama_index.vector_stores.lancedb import LanceDBVectorStore
vector_store = LanceDBVectorStore() # native invocation
```
"""
stores_text: bool = True
flat_metadata: bool = True
uri: Optional[str]
vector_column_name: Optional[str]
nprobes: Optional[int]
refine_factor: Optional[int]
text_key: Optional[str]
doc_id_key: Optional[str]
api_key: Optional[str]
region: Optional[str]
mode: Optional[str]
query_type: Optional[str]
overfetch_factor: Optional[int]
_table_name: Optional[str] = PrivateAttr()
_connection: lancedb.DBConnection = PrivateAttr()
_table: Any = PrivateAttr()
_metadata_keys: Any = PrivateAttr()
_fts_index_ready: bool = PrivateAttr()
_reranker: Any = PrivateAttr()
def __init__(
self,
uri: Optional[str] = "/tmp/lancedb",
table_name: Optional[str] = "vectors",
vector_column_name: str = "vector",
nprobes: int = 20,
refine_factor: Optional[int] = None,
text_key: str = DEFAULT_TEXT_KEY,
doc_id_key: str = DEFAULT_DOC_ID_KEY,
connection: Optional[Any] = None,
table: Optional[Any] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
mode: str = "overwrite",
query_type: str = "vector",
reranker: Optional[Any] = None,
overfetch_factor: int = 1,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(
uri=uri,
table_name=table_name,
vector_column_name=vector_column_name,
nprobes=nprobes,
refine_factor=refine_factor,
text_key=text_key,
doc_id_key=doc_id_key,
mode=mode,
query_type=query_type,
overfetch_factor=overfetch_factor,
api_key=api_key,
region=region,
**kwargs,
)
self._table_name = table_name
self._metadata_keys = None
self._fts_index_ready = False
if isinstance(reranker, lancedb.rerankers.Reranker):
self._reranker = reranker
elif reranker is None:
self._reranker = None
else:
raise ValueError(
"`reranker` has to be a lancedb.rerankers.Reranker object."
)
if isinstance(connection, lancedb.db.LanceDBConnection):
self._connection = connection
elif isinstance(connection, str):
raise ValueError(
"`connection` has to be a lancedb.db.LanceDBConnection object."
)
else:
if api_key is None and os.getenv("LANCE_API_KEY") is None:
if uri.startswith("db://"):
raise ValueError("API key is required for LanceDB cloud.")
else:
self._connection = lancedb.connect(uri)
else:
if "db://" not in uri:
self._connection = lancedb.connect(uri)
warnings.warn(
"api key provided with local uri. The data will be stored locally"
)
self._connection = lancedb.connect(
uri, api_key=api_key or os.getenv("LANCE_API_KEY"), region=region
)
if table is not None:
try:
assert isinstance(
table, (lancedb.db.LanceTable, lancedb.remote.table.RemoteTable)
)
self._table = table
self._table_name = (
table.name if hasattr(table, "name") else "remote_table"
)
except AssertionError:
raise ValueError(
"`table` has to be a lancedb.db.LanceTable or lancedb.remote.table.RemoteTable object."
)
else:
if self._table_exists():
self._table = self._connection.open_table(table_name)
elif self.mode in ["create", "overwrite"]:
_logger.warning(
f"Table {table_name} doesn't exist yet. Please add some data to create it."
)
self._table = None
else:
raise TableNotFoundError(
f"Table {self._table_name} doesn't exist, mode must be either 'create' or 'overwrite' to create it dynamically"
)
@property
    def client(self) -> lancedb.DBConnection:
"""Get client."""
return self._connection
@property
def table(
self,
) -> Optional[Union[lancedb.db.LanceTable, lancedb.remote.table.RemoteTable]]:
"""Get table."""
if self._table is None:
raise TableNotFoundError(
f"Table {self._table_name} is not initialized. Please create it or add some data first."
)
return self._table
@classmethod
def from_table(cls, table: Any) -> "LanceDBVectorStore":
"""Create instance from table."""
try:
if not isinstance(
table, (lancedb.db.LanceTable, lancedb.remote.table.RemoteTable)
):
                raise TypeError("argument is not a lancedb table instance")
            return cls(table=table, connection=table._conn)
        except Exception:
print("ldb version", lancedb.__version__)
raise
def _add_reranker(self, reranker: lancedb.rerankers.Reranker) -> None:
"""Add a reranker to an existing vector store."""
if reranker is None:
raise ValueError(
"`reranker` has to be a lancedb.rerankers.Reranker object."
)
self._reranker = reranker
def _table_exists(self, tbl_name: Optional[str] = None) -> bool:
return (tbl_name or self._table_name) in self._connection.table_names()
def create_index(
self,
scalar: Optional[bool] = False,
col_name: Optional[str] = None,
num_partitions: Optional[int] = 256,
num_sub_vectors: Optional[int] = 96,
index_cache_size: Optional[int] = None,
metric: Optional[str] = "L2",
**kwargs: Any,
) -> None:
"""
Create a scalar(for non-vector cols) or a vector index on a table.
Make sure your vector column has enough data before creating an index on it.
Args:
scalar: Create a scalar index on a column. Defaults to False
col_name: The column name to create the scalar index on. Defaults to None
num_partitions: Number of partitions to use for the index. Defaults to 256
num_sub_vectors: Number of sub-vectors to use for the index. Defaults to 96
index_cache_size: The size of the index cache. Defaults to None
metric: Provide the metric to use for vector index. Defaults to 'L2'
choice of metrics: 'L2', 'dot', 'cosine'
**kwargs: Additional keyword arguments. See lancedb.db.LanceTable.create_index docs
Returns:
None
"""
if not scalar:
self.table.create_index(
metric=metric,
vector_column_name=self.vector_column_name,
num_partitions=num_partitions,
num_sub_vectors=num_sub_vectors,
index_cache_size=index_cache_size,
**kwargs,
)
else:
if col_name is None:
raise ValueError("Column name is required for scalar index creation.")
self.table.create_scalar_index(col_name)
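    # Usage sketch (illustrative; `store` is a hypothetical instance): build a
    # vector index once the table has enough rows, or a scalar index on a
    # metadata column to speed up filtering.
    #
    #     store.create_index(metric="cosine", num_partitions=256, num_sub_vectors=96)
    #     store.create_index(scalar=True, col_name="doc_id")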
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
if not nodes:
_logger.debug("No nodes to add. Skipping the database operation.")
return []
data = []
ids = []
for node in nodes:
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=self.flat_metadata
)
if not self._metadata_keys:
self._metadata_keys = list(metadata.keys())
append_data = {
"id": node.node_id,
self.doc_id_key: node.ref_doc_id,
self.vector_column_name: node.get_embedding(),
self.text_key: node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
}
data.append(append_data)
ids.append(node.node_id)
if self._table is None:
_logger.info(f"Create new table {self._table_name} adding data.")
self._table = self._connection.create_table(
self._table_name, data, mode=self.mode
)
else:
self._table.add(data)
# new data requires re-creating the fts index
self._fts_index_ready = False
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes with the given ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
self.table.delete(f'{self.doc_id_key} = "' + ref_doc_id + '"')
def delete_nodes(self, node_ids: List[str], **delete_kwargs: Any) -> None:
"""
        Delete nodes with the given node_ids.
Args:
node_ids (List[str]): The list of node_ids to delete.
"""
self.table.delete('id in ("' + '","'.join(node_ids) + '")')
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**kwargs: Any,
) -> List[BaseNode]:
"""
Get nodes from the vector store.
"""
if isinstance(self.table, lancedb.remote.table.RemoteTable):
raise ValueError("get_nodes is not supported for LanceDB cloud yet.")
if filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(filters, self._metadata_keys)
else:
where = kwargs.pop("where", None)
if node_ids is not None:
            where = 'id in ("' + '","'.join(node_ids) + '")'
results = self.table.search().where(where).to_pandas()
nodes = []
for _, item in results.iterrows():
try:
node = metadata_dict_to_node(item.metadata)
node.embedding = list(item[self.vector_column_name])
except Exception:
# deprecated legacy logic for backward compatibility
_logger.debug(
"Failed to parse Node metadata, fallback to legacy logic."
)
if item.metadata:
metadata, node_info, _relation = legacy_metadata_dict_to_node(
item.metadata, text_key=self.text_key
)
else:
metadata, node_info = {}, {}
node = TextNode(
text=item[self.text_key] or "",
id_=item.id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=item[self.doc_id_key]
),
},
)
nodes.append(node)
return nodes
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface."
)
where = _to_lance_filter(query.filters, self._metadata_keys)
else:
where = kwargs.pop("where", None)
query_type = kwargs.pop("query_type", self.query_type)
_logger.info(f"query_type :, {query_type}")
if query_type == "vector":
_query = query.query_embedding
else:
if not isinstance(self.table, lancedb.db.LanceTable):
raise ValueError(
"creating FTS index is not supported for LanceDB Cloud yet. "
"Please use a local table for FTS/Hybrid search."
)
if not self._fts_index_ready:
self.table.create_fts_index(self.text_key, replace=True)
self._fts_index_ready = True
if query_type == "hybrid":
_query = (query.query_embedding, query.query_str)
elif query_type == "fts":
_query = query.query_str
else:
raise ValueError(f"Invalid query type: {query_type}")
if query_type == "hybrid":
lance_query = (
self.table.search(
vector_column_name=self.vector_column_name, query_type="hybrid"
)
.vector(query.query_embedding)
.text(query.query_str)
)
else:
lance_query = self.table.search(
query=_query,
vector_column_name=self.vector_column_name,
)
lance_query.limit(query.similarity_top_k * self.overfetch_factor).where(where)
if query_type != "fts":
lance_query.nprobes(self.nprobes)
if query_type == "hybrid" and self._reranker is not None:
_logger.info(f"using {self._reranker} for reranking results.")
lance_query.rerank(reranker=self._reranker)
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
results = lance_query.to_pandas()
if len(results) == 0:
raise Warning("query results are empty..")
nodes = []
for _, item in results.iterrows():
try:
node = metadata_dict_to_node(item.metadata)
node.embedding = list(item[self.vector_column_name])
except Exception:
# deprecated legacy logic for backward compatibility
_logger.debug(
"Failed to parse Node metadata, fallback to legacy logic."
)
if item.metadata:
metadata, node_info, _relation = legacy_metadata_dict_to_node(
item.metadata, text_key=self.text_key
)
else:
metadata, node_info = {}, {}
node = TextNode(
text=item[self.text_key] or "",
id_=item.id,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id=item[self.doc_id_key]
),
},
)
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
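# End-to-end usage sketch (hypothetical names; assumes a local LanceDB and
# that `documents` was loaded elsewhere in the pipeline):
#
#     from llama_index.core import StorageContext, VectorStoreIndex
#
#     store = LanceDBVectorStore(uri="/tmp/lancedb", table_name="vectors")
#     ctx = StorageContext.from_defaults(vector_store=store)
#     index = VectorStoreIndex.from_documents(documents, storage_context=ctx)
#     engine = index.as_query_engine()
#     print(engine.query("What does the corpus say about X?"))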
|
LanceDBVectorStore
|
python
|
keras-team__keras
|
keras/src/ops/nn.py
|
{
"start": 30974,
"end": 34215
}
|
class ____(Operation):
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
*,
name=None,
):
super().__init__(name=name)
self.pool_size = pool_size
self.strides = strides
self.padding = padding.lower()
self.data_format = data_format
def call(self, inputs):
return backend.nn.average_pool(
inputs,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def compute_output_spec(self, inputs):
output_shape = operation_utils.compute_pooling_output_shape(
inputs.shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(
[
"keras.ops.average_pool",
"keras.ops.nn.average_pool",
]
)
def average_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
"""Average pooling operation.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`. Pooling happens over the spatial
dimensions only.
pool_size: int or tuple/list of integers of size
`len(inputs_spatial_shape)`, specifying the size of the pooling
window for each spatial dimension of the input tensor. If
`pool_size` is int, then every spatial dimension shares the same
`pool_size`.
strides: int or tuple/list of integers of size
`len(inputs_spatial_shape)`. The stride of the sliding window for
each spatial dimension of the input tensor. If `strides` is int,
then every spatial dimension shares the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
Returns:
A tensor of rank N+2, the result of the average pooling operation.
"""
data_format = standardize_data_format(data_format)
padding = padding.lower()
if any_symbolic_tensors((inputs,)):
return AveragePool(
pool_size,
strides,
padding,
data_format,
).symbolic_call(inputs)
return backend.nn.average_pool(
inputs, pool_size, strides, padding, data_format
)
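# Worked example (illustrative): with pool_size=2, strides=2 and "valid"
# padding, each spatial dim shrinks as floor((6 - 2) / 2) + 1 = 3.
#
#     import numpy as np
#     from keras import ops
#
#     x = np.ones((1, 6, 6, 3), dtype="float32")  # channels_last input
#     y = ops.average_pool(x, pool_size=2, strides=2, padding="valid")
#     assert tuple(y.shape) == (1, 3, 3, 3)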
|
AveragePool
|
python
|
ageron__handson-ml
|
future_encoders.py
|
{
"start": 8047,
"end": 29846
}
|
class ____(_BaseEncoder):
"""Encode categorical integer features as a one-hot numeric array.
The input to this transformer should be an array-like of integers or
strings, denoting the values taken on by categorical (discrete) features.
The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')
encoding scheme. This creates a binary column for each category and
returns a sparse matrix or dense array.
By default, the encoder derives the categories based on the unique values
in each feature. Alternatively, you can also specify the `categories`
manually.
The OneHotEncoder previously assumed that the input features take on
values in the range [0, max(values)). This behaviour is deprecated.
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
categories : 'auto' or a list of lists/arrays of values.
Categories (unique values) per feature:
- 'auto' : Determine categories automatically from the training data.
- list : ``categories[i]`` holds the categories expected in the ith
column. The passed categories must be sorted and should not mix
strings and numeric values.
The used categories can be found in the ``categories_`` attribute.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
    dtype : number type, default=np.float64
Desired dtype of output.
handle_unknown : 'error' (default) or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform (default is to raise). When this parameter
is set to 'ignore' and an unknown category is encountered during
transform, the resulting one-hot encoded columns for this feature
will be all zeros. In the inverse transform, an unknown category
will be denoted as None.
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
.. deprecated:: 0.20
The `n_values` keyword is deprecated and will be removed in 0.22.
Use `categories` instead.
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
.. deprecated:: 0.20
The `categorical_features` keyword is deprecated and will be
removed in 0.22.
Attributes
----------
categories_ : list of arrays
The categories of each feature determined during fitting
(in order corresponding with output of ``transform``).
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
.. deprecated:: 0.20
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
.. deprecated:: 0.20
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
.. deprecated:: 0.20
Examples
--------
Given a dataset with two features, we let the encoder find the unique
values per feature and transform the data to a binary one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder(handle_unknown='ignore')
>>> X = [['Male', 1], ['Female', 3], ['Female', 2]]
>>> enc.fit(X)
... # doctest: +ELLIPSIS
OneHotEncoder(categories='auto', dtype=<... 'numpy.float64'>,
handle_unknown='ignore', sparse=True)
>>> enc.categories_
[array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]
>>> enc.transform([['Female', 1], ['Male', 4]]).toarray()
array([[ 1., 0., 1., 0., 0.],
[ 0., 1., 0., 0., 0.]])
>>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])
array([['Male', 1],
[None, 2]], dtype=object)
See also
--------
sklearn.preprocessing.OrdinalEncoder : performs an ordinal (integer)
encoding of the categorical features.
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
"""
def __init__(self, n_values=None, categorical_features=None,
categories=None, sparse=True, dtype=np.float64,
handle_unknown='error'):
self._categories = categories
if categories is None:
self.categories = 'auto'
else:
self.categories = categories
self.sparse = sparse
self.dtype = dtype
self.handle_unknown = handle_unknown
if n_values is not None:
pass
# warnings.warn("Deprecated", DeprecationWarning)
else:
n_values = "auto"
self._deprecated_n_values = n_values
if categorical_features is not None:
pass
# warnings.warn("Deprecated", DeprecationWarning)
else:
categorical_features = "all"
self._deprecated_categorical_features = categorical_features
# Deprecated keywords
@property
def n_values(self):
warnings.warn("The 'n_values' parameter is deprecated.",
DeprecationWarning)
return self._deprecated_n_values
@n_values.setter
def n_values(self, value):
warnings.warn("The 'n_values' parameter is deprecated.",
DeprecationWarning)
self._deprecated_n_values = value
@property
def categorical_features(self):
warnings.warn("The 'categorical_features' parameter is deprecated.",
DeprecationWarning)
return self._deprecated_categorical_features
@categorical_features.setter
def categorical_features(self, value):
warnings.warn("The 'categorical_features' parameter is deprecated.",
DeprecationWarning)
self._deprecated_categorical_features = value
# Deprecated attributes
@property
def active_features_(self):
check_is_fitted(self, 'categories_')
warnings.warn("The 'active_features_' attribute is deprecated.",
DeprecationWarning)
return self._active_features_
@property
def feature_indices_(self):
check_is_fitted(self, 'categories_')
warnings.warn("The 'feature_indices_' attribute is deprecated.",
DeprecationWarning)
return self._feature_indices_
@property
def n_values_(self):
check_is_fitted(self, 'categories_')
warnings.warn("The 'n_values_' attribute is deprecated.",
DeprecationWarning)
return self._n_values_
def _handle_deprecations(self, X):
user_set_categories = False
if self._categories is not None:
self._legacy_mode = False
user_set_categories = True
elif self._deprecated_n_values != 'auto':
msg = (
"Passing 'n_values' is deprecated and will be removed in a "
"future release. You can use the 'categories' keyword instead."
" 'n_values=n' corresponds to 'n_values=[range(n)]'.")
warnings.warn(msg, DeprecationWarning)
# we internally translate this to the correct categories
# and don't use legacy mode
X = check_array(X, dtype=np.int)
if isinstance(self._deprecated_n_values, numbers.Integral):
n_features = X.shape[1]
self.categories = [
list(range(self._deprecated_n_values))
for _ in range(n_features)]
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self._deprecated_n_values)
else:
try:
n_values = np.asarray(self._deprecated_n_values, dtype=int)
self.categories = [list(range(i))
for i in self._deprecated_n_values]
except (ValueError, TypeError):
raise TypeError(
"Wrong type for parameter `n_values`. Expected 'auto',"
" int or array of ints, got %r".format(type(X)))
self._n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self._feature_indices_ = indices
self._legacy_mode = False
else: # n_values = 'auto'
if self.handle_unknown == 'ignore':
# no change in behaviour, no need to raise deprecation warning
self._legacy_mode = False
else:
# check if we have integer or categorical input
try:
X = check_array(X, dtype=np.int)
except ValueError:
self._legacy_mode = False
else:
warnings.warn(WARNING_MSG, DeprecationWarning)
self._legacy_mode = True
if (not isinstance(self._deprecated_categorical_features,
six.string_types)
or (isinstance(self._deprecated_categorical_features,
six.string_types)
and self._deprecated_categorical_features != 'all')):
if user_set_categories:
raise ValueError(
"The 'categorical_features' keyword is deprecated, and "
"cannot be used together with specifying 'categories'.")
warnings.warn("The 'categorical_features' keyword is deprecated.",
DeprecationWarning)
self._legacy_mode = True
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
The data to determine the categories of each feature.
Returns
-------
self
"""
if self.handle_unknown not in ['error', 'ignore']:
template = ("handle_unknown should be either 'error' or "
"'ignore', got %s")
raise ValueError(template % self.handle_unknown)
self._handle_deprecations(X)
if self._legacy_mode:
# TODO not with _transform_selected ??
self._legacy_fit_transform(X)
return self
else:
self._fit(X, handle_unknown=self.handle_unknown)
return self
def _legacy_fit_transform(self, X):
"""Assumes X contains only categorical features."""
self_n_values = self._deprecated_n_values
dtype = getattr(X, 'dtype', None)
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self_n_values, six.string_types) and
self_n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self_n_values, numbers.Integral):
if (np.max(X, axis=0) >= self_n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self_n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self_n_values)
else:
try:
n_values = np.asarray(self_n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self._n_values_ = n_values
self.categories_ = [np.arange(n_val - 1, dtype=dtype)
for n_val in n_values]
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self._feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self_n_values, six.string_types) and
self_n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self._active_features_ = active_features
self.categories_ = [
np.unique(X[:, i]).astype(dtype) if dtype else np.unique(X[:, i])
for i in range(n_features)]
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
"""
if self.handle_unknown not in ['error', 'ignore']:
template = ("handle_unknown should be either 'error' or "
"'ignore', got %s")
raise ValueError(template % self.handle_unknown)
self._handle_deprecations(X)
if self._legacy_mode:
return _transform_selected(X, self._legacy_fit_transform,
self._deprecated_categorical_features,
copy=True)
else:
return self.fit(X).transform(X)
def _legacy_transform(self, X):
"""Assumes X contains only categorical features."""
self_n_values = self._deprecated_n_values
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self._feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
# i.e lesser than n_values_ using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self._n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self_n_values, six.string_types) and
self_n_values == 'auto'):
out = out[:, self._active_features_]
return out if self.sparse else out.toarray()
def _transform_new(self, X):
"""New implementation assuming categorical input"""
X_temp = check_array(X, dtype=None)
if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):
X = check_array(X, dtype=np.object)
else:
X = X_temp
n_samples, n_features = X.shape
X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)
mask = X_mask.ravel()
n_values = [cats.shape[0] for cats in self.categories_]
n_values = np.array([0] + n_values)
feature_indices = np.cumsum(n_values)
indices = (X_int + feature_indices[:-1]).ravel()[mask]
indptr = X_mask.sum(axis=1).cumsum()
indptr = np.insert(indptr, 0, 0)
data = np.ones(n_samples * n_features)[mask]
out = sparse.csr_matrix((data, indices, indptr),
shape=(n_samples, feature_indices[-1]),
dtype=self.dtype)
if not self.sparse:
return out.toarray()
else:
return out
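    # Worked sketch of the CSR construction above (illustrative values): for
    # X = [['a', 1], ['b', 2]] with categories_ = [['a', 'b'], [1, 2]],
    # feature_indices = [0, 2, 4], X_int = [[0, 0], [1, 1]] and all of mask
    # True, we get:
    #   indices = [0, 2, 1, 3]   # column of each 1-entry, row-major
    #   indptr  = [0, 2, 4]      # each row holds exactly n_features ones
    #   data    = [1., 1., 1., 1.]
    # i.e. the dense matrix [[1, 0, 1, 0], [0, 1, 0, 1]].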
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to encode.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array
Transformed input.
"""
if not self._legacy_mode:
return self._transform_new(X)
else:
return _transform_selected(X, self._legacy_transform,
self._deprecated_categorical_features,
copy=True)
def inverse_transform(self, X):
"""Convert back the data to the original representation.
In case unknown categories are encountered (all zero's in the
one-hot encoding), ``None`` is used to represent this category.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_encoded_features]
The transformed data.
Returns
-------
X_tr : array-like, shape [n_samples, n_features]
Inverse transformed array.
"""
# if self._legacy_mode:
# raise ValueError("only supported for categorical features")
check_is_fitted(self, 'categories_')
X = check_array(X, accept_sparse='csr')
n_samples, _ = X.shape
n_features = len(self.categories_)
n_transformed_features = sum([len(cats) for cats in self.categories_])
# validate shape of passed X
msg = ("Shape of the passed X data is not correct. Expected {0} "
"columns, got {1}.")
if X.shape[1] != n_transformed_features:
raise ValueError(msg.format(n_transformed_features, X.shape[1]))
# create resulting array of appropriate dtype
dt = np.find_common_type([cat.dtype for cat in self.categories_], [])
X_tr = np.empty((n_samples, n_features), dtype=dt)
j = 0
found_unknown = {}
for i in range(n_features):
n_categories = len(self.categories_[i])
sub = X[:, j:j + n_categories]
# for sparse X argmax returns 2D matrix, ensure 1D array
labels = np.asarray(_argmax(sub, axis=1)).flatten()
X_tr[:, i] = self.categories_[i][labels]
if self.handle_unknown == 'ignore':
# ignored unknown categories: we have a row of all zero's
unknown = np.asarray(sub.sum(axis=1) == 0).flatten()
if unknown.any():
found_unknown[i] = unknown
j += n_categories
# if ignored are found: potentially need to upcast result to
# insert None values
if found_unknown:
if X_tr.dtype != object:
X_tr = X_tr.astype(object)
for idx, mask in found_unknown.items():
X_tr[mask, idx] = None
return X_tr
|
OneHotEncoder
|
python
|
pennersr__django-allauth
|
allauth/account/views.py
|
{
"start": 30611,
"end": 36013
}
|
class ____(NextRedirectMixin, FormView):
template_name = (
"account/confirm_email_verification_code." + app_settings.TEMPLATE_EXTENSION
)
form_class = ConfirmEmailVerificationCodeForm
def dispatch(self, request, *args, **kwargs):
self.stage = LoginStageController.enter(request, EmailVerificationStage.key)
self._process = (
flows.email_verification_by_code.EmailVerificationProcess.resume(request)
)
        # A process without a user id is a fake verification, used to prevent
        # account enumeration.
verification_is_fake = self._process and "user_id" not in self._process.state
# Can we at all continue?
if (
# No verification pending?
            (not self._process)  # Anonymous, yet no stage (or fake verification)?
or (
request.user.is_anonymous
and not self.stage
and not verification_is_fake
)
):
return HttpResponseRedirect(
reverse(
"account_login" if request.user.is_anonymous else "account_email"
)
)
return super().dispatch(request, *args, **kwargs)
@cached_property
def _action(self):
action = self.request.POST.get("action")
valid_actions = ["verify"]
if self._process.can_change:
valid_actions.append("change")
if self._process.can_resend:
valid_actions.append("resend")
if action not in valid_actions:
action = "verify"
return action
def get_form_class(self):
if self._action == "change":
return self._get_change_form_class()
elif self._action == "resend":
return Form
return self._get_verify_form_class()
def _get_change_form_class(self):
return ChangeEmailForm
def _get_verify_form_class(self):
return get_form_class(
app_settings.FORMS, "confirm_email_verification_code", self.form_class
)
def get_form_kwargs(self):
ret = super().get_form_kwargs()
if self._action == "change":
pass
elif self._action == "verify":
ret["code"] = self._process.code if self._process else ""
ret["user"] = self._process.user
ret["email"] = self._process.email
return ret
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
ret["can_change"] = self._process.can_change
ret["can_resend"] = self._process.can_resend
ret["email"] = self._process.state["email"]
ret["cancel_url"] = None if self.stage else reverse("account_email")
if self._action == "change":
ret["change_form"] = ret["form"]
ret["verify_form"] = self._get_verify_form_class()()
else:
ret["change_form"] = self._get_change_form_class()()
ret["verify_form"] = ret["form"]
return ret
def form_valid(self, form):
if self._action == "change":
return self._change_form_valid(form)
elif self._action == "resend":
return self._resend_form_valid(form)
return self._verify_form_valid(form)
def _resend_form_valid(self, form):
adapter = get_adapter()
try:
self._process.resend()
except RateLimited:
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["rate_limited"],
)
return HttpResponseRedirect(
self.passthrough_next_url(reverse("account_email_verification_sent"))
)
def _change_form_valid(self, form):
self._process.change_to(form.cleaned_data["email"], form.account_already_exists)
return HttpResponseRedirect(
self.passthrough_next_url(reverse("account_email_verification_sent"))
)
def _verify_form_valid(self, form):
email_address = self._process.finish()
if self.stage:
if not email_address:
return self.stage.abort()
return self.stage.exit()
url = self.get_next_url()
if url:
pass
elif not email_address:
url = reverse("account_email")
else:
url = get_adapter(self.request).get_email_verification_redirect_url(
email_address
)
return HttpResponseRedirect(url)
def form_invalid(self, form):
if self._action == "change":
return self._change_form_invalid(form)
return self._verify_form_invalid(form)
def _verify_form_invalid(self, form):
attempts_left = self._process.record_invalid_attempt()
if attempts_left:
return super().form_invalid(form)
adapter = get_adapter(self.request)
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["too_many_login_attempts"],
)
return HttpResponseRedirect(reverse("account_login"))
@method_decorator(login_not_required, name="dispatch")
def email_verification_sent(request):
if app_settings.EMAIL_VERIFICATION_BY_CODE_ENABLED:
return ConfirmEmailVerificationCodeView.as_view()(request)
else:
return EmailVerificationSentView.as_view()(request)
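# Wiring sketch (hypothetical; allauth ships its own urlconf): this dispatcher
# sits behind the "account_email_verification_sent" URL name used by the
# redirects above, switching between the code-based and link-based flows.
#
#     from django.urls import path
#     from allauth.account import views
#
#     urlpatterns = [
#         path("confirm-email/", views.email_verification_sent,
#              name="account_email_verification_sent"),
#     ]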
|
ConfirmEmailVerificationCodeView
|
python
|
pandas-dev__pandas
|
pandas/tests/arrays/categorical/test_api.py
|
{
"start": 300,
"end": 15397
}
|
class ____:
def test_ordered_api(self):
# GH 9347
cat1 = Categorical(list("acb"), ordered=False)
tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"]))
assert not cat1.ordered
cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False)
tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"]))
assert not cat2.ordered
cat3 = Categorical(list("acb"), ordered=True)
tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"]))
assert cat3.ordered
cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True)
tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"]))
assert cat4.ordered
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
assert not cat2.ordered
cat2 = cat.as_ordered()
assert cat2.ordered
assert cat2.set_ordered(True).ordered
assert not cat2.set_ordered(False).ordered
# removed in 0.19.0
msg = "property 'ordered' of 'Categorical' object has no setter"
with pytest.raises(AttributeError, match=msg):
cat.ordered = True
with pytest.raises(AttributeError, match=msg):
cat.ordered = False
def test_rename_categories(self):
cat = Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
tm.assert_numpy_array_equal(
res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64)
)
tm.assert_index_equal(res.categories, Index([1, 2, 3]))
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
exp_cat = Index(["a", "b", "c"])
tm.assert_index_equal(cat.categories, exp_cat)
# GH18862 (let rename_categories take callables)
result = cat.rename_categories(lambda x: x.upper())
expected = Categorical(["A", "B", "C", "A"])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]])
def test_rename_categories_wrong_length_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"])
msg = (
"new categories need to have the same number of items as the "
"old categories!"
)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(new_categories)
def test_rename_categories_series(self):
# https://github.com/pandas-dev/pandas/issues/17981
c = Categorical(["a", "b"])
result = c.rename_categories(Series([0, 1], index=["a", "b"]))
expected = Categorical([0, 1])
tm.assert_categorical_equal(result, expected)
def test_rename_categories_dict(self):
# GH 17336
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1})
expected = Index([4, 3, 2, 1])
tm.assert_index_equal(res.categories, expected)
# Test for dicts of smaller length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "c": 3})
expected = Index([1, "b", 3, "d"])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with bigger length
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6})
expected = Index([1, 2, 3, 4])
tm.assert_index_equal(res.categories, expected)
# Test for dicts with no items from old categories
cat = Categorical(["a", "b", "c", "d"])
res = cat.rename_categories({"f": 1, "g": 3})
expected = Index(["a", "b", "c", "d"])
tm.assert_index_equal(res.categories, expected)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True
)
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
tm.assert_categorical_equal(cat, old)
# only res is changed
tm.assert_categorical_equal(res, new)
@pytest.mark.parametrize(
"new_categories",
[
["a"], # not all "old" included in "new"
["a", "b", "d"], # still not all "old" in "new"
["a", "b", "c", "d"], # all "old" included in "new", but too long
],
)
def test_reorder_categories_raises(self, new_categories):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
msg = "items in new_categories are not the same as in old categories"
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(new_categories)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(
["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True
)
res = cat.add_categories("d")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
tm.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
tm.assert_categorical_equal(res, expected)
def test_add_categories_existing_raises(self):
# new is in old categories
cat = Categorical(["a", "b", "c", "d"], ordered=True)
msg = re.escape("new categories must not include old categories: {'d'}")
with pytest.raises(ValueError, match=msg):
cat.add_categories(["d"])
def test_add_categories_losing_dtype_information(self):
# GH#48812
cat = Categorical(Series([1, 2], dtype="Int64"))
ser = Series([4], dtype="Int64")
result = cat.add_categories(ser)
expected = Categorical(
Series([1, 2], dtype="Int64"), categories=Series([1, 2, 4], dtype="Int64")
)
tm.assert_categorical_equal(result, expected)
cat = Categorical(Series(["a", "b", "a"], dtype=StringDtype()))
ser = Series(["d"], dtype=StringDtype())
result = cat.add_categories(ser)
expected = Categorical(
Series(["a", "b", "a"], dtype=StringDtype()),
categories=Series(["a", "b", "d"], dtype=StringDtype()),
)
tm.assert_categorical_equal(result, expected)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = Index(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_)
cat = cat.set_categories(["c", "b", "a"])
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
tm.assert_index_equal(cat.categories, exp_categories)
tm.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = Index(["a", "b", "c"])
tm.assert_index_equal(res.categories, exp_categories_back)
tm.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8))
tm.assert_index_equal(res.categories, Index(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = Index(["a", "b", "c", "d"])
tm.assert_index_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8))
tm.assert_index_equal(c.categories, Index([1, 2, 3, 4]))
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
# all "pointers" to '4' must be changed from 3 to 0,...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8))
# categories are now in new order
tm.assert_index_equal(c.categories, Index([4, 3, 2, 1]))
# output is the same
exp = np.array([1, 2, 3, 4, 1], dtype=np.int64)
tm.assert_numpy_array_equal(np.asarray(c), exp)
assert c.min() == 4
assert c.max() == 1
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
assert not c2.ordered
tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2))
@pytest.mark.parametrize(
"values, categories, new_categories",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"]),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"]),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"]),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"]),
(["a", "b", "c"], ["a", "b"], ["b", "a"]),
(["b", "a", "c"], ["a", "b"], ["a", "b"]),
(["b", "a", "c"], ["a", "b"], ["b", "a"]),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"]),
(["a", "b", "c"], ["a", "b"], ["b"]),
(["b", "a", "c"], ["a", "b"], ["a"]),
(["b", "a", "c"], ["a", "b"], ["b"]),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"]),
],
)
def test_set_categories_many(self, values, categories, new_categories, ordered):
msg = "Constructing a Categorical with a dtype and values containing"
warn1 = Pandas4Warning if set(values).difference(categories) else None
with tm.assert_produces_warning(warn1, match=msg):
c = Categorical(values, categories)
warn2 = Pandas4Warning if set(values).difference(new_categories) else None
with tm.assert_produces_warning(warn2, match=msg):
expected = Categorical(values, new_categories, ordered)
result = c.set_categories(new_categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_set_categories_rename_less(self):
# GH 24675
cat = Categorical(["A", "B"])
result = cat.set_categories(["A"], rename=True)
expected = Categorical(["A", np.nan])
tm.assert_categorical_equal(result, expected)
def test_set_categories_private(self):
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"])
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
# fastpath
cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"])
cat._set_categories(["a", "c", "d", "e"], fastpath=True)
expected = Categorical(["a", "c", "d"], categories=list("acde"))
tm.assert_categorical_equal(cat, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True)
res = cat.remove_categories("c")
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
tm.assert_categorical_equal(cat, old)
tm.assert_categorical_equal(res, new)
@pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]])
def test_remove_categories_raises(self, removals):
cat = Categorical(["a", "b", "a"])
message = re.escape("removals must all be in old categories: {'c'}")
with pytest.raises(ValueError, match=message):
cat.remove_categories(removals)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"])
exp_categories_all = Index(["a", "b", "c", "d", "e"])
exp_categories_dropped = Index(["a", "b", "c", "d"])
tm.assert_index_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, exp_categories_dropped)
tm.assert_index_equal(c.categories, exp_categories_all)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan], categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
tm.assert_index_equal(res.categories, Index(np.array(["a", "b", "c"])))
exp_codes = np.array([0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(res.codes, exp_codes)
tm.assert_index_equal(c.categories, exp_categories_all)
val = ["F", np.nan, "D", "B", "D", "F", np.nan]
cat = Categorical(values=val, categories=list("ABCDEFG"))
out = cat.remove_unused_categories()
tm.assert_index_equal(out.categories, Index(["B", "D", "F"]))
exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8)
tm.assert_numpy_array_equal(out.codes, exp_codes)
assert out.tolist() == val
alpha = list("abcdefghijklmnopqrstuvwxyz")
val = np.random.default_rng(2).choice(alpha[::2], 10000).astype("object")
val[np.random.default_rng(2).choice(len(val), 100)] = np.nan
cat = Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
assert out.tolist() == val.tolist()
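# A minimal illustration of the behavior exercised above (a sketch, assuming
# pandas is importable as pd): the unused category "b" is dropped from the
# dtype while the values are unchanged.
#
#   cat = pd.Categorical(["a", "a"], categories=["a", "b"])
#   cat.remove_unused_categories().categories  # Index(['a'], dtype='object')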
|
TestCategoricalAPI
|
python
|
PyCQA__pylint
|
tests/regrtest_data/max_inferable_limit_for_classes/nodes/roles.py
|
{
"start": 635,
"end": 717
}
|
class ____(AllowsLambdaRole, UsesInspection, StructuralRole):
...
|
JoinTargetRole
|
python
|
pytest-dev__pytest
|
testing/test_doctest.py
|
{
"start": 38613,
"end": 43831
}
|
class ____:
SCOPES = ["module", "session", "class", "function"]
def test_doctest_module_session_fixture(self, pytester: Pytester):
"""Test that session fixtures are initialized for doctest modules (#768)."""
# session fixture which changes some global data, which will
# be accessed by doctests in a module
pytester.makeconftest(
"""
import pytest
import sys
@pytest.fixture(autouse=True, scope='session')
def myfixture():
assert not hasattr(sys, 'pytest_session_data')
sys.pytest_session_data = 1
yield
del sys.pytest_session_data
"""
)
pytester.makepyfile(
foo="""
import sys
def foo():
'''
>>> assert sys.pytest_session_data == 1
'''
def bar():
'''
>>> assert sys.pytest_session_data == 1
'''
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.fnmatch_lines(["*2 passed*"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("enable_doctest", [True, False])
def test_fixture_scopes(self, pytester, scope, enable_doctest):
"""Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
"""
pytester.makeconftest(
f"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
return 99
"""
)
pytester.makepyfile(
test_1='''
def test_foo():
"""
>>> getfixture('auto') + 1
100
"""
def test_bar():
assert 1
'''
)
params = ("--doctest-modules",) if enable_doctest else ()
passes = 3 if enable_doctest else 2
result = pytester.runpytest(*params)
result.stdout.fnmatch_lines([f"*=== {passes} passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
@pytest.mark.parametrize("autouse", [True, False])
@pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
def test_fixture_module_doctest_scopes(
self, pytester, scope, autouse, use_fixture_in_doctest
):
"""Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
"""
pytester.makeconftest(
f"""
import pytest
@pytest.fixture(autouse={autouse}, scope="{scope}")
def auto(request):
return 99
"""
)
if use_fixture_in_doctest:
pytester.maketxtfile(
test_doc="""
>>> getfixture('auto')
99
"""
)
else:
pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = pytester.runpytest("--doctest-modules")
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
def test_auto_use_request_attributes(self, pytester, scope):
"""Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
"""
pytester.makeconftest(
f"""
import pytest
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
if "{scope}" == 'module':
assert request.module is None
if "{scope}" == 'class':
assert request.cls is None
if "{scope}" == 'function':
assert request.function is None
return 99
"""
)
pytester.maketxtfile(
test_doc="""
>>> 1 + 1
2
"""
)
result = pytester.runpytest("--doctest-modules")
        result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", [*SCOPES, "package"])
def test_auto_use_defined_in_same_module(
self, pytester: Pytester, scope: str
) -> None:
"""Autouse fixtures defined in the same module as the doctest get picked
up properly.
Regression test for #11929.
"""
pytester.makepyfile(
f"""
import pytest
AUTO = "the fixture did not run"
@pytest.fixture(autouse=True, scope="{scope}")
def auto(request):
global AUTO
AUTO = "the fixture ran"
def my_doctest():
'''My doctest.
>>> my_doctest()
'the fixture ran'
'''
return AUTO
"""
)
result = pytester.runpytest("--doctest-modules")
result.assert_outcomes(passed=1)
|
TestDoctestAutoUseFixtures
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 371881,
"end": 376435
}
|
class ____(Request):
"""
Reset tasks
:param ids: IDs of the tasks to reset
:type ids: Sequence[str]
:param force: If not true, call fails if the task status is 'completed'
:type force: bool
:param clear_all: Clear script and execution sections completely
:type clear_all: bool
:param return_file_urls: If set to 'true' then return the urls of the files
that were uploaded by the tasks. Default value is 'false'
:type return_file_urls: bool
:param delete_output_models: If set to 'true' then delete output models of the
tasks that are not referenced by other tasks. Default value is 'true'
:type delete_output_models: bool
"""
_service = "tasks"
_action = "reset_many"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"clear_all": {
"default": False,
"description": "Clear script and execution sections completely",
"type": "boolean",
},
"delete_output_models": {
"description": "If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true'",
"type": "boolean",
},
"force": {
"default": False,
"description": "If not true, call fails if the task status is 'completed'",
"type": "boolean",
},
"ids": {
"description": "IDs of the tasks to reset",
"items": {"type": "string"},
"type": "array",
},
"return_file_urls": {
"description": "If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false'",
"type": "boolean",
},
},
"required": ["ids"],
"type": "object",
}
def __init__(
self,
ids: List[str],
force: Optional[bool] = False,
clear_all: Optional[bool] = False,
return_file_urls: Optional[bool] = None,
delete_output_models: Optional[bool] = None,
**kwargs: Any
) -> None:
super(ResetManyRequest, self).__init__(**kwargs)
self.ids = ids
self.force = force
self.clear_all = clear_all
self.return_file_urls = return_file_urls
self.delete_output_models = delete_output_models
@schema_property("ids")
def ids(self) -> List[str]:
return self._property_ids
@ids.setter
def ids(self, value: List[str]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("clear_all")
def clear_all(self) -> Optional[bool]:
return self._property_clear_all
@clear_all.setter
def clear_all(self, value: Optional[bool]) -> None:
if value is None:
self._property_clear_all = None
return
self.assert_isinstance(value, "clear_all", (bool,))
self._property_clear_all = value
@schema_property("return_file_urls")
def return_file_urls(self) -> Optional[bool]:
return self._property_return_file_urls
@return_file_urls.setter
def return_file_urls(self, value: Optional[bool]) -> None:
if value is None:
self._property_return_file_urls = None
return
self.assert_isinstance(value, "return_file_urls", (bool,))
self._property_return_file_urls = value
@schema_property("delete_output_models")
def delete_output_models(self) -> Optional[bool]:
return self._property_delete_output_models
@delete_output_models.setter
def delete_output_models(self, value: Optional[bool]) -> None:
if value is None:
self._property_delete_output_models = None
return
self.assert_isinstance(value, "delete_output_models", (bool,))
self._property_delete_output_models = value
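# A hedged usage sketch: building the tasks.reset_many request from the
# schema above (the task IDs are placeholders; sending the request through
# a session is not shown here).
#
#   req = ResetManyRequest(ids=["task-1", "task-2"], force=True)
#   assert req.force and req.ids == ["task-1", "task-2"]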
|
ResetManyRequest
|
python
|
ray-project__ray
|
python/ray/serve/_private/deployment_scheduler.py
|
{
"start": 9084,
"end": 25036
}
|
class ____(ABC):
"""A centralized scheduler for all Serve deployments.
It makes a batch of scheduling decisions in each update cycle.
"""
def __init__(
self,
cluster_node_info_cache: ClusterNodeInfoCache,
head_node_id: str,
create_placement_group_fn: Callable,
):
# {deployment_id: scheduling_policy}
self._deployments: Dict[DeploymentID, DeploymentSchedulingInfo] = {}
# Replicas that are waiting to be scheduled.
# {deployment_id: {replica_id: deployment_upscale_request}}
self._pending_replicas: Dict[
DeploymentID, Dict[str, ReplicaSchedulingRequest]
] = defaultdict(dict)
# Replicas that are being scheduled.
# The underlying actors have been submitted.
# {deployment_id: {replica_id: target_node_id}}
self._launching_replicas: Dict[
DeploymentID, Dict[str, LaunchingReplicaInfo]
] = defaultdict(dict)
# Replicas that are recovering.
# We don't know where those replicas are running.
# {deployment_id: {replica_id}}
self._recovering_replicas = defaultdict(set)
# Replicas that are running.
# We know where those replicas are running.
# {deployment_id: {replica_id: running_node_id}}
self._running_replicas = defaultdict(dict)
self._cluster_node_info_cache = cluster_node_info_cache
self._head_node_id = head_node_id
self._create_placement_group_fn = create_placement_group_fn
def on_deployment_created(
self,
deployment_id: DeploymentID,
scheduling_policy: SpreadDeploymentSchedulingPolicy,
) -> None:
"""Called whenever a new deployment is created."""
assert deployment_id not in self._pending_replicas
assert deployment_id not in self._launching_replicas
assert deployment_id not in self._recovering_replicas
assert deployment_id not in self._running_replicas
self._deployments[deployment_id] = DeploymentSchedulingInfo(
deployment_id=deployment_id, scheduling_policy=scheduling_policy
)
def on_deployment_deployed(
self,
deployment_id: DeploymentID,
replica_config: ReplicaConfig,
) -> None:
assert deployment_id in self._deployments
info = self._deployments[deployment_id]
info.actor_resources = Resources(replica_config.resource_dict)
info.max_replicas_per_node = replica_config.max_replicas_per_node
if replica_config.placement_group_bundles:
info.placement_group_bundles = [
Resources(bundle) for bundle in replica_config.placement_group_bundles
]
if replica_config.placement_group_strategy:
info.placement_group_strategy = replica_config.placement_group_strategy
def on_deployment_deleted(self, deployment_id: DeploymentID) -> None:
"""Called whenever a deployment is deleted."""
assert not self._pending_replicas[deployment_id]
self._pending_replicas.pop(deployment_id, None)
assert not self._launching_replicas[deployment_id]
self._launching_replicas.pop(deployment_id, None)
assert not self._recovering_replicas[deployment_id]
self._recovering_replicas.pop(deployment_id, None)
assert not self._running_replicas[deployment_id]
self._running_replicas.pop(deployment_id, None)
del self._deployments[deployment_id]
def on_replica_stopping(self, replica_id: ReplicaID) -> None:
"""Called whenever a deployment replica is being stopped."""
deployment_id = replica_id.deployment_id
self._pending_replicas[deployment_id].pop(replica_id, None)
self._launching_replicas[deployment_id].pop(replica_id, None)
self._recovering_replicas[deployment_id].discard(replica_id)
self._running_replicas[deployment_id].pop(replica_id, None)
def on_replica_running(self, replica_id: ReplicaID, node_id: str) -> None:
"""Called whenever a deployment replica is running with a known node id."""
deployment_id = replica_id.deployment_id
assert replica_id not in self._pending_replicas[deployment_id]
self._launching_replicas[deployment_id].pop(replica_id, None)
self._recovering_replicas[deployment_id].discard(replica_id)
self._running_replicas[deployment_id][replica_id] = node_id
def on_replica_recovering(self, replica_id: ReplicaID) -> None:
"""Called whenever a deployment replica is recovering."""
deployment_id = replica_id.deployment_id
assert replica_id not in self._pending_replicas[deployment_id]
assert replica_id not in self._launching_replicas[deployment_id]
assert replica_id not in self._running_replicas[deployment_id]
assert replica_id not in self._recovering_replicas[deployment_id]
self._recovering_replicas[deployment_id].add(replica_id)
def _on_replica_launching(
self,
replica_id: ReplicaID,
target_node_id: Optional[str] = None,
target_labels: Optional[Dict[str, Any]] = None,
):
deployment_id = replica_id.deployment_id
self._launching_replicas[deployment_id][replica_id] = LaunchingReplicaInfo(
target_node_id=target_node_id, target_labels=target_labels
)
def _get_node_to_running_replicas(
self, deployment_id: Optional[DeploymentID] = None
) -> Dict[str, Set[ReplicaID]]:
res = defaultdict(set)
if deployment_id:
for replica_id, node_id in self._running_replicas[deployment_id].items():
res[node_id].add(replica_id)
else:
for _, replicas in self._running_replicas.items():
for replica_id, node_id in replicas.items():
res[node_id].add(replica_id)
return res
def _get_available_resources_per_node(self) -> Dict[str, Resources]:
"""Gets current available resources per node.
This returns a conservative view of the available resources
currently in the cluster. It returns the minimum of:
1. The available resources per node fetched and cached from the
GCS every control loop.
2. The remaining resources left over on each node after
subtracting the resources taken up by running (already
scheduled by core) and launching (to-be-scheduled and soft
targeting that node) replicas.
Note that (1) may not be accurate because it uses cached info
and there is a delay from fetching data from GCS, and (2) may
not be accurate because there can be other actors (not replicas)
running in the cluster, and launching replicas may not end up on
the node we're targeting. So the information returned from this
method is only best effort.
"""
available_resources = (
self._cluster_node_info_cache.get_available_resources_per_node()
)
total_resources = self._cluster_node_info_cache.get_total_resources_per_node()
gcs_info = {node_id: Resources(r) for node_id, r in available_resources.items()}
# Manually calculate available resources per node by subtracting
# launching and running replicas from total resources
total_minus_replicas = {
node_id: Resources(resources)
for node_id, resources in total_resources.items()
}
for deployment_id, replicas in self._launching_replicas.items():
deployment = self._deployments[deployment_id]
for info in replicas.values():
target_node_id = info.target_node_id
if not target_node_id or target_node_id not in total_minus_replicas:
continue
total_minus_replicas[target_node_id] -= deployment.required_resources
for deployment_id, replicas in self._running_replicas.items():
deployment = self._deployments[deployment_id]
for node_id in replicas.values():
if node_id not in total_minus_replicas:
continue
total_minus_replicas[node_id] -= deployment.required_resources
def custom_min(a: Resources, b: Resources):
keys = set(a.keys()) | set(b.keys())
res = Resources()
for key in keys:
res[key] = min(a.get(key), b.get(key))
return res
# Filter by active node ids (alive but not draining)
return {
node_id: custom_min(
gcs_info.get(node_id, Resources()),
total_minus_replicas.get(node_id, Resources()),
)
for node_id in self._cluster_node_info_cache.get_active_node_ids()
}
def _best_fit_node(
self, required_resources: Resources, available_resources: Dict[str, Resources]
) -> Optional[str]:
"""Chooses a node using best fit strategy.
        This strategy picks the node where scheduling the required
        resources would leave the smallest remaining space, which
        minimizes resource fragmentation.
"""
min_remaining_space = None
chosen_node = None
for node_id in available_resources:
if not available_resources[node_id].can_fit(required_resources):
continue
# TODO(zcin): We can make this better by only considering
# custom resources that required_resources has.
remaining_space = available_resources[node_id] - required_resources
if min_remaining_space is None or remaining_space < min_remaining_space:
min_remaining_space = remaining_space
chosen_node = node_id
return chosen_node
@abstractmethod
def schedule(
self,
upscales: Dict[DeploymentID, List[ReplicaSchedulingRequest]],
downscales: Dict[DeploymentID, DeploymentDownscaleRequest],
) -> Dict[DeploymentID, Set[ReplicaID]]:
"""Called for each update cycle to do batch scheduling.
Args:
upscales: a dict of deployment name to a list of replicas to schedule.
downscales: a dict of deployment name to a downscale request.
Returns:
            The IDs of the replicas to stop for each deployment.
"""
raise NotImplementedError
def _schedule_replica(
self,
scheduling_request: ReplicaSchedulingRequest,
default_scheduling_strategy: str,
target_node_id: Optional[str] = None,
target_labels: Optional[LabelMatchExpressionsT] = None,
):
"""Schedule a replica from a scheduling request.
The following special scheduling strategies will be used, in
order of highest to lowest priority.
1. If a replica requires placement groups, we will choose to use
a `PlacementGroupSchedulingStrategy`. This can also take a
target node into consideration (soft target), if provided.
However it cannot take into account target labels.
2. If a `target_node_id` is provided, we will choose to use a
`NodeAffinitySchedulingStrategy`.
3. If `target_labels` is provided, we will choose to use a
`NodeLabelSchedulingStrategy`.
Args:
scheduling_request: A request to schedule a replica.
default_scheduling_strategy: The scheduling strategy to fall
back to if no special scheduling strategy is necessary.
target_node_id: Attempt to schedule this replica onto this
target node.
target_labels: Attempt to schedule this replica onto nodes
with these target labels.
"""
replica_id = scheduling_request.replica_id
deployment_id = replica_id.deployment_id
placement_group = None
scheduling_strategy = default_scheduling_strategy
if scheduling_request.placement_group_bundles is not None:
placement_group_strategy = (
scheduling_request.placement_group_strategy
if scheduling_request.placement_group_strategy
else "PACK"
)
try:
pg = self._create_placement_group_fn(
CreatePlacementGroupRequest(
bundles=scheduling_request.placement_group_bundles,
strategy=placement_group_strategy,
target_node_id=target_node_id,
name=scheduling_request.actor_options["name"],
)
)
except Exception:
# We add a defensive exception here, so the controller can
# make progress even if the placement group isn't created.
# See https://github.com/ray-project/ray/issues/43888.
logger.exception(
f"Failed to create a placement group for {replica_id}."
)
scheduling_request.status = (
ReplicaSchedulingRequestStatus.PLACEMENT_GROUP_CREATION_FAILED
)
return
scheduling_strategy = PlacementGroupSchedulingStrategy(
placement_group=pg,
placement_group_capture_child_tasks=True,
)
target_labels = None
elif target_node_id is not None:
scheduling_strategy = NodeAffinitySchedulingStrategy(
node_id=target_node_id, soft=True, _spill_on_unavailable=True
)
target_labels = None
elif target_labels is not None:
scheduling_strategy = NodeLabelSchedulingStrategy(
hard={}, soft=target_labels
)
target_node_id = None
actor_options = copy.deepcopy(scheduling_request.actor_options)
if scheduling_request.max_replicas_per_node is not None:
if "resources" not in actor_options:
actor_options["resources"] = {}
# Using implicit resource (resources that every node
# implicitly has and total is 1)
# to limit the number of replicas on a single node.
actor_options["resources"][
f"{ray._raylet.IMPLICIT_RESOURCE_PREFIX}"
f"{deployment_id.app_name}:{deployment_id.name}"
] = (1.0 / scheduling_request.max_replicas_per_node)
try:
actor_handle = scheduling_request.actor_def.options(
scheduling_strategy=scheduling_strategy,
**actor_options,
).remote(*scheduling_request.actor_init_args)
except Exception:
# We add a defensive exception here, so the controller can
# make progress even if the actor options are misconfigured.
logger.exception(f"Failed to create an actor for {replica_id}.")
scheduling_request.status = (
ReplicaSchedulingRequestStatus.ACTOR_CREATION_FAILED
)
return
del self._pending_replicas[deployment_id][replica_id]
self._on_replica_launching(
replica_id, target_node_id=target_node_id, target_labels=target_labels
)
if isinstance(scheduling_strategy, PlacementGroupSchedulingStrategy):
placement_group = scheduling_strategy.placement_group
scheduling_request.status = ReplicaSchedulingRequestStatus.SUCCEEDED
scheduling_request.on_scheduled(actor_handle, placement_group=placement_group)
@abstractmethod
def get_node_to_compact(
self, allow_new_compaction: bool
) -> Optional[Tuple[str, float]]:
"""Returns a node ID to be compacted and a compaction deadlne."""
raise NotImplementedError
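# Illustrative sketch of the best-fit strategy above (assuming Resources
# behaves like the comparable resource vector used in _best_fit_node):
# with available = {"n1": Resources({"CPU": 4}), "n2": Resources({"CPU": 2})}
# and required = Resources({"CPU": 2}), best fit picks "n2", since scheduling
# there leaves the smaller remainder and hence less fragmentation.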
|
DeploymentScheduler
|
python
|
PrefectHQ__prefect
|
tests/test_flow_engine.py
|
{
"start": 75421,
"end": 79943
}
|
class ____:
async def test_no_lease_renewal_sync(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
mock_maintain_concurrency_lease = MagicMock()
monkeypatch.setattr(
"prefect.flow_engine.maintain_concurrency_lease",
mock_maintain_concurrency_lease,
)
@flow
def foo():
return 42
flow_id = await prefect_client.create_flow(foo)
# No limit, no lease
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name=f"test_lease_renewal_{uuid.uuid4()}",
)
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert flow_run.state.is_scheduled()
state = await propose_state(prefect_client, states.Pending(), flow_run.id)
assert state.is_pending()
run_flow(foo, flow_run)
mock_maintain_concurrency_lease.assert_not_called()
async def test_no_lease_renewal_async(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
mock_maintain_concurrency_lease = MagicMock()
        mock_maintain_concurrency_lease.return_value.__aenter__ = AsyncMock()
        mock_maintain_concurrency_lease.return_value.__aexit__ = AsyncMock()
monkeypatch.setattr(
"prefect.flow_engine.amaintain_concurrency_lease",
mock_maintain_concurrency_lease,
)
@flow
async def foo():
return 42
flow_id = await prefect_client.create_flow(foo)
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name=f"test_lease_renewal_{uuid.uuid4()}",
)
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert flow_run.state.is_scheduled()
state = await propose_state(prefect_client, states.Pending(), flow_run.id)
assert state.is_pending()
await run_flow(foo, flow_run)
mock_maintain_concurrency_lease.assert_not_called()
async def test_lease_renewal_sync(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
mock_maintain_concurrency_lease = MagicMock()
monkeypatch.setattr(
"prefect.flow_engine.maintain_concurrency_lease",
mock_maintain_concurrency_lease,
)
@flow
def foo():
return 42
flow_id = await prefect_client.create_flow(foo)
# Lease is created for the limit server-side
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name=f"test_lease_renewal_{uuid.uuid4()}",
concurrency_limit=1,
)
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert flow_run.state.is_scheduled()
state = await propose_state(prefect_client, states.Pending(), flow_run.id)
assert state.is_pending()
run_flow(foo, flow_run)
mock_maintain_concurrency_lease.assert_called_once_with(
ANY, 300, raise_on_lease_renewal_failure=True
)
async def test_lease_renewal_async(
self, prefect_client: PrefectClient, monkeypatch: pytest.MonkeyPatch
):
mock_maintain_concurrency_lease = MagicMock()
        mock_maintain_concurrency_lease.return_value.__aenter__ = AsyncMock()
        mock_maintain_concurrency_lease.return_value.__aexit__ = AsyncMock()
monkeypatch.setattr(
"prefect.flow_engine.amaintain_concurrency_lease",
mock_maintain_concurrency_lease,
)
@flow
async def foo():
return 42
flow_id = await prefect_client.create_flow(foo)
# Lease is created for the limit server-side
deployment_id = await prefect_client.create_deployment(
flow_id=flow_id,
name=f"test_lease_renewal_{uuid.uuid4()}",
concurrency_limit=1,
)
flow_run = await prefect_client.create_flow_run_from_deployment(deployment_id)
assert flow_run.state.is_scheduled()
state = await propose_state(prefect_client, states.Pending(), flow_run.id)
assert state.is_pending()
await run_flow(foo, flow_run)
mock_maintain_concurrency_lease.assert_called_once_with(
ANY, 300, raise_on_lease_renewal_failure=True
)
|
TestLeaseRenewal
|
python
|
walkccc__LeetCode
|
solutions/1660. Correct a Binary Tree/1660.py
|
{
"start": 0,
"end": 388
}
|
class ____:
def __init__(self):
self.seen = set()
    def correctBinaryTree(self, root: TreeNode | None) -> TreeNode | None:
        if root is None:
            return None
        # Visit right subtrees before left ones, so the node the defect
        # points to (always to the invalid node's right) is already in
        # `seen` when the invalid node is reached; returning None removes
        # the invalid node together with its subtree.
        if root.right and root.right.val in self.seen:
            return None
        self.seen.add(root.val)
        root.right = self.correctBinaryTree(root.right)
        root.left = self.correctBinaryTree(root.left)
        return root
|
Solution
|
python
|
pyca__cryptography
|
tests/x509/verification/test_verification.py
|
{
"start": 4192,
"end": 6633
}
|
class ____:
def test_build_client_verifier_missing_store(self):
with pytest.raises(
ValueError, match="A client verifier must have a trust store"
):
PolicyBuilder().build_client_verifier()
def test_verify(self):
# expires 2018-11-16 01:15:03 UTC
leaf = _load_cert(
os.path.join("x509", "cryptography.io.pem"),
x509.load_pem_x509_certificate,
)
store = Store([leaf])
validation_time = datetime.datetime.fromisoformat(
"2018-11-16T00:00:00+00:00"
)
max_chain_depth = 16
builder = PolicyBuilder().store(store)
builder = builder.time(validation_time).max_chain_depth(
max_chain_depth
)
verifier = builder.build_client_verifier()
assert verifier.policy.subject is None
assert verifier.policy.validation_time == validation_time.replace(
tzinfo=None
)
assert verifier.policy.max_chain_depth == max_chain_depth
assert (
verifier.policy.extended_key_usage
== ExtendedKeyUsageOID.CLIENT_AUTH
)
assert (
verifier.policy.minimum_rsa_modulus == WEBPKI_MINIMUM_RSA_MODULUS
)
assert verifier.store is store
verified_client = verifier.verify(leaf, [])
assert verified_client.chain == [leaf]
assert verified_client.subjects is not None
assert x509.DNSName("www.cryptography.io") in verified_client.subjects
assert x509.DNSName("cryptography.io") in verified_client.subjects
assert len(verified_client.subjects) == 2
def test_verify_fails_renders_oid(self):
leaf = _load_cert(
os.path.join("x509", "custom", "ekucrit-testuser-cert.pem"),
x509.load_pem_x509_certificate,
)
store = Store([leaf])
validation_time = datetime.datetime.fromisoformat(
"2024-06-26T00:00:00+00:00"
)
builder = PolicyBuilder().store(store)
builder = builder.time(validation_time)
verifier = builder.build_client_verifier()
pattern = (
r"invalid extension: 2\.5\.29\.37: "
r"Certificate extension has incorrect criticality"
)
with pytest.raises(
VerificationError,
match=pattern,
):
verifier.verify(leaf, [])
|
TestClientVerifier
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_excel2003_style06.py
|
{
"start": 315,
"end": 1171
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("excel2003_style06.xlsx")
self.ignore_elements = {
"xl/drawings/drawing1.xml": [
"<xdr:cNvPr",
"<a:picLocks",
"<a:srcRect/>",
"<xdr:spPr",
"<a:noFill/>",
]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename, {"excel2003_style": True})
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"B3", self.image_dir + "red.jpg", {"x_offset": 4, "y_offset": 3}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django__django
|
tests/template_tests/syntax_tests/test_for.py
|
{
"start": 13096,
"end": 13447
}
|
class ____(SimpleTestCase):
def test_repr(self):
node = ForNode(
"x",
"sequence",
is_reversed=True,
nodelist_loop=["val"],
nodelist_empty=["val2"],
)
self.assertEqual(
repr(node), "<ForNode: for x in sequence, tail_len: 1 reversed>"
)
|
ForNodeTests
|
python
|
has2k1__plotnine
|
tests/test_geom_ribbon_area.py
|
{
"start": 3008,
"end": 4145
}
|
class ____:
x = np.arange(10)
d = 5
data = pd.DataFrame(
{"x": x, "y1": x, "y2": x + 2 * d, "y3": x + 4 * d, "y4": x + 6 * d}
)
p = (
ggplot(data, aes("x", ymax=after_stat("ymin + d")))
+ geom_ribbon(
aes(ymin="y1"),
size=1,
fill="bisque",
color="orange",
outline_type="upper",
)
+ geom_ribbon(
aes(ymin="y2"),
size=1,
fill="khaki",
color="darkkhaki",
outline_type="lower",
)
+ geom_ribbon(
aes(ymin="y3"),
size=1,
fill="plum",
color="purple",
outline_type="both",
)
+ geom_ribbon(
aes(ymin="y4"),
size=1,
fill="lightblue",
color="cadetblue",
outline_type="full",
)
)
def test_ribbon_outline_type(self):
assert self.p == "ribbon_outline_type"
def test_ribbon_outline_type_coord_flip(self):
assert self.p + coord_flip() == "ribbon_outline_type_coord_flip"
|
TestOutlineType
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/components/v2/test_bidi_component.py
|
{
"start": 8834,
"end": 12327
}
|
class ____(DeltaGeneratorTestCase):
"""Validate bi-directional component mixin behavior.
This suite verifies:
- Parsing of ``on_<event>_change`` kwargs into an event-to-callback mapping
- Registration of the per-run aggregator trigger widget with
``value_type`` equal to ``"json_trigger_value"``
- ``BidiComponentResult`` exposes event keys and merges persistent state
with trigger values
- Callbacks and widget metadata are correctly stored in ``SessionState``
for the current run
"""
def setUp(self):
super().setUp()
# Create and inject a fresh component manager for each test run
self.component_manager = BidiComponentManager()
runtime = Runtime.instance()
if runtime is None:
raise RuntimeError("Runtime.instance() returned None in test setup.")
runtime.bidi_component_registry = self.component_manager
# ---------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------
def _register_dummy_component(self, name: str = "dummy") -> None:
self.component_manager.register(
BidiComponentDefinition(name=name, js="console.log('hi');")
)
# ---------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------
def test_event_callback_parsing_and_trigger_widget_registration(self):
"""Providing ``on_click_change`` should register a trigger widget."""
self._register_dummy_component()
on_click_cb = MagicMock(name="on_click_cb")
on_hover_cb = MagicMock(name="on_hover_cb")
# Act
result = st._bidi_component(
"dummy",
on_click_change=on_click_cb,
on_hover_change=on_hover_cb,
)
# ------------------------------------------------------------------
# Assert - return type & merged keys
# ------------------------------------------------------------------
assert isinstance(result, BidiComponentResult)
# No state set yet, but we expect trigger keys to exist with None
assert "click" in result
assert result.click is None
assert "hover" in result
assert result.hover is None
# ------------------------------------------------------------------
# Assert - trigger widget metadata
# ------------------------------------------------------------------
ctx = get_script_run_ctx()
assert ctx is not None, "ScriptRunContext missing in test"
# Compute expected aggregator trigger id
base_id = next(
wid
for wid in ctx.widget_ids_this_run
if wid.startswith("$$ID") and EVENT_DELIM not in wid
)
aggregator_id = _make_trigger_id(base_id, "events")
# Access internal SessionState to retrieve widget metadata.
internal_state = ctx.session_state._state # SessionState instance
metadata_aggregator = internal_state._new_widget_state.widget_metadata[
aggregator_id
]
assert metadata_aggregator.value_type == "json_trigger_value"
# The callbacks must be wired by event name in metadata
assert metadata_aggregator.callbacks == {
"click": on_click_cb,
"hover": on_hover_cb,
}
|
BidiComponentMixinTest
|
python
|
pyca__cryptography
|
tests/x509/test_x509.py
|
{
"start": 257976,
"end": 259851
}
|
class ____:
def test_eq(self):
attr1 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
)
attr2 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
)
assert attr1 == attr2
def test_ne(self):
attr1 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
)
attr2 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
_ASN1Type.IA5String.value,
)
attr3 = x509.Attribute(
x509.oid.AttributeOID.UNSTRUCTURED_NAME,
b"value",
)
attr4 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"other value",
)
assert attr1 != attr2
assert attr1 != attr3
assert attr1 != attr4
assert attr1 != object()
def test_repr(self):
attr1 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
)
assert repr(attr1) == (
"<Attribute(oid=<ObjectIdentifier(oid=1.2.840.113549.1.9.7, name="
"challengePassword)>, value=b'value')>"
)
def test_hash(self):
attr1 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
_ASN1Type.UTF8String.value,
)
attr2 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
_ASN1Type.UTF8String.value,
)
attr3 = x509.Attribute(
x509.oid.AttributeOID.CHALLENGE_PASSWORD,
b"value",
_ASN1Type.IA5String.value,
)
assert hash(attr1) == hash(attr2)
assert hash(attr1) != hash(attr3)
|
TestAttribute
|
python
|
kamyu104__LeetCode-Solutions
|
Python/single-number-ii.py
|
{
"start": 50,
"end": 307
}
|
class ____(object):
# @param A, a list of integer
# @return an integer
def singleNumber(self, A):
        # `one` keeps the bits whose running count mod 3 is 1 and `two`
        # the bits whose count mod 3 is 2; a third occurrence clears a
        # bit from both, so only the single number's bits survive in `one`.
        one, two = 0, 0
        for x in A:
            one, two = (~x & one) | (x & ~one & ~two), (~x & two) | (x & one)
return one
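# Worked example (a sketch, not part of the original solution): every bit
# seen a multiple of three times cancels out of (`one`, `two`), so for
# A = [2, 2, 3, 2] the loop ends with one == 3.
#
#   assert Solution().singleNumber([2, 2, 3, 2]) == 3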
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/util_test.py
|
{
"start": 13138,
"end": 13917
}
|
class ____(test.TestCase):
def testLogCombinationsBinomial(self):
n = [2, 5, 12, 15]
k = [1, 2, 4, 11]
if not special:
return
log_combs = np.log(special.binom(n, k))
n = np.array(n, dtype=np.float32)
counts = [[1., 1], [2., 3], [4., 8], [11, 4]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([4], log_binom.get_shape())
self.assertAllClose(log_combs, self.evaluate(log_binom))
def testLogCombinationsShape(self):
# Shape [2, 2]
n = [[2, 5], [12, 15]]
n = np.array(n, dtype=np.float32)
# Shape [2, 2, 4]
counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]
log_binom = du.log_combinations(n, counts)
self.assertEqual([2, 2], log_binom.get_shape())
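# An illustrative aside (not part of the original test): for the multinomial
# coefficient C(5; 2, 3) = 5!/(2!*3!) = 10, du.log_combinations(5., [2., 3.])
# should evaluate to approximately log(10) ~= 2.3026.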
|
LogCombinationsTest
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_execute.py
|
{
"start": 9465,
"end": 13609
}
|
class ____(fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
cls.table = Table(
"exec_test",
metadata,
Column("a", Integer),
Column("b", Integer),
test_needs_acid=True,
)
def _trans_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
return go
def _trans_rollback_fn(self, is_transaction=False):
def go(conn, x, value=None):
if is_transaction:
conn = conn.connection
conn.execute(self.table.insert().values(a=x, b=value))
raise SomeException("breakage")
return go
def _assert_no_data(self):
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count("*")).select_from(self.table)),
0,
)
def _assert_fn(self, x, value=None):
with testing.db.connect() as conn:
eq_(conn.execute(self.table.select()).fetchall(), [(x, value)])
def test_transaction_engine_ctx_commit(self):
fn = self._trans_fn()
ctx = testing.db.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_engine_ctx_begin_fails_dont_enter_enter(self):
"""test #7272"""
engine = engines.testing_engine()
mock_connection = Mock(
return_value=Mock(begin=Mock(side_effect=Exception("boom")))
)
with mock.patch.object(engine, "_connection_cls", mock_connection):
# context manager isn't entered, doesn't actually call
# connect() or connection.begin()
engine.begin()
eq_(mock_connection.return_value.close.mock_calls, [])
def test_transaction_engine_ctx_begin_fails_include_enter(self):
"""test #7272
Note this behavior for 2.0 required that we add a new flag to
Connection _allow_autobegin=False, so that the first-connect
initialization sequence in create.py does not actually run begin()
        events. Previously, the initialize sequence used a future=False
connection unconditionally (and I didn't notice this).
"""
engine = engines.testing_engine()
close_mock = Mock()
with (
mock.patch.object(
engine._connection_cls,
"begin",
Mock(side_effect=Exception("boom")),
),
mock.patch.object(engine._connection_cls, "close", close_mock),
):
with expect_raises_message(Exception, "boom"):
with engine.begin():
pass
eq_(close_mock.mock_calls, [call()])
def test_transaction_engine_ctx_rollback(self):
fn = self._trans_rollback_fn()
ctx = testing.db.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_transaction_connection_ctx_commit(self):
fn = self._trans_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
testing.run_as_contextmanager(ctx, fn, 5, value=8)
self._assert_fn(5, value=8)
def test_transaction_connection_ctx_rollback(self):
fn = self._trans_rollback_fn(True)
with testing.db.connect() as conn:
ctx = conn.begin()
assert_raises_message(
Exception,
"breakage",
testing.run_as_contextmanager,
ctx,
fn,
5,
value=8,
)
self._assert_no_data()
def test_connection_as_ctx(self):
fn = self._trans_fn()
with testing.db.begin() as conn:
fn(conn, 5, value=8)
self._assert_fn(5, value=8)
|
ConvenienceExecuteTest
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 208603,
"end": 213170
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[4, 3]", L_y_: "f32[3, 4]"):
l_x_ = L_x_
l_y_ = L_y_
tensor: "i64[1]" = torch.tensor((12,))
cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
neg: "i64[0]" = getitem.neg(); getitem = None
unbind = neg.unbind(); neg = unbind = None
chunk: "f32[12, 12]" = l_x_.new_zeros(12, 12)
diagonal: "f32[12]" = chunk.diagonal(0)
fill_: "f32[12]" = diagonal.fill_(1); diagonal = fill_ = None
child: "f32[12, 4, 3]" = chunk.view(12, 4, 3); chunk = None
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(12, 'same'); _vmap_increment_nesting = None
child_1: "f32[4, 3]" = torch._functorch.predispatch._add_batch_dim(child, 0, 1); child = None
_jvp_increment_nesting = torch._C._functorch._jvp_increment_nesting(); _jvp_increment_nesting = None
_set_fwd_grad_enabled = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled = None
_enter_dual_level = torch._C._enter_dual_level(); _enter_dual_level = None
_maybe_load_decompositions = torch.autograd.forward_ad._maybe_load_decompositions(); _maybe_load_decompositions = None
child_3: "f32[4, 3]" = torch._make_dual(l_x_, child_1, level = 0); child_1 = None
_wrap_for_grad: "f32[4, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 2); l_x_ = _wrap_for_grad = None
_wrap_for_grad_1: "f32[3, 4]" = torch._C._functorch._wrap_for_grad(l_y_, 2); l_y_ = None
child_2: "f32[3, 4]" = _wrap_for_grad_1.sin(); _wrap_for_grad_1 = None
_unpack_dual = torch._unpack_dual(child_2, level = 0); child_2 = None
primal: "f32[3, 4]" = _unpack_dual[0]; _unpack_dual = None
tangent: "f32[3, 4]" = torch.zeros_like(primal)
_unpack_dual_1 = torch._unpack_dual(child_3, level = 0); child_3 = None
primal_1: "f32[4, 3]" = _unpack_dual_1[0]
dual: "f32[4, 3]" = _unpack_dual_1[1]; _unpack_dual_1 = None
child_4: "f32[3, 4]" = torch._C._functorch._unwrap_for_grad(primal, 2); primal = child_4 = None
child_5: "f32[4, 3]" = torch._C._functorch._unwrap_for_grad(primal_1, 2); primal_1 = child_5 = None
child_6: "f32[3, 4]" = torch._C._functorch._unwrap_for_grad(tangent, 2); tangent = None
child_7: "f32[4, 3]" = torch._C._functorch._unwrap_for_grad(dual, 2); dual = None
_exit_dual_level = torch._C._exit_dual_level(0); _exit_dual_level = None
_set_fwd_grad_enabled_1 = torch._C._set_fwd_grad_enabled(True); _set_fwd_grad_enabled_1 = None
_jvp_decrement_nesting = torch._C._functorch._jvp_decrement_nesting(); _jvp_decrement_nesting = None
child_8: "f32[12, 3, 4]" = torch._functorch.predispatch._remove_batch_dim(child_6, 1, 12, 0); child_6 = None
child_9: "f32[12, 4, 3]" = torch._functorch.predispatch._remove_batch_dim(child_7, 1, 12, 0); child_7 = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
movedim: "f32[3, 4, 12]" = child_8.movedim(0, -1); child_8 = None
split = movedim.split((12,), dim = -1); movedim = None
jac_out_in: "f32[3, 4, 12]" = split[0]; split = None
unflatten: "f32[3, 4, 4, 3]" = jac_out_in.unflatten(-1, (4, 3)); jac_out_in = None
movedim_1: "f32[4, 3, 12]" = child_9.movedim(0, -1); child_9 = None
split_1 = movedim_1.split((12,), dim = -1); movedim_1 = None
jac_out_in_1: "f32[4, 3, 12]" = split_1[0]; split_1 = None
unflatten_1: "f32[4, 3, 4, 3]" = jac_out_in_1.unflatten(-1, (4, 3)); jac_out_in_1 = None
return (unflatten, unflatten_1)
""",
)
def test_jvp_simple(self):
counters.clear()
def fn(x):
return x.sin().sum()
def wrapper_fn(x, v):
return torch.func.jvp(fn, (x,), (v,))
x = torch.randn(3, 3)
v = torch.randn(3, 3)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
pytorch__pytorch
|
test/inductor/test_provenance_tracing.py
|
{
"start": 20731,
"end": 32113
}
|
class ____(TestCase):
@contextlib.contextmanager
def _setup_provenance_capture(self):
"""Helper to turn on and capture the 'inductor_tlparse_runtime' structured trace."""
payload_buffer = io.StringIO()
payload_handler = logging.StreamHandler(payload_buffer)
payload_handler.setLevel(logging.DEBUG)
payload_handler.setFormatter(StructuredTracePayloadFormatter())
payload_handler.addFilter(ProvenanceArtifactFilter())
trace_log.addHandler(payload_handler)
try:
yield payload_buffer
finally:
trace_log.removeHandler(payload_handler)
def extract_code_line(self, s, i=-2):
# Extract ith line
return s.split("\n")[i].strip()
@torch._inductor.config.patch({"trace.provenance_tracking_level": 2})
@requires_gpu_and_triton
def test_tlparse_kernel_stack_traces(self):
device = GPU_TYPE
model = Model4().to(device)
x = torch.randn(8, 10).to(device)
a = torch.randn(10, 20).to(device)
b = torch.randn(20, 30).to(device)
c = torch.randn(10, 30).to(device)
example_inputs = (x, a, b, c)
expected = {
"triton_poi_fused_addmm_relu_sigmoid_threshold_backward_0:2": [
"x = self.sigmoid(x)",
"x = self.fc1(x)",
"x = self.relu(x)",
],
"triton_poi_fused_mul_1:3": [
"d = a * 3.14",
],
"triton_poi_fused_addmm_gelu_2:5": [
"z = torch.nn.functional.gelu(y)",
"y = torch.addmm(c, d, b)",
],
"extern_kernels.mm:1": [
"x = self.fc1(x)",
],
"extern_kernels.mm:4": [
"y = torch.addmm(c, d, b)",
],
}
compiled = torch.compile(model)
# should produce the same provenance if there's cache hit
for _ in range(2):
# reset cache
torch._dynamo.reset()
reset_inductor_kernel_provenance_debug_handle()
with self._setup_provenance_capture() as payload_buffer:
compiled = torch.compile(model)
compiled(*example_inputs)
payload_content = payload_buffer.getvalue().strip()
data = json.loads(payload_content)
self.assertEqual(set(data.keys()), set(expected.keys()))
for key, expected_lines in expected.items():
actual_lines = [self.extract_code_line(s) for s in data[key]]
self.assertEqual(
sorted(actual_lines),
sorted(expected_lines),
f"Mismatch for key: {key}",
)
@torch._inductor.config.patch(
{"trace.provenance_tracking_level": 2, "max_autotune_gemm_backends": "ATEN"}
)
@requires_cuda_and_triton
def test_deferred_triton_kernels(self):
def foo(m, inp):
a = m(inp)
return a
foo_c = torch.compile(mode="max-autotune-no-cudagraphs")(foo)
m = torch.nn.Linear(512, 512, bias=True).half().cuda()
inp = torch.rand([1, 512]).half().cuda()
with self._setup_provenance_capture() as payload_buffer:
with torch.no_grad():
_, out_code = run_and_get_code(foo_c, m, inp)
payload_content = payload_buffer.getvalue().strip()
data = json.loads(payload_content)
self.assertTrue("a = m(inp)" in str(data))
# Check that debug handle is in the output code
FileCheck().check("Topologically Sorted Source Nodes: [a]").check(
"[Provenance debug handles]"
).run(out_code[0])
def _check_kernel_information_json(self, kernel_info, expected_kernels):
"""Validate kernel information JSON structure and content."""
self.assertIsInstance(kernel_info, dict)
for expected in expected_kernels:
self.assertIn(
expected,
kernel_info,
f"Expected kernel {expected} not found in {list(kernel_info)}",
)
for data in kernel_info.values():
self.assertIsInstance(data, dict)
for field in ["stack_traces", "post_grad_nodes", "pre_grad_nodes"]:
self.assertIn(field, data)
self.assertIsInstance(data[field], list)
for item in data[field]:
self.assertIsInstance(item, str)
@requires_gpu_and_triton
@torch._inductor.config.patch("trace.provenance_tracking_level", 1)
def test_kernel_information_generation(self):
"""Test basic kernel information generation in AOTI packages."""
model = Model4().to(GPU_TYPE)
x = torch.randn(8, 10, device=GPU_TYPE)
a = torch.randn(10, 20, device=GPU_TYPE)
b = torch.randn(20, 30, device=GPU_TYPE)
c = torch.randn(10, 30, device=GPU_TYPE)
inputs = (x, a, b, c)
with tempfile.TemporaryDirectory() as temp_dir:
ep = torch.export.export(model, inputs, strict=False)
pt2_file = os.path.join(temp_dir, "model.pt2")
reset_inductor_kernel_provenance_debug_handle()
torch._inductor.aoti_compile_and_package(ep, package_path=pt2_file)
# Extract and check kernel_information.json exists in the package
with zipfile.ZipFile(pt2_file, "r") as zip_ref:
zip_ref.extractall(temp_dir)
json_path = os.path.join(
temp_dir,
"model",
"data",
"aotinductor",
"model",
"kernel_information.json",
)
self.assertTrue(
os.path.exists(json_path),
f"kernel_information.json not found in extracted package at {json_path}",
)
with open(json_path) as f:
kernel_info = json.load(f)
expected = {
"triton_poi_fused_addmm_relu_sigmoid_0:2": {
"stack_traces": [
"x = self.sigmoid(x)",
"x = self.fc1(x)",
"x = self.relu(x)",
],
"post_grad_nodes": ["sigmoid", "relu", "add_tensor_1"],
"pre_grad_nodes": ["sigmoid", "relu", "linear"],
},
"triton_poi_fused_mul_1:3": {
"stack_traces": [
"d = a * 3.14",
],
"post_grad_nodes": ["mul"],
"pre_grad_nodes": ["mul"],
},
"triton_poi_fused_addmm_gelu_2:5": {
"stack_traces": [
"z = torch.nn.functional.gelu(y)",
"y = torch.addmm(c, d, b)",
],
"post_grad_nodes": [
"mul_3",
"mul_1",
"add_tensor",
"add",
"erf",
"mul_2",
],
"pre_grad_nodes": ["gelu", "addmm"],
},
f"aoti_torch_{GPU_TYPE}_mm_out:1": {
"stack_traces": [
"x = self.fc1(x)",
],
"post_grad_nodes": ["mm_default_1"],
"pre_grad_nodes": ["linear"],
},
f"aoti_torch_{GPU_TYPE}_mm_out:4": {
"stack_traces": [
"y = torch.addmm(c, d, b)",
],
"post_grad_nodes": ["mm_default"],
"pre_grad_nodes": ["addmm"],
},
}
self._check_kernel_information_json(kernel_info, expected.keys())
self.assertEqual(set(kernel_info.keys()), set(expected.keys()))
for key, data in expected.items():
all_lines = ",".join(kernel_info[key]["stack_traces"])
for s in data["stack_traces"]:
self.assertTrue(s in all_lines)
self.assertEqual(
sorted(kernel_info[key]["pre_grad_nodes"]),
sorted(data["pre_grad_nodes"]),
f"Mismatch for key: {key}",
)
self.assertEqual(
sorted(kernel_info[key]["post_grad_nodes"]),
sorted(data["post_grad_nodes"]),
f"Mismatch for key: {key}",
)
@torch._inductor.config.patch("trace.provenance_tracking_level", 0)
def test_no_kernel_information_without_provenance_tracking(self):
"""Test that kernel_information.json is not generated without provenance tracking."""
class SimpleModel(torch.nn.Module):
def forward(self, x):
return x * 2.0
model = SimpleModel()
x = torch.randn(4, 8)
# Compile with AOTI but without provenance tracking
with tempfile.TemporaryDirectory() as temp_dir:
ep = torch.export.export(model, (x,), strict=False)
pt2_file = os.path.join(temp_dir, "model.pt2")
torch._inductor.aoti_compile_and_package(ep, package_path=pt2_file)
# Extract and check kernel_information.json was NOT created in the package
extract_dir = os.path.join(temp_dir, "extracted")
os.makedirs(extract_dir, exist_ok=True)
with zipfile.ZipFile(pt2_file, "r") as zip_ref:
zip_ref.extractall(extract_dir)
expected_json_path = os.path.join(extract_dir, "kernel_information.json")
self.assertFalse(
os.path.exists(expected_json_path),
"kernel_information.json should not exist in package when provenance tracking is disabled",
)
def test_create_kernel_information_json_function(self):
"""Test the create_kernel_information_json function directly."""
# Test with empty state
result = create_kernel_information_json()
self.assertIsInstance(result, dict)
self.assertEqual(len(result), 0) # Should be empty with no provenance data
@unittest.skipIf(
IS_MACOS,
"MacOS generates different debug handles",
)
@torch._inductor.config.patch("trace.provenance_tracking_level", 1)
def test_cpu_extern_kernel(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(16, 33, 3)
def forward(self, x):
return self.conv(x)
model = Foo()
x = torch.randn(20, 16, 50, 100)
with self._setup_provenance_capture() as payload_buffer:
reset_inductor_kernel_provenance_debug_handle()
ep = torch.export.export(model, (x,))
torch._inductor.aoti_compile_and_package(ep)
payload_content = payload_buffer.getvalue().strip()
data = json.loads(payload_content)
keys = [k.split(":")[0] for k in data]
self.assertTrue("aoti_torch_cpu_convolution" in keys)
|
TestProvenanceTracingStackTraces
|
python
|
optuna__optuna
|
optuna/samplers/_tpe/parzen_estimator.py
|
{
"start": 1055,
"end": 1370
}
|
class ____(NamedTuple):
prior_weight: float
consider_magic_clip: bool
consider_endpoints: bool
weights: Callable[[int], np.ndarray]
multivariate: bool
categorical_distance_func: dict[
str, Callable[[CategoricalChoiceType, CategoricalChoiceType], float]
]
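# A hedged construction sketch (the values below are illustrative
# placeholders, not defaults used by Optuna itself):
#
#   params = _ParzenEstimatorParameters(
#       prior_weight=1.0,
#       consider_magic_clip=True,
#       consider_endpoints=False,
#       weights=lambda n: np.ones(n),
#       multivariate=False,
#       categorical_distance_func={},
#   )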
|
_ParzenEstimatorParameters
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-airtable/llama_index/readers/airtable/base.py
|
{
"start": 178,
"end": 905
}
|
class ____(BaseReader):
"""
Airtable reader. Reads data from a table in a base.
Args:
api_key (str): Airtable API key.
"""
def __init__(self, api_key: str) -> None:
"""Initialize Airtable reader."""
self.api_key = api_key
def load_data(self, base_id: str, table_id: str) -> List[Document]:
"""
Load data from a table in a base.
Args:
table_id (str): Table ID.
base_id (str): Base ID.
Returns:
List[Document]: List of documents.
"""
table = Table(self.api_key, base_id, table_id)
all_records = table.all()
return [Document(text=f"{all_records}", extra_info={})]
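# Usage sketch (the API key and IDs below are placeholders, not real
# Airtable identifiers):
#
#   reader = AirtableReader(api_key="YOUR_API_KEY")
#   docs = reader.load_data(base_id="appXXXXXXXXXXXXXX", table_id="tblXXXXXXXXXXXXXX")
#   print(docs[0].text)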
|
AirtableReader
|
python
|
charliermarsh__ruff
|
crates/ty_python_semantic/resources/corpus/74_class_kwargs_2.py
|
{
"start": 0,
"end": 31
}
|
class ____(int, x=42):
pass
|
Foo
|
python
|
openai__openai-python
|
src/openai/types/responses/response_mcp_call_failed_event.py
|
{
"start": 203,
"end": 582
}
|
class ____(BaseModel):
item_id: str
"""The ID of the MCP tool call item that failed."""
output_index: int
"""The index of the output item that failed."""
sequence_number: int
"""The sequence number of this event."""
type: Literal["response.mcp_call.failed"]
"""The type of the event. Always 'response.mcp_call.failed'."""
|
ResponseMcpCallFailedEvent
|
python
|
faif__python-patterns
|
patterns/behavioral/catalog.py
|
{
"start": 220,
"end": 1368
}
|
class ____:
"""catalog of multiple static methods that are executed depending on an init parameter
"""
def __init__(self, param: str) -> None:
# dictionary that will be used to determine which static method is
# to be executed but that will be also used to store possible param
# value
self._static_method_choices = {
"param_value_1": self._static_method_1,
"param_value_2": self._static_method_2,
}
# simple test to validate param value
        if param in self._static_method_choices:
self.param = param
else:
raise ValueError(f"Invalid Value for Param: {param}")
@staticmethod
def _static_method_1() -> str:
return "executed method 1!"
@staticmethod
def _static_method_2() -> str:
return "executed method 2!"
def main_method(self) -> str:
"""will execute either _static_method_1 or _static_method_2
depending on self.param value
"""
return self._static_method_choices[self.param]()
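# Usage sketch for the dictionary dispatch above:
#
#   catalog = Catalog("param_value_2")
#   assert catalog.main_method() == "executed method 2!"
#   Catalog("no_such_param")  # raises ValueError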
# Alternative implementation for different levels of methods
|
Catalog
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 31063,
"end": 31989
}
|
class ____(Node):
__slots__ = ('loc', 'name', 'directives',)
_fields = ('name',)
def __init__(self, name, loc=None, directives=None):
self.loc = loc
self.name = name
self.directives = directives
def __eq__(self, other):
return (
self is other or (
isinstance(other, EnumValueDefinition) and
# self.loc == other.loc and
self.name == other.name and
self.directives == other.directives
)
)
def __repr__(self):
return ('EnumValueDefinition('
'name={self.name!r}'
', directives={self.directives!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.name,
self.loc,
self.directives,
)
def __hash__(self):
return id(self)
|
EnumValueDefinition
|
python
|
walkccc__LeetCode
|
solutions/2323. Find Minimum Time to Finish All Jobs II/2323.py
|
{
"start": 0,
"end": 239
}
|
class ____:
def minimumTime(self, jobs: list[int], workers: list[int]) -> int:
ans = 0
jobs.sort()
workers.sort()
for job, worker in zip(jobs, workers):
ans = max(ans, (job - 1) // worker + 1)
return ans
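# Worked example (illustrative): jobs = [5, 2, 8] and workers = [1, 10, 2]
# sort to [2, 5, 8] and [1, 2, 10]; the per-pair day counts are
# ceil(2/1) = 2, ceil(5/2) = 3 and ceil(8/10) = 1, so the answer is 3.
#
#   assert Solution().minimumTime([5, 2, 8], [1, 10, 2]) == 3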
|
Solution
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 108761,
"end": 109431
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"enterprise_id",
"invitee",
"email",
"role",
"client_mutation_id",
)
enterprise_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="enterpriseId"
)
invitee = sgqlc.types.Field(String, graphql_name="invitee")
email = sgqlc.types.Field(String, graphql_name="email")
role = sgqlc.types.Field(EnterpriseAdministratorRole, graphql_name="role")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
InviteEnterpriseAdminInput
|
python
|
ApeWorX__ape
|
src/ape/managers/project.py
|
{
"start": 1591,
"end": 12387
}
|
class ____(BaseManager):
"""
    A manager of a local project's source paths.
    Access via ``project.sources``. Allows source access
    by both ``source_id`` and ``path``. Handles
    detecting modified sources as well as excluded sources.
    It is meant to resemble a PackageManifest's source dict,
    but with more functionality for active development.
"""
_path_cache: Optional[list[Path]] = None
# perf: calculating paths from source Ids can be expensive.
_path_to_source_id: dict[Path, str] = {}
def __init__(
self,
root_path: Path,
get_contracts_path: Callable,
exclude_globs: Optional[set[Union[str, Pattern]]] = None,
):
self.root_path = root_path
self.get_contracts_path = get_contracts_path
self.exclude_globs = exclude_globs or set()
self._sources: dict[str, Source] = {}
self._exclude_cache: dict[str, bool] = {}
@log_instead_of_fail(default="<LocalSources>")
def __repr__(self) -> str:
path_str = f" {clean_path(self.get_contracts_path())}"
return f"<LocalSources{path_str}>"
def __len__(self) -> int:
if self._path_cache is not None:
return len(self._path_cache)
# Will set _path_cache, eliminates need to iterate (perf).
return len(list(self.paths))
def __iter__(self) -> Iterator[str]:
for path in self.paths:
yield self._get_source_id(path)
def __getitem__(self, source_id: str) -> Source:
src = self.get(source_id)
# NOTE: Can't use walrus operator here because empty Source objects
# are false-y.
if src is None:
raise KeyError(f"Source '{source_id}' not found.")
return src
def get(self, source_id: str) -> Optional[Source]:
"""
Get a Source by source_id.
Args:
source_id (str): The source identifier.
Returns:
Source | None
"""
if source_id in self._sources:
return self._sources[source_id]
for path in self.paths:
if self._get_source_id(path) == source_id:
text: Union[str, dict]
if path.is_file():
try:
text = path.read_text(encoding="utf8")
except Exception:
continue
else:
text = {}
src = Source.model_validate(text)
self._sources[source_id] = src
return src
return None
def items(self) -> Iterator[tuple[str, Source]]:
for source_id in self.keys():
yield source_id, self[source_id]
def keys(self) -> Iterator[str]:
for path in self.paths:
yield self._get_source_id(path)
def values(self) -> Iterator[Source]:
paths_to_rm = set()
for source_id in self.keys():
try:
yield self[source_id]
except KeyError:
# Deleted before yield.
path = self._get_path(source_id)
paths_to_rm.add(path)
continue
if paths_to_rm:
self._path_cache = (
None
if self._path_cache is None
else [p for p in (self._path_cache or []) if p not in paths_to_rm]
)
@singledispatchmethod
def __contains__(self, item) -> bool:
raise APINotImplementedError(f"__contains__ not implemented for {type(item)}.")
@__contains__.register
def __contains_str(self, source_id: str) -> bool:
for path in self.paths:
if self._get_source_id(path) == source_id:
return True
return False
@__contains__.register
def __contains_path(self, source_path: Path) -> bool:
for path in self.paths:
if path == source_path:
return True
return False
@cached_property
def _all_files(self) -> list[Path]:
try:
contracts_folder = self.get_contracts_path()
except ProjectError:
# No contracts folder found. Might not be in a project.
return []
return get_all_files_in_directory(contracts_folder, max_files=500)
@property
def paths(self) -> Iterator[Path]:
"""
All contract source paths.
"""
yield from self.get_source_paths()
def get_source_paths(self, include_missing_compilers: bool = False) -> Iterator[Path]:
"""
Get contract source paths.
Args:
include_missing_compilers (bool): Set to ``True`` to include the source path even if its extension is
not for a known compiler. Defaults to ``False``.
Returns:
Iterator[Path]
"""
for path in self._all_files:
if self.is_excluded(path, exclude_missing_compilers=not include_missing_compilers):
continue
yield path
def is_excluded(self, path: Path, exclude_missing_compilers: bool = True) -> bool:
"""
Check if the given path is considered an "excluded"
file based on the configured ignore-patterns.
Args:
path (Path): The path to check.
exclude_missing_compilers (bool): Set to ``False`` to not consider sources with missing compilers as
"excluded".
Returns:
bool
"""
source_id = self._get_source_id(path)
if source_id in self._exclude_cache:
return self._exclude_cache[source_id]
# Non-files and hidden files are ignored.
is_file = path.is_file()
if not is_file or path.name.startswith("."):
            # Ignore non-files and hidden files, even when they are known source types.
self._exclude_cache[source_id] = True
return True
if exclude_missing_compilers:
# Files with missing compiler extensions are also ignored.
suffix = get_full_extension(path)
registered = self.compiler_manager.registered_compilers
if suffix not in registered:
self._exclude_cache[source_id] = True
return True
# If we get here, we have a matching compiler and this source exists.
# Check if is excluded.
source_id = self._get_source_id(path)
options = (str(path), path.name, source_id)
parent_dir_name = path.parent.name
for excl in self.exclude_globs:
if isinstance(excl, Pattern):
for opt in options:
if not excl.match(opt):
continue
self._exclude_cache[source_id] = True
return True
else:
# perf: Check parent directory first to exclude faster by marking them all.
if path_match(parent_dir_name, excl):
self._exclude_cache[source_id] = True
for sub in get_all_files_in_directory(path.parent):
sub_source_id = self._get_source_id(sub)
self._exclude_cache[sub_source_id] = True
return True
for opt in options:
if path_match(opt, excl):
self._exclude_cache[source_id] = True
return True
self._exclude_cache[source_id] = False
return False
def lookup(self, path_id: Union[str, Path]) -> Optional[Path]:
"""
Look-up a path by given a sub-path or a source ID.
Args:
path_id (Union[str, Path]): Either part of a path
or a source ID.
Returns:
Path: The full path to the source file.
"""
input_path = Path(path_id)
if input_path.is_file() and input_path.is_relative_to(self.root_path):
# Already given an existing file.
return input_path.absolute()
input_stem = input_path.stem
input_extension = get_full_extension(input_path) or None
def find_in_dir(dir_path: Path, path: Path) -> Optional[Path]:
# Try exact match with or without extension
possible_matches = []
contracts_folder = self.get_contracts_path()
if path.is_absolute():
full_path = path
elif contracts_folder in (dir_path / path).parents:
# Check if a file with an exact match exists.
full_path = dir_path / path
else:
# User did not include contracts-prefix.
full_path = contracts_folder / path
if full_path.is_file():
return full_path
# Check for exact match with no given extension.
if input_extension is None:
if full_path.parent.is_dir():
for file in full_path.parent.iterdir():
if not file.is_file():
continue
# Check exact match w/o extension.
prefix = str(file.with_suffix("")).strip(" /\\")
if str(full_path).strip(" /\\") == prefix:
return file
# Look for stem-only matches (last resort).
for file_path in dir_path.rglob("*"):
if file_path.stem == input_stem:
possible_matches.append(file_path)
# If we have possible matches, return the one with the closest relative path
if possible_matches:
# Prioritize the exact relative path or first match in the list
possible_matches.sort(key=lambda p: len(str(p.relative_to(dir_path))))
return possible_matches[0]
return None
# Derive the relative path from the given key_contract_path.
relative_path = input_path.relative_to(input_path.anchor)
return find_in_dir(self.root_path, relative_path)
def refresh(self):
"""
Reset file-caches to handle session-changes.
(Typically not needed to be called by users).
"""
(self.__dict__ or {}).pop("_all_files", None)
self._path_to_source_id = {}
self._path_cache = None
def _get_source_id(self, path: Path) -> str:
if src_id := self._path_to_source_id.get(path):
return src_id
# Cache because this can be expensive.
src_id = _path_to_source_id(path, self.root_path)
self._path_to_source_id[path] = src_id
return src_id
def _get_path(self, source_id: str) -> Path:
return self.root_path / source_id
|
SourceManager
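A hypothetical usage sketch (paths, globs, and the `get_contracts_path` callable are illustrative; the surrounding Ape runtime is assumed to supply the BaseManager plumbing such as `compiler_manager`):
from pathlib import Path
root = Path("/tmp/my_project")
manager = SourceManager(
    root_path=root,
    get_contracts_path=lambda: root / "contracts",
    exclude_globs={"*.md", "mocks/*"},
)
for source_id in manager:        # iterates source IDs of non-excluded files
    source = manager[source_id]  # lazily loads and caches the Source model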
|
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_katz_centrality.py
|
{
"start": 52,
"end": 3716
}
|
class ____:
def test_K5(self):
"""Katz centrality: K5"""
G = nx.complete_graph(5)
alpha = 0.1
b = nx.katz_centrality(G, alpha)
v = math.sqrt(1 / 5.0)
b_answer = dict.fromkeys(G, v)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
nstart = {n: 1 for n in G}
b = nx.katz_centrality(G, alpha, nstart=nstart)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-7)
def test_P3(self):
"""Katz centrality: P3"""
alpha = 0.1
G = nx.path_graph(3)
b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
b = nx.katz_centrality(G, alpha)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
def test_maxiter(self):
with pytest.raises(nx.PowerIterationFailedConvergence):
nx.katz_centrality(nx.path_graph(3), 0.1, max_iter=0)
def test_beta_as_scalar(self):
alpha = 0.1
beta = 0.1
b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
G = nx.path_graph(3)
b = nx.katz_centrality(G, alpha, beta)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
def test_beta_as_dict(self):
alpha = 0.1
beta = {0: 1.0, 1: 1.0, 2: 1.0}
b_answer = {0: 0.5598852584152165, 1: 0.6107839182711449, 2: 0.5598852584152162}
G = nx.path_graph(3)
b = nx.katz_centrality(G, alpha, beta)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[n], abs=1e-4)
def test_multiple_alpha(self):
alpha_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
for alpha in alpha_list:
b_answer = {
0.1: {
0: 0.5598852584152165,
1: 0.6107839182711449,
2: 0.5598852584152162,
},
0.2: {
0: 0.5454545454545454,
1: 0.6363636363636365,
2: 0.5454545454545454,
},
0.3: {
0: 0.5333964609104419,
1: 0.6564879518897746,
2: 0.5333964609104419,
},
0.4: {
0: 0.5232045649263551,
1: 0.6726915834767423,
2: 0.5232045649263551,
},
0.5: {
0: 0.5144957746691622,
1: 0.6859943117075809,
2: 0.5144957746691622,
},
0.6: {
0: 0.5069794004195823,
1: 0.6970966755769258,
2: 0.5069794004195823,
},
}
G = nx.path_graph(3)
b = nx.katz_centrality(G, alpha)
for n in sorted(G):
assert b[n] == pytest.approx(b_answer[alpha][n], abs=1e-4)
def test_multigraph(self):
with pytest.raises(nx.NetworkXException):
nx.katz_centrality(nx.MultiGraph(), 0.1)
def test_empty(self):
e = nx.katz_centrality(nx.Graph(), 0.1)
assert e == {}
def test_bad_beta(self):
with pytest.raises(nx.NetworkXException):
G = nx.Graph([(0, 1)])
beta = {0: 77}
nx.katz_centrality(G, 0.1, beta=beta)
def test_bad_beta_number(self):
with pytest.raises(nx.NetworkXException):
G = nx.Graph([(0, 1)])
nx.katz_centrality(G, 0.1, beta="foo")
|
TestKatzCentrality
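The expected values in these tests follow the closed form x = beta * (I - alpha * A)^-1 * 1, scaled to unit Euclidean norm; a small sketch (numpy assumed available) reproducing the P3 numbers:
import numpy as np
alpha, beta = 0.1, 1.0
A = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype=float)  # adjacency of the path graph P3
x = np.linalg.solve(np.eye(3) - alpha * A, beta * np.ones(3))
x /= np.linalg.norm(x)  # networkx normalizes to unit Euclidean norm
print(x)  # ~[0.55989, 0.61078, 0.55989], matching b_answer in test_P3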
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_bigquery.py
|
{
"start": 34372,
"end": 34797
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.bigquery._BigQueryDbHookMixin.get_db_hook")
def test_get_db_hook(
self,
mock_get_db_hook,
operator_class,
kwargs,
):
operator = operator_class(task_id=TASK_ID, gcp_conn_id="google_cloud_default", **kwargs)
operator.get_db_hook()
mock_get_db_hook.assert_called_once()
|
TestBigQueryCheckOperators
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 17433,
"end": 17621
}
|
class ____(admin.ModelAdmin):
list_display = ("reference", "driver", "restaurant")
list_editable = ("driver", "restaurant")
show_facets = admin.ShowFacets.NEVER
|
FoodDeliveryAdmin
|
python
|
run-llama__llama_index
|
llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/df.py
|
{
"start": 551,
"end": 762
}
|
class ____(BaseModel):
"""Column in a DataFrame."""
column_name: str = Field(..., description="Column name.")
column_desc: Optional[str] = Field(..., description="Column description.")
|
DataFrameColumn
|
python
|
arrow-py__arrow
|
arrow/locales.py
|
{
"start": 136237,
"end": 137612
}
|
class ____(Locale):
names = ["ur", "ur-pk"]
past = "پہلے {0}"
future = "میں {0}"
and_word = "اور"
timeframes = {
"now": "ابھی",
"second": "ایک سیکنڈ",
"seconds": "{0} سیکنڈ",
"minute": "ایک منٹ",
"minutes": "{0} منٹ",
"hour": "ایک گھنٹے",
"hours": "{0} گھنٹے",
"day": "ایک دن",
"days": "{0} دن",
"week": "ایک ہفتے",
"weeks": "{0} ہفتے",
"month": "ایک مہینہ",
"months": "{0} ماہ",
"year": "ایک سال",
"years": "{0} سال",
}
month_names = [
"",
"جنوری",
"فروری",
"مارچ",
"اپریل",
"مئی",
"جون",
"جولائی",
"اگست",
"ستمبر",
"اکتوبر",
"نومبر",
"دسمبر",
]
month_abbreviations = [
"",
"جنوری",
"فروری",
"مارچ",
"اپریل",
"مئی",
"جون",
"جولائی",
"اگست",
"ستمبر",
"اکتوبر",
"نومبر",
"دسمبر",
]
day_names = [
"",
"سوموار",
"منگل",
"بدھ",
"جمعرات",
"جمعہ",
"ہفتہ",
"اتوار",
]
day_abbreviations = [
"",
"سوموار",
"منگل",
"بدھ",
"جمعرات",
"جمعہ",
"ہفتہ",
"اتوار",
]
|
UrduLocale
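A brief, hedged usage sketch (arrow's public humanize API is assumed; the rendered string is built from the `past` template and the "hour" timeframe above):
import arrow
one_hour_ago = arrow.utcnow().shift(hours=-1)
print(one_hour_ago.humanize(locale="ur"))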
|
python
|
huggingface__transformers
|
tests/models/dinov3_vit/test_modeling_dinov3_vit.py
|
{
"start": 9628,
"end": 11486
}
|
class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m")
if is_vision_available()
else None
)
@slow
def test_inference_no_head(self):
model = DINOv3ViTModel.from_pretrained("facebook/dinov3-vits16-pretrain-lvd1689m").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the last hidden states
# in DINOv3 with Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token)
_, _, height, width = inputs["pixel_values"].shape
num_patches = (height // model.config.patch_size) * (width // model.config.patch_size)
expected_seq_length = num_patches + 1 + model.config.num_register_tokens
expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_size))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
last_layer_cls_token = outputs.pooler_output
expected_slice = torch.tensor([0.4637, -0.4160, 0.4086, -0.1265, -0.2865], device=torch_device)
torch.testing.assert_close(last_layer_cls_token[0, :5], expected_slice, rtol=1e-4, atol=1e-4)
last_layer_patch_tokens = outputs.last_hidden_state[:, model.config.num_register_tokens + 1 :]
expected_slice = torch.tensor([-0.0386, -0.2509, -0.0161, -0.4556, 0.5716], device=torch_device)
torch.testing.assert_close(last_layer_patch_tokens[0, 0, :5], expected_slice, rtol=1e-4, atol=1e-4)
|
DINOv3ViTModelIntegrationTest
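A quick arithmetic check of the sequence-length formula asserted above (the 224x224 input size and 4 register tokens are assumptions for this checkpoint, not values taken from the test):
height = width = 224
patch_size = 16
num_register_tokens = 4  # assumed for the vits16 checkpoint
num_patches = (height // patch_size) * (width // patch_size)  # 14 * 14 = 196
expected_seq_length = num_patches + 1 + num_register_tokens   # 196 + 1 + 4 = 201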
|
python
|
tiangolo__fastapi
|
docs_src/body_nested_models/tutorial002_py39.py
|
{
"start": 104,
"end": 407
}
|
class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: list[str] = []
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
|
Item
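An illustrative request against this endpoint (names and values are made up; `app` is the FastAPI instance defined earlier in the tutorial module):
from fastapi.testclient import TestClient
client = TestClient(app)
response = client.put(
    "/items/5",
    json={"name": "Hammer", "price": 9.99, "tags": ["tools", "sale"]},
)
assert response.json()["item"]["tags"] == ["tools", "sale"]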
|
python
|
pytorch__pytorch
|
test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_autograd.py
|
{
"start": 217,
"end": 1041
}
|
class ____(TestCase):
    # TODO: support the MPS and Windows platforms later and fix the torchdynamo issue
@skipIfMPS
@skipIfWindows()
@skipIfTorchDynamo()
def test_autograd_init(self):
# Make sure autograd is initialized
torch.ones(2, requires_grad=True, device="openreg").sum().backward()
pid = os.getpid()
task_path = f"/proc/{pid}/task"
all_threads = psutil.Process(pid).threads()
all_thread_names = set()
for t in all_threads:
with open(f"{task_path}/{t.id}/comm") as file:
thread_name = file.read().strip()
all_thread_names.add(thread_name)
for i in range(torch.accelerator.device_count()):
self.assertIn(f"pt_autograd_{i}", all_thread_names)
if __name__ == "__main__":
run_tests()
|
TestAutograd
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/callbacks/test_model_checkpoint_edge_cases.py
|
{
"start": 305,
"end": 837
}
|
class ____(Dataset):
def __init__(self, n: int = 8):
self.x = torch.arange(n, dtype=torch.float32).view(-1, 1)
self.y = self.x.clone()
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def _make_loaders(n=8, batch_size=2):
ds = TinyDataset(n=n)
train_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
val_loader = DataLoader(ds, batch_size=batch_size, shuffle=False)
return train_loader, val_loader
|
TinyDataset
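A short usage sketch of the helper above: with shuffle disabled and y cloned from x, each batch pairs identical tensors of shape [batch_size, 1].
import torch
train_loader, val_loader = _make_loaders(n=8, batch_size=2)
xb, yb = next(iter(train_loader))
assert xb.shape == (2, 1) and torch.equal(xb, yb)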
|
python
|
optuna__optuna
|
optuna/trial/_trial.py
|
{
"start": 29607,
"end": 30143
}
|
class ____(UserDict):
def __init__(self, trial_id: int, storage: optuna.storages.BaseStorage) -> None:
super().__init__()
self._trial_id = trial_id
self._storage = storage
self._initialized = False
def __getattribute__(self, key: str) -> Any:
if key == "data":
if not self._initialized:
self._initialized = True
super().update(self._storage.get_trial_system_attrs(self._trial_id))
return super().__getattribute__(key)
|
_LazyTrialSystemAttrs
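A minimal sketch of the lazy-initialization pattern with a stubbed storage (the stub is hypothetical, not Optuna's API); the first read of `data` triggers exactly one storage fetch:
from typing import Any
class _StubStorage:
    def get_trial_system_attrs(self, trial_id: int) -> dict[str, Any]:
        print("storage hit")  # fires only once, on first access
        return {"source": f"trial-{trial_id}"}
attrs = _LazyTrialSystemAttrs(trial_id=7, storage=_StubStorage())  # no fetch yet
print(attrs["source"])  # first access of .data triggers the single fetch
print(attrs["source"])  # served from the populated UserDict, no second hit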
|
python
|
getsentry__sentry
|
tests/sentry/issue_detection/test_consecutive_http_detector.py
|
{
"start": 782,
"end": 16996
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self._settings = get_detection_settings()
def find_problems(self, event: dict[str, Any]) -> list[PerformanceProblem]:
detector = ConsecutiveHTTPSpanDetector(self._settings, event)
run_detector_on_data(detector, event)
return list(detector.stored_problems.values())
def create_issue_spans(self, span_duration: int = 2000) -> list[Span]:
spans = [
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint1", "hash1"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint3", "hash3"
),
]
spans = [
modify_span_start(span, span_duration * spans.index(span)) for span in spans
] # ensure spans don't overlap
return spans
def create_issue_event(self, span_duration: int = 2000) -> dict[str, Any]:
spans = self.create_issue_spans(span_duration)
return create_event(spans)
def test_detects_consecutive_http_issue(self) -> None:
event = self.create_issue_event()
problems = self.find_problems(event)
assert problems == [
PerformanceProblem(
fingerprint="1-1009-00b8644b56309c8391aa365783145162ab9c589a",
op="http",
desc="GET /api/0/organizations/endpoint1",
type=PerformanceConsecutiveHTTPQueriesGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=[
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": [
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
],
"op": "http",
},
evidence_display=[],
)
]
    def test_does_not_detect_consecutive_http_issue_low_time_saved(self) -> None:
spans = [ # min time saved by parallelizing is 2s
create_span("http.client", 1000, "GET /api/0/organizations/endpoint1", "hash1"),
create_span("http.client", 1000, "GET /api/0/organizations/endpoint2", "hash2"),
create_span("http.client", 1000, "GET /api/0/organizations/endpoint3", "hash3"),
]
spans = [
modify_span_start(span, 1000 * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert len(problems) == 1
spans = [ # min time saved by parallelizing is 1s
create_span("http.client", 500, "GET /api/0/organizations/endpoint1", "hash1"),
create_span("http.client", 500, "GET /api/0/organizations/endpoint2", "hash2"),
create_span("http.client", 1000, "GET /api/0/organizations/endpoint3", "hash3"),
]
spans = [
modify_span_start(span, 1000 * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert problems == []
def test_does_not_detect_consecutive_http_issue_with_frontend_events(self) -> None:
event = {
**self.create_issue_event(),
"sdk": {"name": "sentry.javascript.browser"},
}
problems = self.find_problems(event)
assert problems == []
def test_does_not_detect_consecutive_http_issue_with_low_count(self) -> None:
spans = [ # all thresholds are exceeded, except count
create_span("http.client", 3000, "GET /api/0/organizations/endpoint1", "hash1"),
create_span("http.client", 3000, "GET /api/0/organizations/endpoint2", "hash2"),
]
spans = [
modify_span_start(span, 3000 * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert problems == []
def test_detects_consecutive_http_issue_with_trailing_low_duration_span(self) -> None:
spans = [
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint1", "hash1"
), # all thresholds are exceeded.
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint3", "hash3"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint4", "hash4"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint5", "hash5"
),
]
spans = [
modify_span_start(span, MIN_SPAN_DURATION * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert len(problems) == 1
spans = [
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint1", "hash1"
), # some spans with low durations, all other thresholds are exceeded.
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint3", "hash3"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint4", "hash4"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint5", "hash5"
),
create_span("http.client", 400, "GET /api/0/organizations/endpoint6", "hash6"),
]
spans = [
modify_span_start(span, MIN_SPAN_DURATION * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert len(problems) == 1
def test_does_not_detect_consecutive_http_issue_with_low_duration_spans(self) -> None:
spans = [
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint1", "hash1"
), # all thresholds are exceeded.
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint3", "hash3"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint4", "hash4"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint5", "hash5"
),
]
spans = [
modify_span_start(span, MIN_SPAN_DURATION * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert len(problems) == 1
spans = [
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint1", "hash1"
), # some spans with low durations, all other thresholds are exceeded.
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint3", "hash3"
),
create_span("http.client", 400, "GET /api/0/organizations/endpoint4", "hash4"),
create_span("http.client", 400, "GET /api/0/organizations/endpoint5", "hash5"),
create_span("http.client", 400, "GET /api/0/organizations/endpoint5", "hash5"),
]
spans = [
modify_span_start(span, MIN_SPAN_DURATION * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert problems == []
def test_detects_consecutive_http_issue_with_low_duration_spans(self) -> None:
spans = [
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint1", "hash1"
), # spans with low durations, but min_time_saved
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint2", "hash2"
), # exceeds threshold
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint3", "hash3"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint4", "hash4"
),
create_span(
"http.client", MIN_SPAN_DURATION, "GET /api/0/organizations/endpoint5", "hash5"
),
]
spans = [
modify_span_start(span, MIN_SPAN_DURATION * spans.index(span)) for span in spans
] # ensure spans don't overlap
problems = self.find_problems(create_event(spans))
assert len(problems) == 1
def test_detects_consecutive_with_non_http_between_http_spans(self) -> None:
spans = self.create_issue_spans()
spans.insert(
1, modify_span_start(create_span("resource.script", 500, "/static/js/bundle.js"), 2000)
)
event = create_event(spans)
problems = self.find_problems(event)
assert problems == [
PerformanceProblem(
fingerprint="1-1009-00b8644b56309c8391aa365783145162ab9c589a",
op="http",
desc="GET /api/0/organizations/endpoint1",
type=PerformanceConsecutiveHTTPQueriesGroupType,
parent_span_ids=None,
cause_span_ids=[],
offender_span_ids=[
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": [
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
"bbbbbbbbbbbbbbbb",
],
"op": "http",
},
evidence_display=[],
)
]
def test_does_not_detect_nextjs_asset(self) -> None:
span_duration = 2000 # ms
spans = [
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint1", "hash1"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint3", "hash3"
),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
assert len(self.find_problems(create_event(spans))) == 1
spans[0] = modify_span_start(
create_span("http.client", 2000, "GET /_next/static/css/file-hash-abc.css", "hash4"),
0,
)
assert self.find_problems(create_event(spans)) == []
def test_does_not_detect_with_high_duration_between_spans(self) -> None:
span_duration = 2000
spans = [
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint1", "hash1"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint2", "hash2"
),
create_span(
"http.client", span_duration, "GET /api/0/organizations/endpoint3", "hash3"
),
]
spans = [
modify_span_start(span, (10000 + span_duration) * spans.index(span)) for span in spans
] # ensure spans don't overlap
assert self.find_problems(create_event(spans)) == []
def test_fingerprints_match_with_duplicate_http(self) -> None:
span_duration = 2000
spans = [
create_span("http.client", span_duration, "GET /api/endpoint1", "hash1"),
create_span("http.client", span_duration, "GET /api/endpoint2", "hash2"),
create_span("http.client", span_duration, "GET /api/endpoint3", "hash3"),
]
spans = [
modify_span_start(span, span_duration * spans.index(span)) for span in spans
] # ensure spans don't overlap
problem_1 = self.find_problems(create_event(spans))[0]
spans.append(
modify_span_start(
create_span("http.client", span_duration, "GET /api/endpoint3", "hash3"), 6000
)
)
problem_2 = self.find_problems(create_event(spans))[0]
assert problem_2.fingerprint == "1-1009-515a42c2614f98fa886b6d9ad1ddfe1929329f53"
assert problem_1.fingerprint == problem_2.fingerprint
def test_respects_project_option(self) -> None:
project = self.create_project()
event = self.create_issue_event()
settings = get_detection_settings(project.id)
detector = ConsecutiveHTTPSpanDetector(settings, event)
assert detector.is_creation_allowed_for_project(project)
ProjectOption.objects.set_value(
project=project,
key="sentry:performance_issue_settings",
value={"consecutive_http_spans_detection_enabled": False},
)
settings = get_detection_settings(project.id)
detector = ConsecutiveHTTPSpanDetector(settings, event)
assert not detector.is_creation_allowed_for_project(project)
def test_ignores_non_http_operations(self) -> None:
span_duration = 2000
spans = [
create_span("db", span_duration, "DELETE /api/endpoint2", "hash2"),
create_span("db", span_duration, "DELETE /api/endpoint1", "hash1"),
create_span("db", span_duration, "DELETE /api/endpoint3", "hash3"),
]
spans = [modify_span_start(span, span_duration * spans.index(span)) for span in spans]
problems = self.find_problems(create_event(spans))
assert len(problems) == 0
def test_ignores_http_spans_with_gen_ai_parent(self) -> None:
"""Test that HTTP spans with gen_ai.chat parent spans are ignored."""
span_duration = 2000
# Create a gen_ai.chat span first
gen_ai_span = create_span("gen_ai.chat", 1000, "AI Chat", "gen_ai_hash")
gen_ai_span_id = gen_ai_span["span_id"]
# Create HTTP spans that are children of the gen_ai span
http_spans = [
create_span("http.client", span_duration, "GET /api/endpoint1", "hash1"),
create_span("http.client", span_duration, "GET /api/endpoint2", "hash2"),
create_span("http.client", span_duration, "GET /api/endpoint3", "hash3"),
]
# Set the parent_span_id to the gen_ai span
for span in http_spans:
span["parent_span_id"] = gen_ai_span_id
# Ensure spans don't overlap
all_spans = [gen_ai_span] + http_spans
all_spans = [
modify_span_start(span, span_duration * all_spans.index(span)) for span in all_spans
]
problems = self.find_problems(create_event(all_spans))
assert len(problems) == 0
|
ConsecutiveHTTPSpansDetectorTest
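The "time saved by parallelizing" reasoning the tests rely on, as a quick sketch (the ~2s minimum is inferred from the test comments, not a verified constant):
durations_ms = [1000, 1000, 1000]
sequential = sum(durations_ms)      # 3000 ms if the spans run back to back
parallel = max(durations_ms)        # 1000 ms if they run concurrently
time_saved = sequential - parallel  # 2000 ms -> meets the ~2s minimum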
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/workspace/context.py
|
{
"start": 4096,
"end": 28037
}
|
class ____(LoadingContext):
"""This class is a request-scoped object that stores (1) a reference to all repository locations
that exist on the `IWorkspaceProcessContext` at the start of the request and (2) a snapshot of the
workspace at the start of the request.
This object is needed because a process context and the repository locations on that context can
be updated (for example, from a thread on the process context). If a request is accessing a
repository location at the same time the repository location was being cleaned up, we would run
into errors.
"""
_exit_stack: ExitStack
@property
@abstractmethod
def instance(self) -> DagsterInstance: ...
@abstractmethod
def get_current_workspace(self) -> CurrentWorkspace: ...
# abstracted since they may be calculated without the full CurrentWorkspace
@abstractmethod
def get_location_entry(self, name: str) -> Optional[CodeLocationEntry]: ...
@abstractmethod
def get_code_location_statuses(self) -> Sequence[CodeLocationStatusEntry]: ...
# implemented here since they require the full CurrentWorkspace
def get_code_location_entries(self) -> Mapping[str, CodeLocationEntry]:
return self.get_current_workspace().code_location_entries
def __enter__(self) -> Self:
self._exit_stack = ExitStack()
self._exit_stack.enter_context(
partition_loading_context(dynamic_partitions_store=self.dynamic_partitions_loader)
)
return self
def __exit__(self, exception_type, exception_value, traceback) -> None:
self._exit_stack.close()
@property
def asset_graph(self) -> "RemoteWorkspaceAssetGraph":
return self.get_current_workspace().asset_graph
@cached_property
def instance_queryer(self) -> CachingInstanceQueryer:
return CachingInstanceQueryer(
instance=self.instance,
asset_graph=self.asset_graph,
loading_context=self,
)
@cached_property
def dynamic_partitions_loader(self) -> CachingDynamicPartitionsLoader:
return CachingDynamicPartitionsLoader(self.instance)
@cached_property
def stale_status_loader(self) -> CachingStaleStatusResolver:
return CachingStaleStatusResolver(
self.instance,
asset_graph=lambda: self.asset_graph,
loading_context=self,
)
@cached_property
def data_time_resolver(self) -> CachingDataTimeResolver:
return CachingDataTimeResolver(self.instance_queryer)
@property
@abstractmethod
def process_context(self) -> "IWorkspaceProcessContext": ...
@property
@abstractmethod
def version(self) -> Optional[str]: ...
@property
@abstractmethod
def permissions(self) -> Mapping[str, PermissionResult]: ...
@abstractmethod
def permissions_for_location(self, *, location_name: str) -> Mapping[str, PermissionResult]:
pass
@abstractmethod
def permissions_for_owner(self, *, owner: str) -> Mapping[str, PermissionResult]:
pass
def has_permission_for_location(self, permission: str, location_name: str) -> bool:
if self.has_code_location_name(location_name):
permissions = self.permissions_for_location(location_name=location_name)
return permissions[permission].enabled
# if not in workspace, fall back to the global permissions across all code locations
return self.has_permission(permission)
@abstractmethod
def has_permission(self, permission: str) -> bool: ...
@abstractmethod
def was_permission_checked(self, permission: str) -> bool: ...
def has_permission_for_selector(
self,
permission: str,
selector: Union[AssetKey, AssetCheckKey, JobSelector, ScheduleSelector, SensorSelector],
) -> bool:
if self.has_permission(permission):
return True
if isinstance(selector, (AssetKey, AssetCheckKey)):
if not self.asset_graph.has(selector):
return False
location_name = self.asset_graph.get_repository_handle(selector).location_name
else:
location_name = selector.location_name
if not self.has_code_location_name(location_name):
return False
if self.has_permission_for_location(permission, location_name):
return True
if not self.viewer_has_any_owner_definition_permissions():
return False
owners = self.get_owners_for_selector(selector)
return self.has_permission_for_owners(permission, owners)
def get_owners_for_selector(
self,
selector: Union[AssetKey, AssetCheckKey, JobSelector, ScheduleSelector, SensorSelector],
) -> Sequence[str]:
if isinstance(selector, AssetKey):
remote_definition = self.asset_graph.get(selector)
elif isinstance(selector, AssetCheckKey):
# make asset checks permissioned to the same owners as the underlying asset
remote_definition = self.asset_graph.get(selector.asset_key)
elif isinstance(selector, JobSelector):
remote_definition = self.get_full_job(selector)
elif isinstance(selector, ScheduleSelector):
remote_definition = self.get_schedule(selector)
elif isinstance(selector, SensorSelector):
remote_definition = self.get_sensor(selector)
if not remote_definition:
return []
return remote_definition.owners or []
def has_permission_for_owners(self, permission: str, owners: Sequence[str]) -> bool:
return any(
self.permissions_for_owner(owner=owner)
.get(permission, PermissionResult(enabled=False, disabled_reason=None))
.enabled
for owner in owners
)
@property
@abstractmethod
def records_for_run_default_limit(self) -> Optional[int]: ...
@property
def show_instance_config(self) -> bool:
return True
def viewer_has_any_owner_definition_permissions(self) -> bool:
return False
def read_partition_subsets_from_asset_health(self) -> bool:
return False
def get_viewer_tags(self) -> dict[str, str]:
return {}
def get_reporting_user_tags(self) -> dict[str, str]:
return {}
def get_code_location(self, location_name: str) -> CodeLocation:
location_entry = self.get_location_entry(location_name)
if not location_entry:
raise DagsterCodeLocationNotFoundError(
f"Location {location_name} does not exist in workspace"
)
if location_entry.code_location:
return location_entry.code_location
if location_entry.load_error:
error_info = location_entry.load_error
raise DagsterCodeLocationLoadError(
f"Failure loading {location_name}: {error_info.to_string()}",
load_error_infos=[error_info],
)
raise DagsterCodeLocationNotFoundError(
f"Location {location_name} is still loading",
)
@property
def code_locations(self) -> Sequence[CodeLocation]:
return [
entry.code_location
for entry in self.get_code_location_entries().values()
if entry.code_location
]
@property
def code_location_names(self) -> Sequence[str]:
# For some WorkspaceRequestContext subclasses, the CodeLocationEntry is more expensive
# than the CodeLocationStatusEntry, so use the latter for a faster check.
return [status_entry.location_name for status_entry in self.get_code_location_statuses()]
def code_location_errors(self) -> Sequence[SerializableErrorInfo]:
return [
entry.load_error
for entry in self.get_code_location_entries().values()
if entry.load_error
]
def has_code_location_error(self, name: str) -> bool:
return self.get_code_location_error(name) is not None
def get_code_location_error(self, name: str) -> Optional[SerializableErrorInfo]:
entry = self.get_location_entry(name)
return entry.load_error if entry else None
def has_code_location_name(self, name: str) -> bool:
# For some WorkspaceRequestContext subclasses, the CodeLocationEntry is more expensive
# than the CodeLocationStatusEntry, so use the latter for a faster check.
for status_entry in self.get_code_location_statuses():
if status_entry.location_name == name:
return True
return False
def has_code_location(self, name: str) -> bool:
location_entry = self.get_location_entry(name)
return bool(location_entry and location_entry.code_location is not None)
def is_reload_supported(self, name: str) -> bool:
entry = self.get_location_entry(name)
return entry.origin.is_reload_supported if entry else False
def is_shutdown_supported(self, name: str) -> bool:
entry = self.get_location_entry(name)
return entry.origin.is_shutdown_supported if entry else False
def reload_code_location(self, name: str) -> "BaseWorkspaceRequestContext":
# This method signals to the remote gRPC server that it should reload its
# code, and returns a new request context created from the updated process context
self.process_context.reload_code_location(name)
return self.process_context.create_request_context()
def shutdown_code_location(self, name: str):
self.process_context.shutdown_code_location(name)
def reload_workspace(self) -> "BaseWorkspaceRequestContext":
self.process_context.reload_workspace()
return self.process_context.create_request_context()
def has_job(self, selector: Union[JobSubsetSelector, JobSelector]) -> bool:
check.inst_param(selector, "selector", (JobSubsetSelector, JobSelector))
if not self.has_code_location(selector.location_name):
return False
loc = self.get_code_location(selector.location_name)
return loc.has_repository(selector.repository_name) and loc.get_repository(
selector.repository_name
).has_job(selector.job_name)
def get_full_job(self, selector: Union[JobSubsetSelector, JobSelector]) -> RemoteJob:
return (
self.get_code_location(selector.location_name)
.get_repository(selector.repository_name)
.get_full_job(selector.job_name)
)
async def gen_job(
self,
selector: JobSubsetSelector,
) -> RemoteJob:
if not selector.is_subset_selection:
return self.get_full_job(selector)
return await self.get_code_location(selector.location_name).gen_subset_job(
selector, lambda selector: self.get_full_job(selector)
)
def get_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
) -> RemoteExecutionPlan:
return self.get_code_location(remote_job.handle.location_name).get_execution_plan(
remote_job=remote_job,
run_config=run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=self.instance,
)
async def gen_execution_plan(
self,
remote_job: RemoteJob,
run_config: Mapping[str, object],
step_keys_to_execute: Optional[Sequence[str]],
known_state: Optional[KnownExecutionState],
) -> RemoteExecutionPlan:
return await self.get_code_location(remote_job.handle.location_name).gen_execution_plan(
remote_job=remote_job,
run_config=run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=self.instance,
)
def get_partition_config(
self,
repository_handle: RepositoryHandle,
job_name: str,
partition_name: str,
instance: DagsterInstance,
) -> Union["PartitionConfigSnap", "PartitionExecutionErrorSnap"]:
return self.get_code_location(repository_handle.location_name).get_partition_config(
repository_handle=repository_handle,
job_name=job_name,
partition_name=partition_name,
instance=instance,
)
def get_partition_tags(
self,
repository_selector: RepositorySelector,
job_name: str,
partition_name: str,
instance: DagsterInstance,
selected_asset_keys: Optional[AbstractSet[AssetKey]],
) -> Union["PartitionTagsSnap", "PartitionExecutionErrorSnap"]:
if is_implicit_asset_job_name(job_name):
# Implicit asset jobs never have custom tag-for-partition functions, and the
# PartitionsDefinitions on the assets are always available on the host, so we can just
# determine the tags using information on the host.
# In addition to the performance benefits, this is convenient in the case where the
# implicit asset job has assets with different PartitionsDefinitions, as the gRPC
# API for getting partition tags from the code server doesn't support an asset selection.
partitions_def = self._get_partitions_def_for_job(
job_selector=JobSelector(
location_name=repository_selector.location_name,
repository_name=repository_selector.repository_name,
job_name=job_name,
),
selected_asset_keys=selected_asset_keys,
)
return PartitionTagsSnap(
name=partition_name,
tags=check.not_none(partitions_def).get_tags_for_partition_key(partition_name),
)
location = self.get_code_location(repository_selector.location_name)
return location.get_partition_tags_from_repo(
repository_handle=RepositoryHandle.from_location(
repository_selector.repository_name,
location,
),
job_name=job_name,
partition_name=partition_name,
instance=instance,
)
def get_partition_names(
self,
repository_selector: RepositorySelector,
job_name: str,
instance: DagsterInstance,
selected_asset_keys: Optional[AbstractSet[AssetKey]],
) -> Union["PartitionNamesSnap", "PartitionExecutionErrorSnap"]:
partition_set_name = partition_set_snap_name_for_job_name(job_name)
partitions_sets = self.get_partition_sets(repository_selector)
match = next(
(
partitions_set
for partitions_set in partitions_sets
if partitions_set.name == partition_set_name
),
None,
)
if match:
partition_set = match
# Prefer to return the names without calling out to user code if there's a corresponding
# partition set that allows it
if partition_set.has_partition_name_data():
return PartitionNamesSnap(
partition_names=partition_set.get_partition_names(instance=instance)
)
else:
code_location = self.get_code_location(repository_selector.location_name)
return code_location.get_partition_names_from_repo(
RepositoryHandle.from_location(
repository_selector.repository_name,
code_location,
),
job_name,
)
else:
# Asset jobs might have no corresponding partition set but still have partitioned
# assets, so we get the partition names using the assets.
partitions_def = self._get_partitions_def_for_job(
job_selector=JobSelector(
location_name=repository_selector.location_name,
repository_name=repository_selector.repository_name,
job_name=job_name,
),
selected_asset_keys=selected_asset_keys,
)
if not partitions_def:
return PartitionNamesSnap([])
return PartitionNamesSnap(
partitions_def.get_partition_keys(dynamic_partitions_store=instance)
)
def _get_partitions_def_for_job(
self,
job_selector: JobSelector,
selected_asset_keys: Optional[AbstractSet[AssetKey]],
) -> Optional[PartitionsDefinition]:
asset_nodes = self.get_assets_in_job(job_selector, selected_asset_keys)
unique_partitions_defs: set[PartitionsDefinition] = set()
for asset_node in asset_nodes:
if asset_node.asset_node_snap.partitions is not None:
unique_partitions_defs.add(
asset_node.asset_node_snap.partitions.get_partitions_definition()
)
if len(unique_partitions_defs) == 0:
# Assets are all unpartitioned
return None
if len(unique_partitions_defs) == 1:
return next(iter(unique_partitions_defs))
else:
check.failed(
"There is no PartitionsDefinition shared by all the provided assets."
f" {len(unique_partitions_defs)} unique PartitionsDefinitions."
)
def get_partition_set_execution_param_data(
self,
repository_handle: RepositoryHandle,
partition_set_name: str,
partition_names: Sequence[str],
instance: DagsterInstance,
) -> Union["PartitionSetExecutionParamSnap", "PartitionExecutionErrorSnap"]:
return self.get_code_location(
repository_handle.location_name
).get_partition_set_execution_params(
repository_handle=repository_handle,
partition_set_name=partition_set_name,
partition_names=partition_names,
instance=instance,
)
def get_notebook_data(self, code_location_name: str, notebook_path: str) -> bytes:
check.str_param(code_location_name, "code_location_name")
check.str_param(notebook_path, "notebook_path")
code_location = self.get_code_location(code_location_name)
return code_location.get_notebook_data(notebook_path=notebook_path)
def get_base_deployment_asset_graph(
self, repository_selector: Optional["RepositorySelector"]
) -> Optional["RemoteAssetGraph"]:
return None
def get_repository(
self, selector: Union[RepositorySelector, RepositoryHandle]
) -> RemoteRepository:
return self.get_code_location(selector.location_name).get_repository(
selector.repository_name
)
def get_sensor(
self, selector: Union[SensorSelector, InstigatorSelector]
) -> Optional[RemoteSensor]:
if not self.has_code_location(selector.location_name):
return None
location = self.get_code_location(selector.location_name)
if not location.has_repository(selector.repository_name):
return None
repository = location.get_repository(selector.repository_name)
if not repository.has_sensor(selector.instigator_name):
return None
return repository.get_sensor(selector.instigator_name)
def get_schedule(
self, selector: Union[ScheduleSelector, InstigatorSelector]
) -> Optional[RemoteSchedule]:
if not self.has_code_location(selector.location_name):
return None
location = self.get_code_location(selector.location_name)
if not location.has_repository(selector.repository_name):
return None
repository = location.get_repository(selector.repository_name)
if not repository.has_schedule(selector.instigator_name):
return None
return repository.get_schedule(selector.instigator_name)
def get_node_def(
self,
job_selector: Union[JobSubsetSelector, JobSelector],
node_def_name: str,
) -> Union[OpDefSnap, GraphDefSnap]:
job = self.get_full_job(job_selector)
return job.get_node_def_snap(node_def_name)
def get_config_type(
self,
job_selector: Union[JobSubsetSelector, JobSelector],
type_key: str,
) -> ConfigTypeSnap:
job = self.get_full_job(job_selector)
return job.config_schema_snapshot.get_config_snap(type_key)
def get_dagster_type(
self,
job_selector: Union[JobSubsetSelector, JobSelector],
type_key: str,
) -> DagsterTypeSnap:
job = self.get_full_job(job_selector)
return job.job_snapshot.dagster_type_namespace_snapshot.get_dagster_type_snap(type_key)
def get_resources(
self,
job_selector: Union[JobSubsetSelector, JobSelector],
) -> Sequence[ResourceDefSnap]:
job = self.get_full_job(job_selector)
if not job.mode_def_snaps:
return []
return job.mode_def_snaps[0].resource_def_snaps
def get_dagster_library_versions(self, location_name: str) -> Optional[Mapping[str, str]]:
return self.get_code_location(location_name).get_dagster_library_versions()
def get_schedules_targeting_job(
self,
selector: Union[JobSubsetSelector, JobSelector],
) -> Sequence[RemoteSchedule]:
repository = self.get_code_location(selector.location_name).get_repository(
selector.repository_name
)
return repository.schedules_by_job_name.get(selector.job_name, [])
def get_sensors_targeting_job(
self,
selector: Union[JobSubsetSelector, JobSelector],
) -> Sequence[RemoteSensor]:
repository = self.get_code_location(selector.location_name).get_repository(
selector.repository_name
)
return repository.sensors_by_job_name.get(selector.job_name, [])
def get_asset_keys_in_job(
self,
selector: Union[JobSubsetSelector, JobSelector],
) -> Sequence[AssetKey]:
if not self.has_code_location(selector.location_name):
return []
location = self.get_code_location(selector.location_name)
if not location.has_repository(selector.repository_name):
return []
repository = location.get_repository(selector.repository_name)
return repository.get_asset_keys_in_job(job_name=selector.job_name)
def get_assets_in_job(
self,
selector: Union[JobSubsetSelector, JobSelector],
selected_asset_keys: Optional[AbstractSet[AssetKey]] = None,
) -> Sequence[RemoteRepositoryAssetNode]:
keys = self.get_asset_keys_in_job(selector)
if not keys:
return []
if selected_asset_keys is not None:
keys = [key for key in keys if key in selected_asset_keys]
repo_asset_graph = self.get_repository(selector.repository_selector).asset_graph
return [
repo_asset_graph.get(asset_key) for asset_key in keys if repo_asset_graph.has(asset_key)
]
def get_partition_sets(
self,
repository_selector: RepositorySelector,
) -> Sequence[RemotePartitionSet]:
if not self.has_code_location(repository_selector.location_name):
return []
location = self.get_code_location(repository_selector.location_name)
if not location.has_repository(repository_selector.repository_name):
return []
repository = location.get_repository(repository_selector.repository_name)
return repository.get_partition_sets()
|
BaseWorkspaceRequestContext
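A usage sketch grounded only in the methods shown above (how the concrete request context is obtained, and the location name, are hypothetical):
with request_context:  # enters the partition-loading ExitStack
    names = request_context.code_location_names
    if request_context.has_code_location("my_location"):
        location = request_context.get_code_location("my_location")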
|
python
|
pytorch__pytorch
|
test/distributed/_composable/test_composability/test_2d_composability.py
|
{
"start": 25607,
"end": 39242
}
|
class ____(DTensorTestBase):
@property
def backend(self):
# need to specify gloo backend for testing cpu offload
return "cpu:gloo,xpu:xccl" if TEST_XPU else "cpu:gloo,cuda:nccl"
@with_comms
@skip_if_lt_x_gpu(4)
def test_fsdp_2d_extension(self):
"""
        Test whether _fsdp_extension from the FSDP state has been set correctly.
"""
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
"net3": ColwiseParallel(),
}
model_2d = parallelize_module(
SimpleModel().to(device_type),
mesh_2d["tp"],
parallelize_plan=parallelize_plan,
)
model_2d = FSDP(model_2d, device_mesh=mesh_2d["dp"], use_orig_params=True)
model_2d_fsdp_state = _get_module_fsdp_state(model_2d)
self.assertTrue(
isinstance(model_2d_fsdp_state._fsdp_extension, DTensorExtensions)
)
mesh_1d = init_device_mesh(device_type, (self.world_size,))
model_1d = FSDP(
SimpleModel().to(device_type), device_mesh=mesh_1d, use_orig_params=True
)
model_1d_fsdp_state = _get_module_fsdp_state(model_1d)
self.assertEqual(model_1d_fsdp_state._fsdp_extension, None)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("is_even_sharded_model", [True, False])
def test_2d_state_dict(self, is_even_sharded_model):
simple_model = SimpleModel if is_even_sharded_model else SimpleModelUneven
# Create a model without wrapper
torch.manual_seed(0)
no_wrap_model = simple_model().to(f"{device_type}:{self.rank}")
no_wrap_state_dict = no_wrap_model.state_dict()
        # Create a model and shard it with 2D FSDP + TP
torch.manual_seed(0)
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model_2d = parallelize_module(
simple_model().to(device_type), tp_mesh, parallelize_plan
)
model_2d = FSDP(model_2d, device_mesh=dp_mesh, use_orig_params=True)
FSDP.set_state_dict_type(
model_2d,
StateDictType.SHARDED_STATE_DICT,
)
state_dict_2d = model_2d.state_dict()
for no_wrap_items, two_d_items in zip(
no_wrap_state_dict.items(), state_dict_2d.items()
):
no_wrap_k, no_wrap_v = no_wrap_items
two_d_k, two_d_v = two_d_items
self.assertEqual(no_wrap_k, two_d_k)
            # check if all values in the 2D state_dict are DTensors
self.assertTrue(isinstance(two_d_v, DTensor))
self.assertEqual(len(two_d_v.placements), 2)
# the outer dimension is the FSDP dimension and the placement is always Shard(0)
self.assertEqual(two_d_v.placements[0], Shard(0))
self.assertEqual(two_d_v.device_mesh, mesh_2d)
            # check if the parameter value is the same between the 2D model and the model without the wrapper
all_gather_two_d_v = two_d_v.redistribute(
mesh_2d, (Replicate(), Replicate())
)
self.assertEqual(
torch.allclose(no_wrap_v, all_gather_two_d_v.to_local()), True
)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("is_even_sharded_model", [True, False])
def test_2d_load_state_dict(self, is_even_sharded_model):
simple_model = SimpleModel if is_even_sharded_model else SimpleModelUneven
torch.manual_seed(0)
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model_2d = parallelize_module(
simple_model().to(device_type), tp_mesh, parallelize_plan
)
model_2d = FSDP(model_2d, device_mesh=dp_mesh, use_orig_params=True)
optim_2d = torch.optim.Adam(model_2d.parameters(), lr=0.01)
FSDP.set_state_dict_type(
model_2d,
StateDictType.SHARDED_STATE_DICT,
)
checkpoint = io.BytesIO()
torch.save(model_2d.state_dict(), checkpoint)
# Deepcopy to save current state_dict to compare with the state_dict loaded back below.
ref_state_dict = deepcopy(model_2d.state_dict())
        # Update the parameters so model.state_dict() will be different from ref_state_dict.
model_2d(model_2d.get_input().to(f"{device_type}:{self.rank}")).sum().backward()
optim_2d.step()
# Load ref_state_dict back.
checkpoint.seek(0)
load_ref_state_dict = torch.load(checkpoint)
model_2d.load_state_dict(load_ref_state_dict)
new_state_dict = model_2d.state_dict()
# Check whether new_state_dict is the same as ref_state_dict.
for (k1, v1), (k2, v2) in zip(ref_state_dict.items(), new_state_dict.items()):
# check whether fqn are the same
self.assertEqual(k1, k2)
self.assertEqual(type(v1), DTensor)
self.assertEqual(type(v2), DTensor)
            # check whether the DTensors are the same
            # TODO: 2D DTensor comparison is not supported at this time, so we are comparing the spec and the local tensor for now.
# TODO: Update it to compare the two DTensors once 2D DTensor comparison is supported.
self.assertEqual(v1.to_local(), v2.to_local())
self.assertEqual(v1.device_mesh, v2.device_mesh)
self.assertEqual(v1.placements, v2.placements)
@with_comms
@skip_if_lt_x_gpu(4)
@parametrize("is_even_sharded_model", [True, False])
def test_2d_optim_state_dict(self, is_even_sharded_model):
simple_model = SimpleModel if is_even_sharded_model else SimpleModelUneven
# Create a model without wrapper
torch.manual_seed(0)
no_wrap_model = simple_model().to(f"{device_type}:{self.rank}")
no_wrap_optim = torch.optim.Adam(no_wrap_model.parameters(), lr=0.01)
no_wrap_model(
no_wrap_model.get_input().to(f"{device_type}:{self.rank}")
).sum().backward()
no_wrap_optim.step()
no_wrap_osd = get_optimizer_state_dict(no_wrap_model, optimizers=no_wrap_optim)
        # Create a model and shard it with 2D FSDP + TP
torch.manual_seed(0)
mesh_2d = init_device_mesh(
self.device_type, (2, self.world_size // 2), mesh_dim_names=("dp", "tp")
)
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
}
model_2d = parallelize_module(
simple_model().to(device_type), mesh_2d["tp"], parallelize_plan
)
model_2d = FSDP(model_2d, device_mesh=mesh_2d["dp"], use_orig_params=True)
FSDP.set_state_dict_type(
model_2d,
StateDictType.SHARDED_STATE_DICT,
)
optim_2d = torch.optim.Adam(model_2d.parameters(), lr=0.01)
model_2d(model_2d.get_input().to(f"{device_type}:{self.rank}")).sum().backward()
optim_2d.step()
optim_2d_osd = get_optimizer_state_dict(model_2d, optimizers=optim_2d)
ref_optim_2d_osd = deepcopy(optim_2d_osd)
no_wrap_osd_states = no_wrap_osd["state"]
optim_2d_osd_states = optim_2d_osd["state"]
self.assertEqual(len(no_wrap_osd_states), len(optim_2d_osd_states))
self.assertEqual(no_wrap_osd_states.keys(), optim_2d_osd_states.keys())
for fqn, states in no_wrap_osd_states.items():
dist_states = optim_2d_osd_states.get(fqn)
for state_name, state in states.items():
dist_state = dist_states.get(state_name)
# If a state is DTensor, we all gather it in both DP and TP dimension to
# compare with no_wrap state.
if isinstance(dist_state, DTensor):
dist_state = (
dist_state.to(device_type)
.redistribute(placements=(Replicate(), Replicate()))
.to_local()
)
self.assertTrue(isinstance(dist_state, torch.Tensor))
self.assertTrue(torch.allclose(state, dist_state))
        # Update the parameters so the 2D optim states will be different from ref_optim_2d_osd.
model_2d(model_2d.get_input().to(f"{device_type}:{self.rank}")).sum().backward()
optim_2d.step()
set_optimizer_state_dict(
model_2d, optimizers=optim_2d, optim_state_dict=ref_optim_2d_osd
)
ref_optim_2d_osd_states = ref_optim_2d_osd["state"]
new_optim_2d_osd_states = optim_2d_osd["state"]
# Compare the new optim state dict after load with the reference one
self.assertEqual(len(ref_optim_2d_osd_states), len(new_optim_2d_osd_states))
self.assertEqual(ref_optim_2d_osd_states.keys(), new_optim_2d_osd_states.keys())
for fqn, states in ref_optim_2d_osd_states.items():
new_states = new_optim_2d_osd_states.get(fqn)
for state_name, state in states.items():
new_state = new_states.get(state_name)
if isinstance(new_state, DTensor):
self.assertEqual(new_state.placements, state.placements)
self.assertEqual(new_state.device_mesh, state.device_mesh)
self.assertTrue(
torch.allclose(new_state.to_local(), state.to_local())
)
else:
self.assertEqual(new_state, state)
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(4)
def test_fsdp1_tp_2d_set_full_state_dict(self):
"""
        This is a workaround for loading a full state dict into an FSDP1+TP 2D model.
        Since named_parameters() in FSDP1 does not return DTensor, we don't have the information to shard the full_state_dict
        and load it directly into the 2D model. In order to load a full state dict into an FSDP1+TP 2D model, we need to:
1) load the full state dict into a 1D FSDP model
2) dcp.save the full/shard state dict into storage
3) initialize a 2D FSDP1+TP model
4) get the default sharded state dict for the 2D model (full_state_dict=False)
5) dcp.load the state dict from storage
6) load the state dict into the 2D model
"""
dummy_model = SimpleModel().to(device_type)
mesh_1d = init_device_mesh(device_type, (self.world_size,))
model = FSDP(dummy_model, device_mesh=mesh_1d)
optim = torch.optim.Adam(model.parameters(), lr=0.01)
model(model.get_input()).sum().backward()
optim.step()
ref_full_msd = get_model_state_dict(
model, options=StateDictOptions(full_state_dict=True, cpu_offload=True)
)
ref_full_osd = get_optimizer_state_dict(
model,
optimizers=optim,
options=StateDictOptions(full_state_dict=True, cpu_offload=True),
)
state_dict = {"model": ref_full_msd, "optim": ref_full_osd}
# save the full state dict into storage first
dcp.save(state_dict, checkpoint_id=self.temp_dir)
# initialize 2d model
dummy_model = SimpleModel().to(device_type)
mesh_2d = init_device_mesh(
device_type,
(2, self.world_size // 2),
mesh_dim_names=("dp", "tp"),
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
parallelize_plan = {
"net1": ColwiseParallel(),
"net2": RowwiseParallel(),
"net3": ColwiseParallel(),
}
model_2d = parallelize_module(dummy_model, tp_mesh, parallelize_plan)
model_2d = FSDP(model_2d, device_mesh=dp_mesh, use_orig_params=True)
optim_2d = torch.optim.Adam(model_2d.parameters(), lr=0.01)
        # get the default sharded state dict for model_2d
        # (we cannot load a full state dict into the 2D model directly)
msd = get_model_state_dict(model_2d)
osd = get_optimizer_state_dict(model_2d, optimizers=optim_2d)
state_dict = {"model": msd, "optim": osd}
dcp.load(state_dict=state_dict, checkpoint_id=self.temp_dir)
set_model_state_dict(model_2d, state_dict["model"])
set_optimizer_state_dict(
model_2d, optimizers=optim_2d, optim_state_dict=state_dict["optim"]
)
        # check that after setting the sharded state dict, the model and optim
        # full state dicts match the initial full state dicts.
        new_full_msd = get_model_state_dict(
            model_2d, options=StateDictOptions(full_state_dict=True, cpu_offload=True)
        )
        new_full_osd = get_optimizer_state_dict(
            model_2d,
            optimizers=optim_2d,
            options=StateDictOptions(full_state_dict=True, cpu_offload=True),
        )
self.assertEqual(ref_full_msd, new_full_msd)
self.assertEqual(ref_full_osd, new_full_osd)
instantiate_parametrized_tests(TestNew2dParallelStateDict)
if __name__ == "__main__":
run_tests()
|
TestNew2dParallelStateDict
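For reference, a minimal single-process sketch of the optimizer-state round trip the test above verifies, using plain torch.optim APIs rather than the distributed get_/set_optimizer_state_dict helpers (the model and data here are hypothetical):

import copy

import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.Adam(model.parameters(), lr=0.01)
model(torch.randn(2, 4)).sum().backward()
optim.step()
snapshot = copy.deepcopy(optim.state_dict())  # reference optimizer state
model(torch.randn(2, 4)).sum().backward()
optim.step()  # state now diverges from the snapshot
optim.load_state_dict(snapshot)  # restore, like set_optimizer_state_dict above
restored = optim.state_dict()
assert all(
    torch.equal(restored["state"][k]["exp_avg"], snapshot["state"][k]["exp_avg"])
    for k in snapshot["state"]
)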
|
python
|
getsentry__sentry
|
src/sentry/types/actor.py
|
{
"start": 10217,
"end": 11696
}
|
class ____(Protocol):
"""Protocol for objects that are owned by Actor but need to store ownership in discrete columns"""
@property
def owner(self) -> Actor | None: ...
@owner.setter
def owner(self, actor: Actor | None) -> None: ...
def parse_and_validate_actor(actor_identifier: str | None, organization_id: int) -> Actor | None:
if not actor_identifier:
return None
try:
actor = Actor.from_identifier(actor_identifier, organization_id)
except Exception:
raise serializers.ValidationError(
"Could not parse actor. Format should be `type:id` where type is `team` or `user`."
)
validate_actor(actor, organization_id)
return actor
def validate_actor(actor: Actor, organization_id: int) -> None:
from sentry.models.organizationmember import OrganizationMember
from sentry.models.team import Team
try:
obj = actor.resolve()
except Actor.InvalidActor:
raise serializers.ValidationError(f"{actor.actor_type} does not exist")
if isinstance(obj, Team):
if obj.organization_id != organization_id:
raise serializers.ValidationError("Team is not a member of this organization")
elif isinstance(obj, RpcUser):
if not OrganizationMember.objects.filter(
organization_id=organization_id, user_id=obj.id
).exists():
raise serializers.ValidationError("User is not a member of this organization")
|
ActorOwned
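A self-contained sketch of the property-based Protocol pattern above; Owner and Rule are hypothetical stand-ins, and conformance is structural, so no inheritance is needed:

from typing import Optional, Protocol

class Owner: ...

class Owned(Protocol):
    @property
    def owner(self) -> Optional[Owner]: ...
    @owner.setter
    def owner(self, value: Optional[Owner]) -> None: ...

class Rule:
    def __init__(self) -> None:
        self._owner: Optional[Owner] = None

    @property
    def owner(self) -> Optional[Owner]:
        return self._owner

    @owner.setter
    def owner(self, value: Optional[Owner]) -> None:
        self._owner = value

rule: Owned = Rule()  # type-checks structurally; no subclassing required
rule.owner = Owner()
print(rule.owner)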
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py
|
{
"start": 41271,
"end": 44344
}
|
class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: Marketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
def request_params(
        self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {}
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def stream_slices(
        self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"marketer_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for fetched in response.json().get("campaignResults"):
for x in fetched.get("results"):
x["marketer_id"] = stream_slice["marketer_id"]
x["campaign_id"] = fetched.get("campaignId")
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
stream_start, stream_end = self._get_time_interval(self.config.get("start_date"), self.config.get("end_date"))
stream_conversion_count = self._get_bool_conversion_count_by_click_date(
self.config.get("conversion_count", DEFAULT_REPORT_CONVERSION_COUNT_BY_CLICK_DATE)
)
return (
f"reports/marketers/{stream_slice['marketer_id']}/campaigns/geo?from="
+ str(stream_start.date())
+ "&to="
+ str(stream_end.date())
+ "&breakdown="
+ str(self.config.get("geo_location_breakdown", DEFAULT_GEO_LOCATION_BREAKDOWN))
+ "&limit=500"
+ "&includeVideoStats=true"
+ "&conversionsByClickDate="
+ str(stream_conversion_count)
)
# Retrieve performance statistics for a Marketer by interest.
# The API in this sub-section allows retrieving performance statistics by interest at different levels: marketer and campaign.
|
PerformanceReportMarketersCampaignsByGeo
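The parent/child slicing above reduces to a simple pattern: one child slice per parent record. A toy, framework-free sketch (all names hypothetical):

def parent_records():
    # stands in for Marketers.read_records(...)
    yield {"id": "marketer-1"}
    yield {"id": "marketer-2"}

def stream_slices():
    # one slice per parent record, as in the substream above
    for record in parent_records():
        yield {"marketer_id": record["id"]}

print(list(stream_slices()))
# [{'marketer_id': 'marketer-1'}, {'marketer_id': 'marketer-2'}]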
|
python
|
scrapy__scrapy
|
tests/test_command_version.py
|
{
"start": 53,
"end": 678
}
|
class ____:
def test_output(self) -> None:
_, out, _ = proc("version")
assert out.strip() == f"Scrapy {scrapy.__version__}"
def test_verbose_output(self) -> None:
_, out, _ = proc("version", "-v")
headers = [line.partition(":")[0].strip() for line in out.strip().splitlines()]
assert headers == [
"Scrapy",
"lxml",
"libxml2",
"cssselect",
"parsel",
"w3lib",
"Twisted",
"Python",
"pyOpenSSL",
"cryptography",
"Platform",
]
|
TestVersionCommand
|
python
|
huggingface__transformers
|
src/transformers/models/sew_d/modeling_sew_d.py
|
{
"start": 14698,
"end": 15142
}
|
class ____(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
# Copied from transformers.models.sew.modeling_sew.SEWUpsampling with SEW->SEWD
|
SEWDSamePadLayer
|
python
|
django__django
|
django/test/testcases.py
|
{
"start": 65162,
"end": 67864
}
|
class ____(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory
sqlite) and each thread needs to commit all their transactions so that the
other thread can see the changes.
"""
host = "localhost"
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def _make_connections_override(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.is_in_memory_db():
connections_override[conn.alias] = conn
return connections_override
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.enterClassContext(
modify_settings(ALLOWED_HOSTS={"append": cls.allowed_host})
)
cls._start_server_thread()
@classmethod
def _start_server_thread(cls):
connections_override = cls._make_connections_override()
for conn in connections_override.values():
# Explicitly enable thread-shareability for this connection.
conn.inc_thread_sharing()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
cls.addClassCleanup(cls._terminate_thread)
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _terminate_thread(cls):
# Terminate the live server's thread.
cls.server_thread.terminate()
# Restore shared connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
|
LiveServerTestCase
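live_server_url and allowed_host above are classproperties: the getter receives the class rather than an instance. A minimal stand-in descriptor (not Django's actual implementation) showing that behavior:

class classproperty:
    # minimal illustration of class-level property access
    def __init__(self, fget):
        self.fget = fget

    def __get__(self, instance, owner):
        return self.fget(owner)

class Server:
    host = "localhost"
    port = 8081

    @classproperty
    def url(cls):
        return "http://%s:%s" % (cls.host, cls.port)

print(Server.url)    # works on the class itself
print(Server().url)  # and on instances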
|
python
|
psf__black
|
tests/data/cases/preview_long_strings__regression.py
|
{
"start": 11333,
"end": 11919
}
|
class ____:
class B:
def foo():
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
|
A
|
python
|
google__jax
|
jax/_src/export/shape_poly.py
|
{
"start": 68502,
"end": 72072
}
|
class ____:
comp: Comparator
left: DimSize
right: DimSize
# `error_message_pieces` is a list of strings and DimSize. The error message
# is formed by evaluating the DimSize and concatenating the sequence.
error_message_pieces: Sequence[str | DimSize]
def check_statically(self, eval: ShapeEvaluator) -> None:
"""Evaluates a constraint statically."""
left, right = eval.evaluate(self.left), eval.evaluate(self.right)
try:
if self.comp == Comparator.EQ:
ok = (left == right)
elif self.comp == Comparator.GEQ:
ok = (left >= right)
else:
assert False # We are in a context where we know we can evaluate
# all symbolic expressions to constants.
except InconclusiveDimensionOperation as e:
raise self.make_error(eval) from e
if not ok:
raise self.make_error(eval)
def compute(self, eval: ShapeEvaluator) -> typing.Array | None:
"""Computes if the constraint is satisfied.
If the constraint can be resolved statically returns None
or raises ValueError otherwise. If the constraint cannot be
resolved statically, returns a value representing if the
constraint is satisfied.
"""
from jax._src.lax import lax # pytype: disable=import-error
left, right = eval.evaluate(self.left), eval.evaluate(self.right)
# Try to evaluate the constraint statically.
if core.is_constant_shape((left, right)):
left_int, right_int = op.index(left), op.index(right)
if self.comp == Comparator.EQ:
if not (left_int == right_int):
raise self.make_error(eval)
elif self.comp == Comparator.GEQ:
if not (left_int >= right_int):
raise self.make_error(eval)
else: assert False
return None
if self.comp == Comparator.EQ:
is_ok = lax.eq(left, right)
elif self.comp == Comparator.GEQ:
is_ok = lax.ge(left, right)
else: assert False
return is_ok
def __str__(self):
return (f"{self.left} {'==' if self.comp == Comparator.EQ else '>='} {self.right}"
f" ({self.error_message_pieces})")
__repr__ = __str__
def error_message_and_inputs(
self,
eval: ShapeEvaluator) -> tuple[str, Sequence[Any]]:
"""Forms the error_message and error message_inputs.
See shape_assertion.
"""
# There is currently a limitation in the shape assertion checker that
# it supports at most 32 error_message_inputs. We try to stay within the
# limit, reusing a format specifier if possible.
max_error_message_inputs = 32
format_specifiers: dict[DimSize, str] = {}
error_message_inputs: list[Any] = []
error_message_strings: list[str] = []
for e in self.error_message_pieces:
if isinstance(e, str):
error_message_strings.append(e)
continue
cached_spec = format_specifiers.get(e)
if cached_spec is not None:
error_message_strings.append(cached_spec)
continue
if len(error_message_inputs) >= max_error_message_inputs:
error_message_strings.append("N/A")
continue
spec = "{" + str(len(error_message_inputs)) + "}"
format_specifiers[e] = spec
error_message_strings.append(spec)
error_message_inputs.append(eval.evaluate(e))
return ("".join(error_message_strings),
error_message_inputs)
def make_error(self, eval: ShapeEvaluator) -> Exception:
error_message, error_message_inputs = self.error_message_and_inputs(eval)
return ValueError(error_message.format(*error_message_inputs))
|
ShapeConstraint
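The specifier-caching logic in error_message_and_inputs can be exercised standalone. A sketch with a smaller cap (the real limit above is 32); the evaluate callable and the integer "dimension" pieces are stand-ins:

MAX_INPUTS = 3  # the real limit above is 32

def build_message(pieces, evaluate):
    specs, inputs, out = {}, [], []
    for piece in pieces:
        if isinstance(piece, str):
            out.append(piece)
        elif piece in specs:
            out.append(specs[piece])        # reuse the cached specifier
        elif len(inputs) >= MAX_INPUTS:
            out.append("N/A")               # over the input cap
        else:
            spec = "{" + str(len(inputs)) + "}"
            specs[piece] = spec
            out.append(spec)
            inputs.append(evaluate(piece))
    return "".join(out), inputs

msg, args = build_message(["dim ", 0, " must equal ", 0], lambda e: 7)
print(msg.format(*args))  # dim 7 must equal 7 -- one input, used twice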
|
python
|
sympy__sympy
|
sympy/polys/agca/modules.py
|
{
"start": 33500,
"end": 41088
}
|
class ____(SubModule):
"""
Submodule of a free module over a generalized polynomial ring.
Do not instantiate this, use the constructor method of FreeModule instead:
>>> from sympy.abc import x, y
>>> from sympy import QQ
>>> F = QQ.old_poly_ring(x, y).free_module(2)
>>> F.submodule([x, y], [1, 0])
<[x, y], [1, 0]>
Attributes:
- order - monomial order used
"""
#self._gb - cached groebner basis
#self._gbe - cached groebner basis relations
def __init__(self, gens, container, order="lex", TOP=True):
SubModule.__init__(self, gens, container)
if not isinstance(container, FreeModulePolyRing):
raise NotImplementedError('This implementation is for submodules of '
+ 'FreeModulePolyRing, got %s' % container)
self.order = ModuleOrder(monomial_key(order), self.ring.order, TOP)
self._gb = None
self._gbe = None
def __eq__(self, other):
if isinstance(other, SubModulePolyRing) and self.order != other.order:
return False
return SubModule.__eq__(self, other)
def _groebner(self, extended=False):
"""Returns a standard basis in sdm form."""
from sympy.polys.distributedmodules import sdm_groebner, sdm_nf_mora
if self._gbe is None and extended:
gb, gbe = sdm_groebner(
[self.ring._vector_to_sdm(x, self.order) for x in self.gens],
sdm_nf_mora, self.order, self.ring.dom, extended=True)
self._gb, self._gbe = tuple(gb), tuple(gbe)
if self._gb is None:
self._gb = tuple(sdm_groebner(
[self.ring._vector_to_sdm(x, self.order) for x in self.gens],
sdm_nf_mora, self.order, self.ring.dom))
if extended:
return self._gb, self._gbe
else:
return self._gb
def _groebner_vec(self, extended=False):
"""Returns a standard basis in element form."""
if not extended:
return [FreeModuleElement(self,
tuple(self.ring._sdm_to_vector(x, self.rank)))
for x in self._groebner()]
gb, gbe = self._groebner(extended=True)
return ([self.convert(self.ring._sdm_to_vector(x, self.rank))
for x in gb],
[self.ring._sdm_to_vector(x, len(self.gens)) for x in gbe])
def _contains(self, x):
from sympy.polys.distributedmodules import sdm_zero, sdm_nf_mora
return sdm_nf_mora(self.ring._vector_to_sdm(x, self.order),
self._groebner(), self.order, self.ring.dom) == \
sdm_zero()
def _syzygies(self):
"""Compute syzygies. See [SCA, algorithm 2.5.4]."""
# NOTE if self.gens is a standard basis, this can be done more
# efficiently using Schreyer's theorem
# First bullet point
k = len(self.gens)
r = self.rank
zero = self.ring.convert(0)
one = self.ring.convert(1)
Rkr = self.ring.free_module(r + k)
newgens = []
for j, f in enumerate(self.gens):
m = [0]*(r + k)
for i, v in enumerate(f):
m[i] = v
for i in range(k):
m[r + i] = one if j == i else zero
m = FreeModuleElement(Rkr, tuple(m))
newgens.append(m)
# Note: we need *descending* order on module index, and TOP=False to
# get an elimination order
F = Rkr.submodule(*newgens, order='ilex', TOP=False)
# Second bullet point: standard basis of F
G = F._groebner_vec()
# Third bullet point: G0 = G intersect the new k components
G0 = [x[r:] for x in G if all(y == zero for y in x[:r])]
# Fourth and fifth bullet points: we are done
return G0
def _in_terms_of_generators(self, e):
"""Expression in terms of generators. See [SCA, 2.8.1]."""
# NOTE: if gens is a standard basis, this can be done more efficiently
M = self.ring.free_module(self.rank).submodule(*((e,) + self.gens))
S = M.syzygy_module(
order="ilex", TOP=False) # We want decreasing order!
G = S._groebner_vec()
        # This list cannot be empty since e is an element
e = [x for x in G if self.ring.is_unit(x[0])][0]
return [-x/e[0] for x in e[1:]]
def reduce_element(self, x, NF=None):
"""
Reduce the element ``x`` of our container modulo ``self``.
        This applies the normal form ``NF`` to ``x``. If ``NF`` is ``None``,
        the default Mora normal form is used (which is not unique!).
"""
from sympy.polys.distributedmodules import sdm_nf_mora
if NF is None:
NF = sdm_nf_mora
return self.container.convert(self.ring._sdm_to_vector(NF(
self.ring._vector_to_sdm(x, self.order), self._groebner(),
self.order, self.ring.dom),
self.rank))
def _intersect(self, other, relations=False):
# See: [SCA, section 2.8.2]
fi = self.gens
hi = other.gens
r = self.rank
ci = [[0]*(2*r) for _ in range(r)]
for k in range(r):
ci[k][k] = 1
ci[k][r + k] = 1
di = [list(f) + [0]*r for f in fi]
ei = [[0]*r + list(h) for h in hi]
syz = self.ring.free_module(2*r).submodule(*(ci + di + ei))._syzygies()
nonzero = [x for x in syz if any(y != self.ring.zero for y in x[:r])]
res = self.container.submodule(*([-y for y in x[:r]] for x in nonzero))
reln1 = [x[r:r + len(fi)] for x in nonzero]
reln2 = [x[r + len(fi):] for x in nonzero]
if relations:
return res, reln1, reln2
return res
def _module_quotient(self, other, relations=False):
# See: [SCA, section 2.8.4]
if relations and len(other.gens) != 1:
raise NotImplementedError
if len(other.gens) == 0:
return self.ring.ideal(1)
elif len(other.gens) == 1:
# We do some trickery. Let f be the (vector!) generating ``other``
# and f1, .., fn be the (vectors) generating self.
# Consider the submodule of R^{r+1} generated by (f, 1) and
# {(fi, 0) | i}. Then the intersection with the last module
# component yields the quotient.
g1 = list(other.gens[0]) + [1]
gi = [list(x) + [0] for x in self.gens]
# NOTE: We *need* to use an elimination order
M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi),
order='ilex', TOP=False)
if not relations:
return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if
all(y == self.ring.zero for y in x[:-1])])
else:
G, R = M._groebner_vec(extended=True)
indices = [i for i, x in enumerate(G) if
all(y == self.ring.zero for y in x[:-1])]
return (self.ring.ideal(*[G[i][-1] for i in indices]),
[[-x for x in R[i][1:]] for i in indices])
# For more generators, we use I : <h1, .., hn> = intersection of
# {I : <hi> | i}
# TODO this can be done more efficiently
return reduce(lambda x, y: x.intersect(y),
(self._module_quotient(self.container.submodule(x)) for x in other.gens))
|
SubModulePolyRing
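A hedged usage example of the entry points shown in the class docstring above; the calls assume the documented sympy.polys.agca module API (contains and syzygy_module, the latter also used internally above):

from sympy.abc import x, y
from sympy import QQ

F = QQ.old_poly_ring(x, y).free_module(2)
M = F.submodule([x, y], [1, 0])
print(M.contains([x, y]))  # True: generators are members
print(M.syzygy_module())   # relations among the generators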
|
python
|
astropy__astropy
|
astropy/io/ascii/basic.py
|
{
"start": 5984,
"end": 6123
}
|
class ____(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables.
"""
delimiter = ","
|
CsvSplitter
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_decomp_update.py
|
{
"start": 47698,
"end": 47761
}
|
class ____(BaseQRinsert):
dtype = np.dtype('D')
|
TestQRinsert_D
|
python
|
pandas-dev__pandas
|
pandas/io/pytables.py
|
{
"start": 90243,
"end": 90345
}
|
class ____(DataIndexableCol):
"""represent a generic pytables data column"""
|
GenericDataIndexableCol
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/resources/beta/skills/skills.py
|
{
"start": 23756,
"end": 24375
}
|
class ____:
def __init__(self, skills: Skills) -> None:
self._skills = skills
self.create = to_streamed_response_wrapper(
skills.create,
)
self.retrieve = to_streamed_response_wrapper(
skills.retrieve,
)
self.list = to_streamed_response_wrapper(
skills.list,
)
self.delete = to_streamed_response_wrapper(
skills.delete,
)
@cached_property
def versions(self) -> VersionsWithStreamingResponse:
return VersionsWithStreamingResponse(self._skills.versions)
|
SkillsWithStreamingResponse
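The wrapper pattern above (re-exposing each resource method through a response adapter at construction time) in a toy, dependency-free form; all names here are hypothetical:

def with_logging(fn):
    # stands in for to_streamed_response_wrapper
    def inner(*args, **kwargs):
        print("calling %s" % fn.__name__)
        return fn(*args, **kwargs)
    return inner

class Skills:
    def create(self):
        return "created"

class SkillsWithLogging:
    def __init__(self, skills):
        self._skills = skills
        self.create = with_logging(skills.create)

print(SkillsWithLogging(Skills()).create())  # logs, then returns "created"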
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/autoVariance4.py
|
{
"start": 797,
"end": 855
}
|
class ____(Generic[T_contra]):
pass
|
Parent_Contravariant
|
python
|
huggingface__transformers
|
src/transformers/models/layoutlmv3/modeling_layoutlmv3.py
|
{
"start": 8360,
"end": 8834
}
|
class ____(PreTrainedModel):
config: LayoutLMv3Config
base_model_prefix = "layoutlmv3"
input_modalities = ("image", "text")
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, LayoutLMv3Model):
if self.config.visual_embed:
init.zeros_(module.cls_token)
init.zeros_(module.pos_embed)
|
LayoutLMv3PreTrainedModel
|
python
|
walkccc__LeetCode
|
solutions/2391. Minimum Amount of Time to Collect Garbage/2391.py
|
{
"start": 0,
"end": 488
}
|
class ____:
def garbageCollection(self, garbage: list[str], travel: list[int]) -> int:
prefix = list(itertools.accumulate(travel))
def getTime(c: str) -> int:
characterCount = 0
lastIndex = -1
for i, s in enumerate(garbage):
        if c in s:
lastIndex = i
characterCount += s.count(c)
return characterCount + (0 if lastIndex <= 0 else prefix[lastIndex - 1])
return getTime('M') + getTime('P') + getTime('G')
|
Solution
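A quick check of the solution above on LeetCode's first sample input (assumes the snippet lives in a module with import itertools at the top):

print(Solution().garbageCollection(["G", "P", "GP", "GG"], [2, 4, 3]))  # 21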
|
python
|
huggingface__transformers
|
src/transformers/modeling_outputs.py
|
{
"start": 83948,
"end": 85514
}
|
class ____(ModelOutput):
"""
Base class for outputs of image super resolution models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Reconstruction loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed images, possibly upscaled.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
|
ImageSuperResolutionOutput
|
python
|
pytorch__pytorch
|
test/inductor/test_inductor_freezing.py
|
{
"start": 3766,
"end": 4280
}
|
class ____(torch.nn.Module):
def __init__(self, in_channels, out_channels, bias=False, **kwargs):
super().__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=bias, **kwargs)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001, dtype=torch.float)
self.bn2 = torch.nn.BatchNorm2d(out_channels, eps=0.1, dtype=torch.float)
def forward(self, x):
tmp = self.bn(self.conv(x))
tmp2 = self.bn2(self.conv(x))
return tmp + tmp2
|
ConvMultiBN
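A shape-only forward-pass check of the module above; the input sizes are arbitrary (assumes import torch at module top):

m = ConvMultiBN(3, 8, kernel_size=3, padding=1)
out = m(torch.randn(2, 3, 16, 16))
print(out.shape)  # torch.Size([2, 8, 16, 16])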
|
python
|
huggingface__transformers
|
src/transformers/models/ijepa/modeling_ijepa.py
|
{
"start": 9874,
"end": 10517
}
|
class ____(nn.Module):
"""
The residual connection is defined in IJepaLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: IJepaConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
IJepaSelfOutput
|
python
|
doocs__leetcode
|
solution/0000-0099/0083.Remove Duplicates from Sorted List/Solution.py
|
{
"start": 151,
"end": 448
}
|
class ____:
def deleteDuplicates(self, head: Optional[ListNode]) -> Optional[ListNode]:
cur = head
while cur and cur.next:
if cur.val == cur.next.val:
cur.next = cur.next.next
else:
cur = cur.next
return head
|
Solution
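Exercising the deduplication above on 1 -> 1 -> 2, with a hypothetical ListNode matching LeetCode's definition:

class ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

head = Solution().deleteDuplicates(ListNode(1, ListNode(1, ListNode(2))))
vals = []
while head:
    vals.append(head.val)
    head = head.next
print(vals)  # [1, 2]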
|
python
|
huggingface__transformers
|
tests/pipelines/test_pipelines_any_to_any.py
|
{
"start": 1165,
"end": 15166
}
|
class ____(unittest.TestCase):
model_mapping = MODEL_FOR_MULTIMODAL_LM_MAPPING
    # We only need `processor`, but the Mixin will pass all possible preprocessing
    # classes for a model, so we accept them all in the signature.
def get_test_pipeline(
self, model, tokenizer, processor, image_processor=None, feature_extractor=None, dtype="float32"
):
_is_images_supported = hasattr(processor, "image_processor")
_is_videos_supported = hasattr(processor, "video_processor")
_is_audios_supported = hasattr(processor, "feature_extractor")
image_token = getattr(processor.tokenizer, "image_token", "")
video_token = getattr(processor.tokenizer, "video_token", "")
audio_token = getattr(processor.tokenizer, "audio_token", "")
images_examples = [
{
"images": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"text": f"{image_token}This is a ",
},
{
"images": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"text": f"{image_token}Here I see a ",
},
]
videos_examples = [
{
"videos": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Big_Buck_Bunny_720_10s_10MB.mp4"
),
"text": f"{video_token}This video shows a ",
},
{
"video": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/sample_demo_1.mp4"
),
"text": f"{video_token}In the video I see a ",
},
]
audio_examples = [
{
"audio": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3"
),
"text": f"{audio_token}This is sound of a ",
},
{
"audio": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
),
"text": f"{audio_token}Here I hear a ",
},
]
examples = []
if _is_images_supported:
examples.extend(images_examples)
if _is_videos_supported:
examples.extend(videos_examples)
if _is_audios_supported:
examples.extend(audio_examples)
pipe = AnyToAnyPipeline(model=model, processor=processor, dtype=dtype, max_new_tokens=10)
return pipe, examples
def run_pipeline_test(self, pipe, examples):
# Single
outputs = pipe(examples[0])
self.assertEqual(
outputs,
[
{"input_text": ANY(str), "generated_text": ANY(str)},
],
)
        # Batched, limited to the first 2 examples
outputs = pipe(examples[:2])
self.assertEqual(
outputs,
[
[
{"input_text": ANY(str), "generated_text": ANY(str)},
],
[
{"input_text": ANY(str), "generated_text": ANY(str)},
],
],
)
        # `generation_mode` raises errors when it doesn't match other params
with self.assertRaises(ValueError):
pipe(examples, generation_mode="video")
with self.assertRaises(ValueError):
pipe(examples, generation_mode="audio", return_full_text=True)
with self.assertRaises(ValueError):
pipe(examples, generation_mode="image", return_type=1)
# Chat template
if getattr(pipe.processor, "chat_template", None) is not None:
messages = []
for example in examples[:2]:
example.pop("text")
modality_type, modality_data = list(example.items())[0]
message = {
"role": "user",
"content": [
{"type": "text", "text": "This is a "},
{"type": modality_type, "path": modality_data},
],
}
messages.append([message])
outputs = pipe(messages, return_full_text=True, max_new_tokens=10)
self.assertEqual(
outputs,
[
[
{"input_text": ANY(str), "generated_text": ANY(str)},
],
[
{"input_text": ANY(str), "generated_text": ANY(str)},
],
],
)
@slow
def test_small_model_pt_token_text_only(self):
pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it")
text = "What is the capital of France? Assistant:"
outputs = pipe(text=text, generate_kwargs={"do_sample": False})
self.assertEqual(
outputs,
[
{
"input_text": "What is the capital of France? Assistant:",
"generated_None": "What is the capital of France? Assistant: The capital of France is Paris.\n",
}
],
)
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "Write a poem on Hugging Face, the company"},
],
},
],
[
{
"role": "user",
"content": [
{"type": "text", "text": "What is the capital of France?"},
],
},
],
]
outputs = pipe(text=messages, generate_kwargs={"do_sample": False})
self.assertEqual(
outputs,
[
[
{
"input_text": [
{
"role": "user",
"content": [{"type": "text", "text": "Write a poem on Hugging Face, the company"}],
}
],
"generated_None": [
{
"role": "user",
"content": [{"type": "text", "text": "Write a poem on Hugging Face, the company"}],
},
{
"role": "assistant",
"content": "A digital embrace, a friendly face,\nHugging Face rises, setting the pace.\nFor AI's heart, a vibrant core,\nOpen source models, and so much more.\n\nFrom transformers deep, a powerful might,\nNLP's future, shining so bright.\nDatasets curated, a treasure trove found,\nFor researchers and builders, on fertile ground.\n\nA community thriving, a collaborative art,\nSharing knowledge, playing a vital part.\nSpaces to showcase, creations unfold,\nStories in code, bravely told.\n\nWith libraries sleek, and tools so refined,\nDemocratizing AI, for all humankind.\nFrom sentiment analysis to text generation's grace,\nHugging Face empowers, at a rapid pace.\n\nA platform of learning, a place to explore,\nUnlocking potential, and asking for more.\nSo let's give a cheer, for this innovative team,\nHugging Face's vision, a beautiful dream. \n",
},
],
}
],
[
{
"input_text": [
{"role": "user", "content": [{"type": "text", "text": "What is the capital of France?"}]}
],
"generated_None": [
{"role": "user", "content": [{"type": "text", "text": "What is the capital of France?"}]},
{"role": "assistant", "content": "The capital of France is **Paris**. \n"},
],
}
],
],
)
@slow
def test_small_model_pt_token_audio_input(self):
pipe = pipeline("any-to-any", model="google/gemma-3n-E4B-it")
audio_path = url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
)
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "What do you hear in this audio?"},
{"type": "audio", "url": audio_path},
],
},
]
outputs = pipe(text=messages, return_type=1, generate_kwargs={"do_sample": False}) # return new text
self.assertEqual(
outputs,
[
{
"input_text": [
{
"role": "user",
"content": [
{"type": "text", "text": "What do you hear in this audio?"},
{
"type": "audio",
"url": "https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav",
},
],
}
],
"generated_None": "user\nWhat do you hear in this audio?\n\n\n\n\nmodel\nThe audio contains the repeated sound of someone **coughing**. It's a fairly consistent, forceful cough throughout the duration.",
}
],
)
@slow
def test_small_model_pt_token_audio_gen(self):
pipe = pipeline("any-to-any", model="Qwen/Qwen2.5-Omni-3B", dtype="bfloat16")
video_path = url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4"
)
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this video."},
{"type": "video", "video": video_path},
],
},
]
outputs = pipe(
text=messages,
num_frames=16,
max_new_tokens=50,
load_audio_from_video=True,
generate_kwargs={"use_audio_in_video": True, "talker_do_sample": False, "do_sample": False},
)
self.assertEqual(
outputs,
[
{
"input_text": [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this video."},
{
"type": "video",
"video": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4",
},
],
}
],
"generated_None": [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this video."},
{
"type": "video",
"video": "https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/Cooking_cake.mp4",
},
],
},
{
"role": "assistant",
"content": "system\nYou are a helpful assistant.\nuser\nDescribe this video.\nassistant\nThe video begins with a man standing in a kitchen, wearing a black shirt. He is holding a large glass bowl filled with flour and a spoon. The man starts to mix the flour in the bowl, creating a dough. As he mixes, he continues to talk to the camera, explaining the process. The kitchen has wooden cabinets and a white refrigerator in the background. The man's movements are deliberate and focused as he works with the dough. The video ends with the man still mixing the dough in the bowl. Overall, the video provides a clear and detailed demonstration of how to make dough using flour and a spoon.",
},
],
}
],
)
outputs = pipe(text=messages, generation_mode="audio", num_frames=16, max_new_tokens=20)
self.assertEqual(len(outputs), len(messages))
self.assertIsInstance(outputs[0], dict)
for out in outputs:
self.assertTrue("input_text" in out)
self.assertTrue("generated_audio" in out)
self.assertIsInstance(out["generated_audio"], np.ndarray)
@slow
def test_small_model_pt_image_gen(self):
pipe = pipeline("any-to-any", model="deepseek-community/Janus-Pro-1B")
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "A dog running under the rain."},
],
},
]
outputs = pipe(text=messages, generation_mode="image")
self.assertEqual(len(outputs), len(messages))
self.assertIsInstance(outputs[0], dict)
for out in outputs:
self.assertTrue("input_text" in out)
self.assertTrue("generated_image" in out)
self.assertIsInstance(out["generated_image"], PIL.Image.Image)
|
AnyToAnyPipelineTests
|
python
|
simonw__datasette
|
datasette/filters.py
|
{
"start": 9528,
"end": 15261
}
|
class ____:
_filters = (
[
# key, display, sql_template, human_template, format=, numeric=, no_argument=
TemplatedFilter(
"exact",
"=",
'"{c}" = :{p}',
lambda c, v: "{c} = {v}" if v.isdigit() else '{c} = "{v}"',
),
TemplatedFilter(
"not",
"!=",
'"{c}" != :{p}',
lambda c, v: "{c} != {v}" if v.isdigit() else '{c} != "{v}"',
),
TemplatedFilter(
"contains",
"contains",
'"{c}" like :{p}',
'{c} contains "{v}"',
format="%{}%",
),
TemplatedFilter(
"notcontains",
"does not contain",
'"{c}" not like :{p}',
'{c} does not contain "{v}"',
format="%{}%",
),
TemplatedFilter(
"endswith",
"ends with",
'"{c}" like :{p}',
'{c} ends with "{v}"',
format="%{}",
),
TemplatedFilter(
"startswith",
"starts with",
'"{c}" like :{p}',
'{c} starts with "{v}"',
format="{}%",
),
TemplatedFilter("gt", ">", '"{c}" > :{p}', "{c} > {v}", numeric=True),
TemplatedFilter(
"gte", "\u2265", '"{c}" >= :{p}', "{c} \u2265 {v}", numeric=True
),
TemplatedFilter("lt", "<", '"{c}" < :{p}', "{c} < {v}", numeric=True),
TemplatedFilter(
"lte", "\u2264", '"{c}" <= :{p}', "{c} \u2264 {v}", numeric=True
),
TemplatedFilter("like", "like", '"{c}" like :{p}', '{c} like "{v}"'),
TemplatedFilter(
"notlike", "not like", '"{c}" not like :{p}', '{c} not like "{v}"'
),
TemplatedFilter("glob", "glob", '"{c}" glob :{p}', '{c} glob "{v}"'),
InFilter(),
NotInFilter(),
]
+ (
[
TemplatedFilter(
"arraycontains",
"array contains",
""":{p} in (select value from json_each([{t}].[{c}]))""",
'{c} contains "{v}"',
),
TemplatedFilter(
"arraynotcontains",
"array does not contain",
""":{p} not in (select value from json_each([{t}].[{c}]))""",
'{c} does not contain "{v}"',
),
]
if detect_json1()
else []
)
+ [
TemplatedFilter(
"date", "date", 'date("{c}") = :{p}', '"{c}" is on date {v}'
),
TemplatedFilter(
"isnull", "is null", '"{c}" is null', "{c} is null", no_argument=True
),
TemplatedFilter(
"notnull",
"is not null",
'"{c}" is not null',
"{c} is not null",
no_argument=True,
),
TemplatedFilter(
"isblank",
"is blank",
'("{c}" is null or "{c}" = "")',
"{c} is blank",
no_argument=True,
),
TemplatedFilter(
"notblank",
"is not blank",
'("{c}" is not null and "{c}" != "")',
"{c} is not blank",
no_argument=True,
),
]
)
_filters_by_key = {f.key: f for f in _filters}
def __init__(self, pairs):
self.pairs = pairs
def lookups(self):
"""Yields (lookup, display, no_argument) pairs"""
for filter in self._filters:
yield filter.key, filter.display, filter.no_argument
def human_description_en(self, extra=None):
bits = []
if extra:
bits.extend(extra)
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
bits.append(filter.human_clause(column, value))
# Comma separated, with an ' and ' at the end
and_bits = []
commas, tail = bits[:-1], bits[-1:]
if commas:
and_bits.append(", ".join(commas))
if tail:
and_bits.append(tail[0])
s = " and ".join(and_bits)
if not s:
return ""
return f"where {s}"
def selections(self):
"""Yields (column, lookup, value) tuples"""
for key, value in self.pairs:
if "__" in key:
column, lookup = key.rsplit("__", 1)
else:
column = key
lookup = "exact"
yield column, lookup, value
def has_selections(self):
return bool(self.pairs)
def build_where_clauses(self, table):
sql_bits = []
params = {}
i = 0
for column, lookup, value in self.selections():
filter = self._filters_by_key.get(lookup, None)
if filter:
sql_bit, param = filter.where_clause(table, column, value, i)
sql_bits.append(sql_bit)
if param is not None:
if not isinstance(param, list):
param = [param]
for individual_param in param:
param_id = f"p{i}"
params[param_id] = individual_param
i += 1
return sql_bits, params
|
Filters
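The column__lookup parsing in selections() is worth seeing in isolation; note that rsplit("__", 1) keeps interior double underscores in the column name:

def parse(key):
    if "__" in key:
        column, lookup = key.rsplit("__", 1)
    else:
        column, lookup = key, "exact"
    return column, lookup

print(parse("age__gte"))   # ('age', 'gte')
print(parse("name"))       # ('name', 'exact')
print(parse("a__b__gt"))   # ('a__b', 'gt')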
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/proxied_state.py
|
{
"start": 6369,
"end": 8412
}
|
class ____(Exception):
pass
def load_proxied_state_from_yaml(proxied_yaml_path: Path) -> AirflowProxiedState:
"""Loads the proxied state from a directory of yaml files.
Expects the directory to contain yaml files, where each file corresponds to the id of a dag (ie: `dag_id.yaml`).
This directory is typically constructed using the `dagster-airlift` CLI:
.. code-block:: bash
AIRFLOW_HOME=... dagster-airlift proxy scaffold
    Each file should have one of the following structures.
In the case of task-level proxying:
.. code-block:: yaml
tasks:
- id: task_id
proxied: true
- id: task_id
proxied: false
In the case of dag-level proxying:
.. code-block:: yaml
proxied: true
Args:
proxied_yaml_path (Path): The path to the directory containing the yaml files.
Returns:
AirflowProxiedState: The proxied state of the dags and tasks in Airflow.
"""
# Expect proxied_yaml_path to be a directory, where each file represents a dag, and each
# file in the subdir represents a task. The dictionary for each task should contain two keys;
# id: the task id, and proxied: a boolean indicating whether the task has been proxied.
dag_proxied_states = {}
try:
for dag_file in proxied_yaml_path.iterdir():
# Check that the file is a yaml file or yml file
if dag_file.suffix not in [".yaml", ".yml"]:
continue
dag_id = dag_file.stem
yaml_dict = yaml.safe_load(dag_file.read_text())
if not isinstance(yaml_dict, dict):
raise Exception("Expected a dictionary")
dag_proxied_states[dag_id] = DagProxiedState.from_dict(yaml_dict)
except Exception as e:
raise ProxiedStateParsingError("Error parsing proxied state yaml") from e
return AirflowProxiedState(dags=dag_proxied_states)
|
ProxiedStateParsingError
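A minimal round trip of the per-DAG YAML layout described in the docstring above, assuming PyYAML is installed; the DAG and task names are hypothetical:

import pathlib
import tempfile

import yaml

with tempfile.TemporaryDirectory() as d:
    dag_file = pathlib.Path(d) / "my_dag.yaml"
    dag_file.write_text(yaml.safe_dump({"tasks": [{"id": "load", "proxied": True}]}))
    print(dag_file.stem, yaml.safe_load(dag_file.read_text()))
    # my_dag {'tasks': [{'id': 'load', 'proxied': True}]}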
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/sandbox.py
|
{
"start": 13937,
"end": 14561
}
|
class ____(Formatter):
def __init__(self, env: Environment, **kwargs: t.Any) -> None:
self._env = env
super().__init__(**kwargs) # type: ignore
def get_field(
self, field_name: str, args: t.Sequence[t.Any], kwargs: t.Mapping[str, t.Any]
) -> t.Tuple[t.Any, str]:
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = self._env.getattr(obj, i)
else:
obj = self._env.getitem(obj, i)
return obj, first
|
SandboxedFormatter
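The key idea above is overriding string.Formatter.get_field so attribute and item access go through a policy hook. A pure-stdlib sketch with a simpler policy (block names starting with an underscore), not the sandbox's actual rules:

from string import Formatter

class GuardedFormatter(Formatter):
    def get_field(self, field_name, args, kwargs):
        obj, first = super().get_field(field_name, args, kwargs)
        if first.startswith("_"):
            raise ValueError("access to %r is blocked" % first)
        return obj, first

print(GuardedFormatter().format("hello {user}", user="alice"))  # hello alice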
|
python
|
walkccc__LeetCode
|
solutions/131. Palindrome Partitioning/131.py
|
{
"start": 0,
"end": 445
}
|
class ____:
def partition(self, s: str) -> list[list[str]]:
ans = []
def isPalindrome(s: str) -> bool:
return s == s[::-1]
def dfs(s: str, j: int, path: list[str], ans: list[list[str]]) -> None:
if j == len(s):
ans.append(path)
return
for i in range(j, len(s)):
if isPalindrome(s[j: i + 1]):
dfs(s, i + 1, path + [s[j: i + 1]], ans)
dfs(s, 0, [], ans)
return ans
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/live-tests/src/live_tests/report.py
|
{
"start": 915,
"end": 1026
}
|
class ____(Enum):
INITIALIZING = "initializing"
RUNNING = "running"
FINISHED = "finished"
|
ReportState
|
python
|
coleifer__peewee
|
tests/base.py
|
{
"start": 4808,
"end": 6574
}
|
class ____(unittest.TestCase):
def setUp(self):
self._qh = QueryLogHandler()
logger.setLevel(logging.DEBUG)
logger.addHandler(self._qh)
def tearDown(self):
logger.removeHandler(self._qh)
def assertIsNone(self, value):
self.assertTrue(value is None, '%r is not None' % value)
def assertIsNotNone(self, value):
self.assertTrue(value is not None, '%r is None' % value)
@contextmanager
def assertRaisesCtx(self, exceptions):
try:
yield
except Exception as exc:
if not isinstance(exc, exceptions):
raise AssertionError('Got %s, expected %s' % (exc, exceptions))
else:
raise AssertionError('No exception was raised.')
def assertSQL(self, query, sql, params=None, **state):
database = getattr(self, 'database', None) or db
state.setdefault('conflict_statement', database.conflict_statement)
state.setdefault('conflict_update', database.conflict_update)
qsql, qparams = __sql__(query, **state)
self.assertEqual(qsql, sql)
if params is not None:
self.assertEqual(qparams, params)
def assertHistory(self, n, expected):
queries = [logrecord.msg for logrecord in self._qh.queries[-n:]]
queries = [(sql.replace('%s', '?').replace('`', '"'), params)
for sql, params in queries]
self.assertEqual(queries, expected)
@property
def history(self):
return self._qh.queries
def reset_sql_history(self):
self._qh.queries = []
@contextmanager
def assertQueryCount(self, num):
qc = len(self.history)
yield
self.assertEqual(len(self.history) - qc, num)
|
BaseTestCase
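The assertQueryCount pattern above (snapshot a counter, yield, assert on the delta) works with any history list; a dependency-free sketch:

from contextlib import contextmanager

history = []

@contextmanager
def assert_query_count(n):
    before = len(history)
    yield
    assert len(history) - before == n, "expected %d queries" % n

with assert_query_count(2):
    history.append("SELECT 1")
    history.append("SELECT 2")
print("ok")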
|
python
|
cython__cython
|
Cython/Compiler/ExprNodes.py
|
{
"start": 414685,
"end": 416705
}
|
class ____(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# class_def_node PyClassDefNode PyClassDefNode defining this class
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['doc']
type = py_object_type
is_temp = True
def analyse_annotations(self, env):
pass
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
class_def_node = self.class_def_node
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
class_def_node.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
class_def_node.bases.py_result(),
class_def_node.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
|
ClassNode
|
python
|
getsentry__sentry
|
src/sentry/sentry_metrics/querying/data/execution.py
|
{
"start": 11221,
"end": 19072
}
|
class ____:
"""
Represents the result of a ScheduledQuery containing its associated series and totals results.
Attributes:
has_more: True if the query has more groups stored than they were returned. This is used in conjunction with
dynamic limiting.
"""
series_query: ScheduledQuery | None
totals_query: ScheduledQuery | None
result: Mapping[str, Any]
has_more: bool
def __post_init__(self) -> None:
if not self.series_query and not self.totals_query:
raise MetricsQueryExecutionError(
"A query result must contain at least one series or totals query"
)
@classmethod
def from_scheduled_query(
cls, scheduled_query: ScheduledQuery, query_result: Mapping[str, Any], has_more: bool
) -> "QueryResult":
"""
Creates a QueryResult from a ScheduledQuery.
Returns:
A QueryResult which contains the scheduled query and its results.
"""
# We add these fields as top level, so that when merging `QueryResult`(s) we are able to do that easily.
extended_result = {
"modified_start": query_result["modified_start"],
"modified_end": query_result["modified_end"],
}
if scheduled_query.type == ScheduledQueryType.SERIES:
extended_result["series"] = query_result
return QueryResult(
series_query=scheduled_query,
totals_query=None,
result=extended_result,
has_more=has_more,
)
elif scheduled_query.type == ScheduledQueryType.TOTALS:
extended_result["totals"] = query_result
return QueryResult(
series_query=None,
totals_query=scheduled_query,
result=extended_result,
has_more=has_more,
)
raise MetricsQueryExecutionError(
f"Can't build query result from query type {scheduled_query.type}"
)
def _any_query(self) -> ScheduledQuery:
return cast(ScheduledQuery, self.series_query or self.totals_query)
def merge(self, other: "QueryResult") -> "QueryResult":
"""
Merges two QueryResult(s) into a single QueryResult by arbitrarily taking attributes of either of them.
Returns:
A QueryResult which contains the data of both QueryResult(s).
"""
return QueryResult(
series_query=self.series_query or other.series_query,
totals_query=self.totals_query or other.totals_query,
# We merge the dictionaries and in case of duplicated keys, the ones from `other` will be used, as per
# Python semantics.
result={**self.result, **other.result},
has_more=self.has_more or other.has_more,
)
@property
def modified_start(self) -> datetime:
return self.result["modified_start"]
@property
def modified_end(self) -> datetime:
return self.result["modified_end"]
@property
def series(self) -> Sequence[Mapping[str, Any]]:
if "series" not in self.result:
return []
return self.result["series"]["data"]
@series.setter
def series(self, value: Sequence[Mapping[str, Any]]) -> None:
self.result["series"]["data"] = value
@property
def totals(self) -> Sequence[Mapping[str, Any]]:
if "totals" not in self.result:
return []
return self.result["totals"]["data"]
@totals.setter
def totals(self, value: Sequence[Mapping[str, Any]]) -> None:
self.result["totals"]["data"] = value
@property
def meta(self) -> Sequence[Mapping[str, str]]:
        # By default, we extract the metadata from the totals query; if that is
        # not present, we fall back to the series query.
meta_source = "totals" if "totals" in self.result else "series"
return self.result[meta_source]["meta"]
@property
def group_bys(self) -> list[str]:
# We return the groups directly from the query and not the actual groups returned by the query. This is done so
# that we can correctly render groups in case they are not returned from the db because of missing data.
#
# Sorting of the groups is done to maintain consistency across function calls.
scheduled_query = self._any_query()
mappers = [mapper for mapper in scheduled_query.mappers if mapper.applied_on_groupby]
return sorted(
UsedGroupBysVisitor(mappers=mappers).visit(scheduled_query.metrics_query.query)
)
@property
def interval(self) -> int | None:
if self.series_query:
return self.series_query.metrics_query.rollup.interval
return None
@property
def order(self) -> Direction | None:
if self.totals_query:
return self.totals_query.metrics_query.rollup.orderby
return None
@property
def limit(self) -> int | None:
# The totals limit is the only one that controls the number of groups that are returned.
# TODO: we might want to return the limit that is actually returned to users. In that, we would need to check
# if the queries run have a dynamic interval, since in that case we might need to return limit - 1.
if self.totals_query:
return self.totals_query.metrics_query.limit.limit
return None
@property
def unit_family(self) -> UnitFamily | None:
return self._any_query().unit_family
@property
def unit(self) -> MeasurementUnit | None:
return self._any_query().unit
@property
def scaling_factor(self) -> float | None:
return self._any_query().scaling_factor
def align_series_to_totals(self, organization: Organization) -> "QueryResult":
"""
Aligns the series to the totals of the same query.
The alignment process just tries to place values belonging to the same groups in the same order.
Returns:
A mutated QueryResult objects with the aligned series to totals.
"""
alignment_keys = self.group_bys
if not alignment_keys:
return self
indexed_series: dict[tuple[tuple[str, str], ...], list[int]] = {}
for index, data in enumerate(self.series):
composite_key = _build_composite_key_from_dict(data, alignment_keys)
            # Since series rows also carry the time component, we store multiple
            # indexes (one per timestamp) for the same group.
indexed_series.setdefault(composite_key, []).append(index)
aligned_series = []
for data in self.totals:
composite_key = _build_composite_key_from_dict(data, alignment_keys)
indexes = indexed_series.get(composite_key)
# It can happen that the groups in series are not matching the groups in totals, due to Snuba bugs or just
# limiting taking place in queries. Since this is a problem, we want to keep track of it.
if indexes is None:
with sentry_sdk.isolation_scope() as scope:
scope.set_tag("organization_id", organization.id)
scope.set_extra("totals_query", self.totals_query)
scope.set_extra("series_query", self.series_query)
sentry_sdk.capture_message(
"The series groups are not matching the totals groups"
)
for index in indexes or ():
aligned_series.append(self.series[index])
# For the sake of simplicity we are mutating the original data.
if aligned_series:
self.result["series"]["data"] = aligned_series
return self
@dataclass
|
QueryResult
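The alignment in align_series_to_totals boils down to: index the series rows by their group key, then emit them in totals order. A toy version with a single group-by key:

series = [{"g": "b", "t": 0}, {"g": "a", "t": 0}, {"g": "b", "t": 1}]
totals = [{"g": "a"}, {"g": "b"}]

indexed = {}
for i, row in enumerate(series):
    indexed.setdefault(row["g"], []).append(i)

aligned = [series[i] for row in totals for i in indexed.get(row["g"], [])]
print(aligned)  # the "a" row first, then both "b" rows in time order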
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-size-of-a-set-after-removals.py
|
{
"start": 486,
"end": 989
}
|
class ____(object):
def maximumSetSize(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
lookup1, lookup2 = set(nums1), set(nums2)
n, c = len(nums1), len(lookup1&lookup2)
d1, d2 = min(len(lookup1)-c, n//2), min(len(lookup2)-c, n//2)
r1, r2 = n//2-d1, n//2-d2
return d1+d2+min(r1+r2, c) # = min(d1+d2+r1+r2, d1+d2+c) = min(d1+d2+(n//2-d1)+(n//2-d2), d1+d2+c) = min(n, d1+d2+c)
|
Solution2
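A check of the formula above on LeetCode's first example (n = 4, one shared value between the deduplicated halves):

print(Solution2().maximumSetSize([1, 2, 1, 2], [1, 1, 1, 1]))  # 2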
|
python
|
mlflow__mlflow
|
tests/pyfunc/test_pyfunc_model_with_type_hints.py
|
{
"start": 16973,
"end": 17455
}
|
class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input: {type_hint}, params=None) -> {type_hint}:
return model_input
set_model(TestModel())
"""
file_content = f"""
import mlflow
from mlflow.models import set_model
import datetime
import pydantic
from typing import Any, Optional, Union
{extra_def}
{model_def}
"""
model_path = tmp_path / "model.py"
model_path.write_text(file_content)
return {"python_model": model_path}
|
TestModel
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/textfmts.py
|
{
"start": 525,
"end": 2817
}
|
class ____(RegexLexer):
"""
Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
"""
name = 'IRC logs'
aliases = ['irc']
filenames = ['*.weechatlog']
mimetypes = ['text/x-irclog']
flags = re.VERBOSE | re.MULTILINE
timestamp = r"""
(
# irssi / xchat and others
(?: \[|\()? # Opening bracket or paren for the timestamp
(?: # Timestamp
(?: (?:\d{1,4} [-/])* # Date as - or /-separated groups of digits
(?:\d{1,4})
[T ])? # Date/time separator: T or space
(?: \d?\d [:.])* # Time as :/.-separated groups of 1 or 2 digits
(?: \d?\d)
)
(?: \]|\))?\s+ # Closing bracket or paren for the timestamp
|
# weechat
\d{4}\s\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
|
# xchat
\w{3}\s\d{2}\s # Date
\d{2}:\d{2}:\d{2}\s+ # Time + Whitespace
)?
"""
tokens = {
'root': [
# log start/end
(r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
# hack
("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
# normal msgs
("^" + timestamp + r"""
(\s*<.*?>\s*) # Nick """,
bygroups(Comment.Preproc, Name.Tag), 'msg'),
# /me msgs
("^" + timestamp + r"""
(\s*[*]\s+) # Star
(\S+\s+.*?\n) # Nick + rest of message """,
bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
# join/part msgs
("^" + timestamp + r"""
(\s*(?:\*{3}|<?-[!@=P]?->?)\s*) # Star(s) or symbols
(\S+\s+) # Nick + Space
(.*?\n) # Rest of message """,
bygroups(Comment.Preproc, Keyword, String, Comment)),
(r"^.*?\n", Text),
],
'msg': [
(r"\S+:(?!//)", Name.Attribute), # Prefix
(r".*\n", Text, '#pop'),
],
}
|
IrcLogsLexer
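A hedged usage sketch, assuming Pygments is installed and exposes this lexer under pygments.lexers.textfmts:

from pygments.lexers.textfmts import IrcLogsLexer

for token, value in IrcLogsLexer().get_tokens("12:34 <alice> hello world\n"):
    print(token, repr(value))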
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dlt/dagster_dlt/components/dlt_load_collection/scaffolder.py
|
{
"start": 545,
"end": 626
}
|
class ____(NamedTuple):
pipeline_src: str
source_src: str
|
PipelineAndSource
|
python
|
kamyu104__LeetCode-Solutions
|
Python/ugly-number-ii.py
|
{
"start": 43,
"end": 1442
}
|
class ____(object):
# @param {integer} n
# @return {integer}
def nthUglyNumber(self, n):
ugly_number = 0
heap = []
heapq.heappush(heap, 1)
for _ in xrange(n):
ugly_number = heapq.heappop(heap)
if ugly_number % 2 == 0:
heapq.heappush(heap, ugly_number * 2)
elif ugly_number % 3 == 0:
heapq.heappush(heap, ugly_number * 2)
heapq.heappush(heap, ugly_number * 3)
else:
heapq.heappush(heap, ugly_number * 2)
heapq.heappush(heap, ugly_number * 3)
heapq.heappush(heap, ugly_number * 5)
return ugly_number
def nthUglyNumber2(self, n):
ugly = [1]
i2 = i3 = i5 = 0
while len(ugly) < n:
while ugly[i2] * 2 <= ugly[-1]: i2 += 1
while ugly[i3] * 3 <= ugly[-1]: i3 += 1
while ugly[i5] * 5 <= ugly[-1]: i5 += 1
ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))
return ugly[-1]
def nthUglyNumber3(self, n):
q2, q3, q5 = [2], [3], [5]
ugly = 1
for u in heapq.merge(q2, q3, q5):
if n == 1:
return ugly
if u > ugly:
ugly = u
n -= 1
q2 += 2 * u,
q3 += 3 * u,
q5 += 5 * u,
|
Solution
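Note that the heap variant relies on Python 2's xrange (this repo targets Python 2); nthUglyNumber2 runs on either version. A quick check (the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12):

print(Solution().nthUglyNumber2(10))  # 12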
|
python
|
astropy__astropy
|
astropy/table/tests/test_table.py
|
{
"start": 54069,
"end": 65010
}
|
class ____:
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.asarray(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
np_data = np.array(d, dtype=[("c", "i8"), ("d", "i8")])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = (">", "<")
native_order = byte_orders[sys.byteorder == "little"]
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name="a", dtype=order + "f8")
t = table_types.Table([col])
arr = t.as_array()
assert arr["a"].dtype.byteorder in (native_order, "=")
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr["a"].dtype.byteorder in (order, "=")
else:
assert arr["a"].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = (">", "<")[sys.byteorder != "little"]
filename = get_pkg_data_filename("data/tb.fits", "astropy.io.fits.tests")
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert data[colname].dtype.byteorder == arr2[colname].dtype.byteorder
def test_convert_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
# Single table
np_d = np.array(d, dtype=object)
assert isinstance(np_d, np.ndarray)
assert np_d[()] is d
def test_convert_list_numpy_object_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=("a", "b"))
ds = [d, d, d]
np_ds = np.array(ds, dtype=object)
assert all(isinstance(t, table_types.Table) for t in np_ds)
assert all(np.array_equal(t, d) for t in np_ds)
def _assert_copies(t, t2, deep=True):
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table(
[[1, 2, 3], [2, 3, 4]], names=["x", "y"], masked=True, meta={"name": "test"}
)
t["x"].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=["x", "y"])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2 # noqa: B015
with pytest.raises(TypeError):
t < 1.1 # noqa: B015
with pytest.raises(TypeError):
t >= 5.5 # noqa: B015
with pytest.raises(TypeError):
t <= -1.1 # noqa: B015
def test_values_equal_part1():
col1 = [1, 2]
col2 = [1.0, 2.0]
col3 = ["a", "b"]
t1 = table.Table([col1, col2, col3], names=["a", "b", "c"])
t2 = table.Table([col1, col2], names=["a", "b"])
t3 = table.table_helpers.simple_table()
tm = t1.copy()
tm["time"] = Time([1, 2], format="cxcsec")
tm1 = tm.copy()
tm1["time"][0] = np.ma.masked
tq = table.table_helpers.simple_table()
tq["quantity"] = [1.0, 2.0, 3.0] * u.m
tsk = table.table_helpers.simple_table()
tsk["sk"] = SkyCoord(1, 2, unit="deg")
eqsk = tsk.values_equal(tsk)
for col in eqsk.itercols():
assert np.all(col)
with pytest.raises(
ValueError, match="cannot compare tables with different column names"
):
t2.values_equal(t1)
with pytest.raises(ValueError, match="unable to compare column a"):
# Shape mismatch
t3.values_equal(t1)
eq = t2.values_equal(2)
for col in eq.colnames:
assert np.all(eq[col] == [False, True])
eq = t2.values_equal([1, 2])
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq = t2.values_equal(t2)
for col in eq.colnames:
assert np.all(eq[col] == [True, True])
eq1 = tm1.values_equal(tm)
for col in eq1.colnames:
assert np.all(eq1[col] == [True, True])
eq2 = tq.values_equal(tq)
for col in eq2.colnames:
assert np.all(eq2[col] == [True, True, True])
eq3 = t2.values_equal(2)
for col in eq3.colnames:
assert np.all(eq3[col] == [False, True])
eq4 = t2.values_equal([1, 2])
for col in eq4.colnames:
assert np.all(eq4[col] == [True, True])
# Compare table to its first row
t = table.Table(rows=[(1, "a"), (1, "b")])
eq = t.values_equal(t[0])
assert np.all(eq["col0"] == [True, True])
assert np.all(eq["col1"] == [True, False])
def test_rows_equal():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
    # In the above cases Row.__eq__ gets called, but now we need to make sure
    # Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
assert np.all(
(t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
def test_table_from_rows():
# see https://github.com/astropy/astropy/issues/5923
t1 = Table()
t1["a"] = [1, 2, 3]
t1["b"] = [2.0, 3.0, 4.0]
rows = [row for row in t1] # noqa: C416
t2 = Table(rows=rows)
assert_array_equal(t2.colnames, t1.colnames)
def test_equality_masked():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
    # In the above cases Row.__eq__ gets called, but now we need to make sure
    # Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask["a"][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all(
(t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
t = table.Table(t, masked=True)
t2 = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 3 b 6.0 2",
" 2 a 4.0 3",
" 0 a 1.0 4",
" 1 b 3.0 5",
" 1 c 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert np.all(
(t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool)
)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
|
TestConvertNumpyArray
|
python
|
doocs__leetcode
|
solution/2400-2499/2438.Range Product Queries of Powers/Solution.py
|
{
"start": 0,
"end": 412
}
|
class ____:
def productQueries(self, n: int, queries: List[List[int]]) -> List[int]:
powers = []
while n:
x = n & -n
powers.append(x)
n -= x
mod = 10**9 + 7
ans = []
for l, r in queries:
x = 1
for i in range(l, r + 1):
x = x * powers[i] % mod
ans.append(x)
return ans
|
Solution
|
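The decomposition above relies on n & -n isolating the lowest set bit in two's-complement arithmetic, so powers ends up holding the powers of two from n's binary representation in increasing order, and each query is a product over a slice of that list. A quick standalone check of that identity (helper name assumed for illustration):

# The lowbit trick: n & -n is always the lowest set bit of n.
def lowbit_powers(n: int) -> list:
    powers = []
    while n:
        x = n & -n        # lowest set bit, always a power of two
        powers.append(x)
        n -= x            # clear that bit and continue with the rest
    return powers

assert lowbit_powers(15) == [1, 2, 4, 8]   # 15 = 0b1111
assert lowbit_powers(10) == [2, 8]         # 10 = 0b1010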
python
|
fluentpython__example-code
|
06-dp-1class-func/classic_strategy.py
|
{
"start": 2228,
"end": 2468
}
|
class ____(Promotion): # first Concrete Strategy
"""5% discount for customers with 1000 or more fidelity points"""
def discount(self, order):
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
|
FidelityPromo
|
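For context, a strategy object like the one above is consumed by an Order that delegates its discount computation to whatever promotion it was given. The sketch below is a simplified stand-in for the surrounding classes in classic_strategy.py, not a verbatim excerpt:

# Minimal, self-contained consumer of a Promotion strategy; Customer and
# Order are simplified stand-ins for the classes in the book's example.
from collections import namedtuple

Customer = namedtuple('Customer', 'name fidelity')

class Order:
    def __init__(self, customer, total, promotion=None):
        self.customer, self._total, self.promotion = customer, total, promotion
    def total(self):
        return self._total
    def due(self):
        # Delegate the discount decision to the injected strategy object.
        discount = self.promotion.discount(self) if self.promotion else 0
        return self.total() - discount

class FidelityPromo:
    """5% discount for customers with 1000 or more fidelity points"""
    def discount(self, order):
        return order.total() * .05 if order.customer.fidelity >= 1000 else 0

assert Order(Customer('ann', 1100), 100.0, FidelityPromo()).due() == 95.0
assert Order(Customer('joe', 0), 100.0, FidelityPromo()).due() == 100.0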
python
|
getlogbook__logbook
|
src/logbook/base.py
|
{
"start": 5040,
"end": 5547
}
|
class ____:
"""Helper for exception caught blocks."""
def __init__(self, logger, args, kwargs):
self.logger = logger
self.args = args
self.kwargs = kwargs
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None:
kwargs = self.kwargs.copy()
kwargs["exc_info"] = (exc_type, exc_value, tb)
self.logger.exception(*self.args, **kwargs)
return True
|
_ExceptionCatcher
|
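The helper above is the classic "log and suppress" context manager: __exit__ receives the in-flight exception, forwards it to the logger with exc_info attached, and returns True so execution resumes after the with block. A self-contained rendition of the same pattern with a stub logger (all names here are invented for illustration):

# __exit__ returning True tells Python the exception was handled.
class CatchAndLog:
    def __init__(self, log, msg):
        self.log, self.msg = log, msg
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, tb):
        if exc_type is not None:
            self.log.exception(self.msg, exc_info=(exc_type, exc_value, tb))
        return True  # suppress the exception

class StubLog:
    def exception(self, msg, exc_info=None):
        print('caught:', msg, exc_info[0].__name__)

with CatchAndLog(StubLog(), 'boom happened'):
    raise ValueError('boom')
print('execution continues')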
python
|
celery__celery
|
t/unit/worker/test_consumer.py
|
{
"start": 40528,
"end": 49197
}
|
class ____:
def test_init(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
assert g.enabled
assert c.gossip is g
def test_callbacks(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
on_node_join = Mock(name='on_node_join')
on_node_join2 = Mock(name='on_node_join2')
on_node_leave = Mock(name='on_node_leave')
        on_node_lost = Mock(name='on_node_lost')
g.on.node_join.add(on_node_join)
g.on.node_join.add(on_node_join2)
g.on.node_leave.add(on_node_leave)
g.on.node_lost.add(on_node_lost)
worker = Mock(name='worker')
g.on_node_join(worker)
on_node_join.assert_called_with(worker)
on_node_join2.assert_called_with(worker)
g.on_node_leave(worker)
on_node_leave.assert_called_with(worker)
g.on_node_lost(worker)
on_node_lost.assert_called_with(worker)
def test_election(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.start(c)
g.election('id', 'topic', 'action')
assert g.consensus_replies['id'] == []
g.dispatcher.send.assert_called_with(
'worker-elect', id='id', topic='topic', cver=1, action='action',
)
def test_call_task(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.start(c)
signature = g.app.signature = Mock(name='app.signature')
task = Mock()
g.call_task(task)
signature.assert_called_with(task)
signature.return_value.apply_async.assert_called_with()
signature.return_value.apply_async.side_effect = MemoryError()
with patch('celery.worker.consumer.gossip.logger') as logger:
g.call_task(task)
logger.exception.assert_called()
def Event(self, id='id', clock=312,
hostname='foo@example.com', pid=4312,
topic='topic', action='action', cver=1):
return {
'id': id,
'clock': clock,
'hostname': hostname,
'pid': pid,
'topic': topic,
'action': action,
'cver': cver,
}
def test_on_elect(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.start(c)
event = self.Event('id1')
g.on_elect(event)
in_heap = g.consensus_requests['id1']
assert in_heap
g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1')
event.pop('clock')
with patch('celery.worker.consumer.gossip.logger') as logger:
g.on_elect(event)
logger.exception.assert_called()
def Consumer(self, hostname='foo@x.com', pid=4312):
c = Mock()
c.app.connection = _amqp_connection()
c.hostname = hostname
c.pid = pid
c.app.events.Receiver.return_value = Mock(accept=[])
return c
def setup_election(self, g, c):
g.start(c)
g.clock = self.app.clock
assert 'idx' not in g.consensus_replies
assert g.on_elect_ack({'id': 'idx'}) is None
g.state.alive_workers.return_value = [
'foo@x.com', 'bar@x.com', 'baz@x.com',
]
g.consensus_replies['id1'] = []
g.consensus_requests['id1'] = []
e1 = self.Event('id1', 1, 'foo@x.com')
e2 = self.Event('id1', 2, 'bar@x.com')
e3 = self.Event('id1', 3, 'baz@x.com')
g.on_elect(e1)
g.on_elect(e2)
g.on_elect(e3)
assert len(g.consensus_requests['id1']) == 3
with patch('celery.worker.consumer.gossip.info'):
g.on_elect_ack(e1)
assert len(g.consensus_replies['id1']) == 1
g.on_elect_ack(e2)
assert len(g.consensus_replies['id1']) == 2
g.on_elect_ack(e3)
with pytest.raises(KeyError):
g.consensus_replies['id1']
def test_on_elect_ack_win(self):
c = self.Consumer(hostname='foo@x.com') # I will win
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
handler = g.election_handlers['topic'] = Mock()
self.setup_election(g, c)
handler.assert_called_with('action')
def test_on_elect_ack_lose(self):
c = self.Consumer(hostname='bar@x.com') # I will lose
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
handler = g.election_handlers['topic'] = Mock()
self.setup_election(g, c)
handler.assert_not_called()
def test_on_elect_ack_win_but_no_action(self):
c = self.Consumer(hostname='foo@x.com') # I will win
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.election_handlers = {}
with patch('celery.worker.consumer.gossip.logger') as logger:
self.setup_election(g, c)
logger.exception.assert_called()
def test_on_node_join(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
with patch('celery.worker.consumer.gossip.debug') as debug:
g.on_node_join(c)
debug.assert_called_with('%s joined the party', 'foo@x.com')
def test_on_node_leave(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
with patch('celery.worker.consumer.gossip.debug') as debug:
g.on_node_leave(c)
debug.assert_called_with('%s left', 'foo@x.com')
def test_on_node_lost(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
with patch('celery.worker.consumer.gossip.info') as info:
g.on_node_lost(c)
info.assert_called_with('missed heartbeat from %s', 'foo@x.com')
def test_register_timer(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.register_timer()
c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic)
tref = g._tref
g.register_timer()
tref.cancel.assert_called_with()
def test_periodic(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
g.on_node_lost = Mock()
state = g.state = Mock()
worker = Mock()
state.workers = {'foo': worker}
worker.alive = True
worker.hostname = 'foo'
g.periodic()
worker.alive = False
g.periodic()
g.on_node_lost.assert_called_with(worker)
with pytest.raises(KeyError):
state.workers['foo']
def test_on_message__task(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
assert g.enabled
message = Mock(name='message')
message.delivery_info = {'routing_key': 'task.failed'}
g.on_message(Mock(name='prepare'), message)
def test_on_message(self):
c = self.Consumer()
c.app.connection_for_read = _amqp_connection()
g = Gossip(c)
assert g.enabled
prepare = Mock()
prepare.return_value = 'worker-online', {}
c.app.events.State.assert_called_with(
on_node_join=g.on_node_join,
on_node_leave=g.on_node_leave,
max_tasks_in_memory=1,
)
g.update_state = Mock()
worker = Mock()
g.on_node_join = Mock()
g.on_node_leave = Mock()
g.update_state.return_value = worker, 1
message = Mock()
message.delivery_info = {'routing_key': 'worker-online'}
message.headers = {'hostname': 'other'}
handler = g.event_handlers['worker-online'] = Mock()
g.on_message(prepare, message)
handler.assert_called_with(message.payload)
g.event_handlers = {}
g.on_message(prepare, message)
message.delivery_info = {'routing_key': 'worker-offline'}
prepare.return_value = 'worker-offline', {}
g.on_message(prepare, message)
message.delivery_info = {'routing_key': 'worker-baz'}
prepare.return_value = 'worker-baz', {}
g.update_state.return_value = worker, 0
g.on_message(prepare, message)
message.headers = {'hostname': g.hostname}
g.on_message(prepare, message)
g.clock.forward.assert_called_with()
|
test_Gossip
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{
"start": 179220,
"end": 185212
}
|
class ____(Response):
"""
Response of tasks.delete_many endpoint.
:param deleted: Number of tasks deleted
:type deleted: int
:param updated_children: Number of child tasks whose parent property was
updated
:type updated_children: int
:param updated_models: Number of models whose task property was updated
:type updated_models: int
:param deleted_models: Number of deleted output models
:type deleted_models: int
:param urls: The urls of the files that were uploaded by the tasks. Returned if
the 'return_file_urls' was set to 'true'
:type urls: TaskUrls
"""
_service = "tasks"
_action = "delete_many"
_version = "2.13"
_schema = {
"definitions": {
"task_urls": {
"properties": {
"artifact_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"event_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
"model_urls": {
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
},
"failures": {
"item": {
"error": {
"description": "Error info",
"properties": {
"codes": {"item": {"type": "integer"}, "type": "array"},
"data": {"additionalProperties": True, "type": "object"},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {"description": "ID of the failed entity", "type": "string"},
"type": "object",
},
"type": "array",
},
"properties": {
"deleted": {
"description": "Number of tasks deleted",
"type": ["integer", "null"],
},
"deleted_models": {
"description": "Number of deleted output models",
"type": ["integer", "null"],
},
"updated_children": {
"description": "Number of child tasks whose parent property was updated",
"type": ["integer", "null"],
},
"updated_models": {
"description": "Number of models whose task property was updated",
"type": ["integer", "null"],
},
"urls": {
"description": "The urls of the files that were uploaded by the tasks. Returned if the 'return_file_urls' was set to 'true'",
"oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
},
},
}
def __init__(
self,
deleted: Optional[int] = None,
updated_children: Optional[int] = None,
updated_models: Optional[int] = None,
deleted_models: Optional[int] = None,
urls: Any = None,
**kwargs: Any
) -> None:
super(DeleteManyResponse, self).__init__(**kwargs)
self.deleted = deleted
self.updated_children = updated_children
self.updated_models = updated_models
self.deleted_models = deleted_models
self.urls = urls
@schema_property("deleted")
def deleted(self) -> Optional[int]:
return self._property_deleted
@deleted.setter
def deleted(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
@schema_property("updated_children")
def updated_children(self) -> Optional[int]:
return self._property_updated_children
@updated_children.setter
def updated_children(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_children = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_children", six.integer_types)
self._property_updated_children = value
@schema_property("updated_models")
def updated_models(self) -> Optional[int]:
return self._property_updated_models
@updated_models.setter
def updated_models(self, value: Optional[int]) -> None:
if value is None:
self._property_updated_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated_models", six.integer_types)
self._property_updated_models = value
@schema_property("deleted_models")
def deleted_models(self) -> Optional[int]:
return self._property_deleted_models
@deleted_models.setter
def deleted_models(self, value: Optional[int]) -> None:
if value is None:
self._property_deleted_models = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted_models", six.integer_types)
self._property_deleted_models = value
@schema_property("urls")
def urls(self) -> Any:
return self._property_urls
@urls.setter
def urls(self, value: Any) -> None:
if value is None:
self._property_urls = None
return
if isinstance(value, dict):
value = TaskUrls.from_dict(value)
else:
self.assert_isinstance(value, "urls", TaskUrls)
self._property_urls = value
|
DeleteManyResponse
|
python
|
spyder-ide__spyder
|
spyder/utils/stylesheet.py
|
{
"start": 23017,
"end": 24340
}
|
class ____(SpecialTabBarStyleSheet, SpyderFontsMixin):
"""Style for tab bars in our Preferences dialog."""
# This is necessary because this class needs to access fonts
SET_STYLESHEET_AT_INIT = False
def set_stylesheet(self):
super().set_stylesheet()
# Main constants
css = self.get_stylesheet()
font = self.get_font(SpyderFontType.Interface, font_size_delta=1)
# Set font size to be one point bigger than the regular text.
css.QTabBar.setValues(
fontSize=f'{font.pointSize()}pt',
)
# Make scroll buttons a bit bigger on Windows and Mac (this has no
# effect on Linux).
if WIN or MAC:
css['QTabBar QToolButton'].setValues(
padding=f'{self.SCROLL_BUTTONS_PADDING - 1}px',
)
# Increase padding around text because we're using a larger font.
css['QTabBar::tab'].setValues(
padding='6px 10px',
)
# Remove border and add padding for content inside tabs
css['QTabWidget::pane'].setValues(
border='0px',
paddingTop=f'{AppStyle.InnerContentPadding}px',
paddingLeft=f'{3 * AppStyle.MarginSize}px',
paddingRight=f'{3 * AppStyle.MarginSize}px',
)
|
PreferencesTabBarStyleSheet
|
python
|
huggingface__transformers
|
examples/modular-transformers/modeling_super.py
|
{
"start": 10820,
"end": 12602
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: SuperConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = SuperAttention(config=config, layer_idx=layer_idx)
self.mlp = SuperMLP(config)
self.input_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
SuperDecoderLayer
|
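The decoder layer above follows the pre-norm residual pattern: normalize, transform, then add the untouched residual back, once around attention and once around the MLP. A compact stand-alone analogue with stock torch modules in place of SuperAttention and SuperMLP (hidden size, head count, and class name chosen arbitrarily for the sketch):

# Pre-norm transformer block: x + f(norm(x)) twice per layer.
import torch
from torch import nn

class PreNormBlock(nn.Module):
    def __init__(self, hidden_size: int, num_heads: int = 4):
        super().__init__()
        self.attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, 4 * hidden_size),
            nn.GELU(),
            nn.Linear(4 * hidden_size, hidden_size),
        )
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        h = self.norm1(x)                # normalize *before* attention
        attn_out, _ = self.attn(h, h, h)
        x = x + attn_out                 # first residual connection
        x = x + self.mlp(self.norm2(x))  # second residual around the MLP
        return x

x = torch.randn(2, 5, 32)
assert PreNormBlock(32)(x).shape == x.shape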
python
|
cython__cython
|
Cython/Compiler/ParseTreeTransforms.py
|
{
"start": 182255,
"end": 186630
}
|
class ____(TreeVisitor):
"""
Used by finalExceptClauseNode to work out if the body
needs to handle exceptions at all. This includes:
1. Can raise an exception.
2. May try to access the traceback.
"""
def __init__(self):
self.uses_no_exceptions = True
self.assignment_lhs = None
super().__init__()
def __call__(self, node) -> bool:
self.visit(node)
return self.uses_no_exceptions
def visit_Node(self, node):
self.uses_no_exceptions = False # In general, nodes use exceptions
def visit_ExprStatNode(self, node):
self.visitchildren(node)
def visit_StatListNode(self, node):
self.visitchildren(node)
def visit_ExprNode(self, node):
if not node.is_literal:
self.uses_no_exceptions = False
def visit_CallNode(self, node):
# Implement this to make the behaviour as explicit as possible.
# Even noexcept functions might end up printing a traceback.
self.uses_no_exceptions = False
def visit_PassStatNode(self, node):
pass # Does nothing. Good.
def visit_ReturnStatNode(self, node):
if not self.uses_no_exceptions:
return # shortcut
self.visitchildren(node)
def visit_SingleAssignmentNode(self, node):
if not self.uses_no_exceptions:
return # shortcut
self.assignment_lhs = node.lhs
self.visit(node.lhs)
self.assignment_lhs = None
rhs_type = node.rhs.type
if not (rhs_type.is_numeric or rhs_type.is_pyobject or rhs_type.is_memoryviewslice):
# Treat everything we haven't explicitly thought about as potentially dubious.
# cpp classes may have non-trivial assignment operators for example.
self.uses_no_exceptions = False
if not self.uses_no_exceptions:
return
self.visitchildren(node, exclude=["lhs"])
def visit_NameNode(self, node):
if not self.uses_no_exceptions:
return # shortcut
entry = node.entry
if self.assignment_lhs is node:
if not (entry.is_cglobal or entry.is_arg or
entry.is_local or entry.in_closure or entry.from_closure):
self.uses_no_exceptions = False
return
else:
if entry.is_cglobal:
if entry.is_cpp_optional and node.initialized_check:
# Otherwise, reading C globals should be safe.
self.uses_no_exceptions = False
return
elif entry.is_arg or entry.is_local or entry.in_closure or entry.from_closure:
if (node.cf_is_null or node.cf_maybe_null) and not node.type.is_numeric:
# The logic here is slightly simpler than for NameNode error checking.
# This gives a few false negatives (which is always the safe thing to do)
# for memoryviews and cpp_optionals
self.uses_no_exceptions = False
return
else:
# Probably a py_global.
self.uses_no_exceptions = False
return
def visit_AttributeNode(self, node):
if node.is_py_attr:
self.uses_no_exceptions = False
elif (node.type.is_memoryviewslice or node.entry.is_cpp_optional) and self.assignment_lhs is not node:
# Memoryviewslices and cpp_optional are OK as a target, but reading them involves checks.
# (Although cpp optionals are currently banned elsewhere
# because C++ classes may have non-trivial assignment).
self.uses_no_exceptions = False
# Python objects just need an incref and simple C types are fine, too. Others may not be.
if not (node.type.is_pyobject or node.type.is_numeric or node.type.is_memoryviewslice):
self.uses_no_exceptions = False
if self.uses_no_exceptions:
self.visitchildren(node)
def visit_IndexNode(self, node):
if not (node.base.type.is_array or node.base.type.is_ptr):
self.uses_no_exceptions = False
if not self.uses_no_exceptions:
return
self.visitchildren(node)
def visit_CoerceToTempNode(self, node):
self.visitchildren(node)
|
HasNoExceptionHandlingVisitor
|
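The visitor above inverts the usual default: visit_Node marks everything as exception-using, and only explicitly handled node kinds can keep uses_no_exceptions True. The same conservative shape can be sketched over Python's own ast module (an illustrative analogue, not Cython's TreeVisitor API):

# Default to "may raise"; whitelist only a few provably safe node kinds.
import ast

class MayRaise(ast.NodeVisitor):
    def __init__(self):
        self.may_raise = False
    def generic_visit(self, node):
        if isinstance(node, (ast.Module, ast.Expr, ast.Pass, ast.Constant)):
            super().generic_visit(node)   # safe nodes: keep scanning children
        else:
            self.may_raise = True         # anything else is assumed unsafe

def body_may_raise(src: str) -> bool:
    v = MayRaise()
    v.visit(ast.parse(src))
    return v.may_raise

assert body_may_raise('pass') is False
assert body_may_raise('f()') is True      # calls may always raise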
python
|
sympy__sympy
|
sympy/polys/matrices/tests/test_xxm.py
|
{
"start": 11456,
"end": 29778
}
|
class ____:
def __getitem__(self, item):
return item
_slice = _Sliced()
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_extract_slice(DM):
A = DM([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert A.extract_slice(*_slice[:,:]) == A
assert A.extract_slice(*_slice[1:,:]) == DM([[4, 5, 6], [7, 8, 9]])
assert A.extract_slice(*_slice[1:,1:]) == DM([[5, 6], [8, 9]])
assert A.extract_slice(*_slice[1:,:-1]) == DM([[4, 5], [7, 8]])
assert A.extract_slice(*_slice[1:,:-1:2]) == DM([[4], [7]])
assert A.extract_slice(*_slice[:,::2]) == DM([[1, 3], [4, 6], [7, 9]])
assert A.extract_slice(*_slice[::2,:]) == DM([[1, 2, 3], [7, 8, 9]])
assert A.extract_slice(*_slice[::2,::2]) == DM([[1, 3], [7, 9]])
assert A.extract_slice(*_slice[::2,::-2]) == DM([[3, 1], [9, 7]])
assert A.extract_slice(*_slice[::-2,::2]) == DM([[7, 9], [1, 3]])
assert A.extract_slice(*_slice[::-2,::-2]) == DM([[9, 7], [3, 1]])
assert A.extract_slice(*_slice[:,::-1]) == DM([[3, 2, 1], [6, 5, 4], [9, 8, 7]])
assert A.extract_slice(*_slice[::-1,:]) == DM([[7, 8, 9], [4, 5, 6], [1, 2, 3]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_extract(DM):
A = DM([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert A.extract([0, 1, 2], [0, 1, 2]) == A
assert A.extract([1, 2], [1, 2]) == DM([[5, 6], [8, 9]])
assert A.extract([1, 2], [0, 1]) == DM([[4, 5], [7, 8]])
assert A.extract([1, 2], [0, 2]) == DM([[4, 6], [7, 9]])
assert A.extract([1, 2], [0]) == DM([[4], [7]])
assert A.extract([1, 2], []) == DM([[1]]).zeros((2, 0), ZZ)
assert A.extract([], [0, 1, 2]) == DM([[1]]).zeros((0, 3), ZZ)
raises(IndexError, lambda: A.extract([1, 2], [0, 3]))
raises(IndexError, lambda: A.extract([1, 2], [0, -4]))
raises(IndexError, lambda: A.extract([3, 1], [0, 1]))
raises(IndexError, lambda: A.extract([-4, 2], [3, 1]))
B = DM([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert B.extract([1, 2], [1, 2]) == DM([[0, 0], [0, 0]])
def test_XXM_str():
A = DomainMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3), ZZ)
assert str(A) == \
'DomainMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3), ZZ)'
assert str(A.to_ddm()) == \
'[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'
assert str(A.to_sdm()) == \
'{0: {0: 1, 1: 2, 2: 3}, 1: {0: 4, 1: 5, 2: 6}, 2: {0: 7, 1: 8, 2: 9}}'
assert repr(A) == \
'DomainMatrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3), ZZ)'
assert repr(A.to_ddm()) == \
'DDM([[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3), ZZ)'
assert repr(A.to_sdm()) == \
'SDM({0: {0: 1, 1: 2, 2: 3}, 1: {0: 4, 1: 5, 2: 6}, 2: {0: 7, 1: 8, 2: 9}}, (3, 3), ZZ)'
B = DomainMatrix({0: {0: ZZ(1), 1: ZZ(2)}, 1: {0: ZZ(3)}}, (2, 2), ZZ)
assert str(B) == \
'DomainMatrix({0: {0: 1, 1: 2}, 1: {0: 3}}, (2, 2), ZZ)'
assert str(B.to_ddm()) == \
'[[1, 2], [3, 0]]'
assert str(B.to_sdm()) == \
'{0: {0: 1, 1: 2}, 1: {0: 3}}'
assert repr(B) == \
'DomainMatrix({0: {0: 1, 1: 2}, 1: {0: 3}}, (2, 2), ZZ)'
if GROUND_TYPES != 'gmpy':
assert repr(B.to_ddm()) == \
'DDM([[1, 2], [3, 0]], (2, 2), ZZ)'
assert repr(B.to_sdm()) == \
'SDM({0: {0: 1, 1: 2}, 1: {0: 3}}, (2, 2), ZZ)'
else:
assert repr(B.to_ddm()) == \
'DDM([[mpz(1), mpz(2)], [mpz(3), mpz(0)]], (2, 2), ZZ)'
assert repr(B.to_sdm()) == \
'SDM({0: {0: mpz(1), 1: mpz(2)}, 1: {0: mpz(3)}}, (2, 2), ZZ)'
if GROUND_TYPES == 'flint':
assert str(A.to_dfm()) == \
'[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'
assert str(B.to_dfm()) == \
'[[1, 2], [3, 0]]'
assert repr(A.to_dfm()) == \
'DFM([[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3), ZZ)'
assert repr(B.to_dfm()) == \
'DFM([[1, 2], [3, 0]], (2, 2), ZZ)'
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_list(DM):
T = type(DM([[0]]))
lol = [[1, 2, 4], [4, 5, 6]]
lol_ZZ = [[ZZ(1), ZZ(2), ZZ(4)], [ZZ(4), ZZ(5), ZZ(6)]]
lol_ZZ_bad = [[ZZ(1), ZZ(2), ZZ(4)], [ZZ(4), ZZ(5), ZZ(6), ZZ(7)]]
assert T.from_list(lol_ZZ, (2, 3), ZZ) == DM(lol)
raises(DMBadInputError, lambda: T.from_list(lol_ZZ_bad, (3, 2), ZZ))
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_to_list(DM):
lol = [[1, 2, 4], [4, 5, 6]]
assert DM(lol).to_list() == [[ZZ(1), ZZ(2), ZZ(4)], [ZZ(4), ZZ(5), ZZ(6)]]
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_to_list_flat(DM):
lol = [[1, 2, 4], [4, 5, 6]]
assert DM(lol).to_list_flat() == [ZZ(1), ZZ(2), ZZ(4), ZZ(4), ZZ(5), ZZ(6)]
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_list_flat(DM):
T = type(DM([[0]]))
flat = [ZZ(1), ZZ(2), ZZ(4), ZZ(4), ZZ(5), ZZ(6)]
assert T.from_list_flat(flat, (2, 3), ZZ) == DM([[1, 2, 4], [4, 5, 6]])
raises(DMBadInputError, lambda: T.from_list_flat(flat, (3, 3), ZZ))
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_to_flat_nz(DM):
M = DM([[1, 2, 0], [0, 0, 0], [0, 0, 3]])
elements = [ZZ(1), ZZ(2), ZZ(3)]
indices = ((0, 0), (0, 1), (2, 2))
assert M.to_flat_nz() == (elements, (indices, M.shape))
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_flat_nz(DM):
T = type(DM([[0]]))
elements = [ZZ(1), ZZ(2), ZZ(3)]
indices = ((0, 0), (0, 1), (2, 2))
data = (indices, (3, 3))
result = DM([[1, 2, 0], [0, 0, 0], [0, 0, 3]])
assert T.from_flat_nz(elements, data, ZZ) == result
raises(DMBadInputError, lambda: T.from_flat_nz(elements, (indices, (2, 3)), ZZ))
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_to_dod(DM):
dod = {0: {0: ZZ(1), 2: ZZ(4)}, 1: {0: ZZ(4), 1: ZZ(5), 2: ZZ(6)}}
assert DM([[1, 0, 4], [4, 5, 6]]).to_dod() == dod
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_dod(DM):
T = type(DM([[0]]))
dod = {0: {0: ZZ(1), 2: ZZ(4)}, 1: {0: ZZ(4), 1: ZZ(5), 2: ZZ(6)}}
assert T.from_dod(dod, (2, 3), ZZ) == DM([[1, 0, 4], [4, 5, 6]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_to_dok(DM):
    dok = {(0, 0): ZZ(1), (0, 2): ZZ(4),
           (1, 0): ZZ(4), (1, 1): ZZ(5), (1, 2): ZZ(6)}
    assert DM([[1, 0, 4], [4, 5, 6]]).to_dok() == dok
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_dok(DM):
T = type(DM([[0]]))
    dok = {(0, 0): ZZ(1), (0, 2): ZZ(4),
           (1, 0): ZZ(4), (1, 1): ZZ(5), (1, 2): ZZ(6)}
    assert T.from_dok(dok, (2, 3), ZZ) == DM([[1, 0, 4], [4, 5, 6]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_iter_values(DM):
values = [ZZ(1), ZZ(4), ZZ(4), ZZ(5), ZZ(6)]
assert sorted(DM([[1, 0, 4], [4, 5, 6]]).iter_values()) == values
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_iter_items(DM):
items = [((0, 0), ZZ(1)), ((0, 2), ZZ(4)),
((1, 0), ZZ(4)), ((1, 1), ZZ(5)), ((1, 2), ZZ(6))]
assert sorted(DM([[1, 0, 4], [4, 5, 6]]).iter_items()) == items
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_from_ddm(DM):
T = type(DM([[0]]))
ddm = DDM([[1, 2, 4], [4, 5, 6]], (2, 3), ZZ)
assert T.from_ddm(ddm) == DM([[1, 2, 4], [4, 5, 6]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_zeros(DM):
T = type(DM([[0]]))
assert T.zeros((2, 3), ZZ) == DM([[0, 0, 0], [0, 0, 0]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_ones(DM):
T = type(DM([[0]]))
assert T.ones((2, 3), ZZ) == DM([[1, 1, 1], [1, 1, 1]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_eye(DM):
T = type(DM([[0]]))
assert T.eye(3, ZZ) == DM([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
assert T.eye((3, 2), ZZ) == DM([[1, 0], [0, 1], [0, 0]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_diag(DM):
T = type(DM([[0]]))
assert T.diag([1, 2, 3], ZZ) == DM([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_transpose(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
assert A.transpose() == DM([[1, 4], [2, 5], [3, 6]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_add(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[1, 2, 3], [4, 5, 6]])
C = DM([[2, 4, 6], [8, 10, 12]])
assert A.add(B) == C
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_sub(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[1, 2, 3], [4, 5, 6]])
C = DM([[0, 0, 0], [0, 0, 0]])
assert A.sub(B) == C
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_mul(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
b = ZZ(2)
assert A.mul(b) == DM([[2, 4, 6], [8, 10, 12]])
assert A.rmul(b) == DM([[2, 4, 6], [8, 10, 12]])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_matmul(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[1, 2], [3, 4], [5, 6]])
C = DM([[22, 28], [49, 64]])
assert A.matmul(B) == C
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_mul_elementwise(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[1, 2, 3], [4, 5, 6]])
C = DM([[1, 4, 9], [16, 25, 36]])
assert A.mul_elementwise(B) == C
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_neg(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
C = DM([[-1, -2, -3], [-4, -5, -6]])
assert A.neg() == C
@pytest.mark.parametrize('DM', DM_all)
def test_XXM_convert_to(DM):
A = DM([[1, 2, 3], [4, 5, 6]], ZZ)
B = DM([[1, 2, 3], [4, 5, 6]], QQ)
assert A.convert_to(QQ) == B
assert B.convert_to(ZZ) == A
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_scc(DM):
A = DM([
[0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 1]])
assert A.scc() == [[0, 1], [2], [3, 5], [4]]
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_hstack(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[7, 8], [9, 10]])
C = DM([[1, 2, 3, 7, 8], [4, 5, 6, 9, 10]])
ABC = DM([[1, 2, 3, 7, 8, 1, 2, 3, 7, 8],
[4, 5, 6, 9, 10, 4, 5, 6, 9, 10]])
assert A.hstack(B) == C
assert A.hstack(B, C) == ABC
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_vstack(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[7, 8, 9]])
C = DM([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
ABC = DM([[1, 2, 3], [4, 5, 6], [7, 8, 9], [1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert A.vstack(B) == C
assert A.vstack(B, C) == ABC
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_applyfunc(DM):
A = DM([[1, 2, 3], [4, 5, 6]])
B = DM([[2, 4, 6], [8, 10, 12]])
assert A.applyfunc(lambda x: 2*x, ZZ) == B
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_is_upper(DM):
assert DM([[1, 2, 3], [0, 5, 6]]).is_upper() is True
assert DM([[1, 2, 3], [4, 5, 6]]).is_upper() is False
assert DM([]).is_upper() is True
assert DM([[], []]).is_upper() is True
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_is_lower(DM):
assert DM([[1, 0, 0], [4, 5, 0]]).is_lower() is True
assert DM([[1, 2, 3], [4, 5, 6]]).is_lower() is False
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_is_diagonal(DM):
assert DM([[1, 0, 0], [0, 5, 0]]).is_diagonal() is True
assert DM([[1, 2, 3], [4, 5, 6]]).is_diagonal() is False
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_diagonal(DM):
assert DM([[1, 0, 0], [0, 5, 0]]).diagonal() == [1, 5]
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_is_zero_matrix(DM):
assert DM([[0, 0, 0], [0, 0, 0]]).is_zero_matrix() is True
assert DM([[1, 0, 0], [0, 0, 0]]).is_zero_matrix() is False
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_det_ZZ(DM):
assert DM([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).det() == 0
assert DM([[1, 2, 3], [4, 5, 6], [7, 8, 10]]).det() == -3
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_det_QQ(DM):
dM1 = DM([[(1,2), (2,3)], [(3,4), (4,5)]])
assert dM1.det() == QQ(-1,10)
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_inv_QQ(DM):
dM1 = DM([[(1,2), (2,3)], [(3,4), (4,5)]])
dM2 = DM([[(-8,1), (20,3)], [(15,2), (-5,1)]])
assert dM1.inv() == dM2
assert dM1.matmul(dM2) == DM([[1, 0], [0, 1]])
dM3 = DM([[(1,2), (2,3)], [(1,4), (1,3)]])
raises(DMNonInvertibleMatrixError, lambda: dM3.inv())
dM4 = DM([[(1,2), (2,3), (3,4)], [(1,4), (1,3), (1,2)]])
raises(DMNonSquareMatrixError, lambda: dM4.inv())
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_inv_ZZ(DM):
dM1 = DM([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
# XXX: Maybe this should return a DM over QQ instead?
# XXX: Handle unimodular matrices?
raises(DMDomainError, lambda: dM1.inv())
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_charpoly_ZZ(DM):
dM1 = DM([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
assert dM1.charpoly() == [1, -16, -12, 3]
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_charpoly_QQ(DM):
dM1 = DM([[(1,2), (2,3)], [(3,4), (4,5)]])
assert dM1.charpoly() == [QQ(1,1), QQ(-13,10), QQ(-1,10)]
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_lu_solve_ZZ(DM):
dM1 = DM([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
dM2 = DM([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
raises(DMDomainError, lambda: dM1.lu_solve(dM2))
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_lu_solve_QQ(DM):
dM1 = DM([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
dM2 = DM([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
dM3 = DM([[(-2,3),(-4,3),(1,1)],[(-2,3),(11,3),(-2,1)],[(1,1),(-2,1),(1,1)]])
assert dM1.lu_solve(dM2) == dM3 == dM1.inv()
dM4 = DM([[1, 2, 3], [4, 5, 6]])
dM5 = DM([[1, 0], [0, 1], [0, 0]])
raises(DMShapeError, lambda: dM4.lu_solve(dM5))
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_nullspace_QQ(DM):
dM1 = DM([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# XXX: Change the signature to just return the nullspace. Possibly
# returning the rank or nullity makes sense but the list of nonpivots is
# not useful.
assert dM1.nullspace() == (DM([[1, -2, 1]]), [2])
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_lll(DM):
M = DM([[1, 2, 3], [4, 5, 20]])
M_lll = DM([[1, 2, 3], [-1, -5, 5]])
T = DM([[1, 0], [-5, 1]])
assert M.lll() == M_lll
assert M.lll_transform() == (M_lll, T)
assert T.matmul(M) == M_lll
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_mixed_signs(DM):
lol = [[QQ(1), QQ(-2)], [QQ(-3), QQ(4)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_large_matrix(DM):
lol = [[QQ(i + j) for j in range(10)] for i in range(10)]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_identity_matrix(DM):
T = type(DM([[0]]))
A = T.eye(3, QQ)
Q, R = A.qr()
assert Q == A
assert R == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
assert Q.shape == (3, 3)
assert R.shape == (3, 3)
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_square_matrix(DM):
lol = [[QQ(3), QQ(1)], [QQ(4), QQ(3)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_matrix_with_zero_columns(DM):
lol = [[QQ(3), QQ(0)], [QQ(4), QQ(0)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_linearly_dependent_columns(DM):
lol = [[QQ(1), QQ(2)], [QQ(2), QQ(4)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_qr_non_field(DM):
lol = [[ZZ(3), ZZ(1)], [ZZ(4), ZZ(3)]]
A = DM(lol)
with pytest.raises(DMDomainError):
A.qr()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_field(DM):
lol = [[QQ(3), QQ(1)], [QQ(4), QQ(3)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_tall_matrix(DM):
lol = [[QQ(1), QQ(2)], [QQ(3), QQ(4)], [QQ(5), QQ(6)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_wide_matrix(DM):
lol = [[QQ(1), QQ(2), QQ(3)], [QQ(4), QQ(5), QQ(6)]]
A = DM(lol)
Q, R = A.qr()
assert Q.matmul(R) == A
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_empty_matrix_0x0(DM):
T = type(DM([[0]]))
A = T.zeros((0, 0), QQ)
Q, R = A.qr()
assert Q.matmul(R).shape == A.shape
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
assert Q.shape == (0, 0)
assert R.shape == (0, 0)
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_empty_matrix_2x0(DM):
T = type(DM([[0]]))
A = T.zeros((2, 0), QQ)
Q, R = A.qr()
assert Q.matmul(R).shape == A.shape
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
assert Q.shape == (2, 0)
assert R.shape == (0, 0)
@pytest.mark.parametrize('DM', DMQ_all)
def test_XXM_qr_empty_matrix_0x2(DM):
T = type(DM([[0]]))
A = T.zeros((0, 2), QQ)
Q, R = A.qr()
assert Q.matmul(R).shape == A.shape
    assert (Q.transpose().matmul(Q)).is_diagonal()
    assert R.is_upper()
assert Q.shape == (0, 0)
assert R.shape == (0, 2)
@pytest.mark.parametrize('DM', DMZ_all)
def test_XXM_fflu(DM):
A = DM([[1, 2], [3, 4]])
P, L, D, U = A.fflu()
A_field = A.convert_to(QQ)
P_field = P.convert_to(QQ)
L_field = L.convert_to(QQ)
D_field = D.convert_to(QQ)
U_field = U.convert_to(QQ)
assert P.shape == A.shape
assert L.shape == A.shape
assert D.shape == A.shape
assert U.shape == A.shape
assert P == DM([[1, 0], [0, 1]])
assert L == DM([[1, 0], [3, -2]])
assert D == DM([[1, 0], [0, -2]])
assert U == DM([[1, 2], [0, -2]])
assert L_field.matmul(D_field.inv()).matmul(U_field) == P_field.matmul(A_field)
|
_Sliced
|
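The _Sliced helper above turns subscription syntax into data: its __getitem__ returns the raw key unchanged, so _slice[1:, ::2] evaluates to the tuple of slice objects that extract_slice(*...) unpacks. A standalone illustration of the trick (class name Echo is invented):

# __getitem__ echoes its argument, making [] a literal for slice objects.
class Echo:
    def __getitem__(self, item):
        return item

s = Echo()
assert s[1:3] == slice(1, 3, None)
assert s[1:, ::2] == (slice(1, None, None), slice(None, None, 2))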
python
|
scrapy__scrapy
|
tests/test_downloader_handlers.py
|
{
"start": 1198,
"end": 2734
}
|
class ____:
def test_enabled_handler(self):
handlers = {"scheme": DummyDH}
crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers})
dh = DownloadHandlers(crawler)
assert "scheme" in dh._schemes
assert "scheme" in dh._handlers
assert "scheme" not in dh._notconfigured
def test_not_configured_handler(self):
handlers = {"scheme": OffDH}
crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers})
dh = DownloadHandlers(crawler)
assert "scheme" in dh._schemes
assert "scheme" not in dh._handlers
assert "scheme" in dh._notconfigured
def test_disabled_handler(self):
handlers = {"scheme": None}
crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers})
dh = DownloadHandlers(crawler)
assert "scheme" not in dh._schemes
for scheme in handlers: # force load handlers
dh._get_handler(scheme)
assert "scheme" not in dh._handlers
assert "scheme" in dh._notconfigured
def test_lazy_handlers(self):
handlers = {"scheme": DummyLazyDH}
crawler = get_crawler(settings_dict={"DOWNLOAD_HANDLERS": handlers})
dh = DownloadHandlers(crawler)
assert "scheme" in dh._schemes
assert "scheme" not in dh._handlers
for scheme in handlers: # force load lazy handler
dh._get_handler(scheme)
assert "scheme" in dh._handlers
assert "scheme" not in dh._notconfigured
|
TestLoad
|