| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
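Each row below pairs a Python class body whose name is masked as `____` (the `source` column) with the original name (`target`); `class_span` holds the character offsets of the class within the file at `path`. A minimal sketch of consuming a row, assuming rows are plain dicts keyed by the column names (the helper `fill_mask` is ours, not part of any dataset library):

```python
def fill_mask(row: dict) -> str:
    """Substitute the masked class name in `source` with the `target` name."""
    return row["source"].replace("class ____", f"class {row['target']}", 1)

# A toy row shaped like the table below (source abbreviated for illustration).
row = {
    "language": "python",
    "repo": "doocs__leetcode",
    "path": "solution/0000-0099/0024.Swap Nodes in Pairs/Solution.py",
    "class_span": {"start": 151, "end": 436},  # offsets into the original file
    "source": "class ____:\n    def swapPairs(self, head):\n        ...",
    "target": "Solution",
}
print(fill_mask(row).splitlines()[0])  # -> class Solution:
```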
python
|
django__django
|
django/contrib/postgres/forms/ranges.py
|
{
"start": 2939,
"end": 3120
}
|
class ____(BaseRangeField):
default_error_messages = {"invalid": _("Enter two whole numbers.")}
base_field = forms.IntegerField
range_type = NumericRange
|
IntegerRangeField
|
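A hedged usage sketch of the field with the mask filled in as `IntegerRangeField`, assuming a configured Django project with `django.contrib.postgres` (and a PostgreSQL driver) available:

```python
from django.contrib.postgres.forms import IntegerRangeField

field = IntegerRangeField()
value = field.clean(["1", "10"])   # MultiValueField: lower and upper bound in
print(value.lower, value.upper)    # 1 10, compressed into a NumericRange
# Non-integer input raises ValidationError ("Enter two whole numbers.").
```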
python
|
scrapy__scrapy
|
tests/test_command_startproject.py
|
{
"start": 397,
"end": 3911
}
|
class ____:
project_name = "testproject"
@staticmethod
def _assert_files_exist(project_dir: Path, project_name: str) -> None:
assert (project_dir / "scrapy.cfg").exists()
assert (project_dir / project_name).exists()
assert (project_dir / project_name / "__init__.py").exists()
assert (project_dir / project_name / "items.py").exists()
assert (project_dir / project_name / "pipelines.py").exists()
assert (project_dir / project_name / "settings.py").exists()
assert (project_dir / project_name / "spiders" / "__init__.py").exists()
def test_startproject(self, tmp_path: Path) -> None:
        # With no dir argument, the project is created in the `self.project_name` subdir of cwd
assert call("startproject", self.project_name, cwd=tmp_path) == 0
self._assert_files_exist(tmp_path / self.project_name, self.project_name)
assert call("startproject", self.project_name, cwd=tmp_path) == 1
assert call("startproject", "wrong---project---name") == 1
assert call("startproject", "sys") == 1
def test_startproject_with_project_dir(self, tmp_path: Path) -> None:
        # With a dir argument, the project is created in the specified dir
project_dir = tmp_path / "project"
assert (
call("startproject", self.project_name, str(project_dir), cwd=tmp_path) == 0
)
self._assert_files_exist(project_dir, self.project_name)
assert (
call(
"startproject", self.project_name, str(project_dir) + "2", cwd=tmp_path
)
== 0
)
assert (
call("startproject", self.project_name, str(project_dir), cwd=tmp_path) == 1
)
assert (
call(
"startproject", self.project_name + "2", str(project_dir), cwd=tmp_path
)
== 1
)
assert call("startproject", "wrong---project---name") == 1
assert call("startproject", "sys") == 1
assert call("startproject") == 2
assert (
call("startproject", self.project_name, str(project_dir), "another_params")
== 2
)
def test_existing_project_dir(self, tmp_path: Path) -> None:
project_name = self.project_name + "_existing"
project_path = tmp_path / project_name
project_path.mkdir()
assert call("startproject", project_name, cwd=tmp_path) == 0
self._assert_files_exist(project_path, project_name)
def get_permissions_dict(
path: str | os.PathLike, renamings=None, ignore=None
) -> dict[str, str]:
def get_permissions(path: Path) -> str:
return oct(path.stat().st_mode)
path_obj = Path(path)
renamings = renamings or ()
permissions_dict = {
".": get_permissions(path_obj),
}
for root, dirs, files in os.walk(path_obj):
nodes = list(chain(dirs, files))
if ignore:
ignored_names = ignore(root, nodes)
nodes = [node for node in nodes if node not in ignored_names]
for node in nodes:
absolute_path = Path(root, node)
relative_path = str(absolute_path.relative_to(path))
for search_string, replacement in renamings:
relative_path = relative_path.replace(search_string, replacement)
permissions = get_permissions(absolute_path)
permissions_dict[relative_path] = permissions
return permissions_dict
|
TestStartprojectCommand
|
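The `call` helper used by these assertions returns the command's exit status: 0 for success, 1 for a runtime error (bad project name, already-created project), and 2 for a usage error. A rough standalone equivalent, assuming Scrapy is installed and `subprocess` stands in for the test helper:

```python
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    result = subprocess.run(["scrapy", "startproject", "testproject"], cwd=tmp)
    print(result.returncode)  # 0 on success; rerunning in the same dir gives 1
```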
python
|
openai__openai-python
|
src/openai/resources/fine_tuning/jobs/jobs.py
|
{
"start": 1268,
"end": 17174
}
|
class ____(SyncAPIResource):
@cached_property
def checkpoints(self) -> Checkpoints:
return Checkpoints(self._client)
@cached_property
def with_raw_response(self) -> JobsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return JobsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> JobsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return JobsWithStreamingResponse(self)
def create(
self,
*,
model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini"]],
training_file: str,
hyperparameters: job_create_params.Hyperparameters | Omit = omit,
integrations: Optional[Iterable[job_create_params.Integration]] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
method: job_create_params.Method | Omit = omit,
seed: Optional[int] | Omit = omit,
suffix: Optional[str] | Omit = omit,
validation_file: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FineTuningJob:
"""
Creates a fine-tuning job which begins the process of creating a new model from
a given dataset.
Response includes details of the enqueued job including job status and the name
of the fine-tuned models once complete.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
Args:
model: The name of the model to fine-tune. You can select one of the
[supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
training_file: The ID of an uploaded file that contains training data.
See [upload file](https://platform.openai.com/docs/api-reference/files/create)
for how to upload a file.
Your dataset must be formatted as a JSONL file. Additionally, you must upload
your file with the purpose `fine-tune`.
The contents of the file should differ depending on if the model uses the
[chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
[completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
format, or if the fine-tuning method uses the
[preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
format.
See the
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
for more details.
hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
in favor of `method`, and should be passed in under the `method` parameter.
integrations: A list of integrations to enable for your fine-tuning job.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
method: The method used for fine-tuning.
seed: The seed controls the reproducibility of the job. Passing in the same seed and
job parameters should produce the same results, but may differ in rare cases. If
a seed is not specified, one will be generated for you.
suffix: A string of up to 64 characters that will be added to your fine-tuned model
name.
For example, a `suffix` of "custom-model-name" would produce a model name like
`ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
validation_file: The ID of an uploaded file that contains validation data.
If you provide this file, the data is used to generate validation metrics
periodically during fine-tuning. These metrics can be viewed in the fine-tuning
results file. The same data should not be present in both train and validation
files.
Your dataset must be formatted as a JSONL file. You must upload your file with
the purpose `fine-tune`.
See the
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
for more details.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/fine_tuning/jobs",
body=maybe_transform(
{
"model": model,
"training_file": training_file,
"hyperparameters": hyperparameters,
"integrations": integrations,
"metadata": metadata,
"method": method,
"seed": seed,
"suffix": suffix,
"validation_file": validation_file,
},
job_create_params.JobCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def retrieve(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FineTuningJob:
"""
Get info about a fine-tuning job.
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get(
f"/fine_tuning/jobs/{fine_tuning_job_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
metadata: Optional[Dict[str, str]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[FineTuningJob]:
"""
List your organization's fine-tuning jobs
Args:
after: Identifier for the last job from the previous pagination request.
limit: Number of fine-tuning jobs to retrieve.
metadata: Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
Alternatively, set `metadata=null` to indicate no metadata.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/fine_tuning/jobs",
page=SyncCursorPage[FineTuningJob],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"metadata": metadata,
},
job_list_params.JobListParams,
),
),
model=FineTuningJob,
)
def cancel(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FineTuningJob:
"""
Immediately cancel a fine-tune job.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def list_events(
self,
fine_tuning_job_id: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[FineTuningJobEvent]:
"""
Get status updates for a fine-tuning job.
Args:
after: Identifier for the last event from the previous pagination request.
limit: Number of events to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
page=SyncCursorPage[FineTuningJobEvent],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
job_list_events_params.JobListEventsParams,
),
),
model=FineTuningJobEvent,
)
def pause(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FineTuningJob:
"""
Pause a fine-tune job.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
f"/fine_tuning/jobs/{fine_tuning_job_id}/pause",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
def resume(
self,
fine_tuning_job_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FineTuningJob:
"""
Resume a fine-tune job.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._post(
f"/fine_tuning/jobs/{fine_tuning_job_id}/resume",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FineTuningJob,
)
|
Jobs
|
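With the mask filled in as `Jobs`, the resource is reached through the client as `client.fine_tuning.jobs`. A hedged sketch, assuming the `openai` package and an API key in the environment (the training-file ID is hypothetical):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
job = client.fine_tuning.jobs.create(
    model="gpt-4o-mini",
    training_file="file-abc123",  # hypothetical uploaded-file ID
)
print(job.id, job.status)
for event in client.fine_tuning.jobs.list_events(job.id, limit=5):
    print(event.message)
```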
python
|
huggingface__transformers
|
tests/models/qwen2_5_omni/test_processing_qwen2_5_omni.py
|
{
"start": 1227,
"end": 14010
}
|
class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Qwen2_5OmniProcessor
model_id = "Qwen/Qwen2.5-Omni-7B"
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
return image_processor_class.from_pretrained(
cls.model_id, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56}
)
@classmethod
def _setup_video_processor(cls):
video_processor_class = cls._get_component_class_from_processor("video_processor")
return video_processor_class.from_pretrained(
cls.model_id, size={"shortest_edge": 28 * 28, "longest_edge": 56 * 56}
)
def prepare_audio_inputs(self, batch_size: int = 3):
"""This function prepares a list of numpy audios."""
audio_inputs = [np.random.rand(160000) * 2 - 1] * batch_size
return audio_inputs
@require_torch
def _test_apply_chat_template(
self,
modality: str,
batch_size: int,
return_tensors: str,
input_name: str,
processor_name: str,
input_data: list[str],
):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
if processor_name not in self.processor_class.get_attributes():
self.skipTest(f"{processor_name} attribute not present in {self.processor_class}")
batch_messages = [
[
{
"role": "user",
"content": [{"type": "text", "text": "Describe this."}],
},
]
] * batch_size
# Test that jinja can be applied
formatted_prompt = processor.apply_chat_template(batch_messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), batch_size)
# Test that tokenizing with template and directly with `self.tokenizer` gives same output
formatted_prompt_tokenized = processor.apply_chat_template(
batch_messages, add_generation_prompt=True, tokenize=True, return_tensors=return_tensors
)
add_special_tokens = True
if processor.tokenizer.bos_token is not None and formatted_prompt[0].startswith(processor.tokenizer.bos_token):
add_special_tokens = False
tok_output = processor.tokenizer(
formatted_prompt, return_tensors=return_tensors, add_special_tokens=add_special_tokens
)
expected_output = tok_output.input_ids
self.assertListEqual(expected_output.tolist(), formatted_prompt_tokenized.tolist())
# Test that kwargs passed to processor's `__call__` are actually used
tokenized_prompt_100 = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
padding="max_length",
truncation=True,
return_tensors=return_tensors,
max_length=100,
)
self.assertEqual(len(tokenized_prompt_100[0]), 100)
# Test that `return_dict=True` returns text related inputs in the dict
out_dict_text = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
)
self.assertTrue(all(key in out_dict_text for key in ["input_ids", "attention_mask"]))
self.assertEqual(len(out_dict_text["input_ids"]), batch_size)
self.assertEqual(len(out_dict_text["attention_mask"]), batch_size)
# Test that with modality URLs and `return_dict=True`, we get modality inputs in the dict
for idx, url in enumerate(input_data[:batch_size]):
batch_messages[idx][0]["content"] = [batch_messages[idx][0]["content"][0], {"type": modality, "url": url}]
out_dict = processor.apply_chat_template(
batch_messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors=return_tensors,
num_frames=2, # by default no more than 2 frames, otherwise too slow
)
input_name = getattr(self, input_name)
self.assertTrue(input_name in out_dict)
self.assertEqual(len(out_dict["input_ids"]), batch_size)
self.assertEqual(len(out_dict["attention_mask"]), batch_size)
if modality == "video":
            # Qwen pixels don't scale with batch size the same way as in other models; compute the expected video token count from video_grid_thw
expected_video_token_count = 0
for thw in out_dict["video_grid_thw"]:
expected_video_token_count += thw[0] * thw[1] * thw[2]
mm_len = expected_video_token_count
elif modality == "audio":
mm_len = batch_size
else:
mm_len = batch_size * 1564
self.assertEqual(len(out_dict[input_name]), mm_len)
return_tensor_to_type = {"pt": torch.Tensor, "np": np.ndarray, None: list}
for k in out_dict:
self.assertIsInstance(out_dict[k], return_tensor_to_type[return_tensors])
@require_av
def test_apply_chat_template_video_frame_sampling(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest("Processor doesn't accept videos at input")
messages = [
[
{
"role": "user",
"content": [
{"type": "text", "text": "What is shown in this video?"},
],
},
]
]
formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1)
formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_output = processor.tokenizer(formatted_prompt, return_tensors=None).input_ids
self.assertListEqual(expected_output, formatted_prompt_tokenized)
out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True)
self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
# Add video URL for return dict and load with `num_frames` arg
messages[0][0]["content"].append(
{
"type": "video",
"url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/videos-test/resolve/main/tiny_video.mp4"
),
}
)
num_frames = 3
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
num_frames=num_frames,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 5760)
# Load with `fps` arg
fps = 1
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 5760)
# Load with `fps` and `num_frames` args, should raise an error
with self.assertRaises(ValueError):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
fps=fps,
num_frames=num_frames,
)
# Load without any arg should load the whole video
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 17280)
# Load video as a list of frames (i.e. images). NOTE: each frame should have same size
# because we assume they come from one video
messages[0][0]["content"][-1] = {
"type": "video",
"url": [
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
],
}
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
)
self.assertTrue(self.videos_input_name in out_dict_with_video)
self.assertEqual(len(out_dict_with_video[self.videos_input_name]), 2904)
        # When the inputs are frame URLs/paths, we expect that they are already
        # sampled and will raise an error if asked to sample again.
with self.assertRaisesRegex(
ValueError, "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`"
):
out_dict_with_video = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
do_sample_frames=True,
)
@require_librosa
@require_av
def test_chat_template_audio_from_video(self):
processor = self.get_processor()
if processor.chat_template is None:
self.skipTest("Processor has no chat template")
signature = inspect.signature(processor.__call__)
if "videos" not in {*signature.parameters.keys()} or (
signature.parameters.get("videos") is not None
and signature.parameters["videos"].annotation == inspect._empty
):
self.skipTest(f"{self.processor_class} does not support video inputs")
if "feature_extractor" not in self.processor_class.get_attributes():
self.skipTest(f"feature_extractor attribute not present in {self.processor_class}")
video_file_path = hf_hub_download(
repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset"
)
messages = [
{
"role": "user",
"content": [
{"type": "video", "path": video_file_path},
{"type": "text", "text": "Which of these animals is making the sound?"},
],
},
{
"role": "assistant",
"content": [{"type": "text", "text": "It is a cow."}],
},
{
"role": "user",
"content": [
{"type": "text", "text": "Tell me all about this animal."},
],
},
]
formatted_prompt = processor.apply_chat_template([messages], add_generation_prompt=True, tokenize=False)
self.assertEqual(len(formatted_prompt), 1) # batch size=1
out_dict = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
load_audio_from_video=True,
)
self.assertTrue(self.audio_input_name in out_dict)
self.assertTrue(self.videos_input_name in out_dict)
# should always have input_ids and attention_mask
self.assertEqual(len(out_dict["input_ids"]), 1) # batch-size=1
self.assertEqual(len(out_dict["attention_mask"]), 1) # batch-size=1
self.assertEqual(len(out_dict[self.audio_input_name]), 1) # 1 audio in the conversation
self.assertEqual(len(out_dict[self.videos_input_name]), 145912) # 1 video in the conversation
|
Qwen2_5OmniProcessorTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/dungeon-game.py
|
{
"start": 636,
"end": 2158
}
|
class ____(object):
# @param dungeon, a list of lists of integers
    # @return an integer
def calculateMinimumHP(self, dungeon):
maximum_loses = 0
for rooms in dungeon:
for room in rooms:
if room < 0:
maximum_loses += abs(room)
return self.binarySearch(dungeon, maximum_loses)
def binarySearch(self, dungeon, maximum_loses):
start, end = 1, maximum_loses + 1
result = 0
while start < end:
mid = start + (end - start) / 2
if self.DP(dungeon, mid):
end = mid
else:
start = mid + 1
return start
def DP(self, dungeon, HP):
remain_HP = [0 for _ in dungeon[0]]
remain_HP[0] = HP + dungeon[0][0]
for j in xrange(1, len(remain_HP)):
if remain_HP[j - 1] > 0:
remain_HP[j] = max(remain_HP[j - 1] + dungeon[0][j], 0)
for i in xrange(1, len(dungeon)):
if remain_HP[0] > 0:
remain_HP[0] = max(remain_HP[0] + dungeon[i][0], 0)
else:
remain_HP[0] = 0
for j in xrange(1, len(remain_HP)):
remain = 0
if remain_HP[j - 1] > 0:
remain = max(remain_HP[j - 1] + dungeon[i][j], remain)
if remain_HP[j] > 0:
remain = max(remain_HP[j] + dungeon[i][j], remain)
remain_HP[j] = remain
return remain_HP[-1] > 0
|
Solution2
|
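The snippet above is Python 2 (note `xrange` and `/` as integer division). It binary-searches the knight's initial HP, with an O(m·n) feasibility check per guess. A compact Python 3 restatement of the same idea, for illustration only:

```python
def min_initial_hp(dungeon):
    """Binary-search the smallest starting HP that survives the dungeon."""
    def survives(hp):
        rows, cols = len(dungeon), len(dungeon[0])
        best = [0] * cols                      # best remaining HP per column; 0 = dead
        best[0] = max(hp + dungeon[0][0], 0)
        for j in range(1, cols):
            best[j] = max(best[j - 1] + dungeon[0][j], 0) if best[j - 1] > 0 else 0
        for i in range(1, rows):
            best[0] = max(best[0] + dungeon[i][0], 0) if best[0] > 0 else 0
            for j in range(1, cols):
                from_left = best[j - 1] + dungeon[i][j] if best[j - 1] > 0 else 0
                from_up = best[j] + dungeon[i][j] if best[j] > 0 else 0
                best[j] = max(from_left, from_up, 0)
        return best[-1] > 0

    lo = 1
    hi = 1 + sum(-v for row in dungeon for v in row if v < 0)
    while lo < hi:
        mid = (lo + hi) // 2
        if survives(mid):
            hi = mid
        else:
            lo = mid + 1
    return lo

# Classic example: the answer is 7 (enter with 7 HP, never drop to 0).
assert min_initial_hp([[-2, -3, 3], [-5, -10, 1], [10, 30, -5]]) == 7
```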
python
|
doocs__leetcode
|
solution/0000-0099/0024.Swap Nodes in Pairs/Solution.py
|
{
"start": 151,
"end": 436
}
|
class ____:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
if head is None or head.next is None:
return head
t = self.swapPairs(head.next.next)
p = head.next
p.next = head
head.next = t
return p
|
Solution
|
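A tiny driver for the recursive pair swap, assuming the mask is filled in as `Solution`, `from typing import Optional` is in scope, and this minimal `ListNode` stands in for the repo's definition:

```python
class ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def build(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

print(to_list(Solution().swapPairs(build([1, 2, 3, 4]))))  # [2, 1, 4, 3]
```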
python
|
getsentry__sentry
|
tests/sentry_plugins/bitbucket/test_plugin.py
|
{
"start": 464,
"end": 4294
}
|
class ____(PluginTestCase):
@cached_property
def plugin(self) -> BitbucketPlugin:
return BitbucketPlugin()
@cached_property
def request(self) -> RequestFactory:
return RequestFactory()
def test_get_issue_label(self) -> None:
group = self.create_group(message="Hello world", culprit="foo.bar")
assert self.plugin.get_issue_label(group, "1") == "Bitbucket-1"
def test_get_issue_url(self) -> None:
self.plugin.set_option("repo", "maxbittker/newsdiffs", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
assert (
self.plugin.get_issue_url(group, "1")
== "https://bitbucket.org/maxbittker/newsdiffs/issue/1/"
)
def test_is_configured(self) -> None:
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("repo", "maxbittker/newsdiffs", self.project)
assert self.plugin.is_configured(self.project) is True
@responses.activate
def test_create_issue(self) -> None:
responses.add(
responses.POST,
"https://api.bitbucket.org/1.0/repositories/maxbittker/newsdiffs/issues",
json={"local_id": 1, "title": "Hello world"},
)
self.plugin.set_option("repo", "maxbittker/newsdiffs", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
request = drf_request_from_request(self.request.get("/"))
request.user = AnonymousUser()
form_data = {
"title": "Hello",
"description": "Fix this.",
"issue_type": "bug",
"priority": "trivial",
}
with pytest.raises(PluginError):
self.plugin.create_issue(request, group, form_data)
request.user = self.user
self.login_as(self.user)
self.create_usersocialauth(
user=self.user,
provider=self.plugin.auth_provider,
extra_data={
"access_token": (
"oauth_token=123456789abcdefghi&"
"oauth_token_secret="
"123456789123456789abcdefghijklmn"
)
},
)
assert self.plugin.create_issue(request, group, form_data) == 1
request = responses.calls[-1].request
assert request.headers["Authorization"].startswith("OAuth ")
@responses.activate
def test_link_issue(self) -> None:
responses.add(
responses.GET,
"https://api.bitbucket.org/1.0/repositories/maxbittker/newsdiffs/issues/1",
json={"local_id": 1, "title": "Hello world"},
)
responses.add(
responses.POST,
"https://api.bitbucket.org/1.0/repositories/maxbittker/newsdiffs/issues/1/comments",
json={"body": "Hello"},
)
self.plugin.set_option("repo", "maxbittker/newsdiffs", self.project)
group = self.create_group(message="Hello world", culprit="foo.bar")
request = drf_request_from_request(self.request.get("/"))
request.user = AnonymousUser()
form_data = {"comment": "Hello", "issue_id": "1"}
with pytest.raises(PluginError):
self.plugin.link_issue(request, group, form_data)
request.user = self.user
self.login_as(self.user)
self.create_usersocialauth(
user=self.user,
provider=self.plugin.auth_provider,
extra_data={
"access_token": (
"oauth_token=123456789abcdefghi&oauth_token_secret="
"123456789123456789abcdefghijklmn"
)
},
)
assert self.plugin.link_issue(request, group, form_data) == {"title": "Hello world"}
|
BitbucketPluginTest
|
python
|
optuna__optuna
|
optuna/storages/_rdb/models.py
|
{
"start": 19134,
"end": 19700
}
|
class ____(BaseModel):
__tablename__ = "version_info"
# setting check constraint to ensure the number of rows is at most 1
__table_args__: Any = (CheckConstraint("version_info_id=1"),)
version_info_id = _Column(Integer, primary_key=True, autoincrement=False, default=1)
schema_version = _Column(Integer)
library_version = _Column(String(MAX_VERSION_LENGTH))
@classmethod
def find(cls, session: orm.Session) -> "VersionInfoModel" | None:
version_info = session.query(cls).one_or_none()
return version_info
|
VersionInfoModel
|
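The check constraint pins `version_info_id` to 1, so the table can hold at most one row, which is why `find` uses `one_or_none`. A hedged sketch of typical use, assuming a SQLAlchemy session bound to an Optuna storage database (the version numbers are hypothetical):

```python
version_info = VersionInfoModel.find(session)
if version_info is None:
    session.add(VersionInfoModel(schema_version=12, library_version="4.0.0"))
    session.commit()
else:
    print(version_info.schema_version, version_info.library_version)
```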
python
|
cython__cython
|
Cython/Compiler/ParseTreeTransforms.py
|
{
"start": 155663,
"end": 157107
}
|
class ____(EnvTransform, SkipDeclarations):
"""
    For temporary expressions that are implemented using std::optional, the temps must be
    assigned using `__pyx_t_x = value;` but accessed using `something = (*__pyx_t_x)`. This transform
    inserts a coercion node to take care of this, and runs absolutely last (once nothing else can be
    inserted into the tree).
TODO: a possible alternative would be to split ExprNode.result() into ExprNode.rhs_result() and ExprNode.lhs_result()???
"""
def visit_ModuleNode(self, node):
if self.current_env().cpp:
# skipping this makes it essentially free for C files
self.visitchildren(node)
return node
def visit_ExprNode(self, node):
self.visitchildren(node)
if (self.current_env().directives['cpp_locals'] and
node.result_in_temp() and node.type.is_cpp_class and
# Fake references are not replaced with "std::optional()".
not node.type.is_fake_reference):
node = ExprNodes.CppOptionalTempCoercion(node)
return node
def visit_CppOptionalTempCoercion(self, node):
return node
def visit_CppIteratorNode(self, node):
return node
def visit_ExprStatNode(self, node):
# Deliberately skip `expr` in ExprStatNode - we don't need to access it.
self.visitchildren(node.expr)
return node
|
CoerceCppTemps
|
python
|
encode__django-rest-framework
|
tests/test_renderers.py
|
{
"start": 24537,
"end": 25176
}
|
class ____(TestCase):
"""
Tests specific for Static HTML Renderer
"""
def setUp(self):
self.renderer = StaticHTMLRenderer()
def test_static_renderer(self):
data = '<html><body>text</body></html>'
result = self.renderer.render(data)
assert result == data
def test_static_renderer_with_exception(self):
context = {
'response': Response(status=500, exception=True),
'request': Request(HttpRequest())
}
result = self.renderer.render({}, renderer_context=context)
assert result == '500 Internal Server Error'
|
StaticHTMLRendererTests
|
python
|
scipy__scipy
|
scipy/ndimage/tests/test_interpolation.py
|
{
"start": 54503,
"end": 61290
}
|
class ____:
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate01(self, order, xp):
data = xp.asarray([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=xp.float64)
out = ndimage.rotate(data, 0, order=order)
assert_array_almost_equal(out, data)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate02(self, order, xp):
data = xp.asarray([[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0]], dtype=xp.float64)
expected = xp.asarray([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], dtype=xp.float64)
out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
@pytest.mark.parametrize('dtype', ["float64", "complex128"])
def test_rotate03(self, order, dtype, xp):
dtype = getattr(xp, dtype)
data = xp.asarray([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=dtype)
expected = xp.asarray([[0, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=dtype)
if xp.isdtype(data.dtype, 'complex floating'):
data -= 1j * data
expected -= 1j * expected
out = ndimage.rotate(data, 90, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate04(self, order, xp):
data = xp.asarray([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=xp.float64)
expected = xp.asarray([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]], dtype=xp.float64)
out = ndimage.rotate(data, 90, reshape=False, order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate05(self, order, xp):
data = np.empty((4, 3, 3))
for i in range(3):
data[:, :, i] = np.asarray([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=np.float64)
data = xp.asarray(data)
expected = xp.asarray([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=xp.float64)
out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate06(self, order, xp):
data = np.empty((3, 4, 3))
for i in range(3):
data[:, :, i] = np.asarray([[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]], dtype=np.float64)
data = xp.asarray(data)
expected = xp.asarray([[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]], dtype=xp.float64)
out = ndimage.rotate(data, 90, order=order)
for i in range(3):
assert_array_almost_equal(out[:, :, i], expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate07(self, order, xp):
data = xp.asarray([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
data = xp.permute_dims(data, (2, 1, 0))
expected = xp.asarray([[[0, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]] * 2, dtype=xp.float64)
expected = xp.permute_dims(expected, (2, 1, 0))
out = ndimage.rotate(data, 90, axes=(0, 1), order=order)
assert_array_almost_equal(out, expected)
@pytest.mark.parametrize('order', range(0, 6))
def test_rotate08(self, order, xp):
data = xp.asarray([[[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
data = xp.permute_dims(data, (2, 1, 0)) # == np.transpose
expected = xp.asarray([[[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]]] * 2, dtype=xp.float64)
expected = xp.permute_dims(expected, (2, 1, 0))
out = ndimage.rotate(data, 90, axes=(0, 1), reshape=False, order=order)
assert_array_almost_equal(out, expected)
def test_rotate09(self, xp):
data = xp.asarray([[0, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 0, 0, 0, 0]] * 2, dtype=xp.float64)
with assert_raises(ValueError):
ndimage.rotate(data, 90, axes=(0, data.ndim))
def test_rotate10(self, xp):
data = xp.reshape(xp.arange(45, dtype=xp.float64), (3, 5, 3))
# The output of ndimage.rotate before refactoring
expected = xp.asarray([[[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[6.54914793, 7.54914793, 8.54914793],
[10.84520162, 11.84520162, 12.84520162],
[0.0, 0.0, 0.0]],
[[6.19286575, 7.19286575, 8.19286575],
[13.4730712, 14.4730712, 15.4730712],
[21.0, 22.0, 23.0],
[28.5269288, 29.5269288, 30.5269288],
[35.80713425, 36.80713425, 37.80713425]],
[[0.0, 0.0, 0.0],
[31.15479838, 32.15479838, 33.15479838],
[35.45085207, 36.45085207, 37.45085207],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]], dtype=xp.float64)
out = ndimage.rotate(data, angle=12, reshape=False)
#assert_array_almost_equal(out, expected)
xp_assert_close(out, expected, rtol=1e-6, atol=2e-6)
@xfail_xp_backends("cupy", reason="https://github.com/cupy/cupy/issues/8400")
def test_rotate_exact_180(self, xp):
a = xp.asarray(np.tile(np.arange(5), (5, 1)))
b = ndimage.rotate(ndimage.rotate(a, 180), -180)
xp_assert_equal(a, b)
|
TestRotate
|
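A minimal standalone check of the shape behavior these tests pin down, assuming NumPy and SciPy are installed:

```python
import numpy as np
from scipy import ndimage

data = np.array([[0, 0, 0, 0],
                 [0, 1, 1, 0],
                 [0, 0, 0, 0]], dtype=np.float64)
print(ndimage.rotate(data, 90).shape)                 # (4, 3): reshape=True by default
print(ndimage.rotate(data, 90, reshape=False).shape)  # (3, 4): shape preserved
```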
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py
|
{
"start": 35327,
"end": 38161
}
|
class ____(AwsBaseOperator[EmrHook]):
"""
An operator that modifies an existing EMR cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EmrModifyClusterOperator`
:param cluster_id: cluster identifier
:param step_concurrency_level: Concurrency of the cluster
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param do_xcom_push: if True, cluster_id is pushed to XCom with key cluster_id.
"""
aws_hook_class = EmrHook
template_fields: Sequence[str] = aws_template_fields("cluster_id", "step_concurrency_level")
template_ext: Sequence[str] = ()
ui_color = "#f9c915"
operator_extra_links = (
EmrClusterLink(),
EmrLogsLink(),
)
def __init__(
self,
*,
cluster_id: str,
step_concurrency_level: int,
**kwargs,
):
super().__init__(**kwargs)
self.cluster_id = cluster_id
self.step_concurrency_level = step_concurrency_level
def execute(self, context: Context) -> int:
if self.do_xcom_push:
context["ti"].xcom_push(key="cluster_id", value=self.cluster_id)
EmrClusterLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.cluster_id,
)
EmrLogsLink.persist(
context=context,
operator=self,
region_name=self.hook.conn_region_name,
aws_partition=self.hook.conn_partition,
job_flow_id=self.cluster_id,
log_uri=get_log_uri(emr_client=self.hook.conn, job_flow_id=self.cluster_id),
)
self.log.info("Modifying cluster %s", self.cluster_id)
response = self.hook.conn.modify_cluster(
ClusterId=self.cluster_id, StepConcurrencyLevel=self.step_concurrency_level
)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
raise AirflowException(f"Modify cluster failed: {response}")
self.log.info("Steps concurrency level %d", response["StepConcurrencyLevel"])
return response["StepConcurrencyLevel"]
|
EmrModifyClusterOperator
|
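A hedged DAG usage sketch, assuming the Amazon provider is installed and the cluster ID arrives from an upstream task via XCom (task IDs are hypothetical):

```python
from airflow.providers.amazon.aws.operators.emr import EmrModifyClusterOperator

modify_cluster = EmrModifyClusterOperator(
    task_id="modify_emr_cluster",
    cluster_id="{{ ti.xcom_pull(task_ids='create_cluster') }}",  # hypothetical upstream task
    step_concurrency_level=2,
)
```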
python
|
rq__rq
|
rq/scheduler.py
|
{
"start": 746,
"end": 854
}
|
class ____(str, Enum):
STARTED = 'started'
WORKING = 'working'
STOPPED = 'stopped'
|
SchedulerStatus
|
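Because the enum mixes in `str`, members compare equal to their raw values, which keeps round-trips through Redis simple. A quick illustration:

```python
status = SchedulerStatus.STARTED
assert status == 'started'                                    # str mixin: equal to the raw value
assert SchedulerStatus('working') is SchedulerStatus.WORKING  # lookup by value
```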
python
|
Textualize__textual
|
src/textual/widgets/_markdown.py
|
{
"start": 22424,
"end": 22497
}
|
class ____(MarkdownBlock):
"""A table row Markdown block."""
|
MarkdownTR
|
python
|
wandb__wandb
|
wandb/integration/weave/interface.py
|
{
"start": 301,
"end": 1165
}
|
class ____:
entity: str
"""The entity to which the run is logging. Never empty."""
project: str
"""The project to which the run is logging. Never empty."""
run_id: str
"""The run's ID. Never empty."""
def active_run_path() -> RunPath | None:
"""Returns the path of an initialized, unfinished run.
Returns None if all initialized runs are finished. If there is
more than one active run, an arbitrary path is returned.
The run may be finished by the time its path is returned.
Thread-safe.
"""
singleton = wandb_setup.singleton()
if (
(run := singleton.most_recent_active_run)
and run.entity
and run.project
and run.id
):
return RunPath(
entity=run.entity,
project=run.project,
run_id=run.id,
)
return None
|
RunPath
|
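A hedged usage sketch, assuming `wandb` is installed and a run has been initialized (the entity and project names are hypothetical):

```python
import wandb

wandb.init(entity="my-team", project="demo")
path = active_run_path()
if path is not None:
    print(f"{path.entity}/{path.project}/{path.run_id}")
```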
python
|
PrefectHQ__prefect
|
tests/server/database/test_dependencies.py
|
{
"start": 4359,
"end": 7193
}
|
class ____:
@pytest.fixture(autouse=True)
def _setup(self):
self.db: PrefectDBInterface = dependencies.provide_database_interface()
def test_decorated_function(self):
@dependencies.db_injector
def function_with_injected_db(
db: PrefectDBInterface, foo: int
) -> PrefectDBInterface:
"""The documentation is sublime"""
return db
assert function_with_injected_db(42) is self.db
unwrapped = function_with_injected_db.__wrapped__
assert function_with_injected_db.__doc__ == unwrapped.__doc__
function_with_injected_db.__doc__ = "Something else"
assert function_with_injected_db.__doc__ == "Something else"
assert unwrapped.__doc__ == function_with_injected_db.__doc__
del function_with_injected_db.__doc__
assert function_with_injected_db.__doc__ is None
assert unwrapped.__doc__ is function_with_injected_db.__doc__
class SomeClass:
@dependencies.db_injector
def method_with_injected_db(
self, db: PrefectDBInterface, foo: int
) -> PrefectDBInterface:
"""The documentation is sublime"""
return db
def test_decorated_method(self):
instance = self.SomeClass()
assert instance.method_with_injected_db(42) is self.db
def test_unbound_decorated_method(self):
instance = self.SomeClass()
# manually binding the unbound descriptor to an instance
bound = self.SomeClass.method_with_injected_db.__get__(instance)
assert bound(42) is self.db
def test_bound_method_attributes(self):
instance = self.SomeClass()
bound = instance.method_with_injected_db
assert bound.__self__ is instance
assert bound.__func__ is self.SomeClass.method_with_injected_db.__wrapped__
unwrapped = bound.__wrapped__
assert bound.__doc__ == unwrapped.__doc__
before = bound.__doc__
with pytest.raises(AttributeError, match="is not writable$"):
bound.__doc__ = "Something else"
with pytest.raises(AttributeError, match="is not writable$"):
del bound.__doc__
assert unwrapped.__doc__ == before
def test_decorated_coroutine_function(self):
@dependencies.db_injector
async def coroutine_with_injected_db(
db: PrefectDBInterface, foo: int
) -> PrefectDBInterface:
return db
if sys.version_info < (3, 10):
# `inspect.iscoroutinefunction` is not flexible enough in Python 3.9
assert asyncio.iscoroutinefunction(coroutine_with_injected_db)
else:
assert inspect.iscoroutinefunction(coroutine_with_injected_db)
assert asyncio.run(coroutine_with_injected_db(42)) is self.db
|
TestDBInject
|
python
|
h5py__h5py
|
api_gen.py
|
{
"start": 1569,
"end": 6235
}
|
class ____:
"""
Represents one line from the api_functions.txt file.
Exists to provide the following attributes:
nogil: String indicating if we should release the GIL to call this
function. Any Python callbacks it could trigger must
acquire the GIL (e.g. using 'with gil' in Cython).
mpi: Bool indicating if MPI required
ros3: Bool indicating if ROS3 required
direct_vfd: Bool indicating if DIRECT_VFD required
    min_version: None or a minimum-version tuple (max_version is the analogous upper bound)
code: String with function return type
fname: String with function name
sig: String with raw function signature
args: String with sequence of arguments to call function
Example: MPI 1.12.2 int foo(char* a, size_t b)
.nogil: ""
.mpi: True
.ros3: False
.direct_vfd: False
    .min_version: (1, 12, 2)
.code: "int"
.fname: "foo"
.sig: "char* a, size_t b"
.args: "a, b"
"""
PATTERN = re.compile(r"""(?P<mpi>(MPI)[ ]+)?
(?P<ros3>(ROS3)[ ]+)?
(?P<direct_vfd>(DIRECT_VFD)[ ]+)?
(?P<min_version>([0-9]+\.[0-9]+\.[0-9]+))?
(-(?P<max_version>([0-9]+\.[0-9]+\.[0-9]+)))?
([ ]+)?
(?P<code>(unsigned[ ]+)?[a-zA-Z_]+[a-zA-Z0-9_]*\**)[ ]+
(?P<fname>[a-zA-Z_]+[a-zA-Z0-9_]*)[ ]*
\((?P<sig>[a-zA-Z0-9_,* ]*)\)
([ ]+)?
(?P<nogil>(nogil))?
""", re.VERBOSE)
SIG_PATTERN = re.compile(r"""
(?:unsigned[ ]+)?
(?:[a-zA-Z_]+[a-zA-Z0-9_]*\**)
[ ]+[ *]*
(?P<param>[a-zA-Z_]+[a-zA-Z0-9_]*)
""", re.VERBOSE)
def __init__(self, text):
""" Break the line into pieces and populate object attributes.
text: A valid function line, with leading/trailing whitespace stripped.
"""
m = self.PATTERN.match(text)
if m is None:
raise ValueError("Invalid line encountered: {0}".format(text))
parts = m.groupdict()
self.nogil = "nogil" if parts['nogil'] else ""
self.mpi = parts['mpi'] is not None
self.ros3 = parts['ros3'] is not None
self.direct_vfd = parts['direct_vfd'] is not None
self.min_version = parts['min_version']
if self.min_version is not None:
self.min_version = tuple(int(x) for x in self.min_version.split('.'))
self.max_version = parts['max_version']
if self.max_version is not None:
self.max_version = tuple(int(x) for x in self.max_version.split('.'))
self.code = parts['code']
self.fname = parts['fname']
self.sig = parts['sig']
sig_const_stripped = self.sig.replace('const', '')
self.args = self.SIG_PATTERN.findall(sig_const_stripped)
if self.args is None:
raise ValueError("Invalid function signature: {0}".format(self.sig))
self.args = ", ".join(self.args)
# Figure out what test and return value to use with error reporting
if '*' in self.code or self.code in ('H5T_conv_t',):
self.err_condition = "==NULL"
self.err_value = f"<{self.code}>NULL"
elif self.code in ('int', 'herr_t', 'htri_t', 'hid_t', 'hssize_t', 'ssize_t') \
or re.match(r'H5[A-Z]+_[a-zA-Z_]+_t', self.code):
self.err_condition = "<0"
self.err_value = f"<{self.code}>-1"
elif self.code in ('unsigned int', 'haddr_t', 'hsize_t', 'size_t'):
self.err_condition = "==0"
self.err_value = f"<{self.code}>0"
else:
raise ValueError("Return code <<%s>> unknown" % self.code)
raw_preamble = """\
#
# Warning: this file is auto-generated from api_gen.py. DO NOT EDIT!
#
from .api_types_hdf5 cimport *
from .api_types_ext cimport *
"""
def_preamble = """\
#
# Warning: this file is auto-generated from api_gen.py. DO NOT EDIT!
#
from .api_types_hdf5 cimport *
from .api_types_ext cimport *
"""
imp_preamble = """\
#
# Warning: this file is auto-generated from api_gen.py. DO NOT EDIT!
#
from .api_types_ext cimport *
from .api_types_hdf5 cimport *
from . cimport _hdf5
from ._errors cimport set_exception, set_default_error_handler
"""
|
Line
|
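Running the docstring's own example line through the parser shows where each group lands (name filled in as `Line`):

```python
line = Line("MPI 1.12.2 int foo(char* a, size_t b)")
print(line.mpi, line.min_version)          # True (1, 12, 2)
print(line.code, line.fname)               # int foo
print(line.sig, "->", line.args)           # char* a, size_t b -> a, b
print(line.err_condition, line.err_value)  # <0 <int>-1
```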
python
|
huggingface__transformers
|
tests/models/falcon_mamba/test_modeling_falcon_mamba.py
|
{
"start": 1629,
"end": 9463
}
|
class ____:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
intermediate_size=32,
hidden_act="silu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
num_labels=3,
num_choices=4,
scope=None,
tie_word_embeddings=False,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
self.tie_word_embeddings = tie_word_embeddings
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = ids_tensor([self.batch_size, self.seq_length], 1)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config(
gradient_checkpointing=gradient_checkpointing,
scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
reorder_and_upcast_attn=reorder_and_upcast_attn,
)
return (
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
return FalconMambaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
intermediate_size=self.intermediate_size,
activation_function=self.hidden_act,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
tie_word_embeddings=self.tie_word_embeddings,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
return (
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_falcon_mamba_model(self, config, input_ids, *args):
config.output_hidden_states = True
model = FalconMambaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1)
def create_and_check_causal_lm(self, config, input_ids, *args):
model = FalconMambaForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_state_equivalency(self, config, input_ids, *args):
model = FalconMambaModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
output_whole = outputs.last_hidden_state
outputs = model(
input_ids[:, :-1],
use_cache=True,
cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device),
)
output_one = outputs.last_hidden_state
# Using the state computed on the first inputs, we will get the same output
outputs = model(
input_ids[:, -1:],
use_cache=True,
cache_params=outputs.cache_params,
cache_position=torch.arange(config.conv_kernel, config.conv_kernel + 1, device=input_ids.device),
)
output_two = outputs.last_hidden_state
self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5))
    # TODO: the original mamba does not support decoding more than 1 token; neither do we
def create_and_check_falcon_mamba_cached_slow_forward_and_backwards(
self, config, input_ids, *args, gradient_checkpointing=False
):
model = FalconMambaModel(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
# create cache
cache = model(input_ids, use_cache=True).cache_params
cache.reset()
# use cache
token_emb = model.embeddings(input_ids)
outputs = model.layers[0].mixer.slow_forward(
token_emb, cache, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device)
)
loss = torch.log1p(torch.abs(outputs.sum()))
self.parent.assertEqual(loss.shape, ())
self.parent.assertEqual(outputs.shape, (self.batch_size, self.seq_length, self.hidden_size))
loss.backward()
def create_and_check_falcon_mamba_lm_head_forward_and_backwards(
self, config, input_ids, *args, gradient_checkpointing=False
):
model = FalconMambaForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def prepare_config_and_inputs_for_common(self):
(
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
# Copied from transformers.tests.models.mamba.MambaModelTest with Mamba->Falcon,mamba->falcon_mamba,FalconMambaCache->MambaCache
|
FalconMambaModelTester
|
python
|
ansible__ansible
|
test/units/module_utils/basic/test_heuristic_log_sanitize.py
|
{
"start": 831,
"end": 3638
}
|
class ____:
def setup_method(self):
self.URL_SECRET = 'http://username:pas:word@foo.com/data'
self.SSH_SECRET = 'username:pas:word@foo.com/data'
self.clean_data = repr(self._gen_data(3, True, True, 'no_secret_here'))
self.url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
self.ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
for i in range(1, records, 1):
host_facts = {
'host%s' % i: {
'pstack': {
'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']
},
}
}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def test_did_not_hide_too_much(self):
assert heuristic_log_sanitize(self.clean_data) == self.clean_data
def test_hides_url_secrets(self):
url_output = heuristic_log_sanitize(self.url_data)
# Basic functionality: Successfully hid the password
assert 'pas:word' not in url_output
# Slightly more advanced, we hid all of the password despite the ":"
assert 'pas' not in url_output
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
assert len(url_output) == len(self.url_data)
def test_hides_ssh_secrets(self):
ssh_output = heuristic_log_sanitize(self.ssh_data)
assert 'pas:word' not in ssh_output
# Slightly more advanced, we hid all of the password despite the ":"
assert 'pas' not in ssh_output
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
assert ssh_output.startswith("{'")
assert ssh_output.endswith("}")
assert ":********@foo.com/data'" in ssh_output
def test_hides_parameter_secrets(self):
output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
assert 'secret' not in output
def test_no_password(self):
assert heuristic_log_sanitize('foo@bar') == 'foo@bar'
|
TestHeuristicLogSanitize
|
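A quick standalone illustration of the heuristic, assuming `ansible` is importable; as the tests note, the detected password is replaced with a fixed run of eight asterisks:

```python
from ansible.module_utils.basic import heuristic_log_sanitize

print(heuristic_log_sanitize('http://username:pas:word@foo.com/data'))
# -> http://username:********@foo.com/data
```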
python
|
readthedocs__readthedocs.org
|
readthedocs/embed/v3/views.py
|
{
"start": 16362,
"end": 16436
}
|
class ____(SettingsOverrideObject):
_default_class = EmbedAPIBase
|
EmbedAPI
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/pool_test.py
|
{
"start": 3583,
"end": 4092
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, kernel, stride, N, C, D, H, W, device, op_func):
self.inputs = {"input": torch.rand(N, C, D, H, W, device=device)}
self.op_func = op_func(kernel, stride=stride)
def forward(self, input):
return self.op_func(input)
op_bench.generate_pt_tests_from_op_list(
pool_3d_ops_list, pool_3d_configs_short + pool_3d_configs_long, Pool3dBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
Pool3dBenchmark
|
python
|
walkccc__LeetCode
|
solutions/2126. Destroying Asteroids/2126.py
|
{
"start": 0,
"end": 229
}
|
class ____:
def asteroidsDestroyed(self, mass: int, asteroids: list[int]) -> bool:
for asteroid in sorted(asteroids):
if mass >= asteroid:
mass += asteroid
else:
return False
return True
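A quick sanity check of the greedy idea (sort ascending so the planet always absorbs the smallest remaining asteroid first); values adapted from the LeetCode examples:
assert Solution().asteroidsDestroyed(10, [3, 9, 19, 5, 21]) is True
assert Solution().asteroidsDestroyed(5, [4, 9, 24]) is False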
|
Solution
|
python
|
numpy__numpy
|
numpy/_core/tests/test_shape_base.py
|
{
"start": 585,
"end": 1813
}
|
class ____:
def test_0D_array(self):
a = array(1)
b = array(2)
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1]), array([2])]
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1, 2])
b = array([2, 3])
res = [atleast_1d(a), atleast_1d(b)]
desired = [array([1, 2]), array([2, 3])]
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_3D_array(self):
a = array([[1, 2], [1, 2]])
b = array([[2, 3], [2, 3]])
a = array([a, a])
b = array([b, b])
res = [atleast_1d(a), atleast_1d(b)]
desired = [a, b]
assert_array_equal(res, desired)
def test_r1array(self):
""" Test to make sure equivalent Travis O's r1array function
"""
assert_(atleast_1d(3).shape == (1,))
assert_(atleast_1d(3j).shape == (1,))
assert_(atleast_1d(3.0).shape == (1,))
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
|
TestAtleast1d
|
python
|
kamyu104__LeetCode-Solutions
|
Python/correct-a-binary-tree.py
|
{
"start": 66,
"end": 159
}
|
class ____(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
|
TreeNode
|
python
|
ApeWorX__ape
|
src/ape/utils/basemodel.py
|
{
"start": 1471,
"end": 1743
}
|
class ____(property):
_cache = None
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner) -> Any: # type: ignore[override]
if self._cache is None:
self._cache = self.fn(owner)
return self._cache
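A minimal usage sketch (Example is hypothetical; note the cache lives on the descriptor itself, so the value is computed once and shared class-wide):
class Example:
    @manager_access
    def manager(cls):
        return object()  # stand-in for an expensive lookup
assert Example().manager is Example().manager  # second access hits the cache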
|
manager_access
|
python
|
bokeh__bokeh
|
src/bokeh/core/has_props.py
|
{
"start": 9059,
"end": 9135
}
|
class ____:
"""Resolve this class by a non-qualified name. """
|
NonQualified
|
python
|
aimacode__aima-python
|
utils.py
|
{
"start": 17768,
"end": 18815
}
|
class ____:
"""Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q."""
def __init__(self, op, lhs):
self.op, self.lhs = op, lhs
def __or__(self, rhs):
return Expr(self.op, self.lhs, rhs)
def __repr__(self):
return "PartialExpr('{}', {})".format(self.op, self.lhs)
def expr(x):
"""Shortcut to create an Expression. x is a str in which:
- identifiers are automatically defined as Symbols.
- ==> is treated as an infix |'==>'|, as are <== and <=>.
If x is already an Expression, it is returned unchanged. Example:
>>> expr('P & Q ==> Q')
((P & Q) ==> Q)
"""
return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol)) if isinstance(x, str) else x
infix_ops = '==> <== <=>'.split()
def expr_handle_infix_ops(x):
"""Given a str, return a new str with ==> replaced by |'==>'|, etc.
>>> expr_handle_infix_ops('P ==> Q')
"P |'==>'| Q"
"""
for op in infix_ops:
x = x.replace(op, '|' + repr(op) + '|')
return x
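A short trace of the rewriting trick (this assumes Expr.__or__, defined elsewhere in this module, returns PartialExpr(rhs, self) when rhs is a plain string):
s = expr_handle_infix_ops('P ==> Q')  # "P |'==>'| Q"
e = expr('P ==> Q')  # P | '==>' -> PartialExpr('==>', P); ... | Q -> Expr('==>', P, Q)
print(e)  # (P ==> Q)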
|
PartialExpr
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/connectors/asyncio.py
|
{
"start": 1219,
"end": 1782
}
|
class ____(Protocol):
"""protocol representing an async adapted version of a
:pep:`249` database connection.
"""
# note that async DBAPIs don't agree if close() should be awaitable,
# so it is omitted here and picked up by the __getattr__ hook below
async def commit(self) -> None: ...
def cursor(self, *args: Any, **kwargs: Any) -> AsyncIODBAPICursor: ...
async def rollback(self) -> None: ...
def __getattr__(self, key: str) -> Any: ...
def __setattr__(self, key: str, value: Any) -> None: ...
|
AsyncIODBAPIConnection
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/ddl.py
|
{
"start": 15093,
"end": 15274
}
|
class ____(_CreateDropBase[_SI]):
def __init__(self, element: _SI, if_exists: bool = False) -> None:
super().__init__(element)
self.if_exists = if_exists
|
_DropBase
|
python
|
bokeh__bokeh
|
src/bokeh/application/handlers/handler.py
|
{
"start": 2554,
"end": 7953
}
|
class ____:
''' Provide a mechanism for Bokeh applications to build up new Bokeh
Documents.
'''
_failed: bool
_error: str | None
_error_detail: str | None
_static: str | None
def __init__(self) -> None:
self._failed = False
self._error = None
self._error_detail = None
self._static = None
# Properties --------------------------------------------------------------
@property
def error(self) -> str | None:
''' If the handler fails, may contain a related error message.
'''
return self._error
@property
def error_detail(self) -> str | None:
''' If the handler fails, may contain a traceback or other details.
'''
return self._error_detail
@property
def failed(self) -> bool:
''' ``True`` if the handler failed to modify the doc
'''
return self._failed
@property
def safe_to_fork(self) -> bool:
return True
# Public methods ----------------------------------------------------------
def modify_document(self, doc: Document) -> None:
''' Modify an application document in a specified manner.
When a Bokeh server session is initiated, the Bokeh server asks the
Application for a new Document to service the session. To do this,
the Application first creates a new empty Document, then it passes
this Document to the ``modify_document`` method of each of its
handlers. When all handlers have updated the Document, it is used to
service the user session.
*Subclasses must implement this method*
Args:
doc (Document) : A Bokeh Document to update in-place
Returns:
None
'''
raise NotImplementedError("implement modify_document()")
def on_server_loaded(self, server_context: ServerContext) -> None:
''' Execute code when the server is first started.
Subclasses may implement this method to provide for any one-time
initialization that is necessary after the server starts, but
before any sessions are created.
Args:
server_context (ServerContext) :
'''
pass
def on_server_unloaded(self, server_context: ServerContext) -> None:
''' Execute code when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Subclasses may implement this method to provide for any one-time
tear down that is necessary before the server exits.
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal.
'''
pass
async def on_session_created(self, session_context: SessionContext) -> None:
''' Execute code when a new session is created.
Subclasses may implement this method to provide for any per-session
initialization that is necessary before ``modify_doc`` is called for
the session.
Args:
session_context (SessionContext) :
'''
pass
async def on_session_destroyed(self, session_context: SessionContext) -> None:
''' Execute code when a session is destroyed.
Subclasses may implement this method to provide for any per-session
tear-down that is necessary when sessions are destroyed.
Args:
session_context (SessionContext) :
'''
pass
def process_request(self, request: HTTPServerRequest) -> dict[str, Any]:
''' Processes incoming HTTP request returning a dictionary of
additional data to add to the session_context.
Args:
request: HTTP request
Returns:
A dictionary of JSON serializable data to be included on
the session context.
'''
return {}
def static_path(self) -> str | None:
''' Return a path to app-specific static resources, if applicable.
'''
if self.failed:
return None
else:
return self._static
def url_path(self) -> str | None:
''' Returns a default URL path, if applicable.
Handlers subclasses may optionally implement this method, to inform
the Bokeh application what URL it should be installed at.
If multiple handlers specify ``url_path`` the Application will use the
value from the first handler in its list of handlers.
'''
return None
def handle_exception(handler: Handler | CodeRunner, e: Exception) -> None:
''' Record an exception and details on a Handler.
'''
handler._failed = True
handler._error_detail = traceback.format_exc()
_, _, exc_traceback = sys.exc_info()
filename, line_number, func, txt = traceback.extract_tb(exc_traceback)[-1]
basename = os.path.basename(filename)
handler._error = f"{e}\nFile {basename!r}, line {line_number}, in {func}:\n{txt}"
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
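A minimal subclass sketch of the lifecycle described above (TitleHandler is hypothetical; it only sets a title on each new session document):
class TitleHandler(Handler):
    def modify_document(self, doc: Document) -> None:
        doc.title = "My App"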
|
Handler
|
python
|
numpy__numpy
|
numpy/lib/_arraysetops_impl.py
|
{
"start": 14805,
"end": 14944
}
|
class ____(NamedTuple):
values: np.ndarray
indices: np.ndarray
inverse_indices: np.ndarray
counts: np.ndarray
|
UniqueAllResult
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/memory_checker.py
|
{
"start": 1373,
"end": 4522
}
|
class ____(object):
"""Memory leak detection class.
This is a utility class to detect Python and C++ memory leaks. It's intended
for both testing and debugging. Basic usage:
>>> # MemoryChecker() context manager tracks memory status inside its scope.
>>> with MemoryChecker() as memory_checker:
>>> tensors = []
>>> for _ in range(10):
>>> # Simulating `tf.constant(1)` object leak every iteration.
>>> tensors.append(tf.constant(1))
>>>
>>> # Take a memory snapshot for later analysis.
>>> memory_checker.record_snapshot()
>>>
>>> # `report()` generates a html graph file showing allocations over
>>> # snapshots per every stack trace.
>>> memory_checker.report()
>>>
>>> # This assertion will detect `tf.constant(1)` object leak.
>>> memory_checker.assert_no_leak_if_all_possibly_except_one()
`record_snapshot()` must be called once every iteration at the same location.
This is because the detection algorithm relies on the assumption that if there
is a leak, it's happening similarly on every snapshot.
"""
@trace.trace_wrapper
def __enter__(self):
self._python_memory_checker = _PythonMemoryChecker()
return self
@trace.trace_wrapper
def __exit__(self, exc_type, exc_value, traceback):
pass
# We do not enable trace_wrapper on this function to avoid contaminating
# the snapshot.
def record_snapshot(self):
"""Take a memory snapshot for later analysis.
`record_snapshot()` must be called once every iteration at the same
location. This is because the detection algorithm relies on the assumption
that if there is a leak, it's happening similarly on every snapshot.
The recommended number of `record_snapshot()` calls depends on the testing
code complexity and the allocation pattern.
"""
self._python_memory_checker.record_snapshot()
@trace.trace_wrapper
def report(self):
"""Generates a html graph file showing allocations over snapshots.
It creates a temporary directory and puts all the output files there.
If this is running under Google internal testing infra, it will use the
directory provided by the infra instead.
"""
self._python_memory_checker.report()
@trace.trace_wrapper
def assert_no_leak_if_all_possibly_except_one(self):
"""Raises an exception if a leak is detected.
This algorithm classifies a series of allocations as a leak if it's the same
type(Python) or it happens at the same stack trace(C++) at every snapshot,
but possibly except one snapshot.
"""
self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()
@trace.trace_wrapper
def assert_no_new_python_objects(self, threshold=None):
"""Raises an exception if there are new Python objects created.
It computes the number of new Python objects per type using the first and
the last snapshots.
Args:
threshold: A dictionary of [Type name string], [count] pair. It won't
raise an exception if the new Python objects are under this threshold.
"""
self._python_memory_checker.assert_no_new_objects(threshold=threshold)
|
MemoryChecker
|
python
|
django__django
|
tests/syndication_tests/feeds.py
|
{
"start": 1684,
"end": 2429
}
|
class ____(TestRss2Feed):
class TimeToLive:
@wraps_decorator
def __call__(self):
return 800
@staticmethod
@wraps_decorator
def feed_copyright():
return "Copyright (c) 2022, John Doe"
ttl = TimeToLive()
@staticmethod
def categories():
return ("javascript", "vue")
@wraps_decorator
def title(self):
return "Overridden title"
@wraps_decorator
def item_title(self, item):
return f"Overridden item title: {item.title}"
@wraps_decorator
def description(self, obj):
return "Overridden description"
@wraps_decorator
def item_description(self):
return "Overridden item description"
|
TestRss2FeedWithDecoratedMethod
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/organization_traces.py
|
{
"start": 4295,
"end": 4501
}
|
class ____(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.DATA_BROWSING
@region_silo_endpoint
|
OrganizationTracesEndpointBase
|
python
|
pytorch__pytorch
|
test/inductor/test_remote_cache.py
|
{
"start": 577,
"end": 618
}
|
class ____:
fail: str | None = None
|
TestSample
|
python
|
doocs__leetcode
|
solution/0300-0399/0323.Number of Connected Components in an Undirected Graph/Solution2.py
|
{
"start": 563,
"end": 751
}
|
class ____:
def countComponents(self, n: int, edges: List[List[int]]) -> int:
uf = UnionFind(n)
for a, b in edges:
n -= uf.union(a, b)
return n
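UnionFind is defined earlier in this file; a minimal sketch consistent with union() returning 1 when two components merge (which is what makes n -= uf.union(a, b) count components) and 0 otherwise:
class UnionFind:
    def __init__(self, n: int):
        self.p = list(range(n))
    def find(self, x: int) -> int:
        if self.p[x] != x:
            self.p[x] = self.find(self.p[x])  # path compression
        return self.p[x]
    def union(self, a: int, b: int) -> int:
        pa, pb = self.find(a), self.find(b)
        if pa == pb:
            return 0  # already in the same component
        self.p[pa] = pb
        return 1  # two components merged into one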
|
Solution
|
python
|
ray-project__ray
|
rllib/env/wrappers/pettingzoo_env.py
|
{
"start": 5232,
"end": 6645
}
|
class ____(MultiAgentEnv):
def __init__(self, env):
super().__init__()
self.par_env = env
self.par_env.reset()
self._agent_ids = set(self.par_env.agents)
# If these important attributes are not set, try to infer them.
if not self.agents:
self.agents = list(self._agent_ids)
if not self.possible_agents:
self.possible_agents = self.agents.copy()
self.observation_space = gym.spaces.Dict(
{aid: self.par_env.observation_space(aid) for aid in self._agent_ids}
)
self.action_space = gym.spaces.Dict(
{aid: self.par_env.action_space(aid) for aid in self._agent_ids}
)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
obs, info = self.par_env.reset(seed=seed, options=options)
return obs, info or {}
def step(self, action_dict):
obss, rews, terminateds, truncateds, infos = self.par_env.step(action_dict)
terminateds["__all__"] = all(terminateds.values())
truncateds["__all__"] = all(truncateds.values())
return obss, rews, terminateds, truncateds, infos
def close(self):
self.par_env.close()
def render(self):
return self.par_env.render(self.render_mode)
@property
def get_sub_environments(self):
return self.par_env.unwrapped
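A hedged usage sketch (assumes pettingzoo is installed; pistonball_v6 is just one parallel env):
from pettingzoo.butterfly import pistonball_v6
env = ParallelPettingZooEnv(pistonball_v6.parallel_env())
obs, infos = env.reset(seed=0)
actions = {aid: env.action_space[aid].sample() for aid in obs}
obs, rewards, terminateds, truncateds, infos = env.step(actions)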
|
ParallelPettingZooEnv
|
python
|
cython__cython
|
tests/run/pure_mode_cmethod_inheritance_T583.py
|
{
"start": 1108,
"end": 1475
}
|
class ____(Base):
'''
>>> derived = Derived2()
>>> print(derived.noargs())
Derived2
>>> print(derived.int_arg(1))
Derived2
>>> print(derived._class())
Derived2
'''
def noargs(self):
return "Derived2"
def int_arg(self, i):
return "Derived2"
@classmethod
def _class(tp):
return "Derived2"
|
Derived2
|
python
|
apache__airflow
|
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_pools.py
|
{
"start": 1841,
"end": 2074
}
|
class ____:
@pytest.fixture(autouse=True)
def setup(self) -> None:
clear_db_pools()
def teardown_method(self) -> None:
clear_db_pools()
def create_pools(self):
_create_pools()
|
TestPoolsEndpoint
|
python
|
eventlet__eventlet
|
tests/patcher_test.py
|
{
"start": 7661,
"end": 9510
}
|
class ____(ProcessBase):
TEST_TIMEOUT = 3
def test_simple(self):
new_mod = """
import eventlet
from eventlet import patcher
patcher.monkey_patch()
from eventlet import tpool
print("newmod {0}".format(tpool.execute(len, "hi")))
print("newmod {0}".format(tpool.execute(len, "hi2")))
tpool.killall()
"""
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 3, output)
assert lines[0].startswith('newmod'), repr(output)
assert '2' in lines[0], repr(output)
assert '3' in lines[1], repr(output)
def test_unpatched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=False)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, lines)
def test_patched_thread(self):
new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=True)
from eventlet import tpool
import time
"""
new_mod += test_monkey_patch_threading
new_mod += "\ntest_monkey_patch_threading()\n"
self.write_to_tempfile("newmod", new_mod)
output, lines = self.launch_subprocess('newmod.py')
self.assertEqual(len(lines), 2, "\n".join(lines))
def test_subprocess_after_monkey_patch():
code = '''\
import sys
import eventlet
eventlet.monkey_patch()
from eventlet.green import subprocess
subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait()
print('pass')
'''
output = tests.run_python(
path=None,
args=['-c', code],
)
assert output.rstrip() == b'pass'
|
Tpool
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/inputs.py
|
{
"start": 14487,
"end": 16097
}
|
class ____(graphene.InputObjectType):
statuses = graphene.List(
graphene.NonNull("dagster_graphql.schema.backfill.GrapheneBulkActionStatus")
)
createdBefore = graphene.InputField(graphene.Float)
createdAfter = graphene.InputField(graphene.Float)
class Meta:
description = """This type represents a filter on Dagster Bulk Actions (backfills)."""
name = "BulkActionsFilter"
def to_selector(self):
statuses = (
[BulkActionStatus[status.value] for status in self.statuses] if self.statuses else None
)
created_before = datetime_from_timestamp(self.createdBefore) if self.createdBefore else None
created_after = datetime_from_timestamp(self.createdAfter) if self.createdAfter else None
return BulkActionsFilter(
statuses=statuses,
created_before=created_before,
created_after=created_after,
)
types = [
GrapheneAssetKeyInput,
GrapheneExecutionMetadata,
GrapheneExecutionParams,
GrapheneExecutionTag,
GrapheneInstigationSelector,
GrapheneMarshalledInput,
GrapheneMarshalledOutput,
GrapheneLaunchBackfillParams,
GraphenePartitionSetSelector,
GraphenePartitionsByAssetSelector,
GrapheneRunsFilter,
GraphenePipelineSelector,
GrapheneRepositorySelector,
GrapheneResourceSelector,
GrapheneScheduleSelector,
GrapheneSensorSelector,
GrapheneStepExecution,
GrapheneStepOutputHandle,
GrapheneTagInput,
GrapheneReportRunlessAssetEventsParams,
GrapheneBulkActionsFilter,
]
|
GrapheneBulkActionsFilter
|
python
|
huggingface__transformers
|
src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
|
{
"start": 68710,
"end": 69606
}
|
class ____(ModelOutput):
r"""
loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
Total loss.
prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Prediction output from the classification head.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
Backbone embeddings before passing through the head.
hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
loss: Optional[torch.FloatTensor] = None
prediction_outputs: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
PatchTSMixerForTimeSeriesClassificationOutput
|
python
|
spyder-ide__spyder
|
spyder/app/tests/script_outline_3.py
|
{
"start": 326,
"end": 431
}
|
class ____:
E = 1
def five(self):
return 5
def six(self):
return 4
|
AnotherClass
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/sparse_ops/sparse_concat_op_test.py
|
{
"start": 1078,
"end": 15232
}
|
class ____(test.TestCase):
def _SparseTensor_UnknownShape(self,
ind_shape=None,
val_shape=None,
shape_shape=None):
return sparse_tensor.SparseTensor(
array_ops.placeholder(
dtypes.int64, shape=ind_shape),
array_ops.placeholder(
dtypes.float32, shape=val_shape),
array_ops.placeholder(
dtypes.int64, shape=shape_shape))
def _SparseTensorValue_3x3(self):
# [ 1]
# [2 ]
# [3 4]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array([1, 2, 3, 4])
shape = np.array([3, 3])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x3(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x3())
def _SparseTensorValue_3x5(self):
# [ ]
# [ 1 ]
# [2 1 0]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array([1, 2, 1, 0])
shape = np.array([3, 5])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.float32), np.array(shape, np.int64))
def _SparseTensor_3x5(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x5())
def _SparseTensor_3x2(self):
# [ ]
# [1 ]
# [2 ]
ind = np.array([[1, 0], [2, 0]])
val = np.array([1, 2])
shape = np.array([3, 2])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3(self):
# [ 1 ]
# [1 2]
ind = np.array([[0, 1], [1, 0], [1, 2]])
val = np.array([1, 1, 2])
shape = np.array([2, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self):
ind = np.array([
[0, 0, 1],
[0, 1, 0], [0, 1, 2],
[1, 0, 3],
[1, 1, 1], [1, 1, 3],
[1, 2, 2]])
val = np.array([1, 10, 12, 103, 111, 113, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_NoNonZeros(self, dense_shape):
ind = np.empty(shape=(0, len(dense_shape)))
val = np.array([])
shape = np.array(dense_shape)
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x3(self):
# [ a]
# [b ]
# [c d]
ind = np.array([[0, 2], [1, 0], [2, 0], [2, 2]])
val = np.array(["a", "b", "c", "d"])
shape = np.array([3, 3])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_String3x5(self):
# [ ]
# [ e ]
# [f g h]
ind = np.array([[1, 1], [2, 0], [2, 3], [2, 4]])
val = np.array(["e", "f", "g", "h"])
shape = np.array([3, 5])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def testConcat1(self):
with self.session() as sess:
# concat(A):
# [ 1]
# [2 ]
# [3 4]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
# Note that we ignore concat_dim in this case since we short-circuit the
# single-input case in python.
for concat_dim in (-2000, 1, 2000):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a])
self.assertEqual(sp_concat.indices.get_shape(), [4, 2])
self.assertEqual(sp_concat.values.get_shape(), [4])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2]])
self.assertAllEqual(concat_out.values, [1, 2, 3, 4])
self.assertAllEqual(concat_out.dense_shape, [3, 3])
def testConcat2(self):
with self.session() as sess:
# concat(A, B):
# [ 1 ]
# [2 1 ]
# [3 4 2 1 0]
for sp_a in (self._SparseTensorValue_3x3(), self._SparseTensor_3x3()):
for sp_b in (self._SparseTensorValue_3x5(), self._SparseTensor_3x5()):
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4],
[2, 0], [2, 2], [2, 3],
[2, 6], [2, 7]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 3, 4, 2, 1, 0])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
def testConcatDim0(self):
with self.session() as sess:
# concat(A, D):
# [ 1]
# [2 ]
# [3 4]
# [ 1 ]
# [1 2]
sp_a = self._SparseTensor_3x3()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_d])
self.assertEqual(sp_concat.indices.get_shape(), [7, 2])
self.assertEqual(sp_concat.values.get_shape(), [7])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [3, 1], [4, 0], [4, 2]])
self.assertAllEqual(concat_out.values, np.array([1, 2, 3, 4, 1, 1, 2]))
self.assertAllEqual(concat_out.dense_shape, np.array([5, 3]))
def testConcat3(self):
with self.session() as sess:
# concat(A, B, C):
# [ 1 ]
# [2 1 1 ]
# [3 4 2 1 0 2 ]
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [10, 2])
self.assertEqual(sp_concat.values.get_shape(), [10])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices, [[0, 2], [1, 0], [1, 4], [1, 8],
[2, 0], [2, 2], [2, 3], [2, 6],
[2, 7], [2, 8]])
self.assertAllEqual(concat_out.values, [1, 2, 1, 1, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(concat_out.dense_shape, [3, 10])
def testConcatNoNonZeros(self):
sp_a = self._SparseTensor_NoNonZeros((2, 3, 4))
sp_b = self._SparseTensor_NoNonZeros((2, 7, 4))
sp_c = self._SparseTensor_NoNonZeros((2, 5, 4))
with self.session() as sess:
concat_dim = 1
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [0, 3])
self.assertEqual(sp_concat.values.get_shape(), [0])
self.assertEqual(sp_concat.dense_shape.get_shape(), [3])
concat_out = self.evaluate(sp_concat)
self.assertEqual(concat_out.indices.shape, (0, 3))
self.assertEqual(concat_out.values.shape, (0,))
self.assertAllEqual(concat_out.dense_shape, [2, 15, 4])
def testConcatSomeNoNonZeros(self):
sp_a = self._SparseTensor_NoNonZeros((2, 7, 4))
sp_b = self._SparseTensor_2x3x4()
sp_c = self._SparseTensor_NoNonZeros((2, 5, 4))
output_nnz = sp_b.indices.get_shape()[0]
with self.session() as sess:
concat_dim = 1
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b, sp_c])
self.assertEqual(sp_concat.indices.get_shape(), [output_nnz, 3])
self.assertEqual(sp_concat.values.get_shape(), [output_nnz])
self.assertEqual(sp_concat.dense_shape.get_shape(), [3])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(concat_out.indices,
sp_b.indices + [0, sp_a.dense_shape[1], 0])
self.assertAllEqual(concat_out.values, sp_b.values)
self.assertAllEqual(concat_out.dense_shape, [2, 15, 4])
def testConcatNonNumeric(self):
with self.session(use_gpu=False) as sess:
# concat(A, B):
# [ a ]
# [b e ]
# [c d f g h]
sp_a = self._SparseTensor_String3x3()
sp_b = self._SparseTensor_String3x5()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim, [sp_a, sp_b])
self.assertEqual(sp_concat.indices.get_shape(), [8, 2])
self.assertEqual(sp_concat.values.get_shape(), [8])
self.assertEqual(sp_concat.dense_shape.get_shape(), [2])
concat_out = self.evaluate(sp_concat)
self.assertAllEqual(
concat_out.indices,
[[0, 2], [1, 0], [1, 4], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7]])
self.assertAllEqual(concat_out.values,
[b"a", b"b", b"e", b"c", b"d", b"f", b"g", b"h"])
self.assertAllEqual(concat_out.dense_shape, [3, 8])
@test_util.run_deprecated_v1
def testMismatchedRank(self):
with self.session():
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches can be caught at shape-inference time
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(concat_dim, [sp_a, sp_e])
@test_util.run_deprecated_v1
def testMismatchedRankExpandNonconcatDim(self):
with self.session():
sp_a = self._SparseTensor_3x3()
sp_e = self._SparseTensor_2x3x4()
# Rank mismatches should be caught at shape-inference time, even for
# expand_nonconcat_dim=True.
for concat_dim in (-1, 1):
with self.assertRaises(ValueError):
sparse_ops.sparse_concat(
concat_dim, [sp_a, sp_e], expand_nonconcat_dim=True)
@test_util.run_deprecated_v1
def testMismatchedShapes(self):
with self.session() as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim in (-1, 1):
sp_concat = sparse_ops.sparse_concat(concat_dim,
[sp_a, sp_b, sp_c, sp_d])
# Shape mismatches can only be caught when the op is run
with self.assertRaisesOpError("Input shapes must match"):
self.evaluate(sp_concat)
def testMismatchedShapesExpandNonconcatDim(self):
with self.session() as sess:
sp_a = self._SparseTensor_3x3()
sp_b = self._SparseTensor_3x5()
sp_c = self._SparseTensor_3x2()
sp_d = self._SparseTensor_2x3()
for concat_dim0 in (-2, 0):
for concat_dim1 in (-1, 1):
sp_concat_dim0 = sparse_ops.sparse_concat(
concat_dim0, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim1 = sparse_ops.sparse_concat(
concat_dim1, [sp_a, sp_b, sp_c, sp_d], expand_nonconcat_dim=True)
sp_concat_dim0_out = self.evaluate(sp_concat_dim0)
sp_concat_dim1_out = self.evaluate(sp_concat_dim1)
self.assertAllEqual(sp_concat_dim0_out.indices,
[[0, 2], [1, 0], [2, 0], [2, 2], [4, 1], [5, 0],
[5, 3], [5, 4], [7, 0], [8, 0], [9, 1], [10, 0],
[10, 2]])
self.assertAllEqual(sp_concat_dim0_out.values,
[1, 2, 3, 4, 1, 2, 1, 0, 1, 2, 1, 1, 2])
self.assertAllEqual(sp_concat_dim0_out.dense_shape, [11, 5])
self.assertAllEqual(sp_concat_dim1_out.indices,
[[0, 2], [0, 11], [1, 0], [1, 4], [1, 8], [1, 10],
[1, 12], [2, 0], [2, 2], [2, 3], [2, 6], [2, 7],
[2, 8]])
self.assertAllEqual(sp_concat_dim1_out.values,
[1, 1, 2, 1, 1, 1, 2, 3, 4, 2, 1, 0, 2])
self.assertAllEqual(sp_concat_dim1_out.dense_shape, [3, 13])
@test_util.run_deprecated_v1
def testShapeInferenceUnknownShapes(self):
with self.session():
sp_inputs = [
self._SparseTensor_UnknownShape(),
self._SparseTensor_UnknownShape(val_shape=[3]),
self._SparseTensor_UnknownShape(ind_shape=[1, 3]),
self._SparseTensor_UnknownShape(shape_shape=[3])
]
for concat_dim in (-2, 0):
sp_concat = sparse_ops.sparse_concat(concat_dim, sp_inputs)
self.assertEqual(sp_concat.indices.get_shape().as_list(), [None, 3])
self.assertEqual(sp_concat.values.get_shape().as_list(), [None])
self.assertEqual(sp_concat.dense_shape.get_shape(), [3])
def testConcatShape(self):
# Test case for GitHub 21964.
x = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 2])
y = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=[1, 2], dense_shape=[2, 2])
z = sparse_ops.sparse_concat(-1, [x, y])
self.assertEqual(z.get_shape().as_list(), [2, 4])
if __name__ == "__main__":
test.main()
|
SparseConcatTest
|
python
|
numpy__numpy
|
numpy/_core/tests/test_mem_overlap.py
|
{
"start": 18028,
"end": 29317
}
|
class ____:
"""
Test ufunc call memory overlap handling
"""
def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
count=5000):
shapes = [7, 13, 8, 21, 29, 32]
rng = np.random.RandomState(1234)
for ndim in range(1, 6):
x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = count // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
a_orig = a.copy()
b_orig = b.copy()
if get_out_axis_size is None:
assert_copy_equivalent(operation, [a], out=b)
if np.shares_memory(a, b):
overlapping += 1
else:
for axis in itertools.chain(range(ndim), [None]):
a[...] = a_orig
b[...] = b_orig
# Determine size for reduction axis (None if scalar)
outsize, scalarize = get_out_axis_size(a, b, axis)
if outsize == 'skip':
continue
# Slice b to get an output array of the correct size
sl = [slice(None)] * ndim
if axis is None:
if outsize is None:
sl = [slice(0, 1)] + [0] * (ndim - 1)
else:
sl = [slice(0, outsize)] + [0] * (ndim - 1)
elif outsize is None:
k = b.shape[axis] // 2
if ndim == 1:
sl[axis] = slice(k, k + 1)
else:
sl[axis] = k
else:
assert b.shape[axis] >= outsize
sl[axis] = slice(0, outsize)
b_out = b[tuple(sl)]
if scalarize:
b_out = b_out.reshape([])
if np.shares_memory(a, b_out):
overlapping += 1
# Check result
assert_copy_equivalent(operation, [a], out=b_out, axis=axis)
@pytest.mark.slow
def test_unary_ufunc_call_fuzz(self):
self.check_unary_fuzz(np.invert, None, np.int16)
@pytest.mark.slow
def test_unary_ufunc_call_complex_fuzz(self):
# Complex typically has a smaller alignment than itemsize
self.check_unary_fuzz(np.negative, None, np.complex128, count=500)
def test_binary_ufunc_accumulate_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # accumulate doesn't support this
else:
return a.shape[axis], False
self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduce_fuzz(self):
def get_out_axis_size(a, b, axis):
return None, (axis is None or a.ndim == 1)
self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_fuzz(self):
def get_out_axis_size(a, b, axis):
if axis is None:
if a.ndim == 1:
return a.size, False
else:
return 'skip', False # reduceat doesn't support this
else:
return a.shape[axis], False
def do_reduceat(a, out, axis):
if axis is None:
size = len(a)
step = size // len(out)
else:
size = a.shape[axis]
step = a.shape[axis] // out.shape[axis]
idx = np.arange(0, size, step)
return np.add.reduceat(a, idx, out=out, axis=axis)
self.check_unary_fuzz(do_reduceat, get_out_axis_size,
dtype=np.int16, count=500)
def test_binary_ufunc_reduceat_manual(self):
def check(ufunc, a, ind, out):
c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
c2 = ufunc.reduceat(a, ind, out=out)
assert_array_equal(c1, c2)
# Exactly same input/output arrays
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1].copy(), a)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.add, a, a[::-1], a)
@pytest.mark.slow
def test_unary_gufunc_fuzz(self):
shapes = [7, 13, 8, 21, 29, 32]
gufunc = _umath_tests.euclidean_pdist
rng = np.random.RandomState(1234)
for ndim in range(2, 6):
x = rng.rand(*shapes[:ndim])
it = iter_random_view_pairs(x, same_steps=False, equal_size=True)
min_count = 500 // (ndim + 1)**2
overlapping = 0
while overlapping < min_count:
a, b = next(it)
if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
continue
# Ensure the shapes are such that euclidean_pdist is happy
if b.shape[-1] > b.shape[-2]:
b = b[..., 0, :]
else:
b = b[..., :, 0]
n = a.shape[-2]
p = n * (n - 1) // 2
if p <= b.shape[-1] and p > 0:
b = b[..., :p]
else:
n = max(2, int(np.sqrt(b.shape[-1])) // 2)
p = n * (n - 1) // 2
a = a[..., :n, :]
b = b[..., :p]
# Call
if np.shares_memory(a, b):
overlapping += 1
with np.errstate(over='ignore', invalid='ignore'):
assert_copy_equivalent(gufunc, [a], out=b)
def test_ufunc_at_manual(self):
def check(ufunc, a, ind, b=None):
a0 = a.copy()
if b is None:
ufunc.at(a0, ind.copy())
c1 = a0.copy()
ufunc.at(a, ind)
c2 = a.copy()
else:
ufunc.at(a0, ind.copy(), b.copy())
c1 = a0.copy()
ufunc.at(a, ind, b)
c2 = a.copy()
assert_array_equal(c1, c2)
# Overlap with index
a = np.arange(10000, dtype=np.int16)
check(np.invert, a[::-1], a)
# Overlap with second data array
a = np.arange(100, dtype=np.int16)
ind = np.arange(0, 100, 2, dtype=np.int16)
check(np.add, a, ind, a[25:75])
def test_unary_ufunc_1d_manual(self):
# Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)
def check(a, b):
a_orig = a.copy()
b_orig = b.copy()
b0 = b.copy()
c1 = ufunc(a, out=b0)
c2 = ufunc(a, out=b)
assert_array_equal(c1, c2)
# Trigger "fancy ufunc loop" code path
mask = view_element_first_byte(b).view(np.bool)
a[...] = a_orig
b[...] = b_orig
c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()
a[...] = a_orig
b[...] = b_orig
c2 = ufunc(a, out=b, where=mask.copy()).copy()
# Also, mask overlapping with output
a[...] = a_orig
b[...] = b_orig
c3 = ufunc(a, out=b, where=mask).copy()
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
np.float64, np.complex64, np.complex128]
dtypes = [np.dtype(x) for x in dtypes]
for dtype in dtypes:
if np.issubdtype(dtype, np.integer):
ufunc = np.invert
else:
ufunc = np.reciprocal
n = 1000
k = 10
indices = [
np.index_exp[:n],
np.index_exp[k:k + n],
np.index_exp[n - 1::-1],
np.index_exp[k + n - 1:k - 1:-1],
np.index_exp[:2 * n:2],
np.index_exp[k:k + 2 * n:2],
np.index_exp[2 * n - 1::-2],
np.index_exp[k + 2 * n - 1:k - 1:-2],
]
for xi, yi in itertools.product(indices, indices):
v = np.arange(1, 1 + n * 2 + k, dtype=dtype)
x = v[xi]
y = v[yi]
with np.errstate(all='ignore'):
check(x, y)
# Scalar cases
check(x[:1], y)
check(x[-1:], y)
check(x[:1].reshape([]), y)
check(x[-1:].reshape([]), y)
def test_unary_ufunc_where_same(self):
# Check behavior at wheremask overlap
ufunc = np.invert
def check(a, out, mask):
c1 = ufunc(a, out=out.copy(), where=mask.copy())
c2 = ufunc(a, out=out, where=mask)
assert_array_equal(c1, c2)
# Check behavior with same input and output arrays
x = np.arange(100).astype(np.bool)
check(x, x, x)
check(x, x.copy(), x)
check(x, x, x.copy())
@pytest.mark.slow
def test_binary_ufunc_1d_manual(self):
ufunc = np.add
def check(a, b, c):
c0 = c.copy()
c1 = ufunc(a, b, out=c0)
c2 = ufunc(a, b, out=c)
assert_array_equal(c1, c2)
for dtype in [np.int8, np.int16, np.int32, np.int64,
np.float32, np.float64, np.complex64, np.complex128]:
# Check different data dependency orders
n = 1000
k = 10
indices = []
for p in [1, 2]:
indices.extend([
np.index_exp[:p * n:p],
np.index_exp[k:k + p * n:p],
np.index_exp[p * n - 1::-p],
np.index_exp[k + p * n - 1:k - 1:-p],
])
for x, y, z in itertools.product(indices, indices, indices):
v = np.arange(6 * n).astype(dtype)
x = v[x]
y = v[y]
z = v[z]
check(x, y, z)
# Scalar cases
check(x[:1], y, z)
check(x[-1:], y, z)
check(x[:1].reshape([]), y, z)
check(x[-1:].reshape([]), y, z)
check(x, y[:1], z)
check(x, y[-1:], z)
check(x, y[:1].reshape([]), z)
check(x, y[-1:].reshape([]), z)
def test_inplace_op_simple_manual(self):
rng = np.random.RandomState(1234)
x = rng.rand(200, 200) # bigger than bufsize
x += x.T
assert_array_equal(x - x.T, 0)
|
TestUFunc
|
python
|
coleifer__peewee
|
playhouse/psycopg3_ext.py
|
{
"start": 1952,
"end": 4067
}
|
class ____(IndexedFieldMixin, Field):
field_type = 'JSONB'
_json_datatype = 'jsonb'
__hash__ = Field.__hash__
def __init__(self, dumps=None, *args, **kwargs):
self.dumps = dumps or json.dumps
super(BinaryJSONField, self).__init__(*args, **kwargs)
def db_value(self, value):
if value is None:
return value
if not isinstance(value, Jsonb):
return Cast(self.dumps(value), self._json_datatype)
return value
def __getitem__(self, value):
return JsonLookup(self, [value])
def path(self, *keys):
return JsonPath(self, keys)
def concat(self, value):
if not isinstance(value, Node):
value = Jsonb(value)
return super(BinaryJSONField, self).concat(value)
def contains(self, other):
if isinstance(other, BinaryJSONField):
return Expression(self, JSONB_CONTAINS, other)
return Expression(cast_jsonb(self), JSONB_CONTAINS, Jsonb(other))
def contained_by(self, other):
return Expression(cast_jsonb(self), JSONB_CONTAINED_BY, Jsonb(other))
def contains_any(self, *items):
return Expression(
cast_jsonb(self),
JSONB_CONTAINS_ANY_KEY,
Value(list(items), unpack=False))
def contains_all(self, *items):
return Expression(
cast_jsonb(self),
JSONB_CONTAINS_ALL_KEYS,
Value(list(items), unpack=False))
def has_key(self, key):
return Expression(cast_jsonb(self), JSONB_CONTAINS_KEY, key)
def remove(self, *items):
return Expression(
cast_jsonb(self),
JSONB_REMOVE,
# Hack: psycopg3 parameterizes this as an array, e.g. '{k1,k2}',
# but that doesn't seem to be working, so we explicitly cast.
# Perhaps postgres is interpreting it as a string. Using the more
# explicit ARRAY['k1','k2'] also works just fine -- but we'll make
# the cast explicit to get it working.
Cast(Value(list(items), unpack=False), 'text[]'))
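A hedged usage sketch (Event is a hypothetical peewee Model):
class Event(Model):
    data = BinaryJSONField()
# Rows whose jsonb document contains the given fragment:
Event.select().where(Event.data.contains({'type': 'click'}))
# Strip keys server-side; remove() applies the explicit text[] cast noted above.
Event.update(data=Event.data.remove('k1', 'k2')).execute()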
|
BinaryJSONField
|
python
|
PyCQA__pyflakes
|
pyflakes/messages.py
|
{
"start": 3805,
"end": 3920
}
|
class ____(Message):
message = 'from __future__ imports must occur at the beginning of the file'
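For context, code that triggers this message (illustrative):
import os
from __future__ import annotations  # flagged: __future__ imports must come first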
|
LateFutureImport
|
python
|
pytorch__pytorch
|
test/export/test_verifier.py
|
{
"start": 519,
"end": 7969
}
|
class ____(TestCase):
def test_verifier_basic(self) -> None:
class Foo(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
f = Foo()
ep = export(f, (torch.randn(100), torch.randn(100)), strict=True)
verifier = Verifier()
verifier.check(ep)
def test_verifier_call_module(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x: Tensor) -> Tensor:
return self.linear(x)
gm = torch.fx.symbolic_trace(M())
verifier = Verifier()
with self.assertRaises(SpecViolationError):
verifier._check_graph_module(gm)
def test_verifier_no_functional(self) -> None:
class Foo(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
f = Foo()
ep = export(
f, (torch.randn(100), torch.randn(100)), strict=True
).run_decompositions({})
for node in ep.graph.nodes:
if node.target == torch.ops.aten.add.Tensor:
node.target = torch.ops.aten.add_.Tensor
verifier = Verifier()
with self.assertRaises(SpecViolationError):
verifier.check(ep)
@unittest.skipIf(IS_WINDOWS, "Windows not supported for this test")
def test_verifier_higher_order(self) -> None:
class Foo(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
def true_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def false_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x - y
return control_flow.cond(x.sum() > 2, true_fn, false_fn, [x, y])
f = Foo()
ep = export(f, (torch.randn(3, 3), torch.randn(3, 3)), strict=True)
verifier = Verifier()
verifier.check(ep)
@unittest.skipIf(IS_WINDOWS, "Windows not supported for this test")
def test_verifier_nested_invalid_module(self) -> None:
class Foo(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
def true_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def false_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x - y
return control_flow.cond(x.sum() > 2, true_fn, false_fn, [x, y])
f = Foo()
ep = export(
f, (torch.randn(3, 3), torch.randn(3, 3)), strict=True
).run_decompositions({})
for node in ep.graph_module.true_graph_0.graph.nodes:
if node.target == torch.ops.aten.add.Tensor:
node.target = torch.ops.aten.add_.Tensor
verifier = Verifier()
with self.assertRaises(SpecViolationError):
verifier.check(ep)
def test_ep_verifier_basic(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 10)
def forward(self, x: Tensor) -> Tensor:
return self.linear(x)
ep = export(M(), (torch.randn(10, 10),), strict=True)
ep.validate()
def test_ep_verifier_invalid_param(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.register_parameter(
name="a", param=torch.nn.Parameter(torch.randn(100))
)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y + self.a
ep = export(M(), (torch.randn(100), torch.randn(100)), strict=True)
# Parameter doesn't exist in the state dict
ep.graph_signature.input_specs[0] = InputSpec(
kind=InputKind.PARAMETER, arg=TensorArgument(name="p_a"), target="bad_param"
)
with self.assertRaisesRegex(SpecViolationError, "not in the state dict"):
ep.validate()
# Add non-torch.nn.Parameter parameter to the state dict
ep.state_dict["bad_param"] = torch.randn(100)
with self.assertRaisesRegex(
SpecViolationError, "not an instance of torch.nn.Parameter"
):
ep.validate()
def test_ep_verifier_invalid_buffer(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor(3.0)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y + self.a
ep = export(M(), (torch.randn(100), torch.randn(100)), strict=True)
# Buffer doesn't exist in the state dict
ep.graph_signature.input_specs[0] = InputSpec(
kind=InputKind.BUFFER,
arg=TensorArgument(name="c_a"),
target="bad_buffer",
persistent=True,
)
with self.assertRaisesRegex(SpecViolationError, "not in the state dict"):
ep.validate()
def test_ep_verifier_buffer_mutate(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.my_parameter = torch.nn.Parameter(torch.tensor(2.0))
self.my_buffer1 = torch.nn.Buffer(torch.tensor(3.0))
self.my_buffer2 = torch.nn.Buffer(torch.tensor(4.0))
def forward(self, x1, x2):
# Use the parameter, buffers, and both inputs in the forward method
output = (
x1 + self.my_parameter
) * self.my_buffer1 + x2 * self.my_buffer2
# Mutate one of the buffers (e.g., increment it by 1)
self.my_buffer2.add_(1.0)
return output
ep = export(M(), (torch.tensor(5.0), torch.tensor(6.0)), strict=True)
ep.validate()
def test_ep_verifier_invalid_output(self) -> None:
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.my_parameter = torch.nn.Parameter(torch.tensor(2.0))
self.my_buffer1 = torch.nn.Buffer(torch.tensor(3.0))
self.my_buffer2 = torch.nn.Buffer(torch.tensor(4.0))
def forward(self, x1, x2):
# Use the parameter, buffers, and both inputs in the forward method
output = (
x1 + self.my_parameter
) * self.my_buffer1 + x2 * self.my_buffer2
# Mutate one of the buffers (e.g., increment it by 1)
self.my_buffer2.add_(1.0)
return output
ep = export(M(), (torch.tensor(5.0), torch.tensor(6.0)), strict=True)
output_node = list(ep.graph.nodes)[-1]
output_node.args = (
(
output_node.args[0][0],
next(iter(ep.graph.nodes)),
),
)
with self.assertRaisesRegex(SpecViolationError, "Number of output nodes"):
ep.validate()
if __name__ == "__main__":
run_tests()
|
TestVerifier
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/metrics_test.py
|
{
"start": 66188,
"end": 82075
}
|
class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
self.evaluate([prec_op, rec_op])
initial_prec = self.evaluate(prec)
initial_rec = self.evaluate(rec)
for _ in range(10):
self.evaluate([prec_op, rec_op])
self.assertAllClose(initial_prec, prec)
self.assertAllClose(initial_rec, rec)
# TODO(nsilberman): fix tests (passing but incorrect).
@test_util.run_deprecated_v1
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session():
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertEqual(1, self.evaluate(prec))
self.assertEqual(1, self.evaluate(rec))
@test_util.run_deprecated_v1
def testSomeCorrect_multipleLabelDtypes(self):
with self.cached_session():
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0.5, self.evaluate(prec))
self.assertAlmostEqual(0.5, self.evaluate(rec))
@test_util.run_deprecated_v1
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session():
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0, self.evaluate(prec))
self.assertAlmostEqual(0, self.evaluate(rec))
@test_util.run_deprecated_v1
def testWeights1d(self):
with self.cached_session():
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(1.0, self.evaluate(prec_low), places=5)
self.assertAlmostEqual(0.0, self.evaluate(prec_high), places=5)
self.assertAlmostEqual(1.0, self.evaluate(rec_low), places=5)
self.assertAlmostEqual(0.0, self.evaluate(rec_high), places=5)
@test_util.run_deprecated_v1
def testWeights2d(self):
with self.cached_session():
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.precision_at_thresholds(
labels, predictions, thresholds, weights=weights)
rec, rec_op = metrics.recall_at_thresholds(
labels, predictions, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(1.0, self.evaluate(prec_low), places=5)
self.assertAlmostEqual(0.0, self.evaluate(prec_high), places=5)
self.assertAlmostEqual(1.0, self.evaluate(rec_low), places=5)
self.assertAlmostEqual(0.0, self.evaluate(rec_high), places=5)
@test_util.run_deprecated_v1
def testExtremeThresholds(self):
with self.cached_session():
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0.75, self.evaluate(prec_low))
self.assertAlmostEqual(0.0, self.evaluate(prec_high))
self.assertAlmostEqual(1.0, self.evaluate(rec_low))
self.assertAlmostEqual(0.0, self.evaluate(rec_high))
@test_util.run_deprecated_v1
def testZeroLabelsPredictions(self):
with self.cached_session():
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
self.evaluate([prec_op, rec_op])
self.assertAlmostEqual(0, self.evaluate(prec), 6)
self.assertAlmostEqual(0, self.evaluate(rec), 6)
@test_util.run_deprecated_v1
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
# Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
thresholds)
rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
thresholds)
self.evaluate(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
self.evaluate([prec_op, rec_op])
# Since this is only approximate, we can't expect a six-digit match,
# although with a higher number of samples/thresholds we should see the
# accuracy improve.
self.assertAlmostEqual(expected_prec, self.evaluate(prec), 2)
self.assertAlmostEqual(expected_rec, self.evaluate(rec), 2)
def _test_precision_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_precision_at_top_k(
predictions_idx,
labels,
expected,
k=None,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.precision_at_top_k(
predictions_idx=constant_op.constant(predictions_idx, dtypes_lib.int32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
test_case.assertTrue(math.isnan(update.eval()))
test_case.assertTrue(math.isnan(metric.eval()))
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_average_precision_at_k(predictions,
labels,
k,
expected,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.average_precision_at_k(
labels, predictions, k, weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertAlmostEqual(expected, update.eval())
test_case.assertAlmostEqual(expected, metric.eval())
|
PrecisionRecallThresholdsTest
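# A quick, self-contained sanity check (not part of the test above) of the
# expected-value computation used in testWithMultipleUpdates: precision is
# tp / (tp + fp) and recall is tp / (tp + fn), with a small epsilon guarding
# against division by zero when a class never occurs.
tp, fp, fn = 30, 10, 20
epsilon = 1e-7
precision = tp / (epsilon + tp + fp)  # ~0.75
recall = tp / (epsilon + tp + fn)     # ~0.60
assert abs(precision - 0.75) < 1e-6 and abs(recall - 0.6) < 1e-6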
|
python
|
huggingface__transformers
|
tests/models/align/test_modeling_align.py
|
{
"start": 3841,
"end": 7858
}
|
class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as ALIGN does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (AlignVisionModel,) if is_torch_available() else ()
test_resize_embeddings = False
has_attentions = False
def setUp(self):
self.model_tester = AlignVisionModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=AlignVisionConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["num_channels", "image_size"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="AlignVisionModel does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="AlignVisionModel does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="AlignVisionModel does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
num_blocks = sum(config.num_block_repeats) * 4
self.assertEqual(len(hidden_states), num_blocks)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_size // 2, self.model_tester.image_size // 2],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "kakaobrain/align-base"
model = AlignVisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
|
AlignVisionModelTest
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/_config.py
|
{
"start": 225860,
"end": 227312
}
|
class ____(TypedDict, total=False):
"""
:class:`altair.RadialGradient` ``TypedDict`` wrapper.
Parameters
----------
gradient
The type of gradient. Use ``"radial"`` for a radial gradient.
stops
An array of gradient stops defining the gradient color sequence.
id
r1
The radius length, in normalized [0, 1] coordinates, of the inner circle for the
gradient.
**Default value:** ``0``
r2
The radius length, in normalized [0, 1] coordinates, of the outer circle for the
gradient.
**Default value:** ``0.5``
x1
The x-coordinate, in normalized [0, 1] coordinates, for the center of the inner
circle for the gradient.
**Default value:** ``0.5``
x2
The x-coordinate, in normalized [0, 1] coordinates, for the center of the outer
circle for the gradient.
**Default value:** ``0.5``
y1
The y-coordinate, in normalized [0, 1] coordinates, for the center of the inner
circle for the gradient.
**Default value:** ``0.5``
y2
The y-coordinate, in normalized [0, 1] coordinates, for the center of the outer
circle for the gradient.
**Default value:** ``0.5``
"""
gradient: Literal["radial"]
stops: Sequence[GradientStopKwds]
id: str
r1: float
r2: float
x1: float
x2: float
y1: float
y2: float
|
RadialGradientKwds
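# A minimal sketch (hypothetical values, not from the Altair docs) of a dict
# matching the RadialGradientKwds shape above: an inner-to-outer fade between
# two color stops, using the documented defaults for the radii.
radial_gradient = {
    "gradient": "radial",
    "stops": [
        {"offset": 0.0, "color": "white"},      # inner circle
        {"offset": 1.0, "color": "steelblue"},  # outer circle
    ],
    "r1": 0.0,  # inner radius, default 0
    "r2": 0.5,  # outer radius, default 0.5
}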
|
python
|
getsentry__sentry
|
src/sentry/integrations/msteams/card_builder/block.py
|
{
"start": 2049,
"end": 2136
}
|
class ____(_TextBlockNotRequired):
type: Literal["TextBlock"]
text: str
|
TextBlock
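# A minimal sketch of the payload this TypedDict describes: the smallest valid
# Adaptive Card TextBlock is just the type tag plus the text to render.
block = {"type": "TextBlock", "text": "Issue assigned to you"}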
|
python
|
doocs__leetcode
|
solution/3100-3199/3125.Maximum Number That Makes Result of Bitwise AND Zero/Solution.py
|
{
"start": 0,
"end": 103
}
|
class ____:
def maxNumber(self, n: int) -> int:
return (1 << (n.bit_length() - 1)) - 1
|
Solution
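# Quick check of the bit trick, assuming the formula as given: for n = 5 (0b101),
# n.bit_length() is 3, so the result is (1 << 2) - 1 = 3 (0b011) -- all ones
# strictly below n's highest set bit, and 3 & 4 == 0.
n = 5
assert (1 << (n.bit_length() - 1)) - 1 == 3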
|
python
|
encode__httpx
|
tests/client/test_auth.py
|
{
"start": 2474,
"end": 3348
}
|
class ____(httpx.Auth):
"""
A mock authentication scheme that requires clients to send
the request a fixed number of times, and then send a final request containing
an aggregation of nonces that the server sent in 'WWW-Authenticate' headers
of intermediate responses.
"""
requires_request_body = True
def __init__(self, repeat: int) -> None:
self.repeat = repeat
def auth_flow(
self, request: httpx.Request
) -> typing.Generator[httpx.Request, httpx.Response, None]:
nonces = []
for index in range(self.repeat):
request.headers["Authorization"] = f"Repeat {index}"
response = yield request
nonces.append(response.headers["www-authenticate"])
key = ".".join(nonces)
request.headers["Authorization"] = f"Repeat {key}"
yield request
|
RepeatAuth
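# A hedged usage sketch (assumes the RepeatAuth class above is in scope): custom
# httpx.Auth subclasses are passed via the `auth` argument, and httpx drives the
# auth_flow generator, re-sending the request once per yielded value.
import httpx

client = httpx.Client(auth=RepeatAuth(repeat=2))
# response = client.get("https://example.invalid/")  # needs a server that
#                                                    # issues WWW-Authenticate nonces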
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/team_issue_breakdown.py
|
{
"start": 917,
"end": 4759
}
|
class ____(TeamEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, team: Team) -> Response:
"""
Returns a dict of team projects, and a time-series dict of issue stat breakdowns for each.
If a list of statuses is passed then we return the count of each status and the totals.
Otherwise we return the count of reviewed issues and the total count of issues.
"""
if not features.has("organizations:team-insights", team.organization, actor=request.user):
return Response({"detail": "You do not have the insights feature enabled"}, status=400)
start, end = get_date_range_from_params(request.GET)
end = end.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
start = start.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)
environments = [e.id for e in get_environments(request, team.organization)]
if "statuses" in request.GET:
statuses = [
STRING_TO_STATUS_LOOKUP[status] for status in request.GET.getlist("statuses")
]
new_format = True
else:
statuses = [GroupHistoryStatus.UNRESOLVED] + ACTIONED_STATUSES
new_format = False
new_issues = []
base_day_format = {"total": 0}
if new_format:
for status in statuses:
base_day_format[STATUS_TO_STRING_LOOKUP[status]] = 0
else:
base_day_format["reviewed"] = 0
if GroupHistoryStatus.NEW in statuses:
group_environment_filter = (
Q(groupenvironment__environment_id=environments[0]) if environments else Q()
)
statuses.remove(GroupHistoryStatus.NEW)
new_issues = list(
Group.objects.filter_to_team(team)
.filter(group_environment_filter, first_seen__gte=start, first_seen__lte=end)
.annotate(bucket=TruncDay("first_seen"))
.order_by("bucket")
.values("project", "bucket")
.annotate(
count=Count("id"),
status=Value(GroupHistoryStatus.NEW, output_field=IntegerField()),
)
)
group_history_environment_filter = (
Q(group__groupenvironment__environment_id=environments[0]) if environments else Q()
)
bucketed_issues = (
GroupHistory.objects.filter_to_team(team)
.filter(
group_history_environment_filter,
status__in=statuses,
date_added__gte=start,
date_added__lte=end,
)
.annotate(bucket=TruncDay("date_added"))
.order_by("bucket")
.values("project", "bucket", "status")
.annotate(count=Count("id"))
)
current_day, date_series_dict = start, {}
while current_day < end:
date_series_dict[current_day.isoformat()] = copy.deepcopy(base_day_format)
current_day += timedelta(days=1)
project_list = Project.objects.get_for_team_ids(team_ids=[team.id])
agg_project_counts = {
project.id: copy.deepcopy(date_series_dict) for project in project_list
}
for r in chain(bucketed_issues, new_issues):
bucket = agg_project_counts[r["project"]][r["bucket"].isoformat()]
bucket["total"] += r["count"]
if not new_format and r["status"] != GroupHistoryStatus.UNRESOLVED:
bucket["reviewed"] += r["count"]
if new_format:
bucket[STATUS_TO_STRING_LOOKUP[r["status"]]] += r["count"]
return Response(agg_project_counts)
|
TeamIssueBreakdownEndpoint
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial002_py310.py
|
{
"start": 300,
"end": 3335
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(
default=None, foreign_key="team.id", ondelete="SET NULL"
)
team: Team | None = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
def delete_team():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
session.delete(team)
session.commit()
print("Deleted team:", team)
def select_deleted_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Black Lion")
result = session.exec(statement)
hero = result.first()
print("Black Lion has no team:", hero)
statement = select(Hero).where(Hero.name == "Princess Sure-E")
result = session.exec(statement)
hero = result.first()
print("Princess Sure-E has no team:", hero)
def main():
create_db_and_tables()
create_heroes()
delete_team()
select_deleted_heroes()
if __name__ == "__main__":
main()
|
Hero
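# A self-contained sketch of the ON DELETE SET NULL semantics behind
# ondelete="SET NULL" above, using plain sqlite3 rather than SQLModel:
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("PRAGMA foreign_keys = ON")  # SQLite enforces FKs only when enabled
con.execute("CREATE TABLE team (id INTEGER PRIMARY KEY)")
con.execute(
    "CREATE TABLE hero (id INTEGER PRIMARY KEY,"
    " team_id INTEGER REFERENCES team(id) ON DELETE SET NULL)"
)
con.execute("INSERT INTO team (id) VALUES (1)")
con.execute("INSERT INTO hero (id, team_id) VALUES (1, 1)")
con.execute("DELETE FROM team WHERE id = 1")
# The hero row survives with its team_id cleared, mirroring select_deleted_heroes()
assert con.execute("SELECT team_id FROM hero").fetchone() == (None,)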
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_redshift_data.py
|
{
"start": 2083,
"end": 18560
}
|
class ____:
def test_init(self):
op = RedshiftDataOperator(
task_id="fake_task_id",
database="fake-db",
sql="SELECT 1",
aws_conn_id="fake-conn-id",
region_name="eu-central-1",
verify="/spam/egg.pem",
botocore_config={"read_timeout": 42},
)
assert op.hook.client_type == "redshift-data"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "eu-central-1"
assert op.hook._verify == "/spam/egg.pem"
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
op = RedshiftDataOperator(task_id="fake_task_id", database="fake-db", sql="SELECT 1")
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_execute(self, mock_conn, mock_exec_query):
cluster_identifier = "cluster_identifier"
workgroup_name = None
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
parameters = [{"name": "id", "value": "1"}]
poll_interval = 5
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID}
mock_conn.describe_statement.return_value = {"Status": "FINISHED"}
mock_exec_query.return_value = QueryExecutionOutput(statement_id=STATEMENT_ID, session_id=None)
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
wait_for_completion=True,
poll_interval=poll_interval,
)
# Mock the TaskInstance, call the execute method
mock_ti = mock.MagicMock(name="MockedTaskInstance")
actual_result = operator.execute({"ti": mock_ti})
mock_exec_query.assert_called_once_with(
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
workgroup_name=workgroup_name,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
with_event=False,
wait_for_completion=True, # Matches above
poll_interval=poll_interval,
session_id=None,
session_keep_alive_seconds=None,
)
# Check that the result returned is a list of the statement IDs
assert actual_result == [STATEMENT_ID]
mock_ti.xcom_push.assert_not_called()
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_execute_with_workgroup_name(self, mock_conn, mock_exec_query):
cluster_identifier = None
workgroup_name = "workgroup_name"
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
parameters = [{"name": "id", "value": "1"}]
poll_interval = 5
# Like before, return a statement ID and a status
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID}
mock_conn.describe_statement.return_value = {"Status": "FINISHED"}
mock_exec_query.return_value = QueryExecutionOutput(statement_id=STATEMENT_ID, session_id=None)
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
workgroup_name=workgroup_name,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
wait_for_completion=True,
poll_interval=poll_interval,
)
# Mock the TaskInstance, call the execute method
mock_ti = mock.MagicMock(name="MockedTaskInstance")
actual_result = operator.execute({"ti": mock_ti})
# Assertions
assert actual_result == [STATEMENT_ID]
mock_exec_query.assert_called_once_with(
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
workgroup_name=workgroup_name, # Called with workgroup_name
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
with_event=False,
wait_for_completion=True,
poll_interval=poll_interval,
session_id=None,
session_keep_alive_seconds=None,
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_execute_new_session(self, mock_conn, mock_exec_query):
cluster_identifier = "cluster_identifier"
workgroup_name = None
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
parameters = [{"name": "id", "value": "1"}]
poll_interval = 5
wait_for_completion = True
# Like before, return a statement ID and a status
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID}
mock_conn.describe_statement.return_value = {"Status": "FINISHED"}
mock_exec_query.return_value = QueryExecutionOutput(statement_id=STATEMENT_ID, session_id=SESSION_ID)
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
wait_for_completion=True,
poll_interval=poll_interval,
session_keep_alive_seconds=123,
)
# Mock the TaskInstance and call the execute method
mock_ti = mock.MagicMock(name="MockedTaskInstance")
operator.execute({"ti": mock_ti})
mock_exec_query.assert_called_once_with(
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
workgroup_name=workgroup_name,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
with_event=False,
wait_for_completion=wait_for_completion,
poll_interval=poll_interval,
session_id=None,
session_keep_alive_seconds=123,
)
assert mock_ti.xcom_push.call_args.kwargs["key"] == "session_id"
assert mock_ti.xcom_push.call_args.kwargs["value"] == SESSION_ID
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_on_kill_without_query(self, mock_conn):
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID}
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
wait_for_completion=False,
)
operator.on_kill()
mock_conn.cancel_statement.assert_not_called()
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_on_kill_with_query(self, mock_conn):
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID, "SessionId": SESSION_ID}
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
cluster_identifier="cluster_identifier",
sql=SQL,
database=DATABASE,
wait_for_completion=False,
)
mock_ti = mock.MagicMock(name="MockedTaskInstance")
operator.execute({"ti": mock_ti})
operator.on_kill()
mock_conn.cancel_statement.assert_called_once_with(
Id=STATEMENT_ID,
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_return_sql_result(self, mock_conn):
expected_result = [{"Result": True}]
cluster_identifier = "cluster_identifier"
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
# Mock the conn object
mock_conn.execute_statement.return_value = {"Id": STATEMENT_ID, "SessionId": SESSION_ID}
mock_conn.describe_statement.return_value = {"Status": "FINISHED"}
mock_conn.get_statement_result.return_value = {"Result": True}
operator = RedshiftDataOperator(
task_id=TASK_ID,
cluster_identifier=cluster_identifier,
database=DATABASE,
db_user=db_user,
sql=SQL,
statement_name=statement_name,
secret_arn=secret_arn,
aws_conn_id=CONN_ID,
return_sql_result=True,
)
# Mock the TaskInstance, run the execute method
mock_ti = mock.MagicMock(name="MockedTaskInstance")
actual_result = operator.execute({"ti": mock_ti})
assert actual_result == expected_result
mock_conn.execute_statement.assert_called_once_with(
Database=DATABASE,
Sql=SQL,
ClusterIdentifier=cluster_identifier,
DbUser=db_user,
SecretArn=secret_arn,
StatementName=statement_name,
WithEvent=False,
)
mock_conn.get_statement_result.assert_called_once_with(Id=STATEMENT_ID)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
@mock.patch("airflow.providers.amazon.aws.operators.redshift_data.RedshiftDataOperator.defer")
@mock.patch(
"airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.check_query_is_finished",
return_value=True,
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
def test_execute_finished_before_defer(
self, mock_exec_query, check_query_is_finished, mock_defer, mock_conn
):
cluster_identifier = "cluster_identifier"
workgroup_name = None
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
parameters = [{"name": "id", "value": "1"}]
poll_interval = 5
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
wait_for_completion=False,
poll_interval=poll_interval,
deferrable=True,
)
mock_ti = mock.MagicMock(name="MockedTaskInstance")
operator.execute({"ti": mock_ti})
assert not mock_defer.called
mock_exec_query.assert_called_once_with(
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
workgroup_name=workgroup_name,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
with_event=False,
wait_for_completion=False,
poll_interval=poll_interval,
session_id=None,
session_keep_alive_seconds=None,
)
@mock.patch(
"airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.check_query_is_finished",
return_value=False,
)
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
def test_execute_defer(self, mock_exec_query, check_query_is_finished, deferrable_operator):
mock_ti = mock.MagicMock(name="MockedTaskInstance")
with pytest.raises(TaskDeferred) as exc:
deferrable_operator.execute({"ti": mock_ti})
assert isinstance(exc.value.trigger, RedshiftDataTrigger)
def test_execute_complete_failure(self, deferrable_operator):
"""Tests that an AirflowException is raised in case of error event"""
with pytest.raises(AirflowException):
deferrable_operator.execute_complete(
context=None, event={"status": "error", "message": "test failure message"}
)
def test_execute_complete_exception(self, deferrable_operator):
"""Tests that an AirflowException is raised in case of empty event"""
with pytest.raises(AirflowException) as exc:
deferrable_operator.execute_complete(context=None, event=None)
assert exc.value.args[0] == "Trigger error: event is None"
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
def test_execute_complete(self, mock_conn, deferrable_operator):
"""Asserts that logging occurs as expected"""
deferrable_operator.statement_id = "uuid"
with mock.patch.object(deferrable_operator.log, "info") as mock_log_info:
assert deferrable_operator.execute_complete(
context=None,
event={"status": "success", "message": "Job completed", "statement_id": "uuid"},
) == ["uuid"]
mock_log_info.assert_called_with("%s completed successfully.", TASK_ID)
@mock.patch("airflow.providers.amazon.aws.operators.redshift_data.RedshiftDataOperator.defer")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.check_query_is_finished")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.conn")
@mock.patch("airflow.providers.amazon.aws.hooks.redshift_data.RedshiftDataHook.execute_query")
def test_no_wait_for_completion(
self, mock_exec_query, mock_conn, mock_check_query_is_finished, mock_defer
):
"""Tests that the operator does not check for completion nor defers when wait_for_completion is False,
no matter the value of deferrable"""
cluster_identifier = "cluster_identifier"
db_user = "db_user"
secret_arn = "secret_arn"
statement_name = "statement_name"
parameters = [{"name": "id", "value": "1"}]
poll_interval = 5
wait_for_completion = False
# Mock the describe_statement call
mock_conn.describe_statement.return_value = {"Status": "FINISHED"}
mock_exec_query.return_value = QueryExecutionOutput(statement_id=STATEMENT_ID, session_id=SESSION_ID)
for deferrable in [True, False]:
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
sql=SQL,
database=DATABASE,
cluster_identifier=cluster_identifier,
db_user=db_user,
secret_arn=secret_arn,
statement_name=statement_name,
parameters=parameters,
wait_for_completion=wait_for_completion,
poll_interval=poll_interval,
deferrable=deferrable,
)
# Mock the TaskInstance, call the execute method
mock_ti = mock.MagicMock(name="MockedTaskInstance")
actual_results = operator.execute({"ti": mock_ti})
assert not mock_check_query_is_finished.called
assert not mock_defer.called
assert actual_results == [STATEMENT_ID]
def test_template_fields(self):
operator = RedshiftDataOperator(
aws_conn_id=CONN_ID,
task_id=TASK_ID,
cluster_identifier="cluster_identifier",
sql=SQL,
database=DATABASE,
wait_for_completion=False,
)
validate_template_fields(operator)
|
TestRedshiftDataOperator
|
python
|
getsentry__sentry
|
tests/sentry/incidents/serializers/test_workflow_engine_data_condition.py
|
{
"start": 707,
"end": 8047
}
|
class ____(TestWorkflowEngineSerializer):
def setUp(self) -> None:
super().setUp()
self.add_warning_trigger()
def create_rule_triggers_and_actions(
self,
) -> tuple[
AlertRule,
AlertRuleTrigger,
AlertRuleTrigger,
AlertRuleTriggerAction,
AlertRuleTriggerAction,
]:
alert_rule = self.create_alert_rule()
critical_trigger = self.create_alert_rule_trigger(
alert_rule=alert_rule, alert_threshold=500, label="critical"
)
critical_action = self.create_alert_rule_trigger_action(alert_rule_trigger=critical_trigger)
warning_trigger = self.create_alert_rule_trigger(
alert_rule=alert_rule, alert_threshold=200, label="warning"
)
warning_action = self.create_alert_rule_trigger_action(alert_rule_trigger=warning_trigger)
return (
alert_rule,
critical_trigger,
warning_trigger,
critical_action,
warning_action,
)
def test_simple(self) -> None:
serialized_data_condition = serialize(
self.critical_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
assert serialized_data_condition == self.expected_triggers[0]
def test_warning_trigger(self) -> None:
serialized_data_condition = serialize(
self.warning_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
assert serialized_data_condition == self.expected_triggers[1]
def test_multiple_actions(self) -> None:
self.critical_trigger_action_2 = self.create_alert_rule_trigger_action(
alert_rule_trigger=self.critical_trigger
)
self.critical_action_2, _, _ = migrate_metric_action(self.critical_trigger_action_2)
serialized_data_condition = serialize(
self.critical_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
expected_actions = self.expected_critical_action.copy()
actions_2 = {
"id": str(self.critical_trigger_action_2.id),
"alertRuleTriggerId": str(self.critical_trigger.id),
"type": "email",
"targetType": "user",
"targetIdentifier": str(self.user.id),
"inputChannelId": None,
"integrationId": None,
"sentryAppId": None,
"dateCreated": self.critical_trigger_action.date_added,
"desc": f"Send a notification to {self.user.email}",
"priority": self.critical_action.data.get("priority"),
}
expected_actions.append(actions_2)
expected_trigger = self.expected_triggers[0].copy()
expected_trigger["actions"] = expected_actions
assert serialized_data_condition == expected_trigger
def test_comparison_delta(self) -> None:
comparison_delta_rule = self.create_alert_rule(comparison_delta=60)
comparison_delta_trigger = self.create_alert_rule_trigger(
alert_rule=comparison_delta_rule, label="critical"
)
comparison_delta_trigger_action = self.create_alert_rule_trigger_action(
alert_rule_trigger=comparison_delta_trigger
)
_, _, _, detector, _, _, _, _ = migrate_alert_rule(comparison_delta_rule)
comparison_detector_trigger, _, _ = migrate_metric_data_conditions(comparison_delta_trigger)
migrate_resolve_threshold_data_condition(comparison_delta_rule)
action, _, _ = migrate_metric_action(comparison_delta_trigger_action)
serialized_data_condition = serialize(
comparison_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
expected_actions = self.expected_critical_action.copy()
expected_actions[0]["id"] = str(comparison_delta_trigger_action.id)
expected_actions[0]["alertRuleTriggerId"] = str(comparison_delta_trigger.id)
expected_trigger = self.expected_triggers[0].copy()
expected_trigger["actions"] = expected_actions
expected_trigger["alertThreshold"] = translate_data_condition_type(
detector.config.get("comparison_delta"),
comparison_detector_trigger.type,
comparison_detector_trigger.comparison,
)
expected_trigger["resolveThreshold"] = expected_trigger["alertThreshold"]
expected_trigger["id"] = str(comparison_delta_trigger.id)
expected_trigger["alertRuleId"] = str(comparison_delta_rule.id)
assert serialized_data_condition == expected_trigger
def test_anomaly_detection(self) -> None:
dynamic_rule = self.create_dynamic_alert()
critical_trigger = self.create_alert_rule_trigger(
alert_rule=dynamic_rule, label="critical", alert_threshold=0
)
trigger_action = self.create_alert_rule_trigger_action(alert_rule_trigger=critical_trigger)
_, _, _, detector, _, _, _, _ = migrate_alert_rule(dynamic_rule)
detector_trigger, _, _ = migrate_metric_data_conditions(critical_trigger)
action, _, _ = migrate_metric_action(trigger_action)
serialized_data_condition = serialize(
detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
assert (
serialized_data_condition["thresholdType"]
== AlertRuleThresholdType.ABOVE_AND_BELOW.value
)
assert serialized_data_condition["alertThreshold"] == 0
assert serialized_data_condition["resolveThreshold"] is None
def test_multiple_rules(self) -> None:
# create another comprehensive alert rule in the DB
alert_rule, critical_trigger, warning_trigger, critical_action, warning_action = (
self.create_rule_triggers_and_actions()
)
migrate_alert_rule(alert_rule)
critical_detector_trigger, _, _ = migrate_metric_data_conditions(critical_trigger)
warning_detector_trigger, _, _ = migrate_metric_data_conditions(warning_trigger)
migrate_resolve_threshold_data_condition(alert_rule)
migrate_metric_action(critical_action)
migrate_metric_action(warning_action)
serialized_critical_condition = serialize(
critical_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
assert serialized_critical_condition["id"] == str(critical_trigger.id)
assert serialized_critical_condition["alertRuleId"] == str(alert_rule.id)
assert len(serialized_critical_condition["actions"]) == 1
assert serialized_critical_condition["actions"][0]["id"] == str(critical_action.id)
serialized_warning_condition = serialize(
warning_detector_trigger,
self.user,
WorkflowEngineDataConditionSerializer(),
)
assert serialized_warning_condition["id"] == str(warning_trigger.id)
assert serialized_warning_condition["alertRuleId"] == str(alert_rule.id)
assert len(serialized_warning_condition["actions"]) == 1
assert serialized_warning_condition["actions"][0]["id"] == str(warning_action.id)
|
TestDataConditionSerializer
|
python
|
ray-project__ray
|
python/ray/_common/formatters.py
|
{
"start": 3662,
"end": 3939
}
|
class ____(AbstractFormatter):
def format(self, record: logging.LogRecord) -> str:
record_format_attrs = self.generate_record_format_attrs(
record, exclude_default_standard_attrs=False
)
return json.dumps(record_format_attrs)
|
JSONFormatter
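# A stdlib-only sketch of the same idea (Ray's AbstractFormatter and its
# generate_record_format_attrs are not reproduced here): subclass
# logging.Formatter and serialize selected record attributes as JSON.
import json
import logging

class MinimalJSONFormatter(logging.Formatter):
    def format(self, record: logging.LogRecord) -> str:
        return json.dumps(
            {"levelname": record.levelname, "message": record.getMessage()}
        )

handler = logging.StreamHandler()
handler.setFormatter(MinimalJSONFormatter())
logging.getLogger("demo").addHandler(handler)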
|
python
|
getsentry__sentry
|
src/sentry/issue_detection/detectors/consecutive_http_detector.py
|
{
"start": 923,
"end": 7616
}
|
class ____(PerformanceDetector):
type = DetectorType.CONSECUTIVE_HTTP_OP
settings_key = DetectorType.CONSECUTIVE_HTTP_OP
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
super().__init__(settings, event)
self.consecutive_http_spans: list[Span] = []
self.lcp = None
self.gen_ai_chat_spans: list[str] = [
span.get("span_id")
for span in event.get("spans", [])
if span.get("op") == "gen_ai.chat"
]
lcp_value = get_path(self.event(), "measurements", "lcp", "value")
lcp_unit = get_path(self.event(), "measurements", "lcp", "unit")
if lcp_value and (lcp_unit is None or lcp_unit == "millisecond"):
self.lcp = lcp_value
@classmethod
def is_event_eligible(cls, event: dict[str, Any], project: Project | None = None) -> bool:
return not is_event_from_browser_javascript_sdk(event)
def visit_span(self, span: Span) -> None:
span_id = span.get("span_id", None)
if not span_id or not self._is_eligible_http_span(span):
return
span_duration = get_span_duration(span).total_seconds() * 1000
if span_duration < self.settings.get("span_duration_threshold"):
return
if self._overlaps_last_span(span):
self._validate_and_store_performance_problem()
self._reset_variables()
self._add_problem_span(span)
def _add_problem_span(self, span: Span) -> None:
self.consecutive_http_spans.append(span)
def _validate_and_store_performance_problem(self) -> None:
exceeds_count_threshold = len(self.consecutive_http_spans) >= self.settings.get(
"consecutive_count_threshold"
)
if not exceeds_count_threshold:
return
exceeds_min_time_saved_duration = False
if self.consecutive_http_spans:
exceeds_min_time_saved_duration = self._calculate_time_saved() >= self.settings.get(
"min_time_saved"
)
if not exceeds_min_time_saved_duration:
return
subceeds_duration_between_spans_threshold = all(
get_duration_between_spans(
self.consecutive_http_spans[idx - 1], self.consecutive_http_spans[idx]
)
< self.settings.get("max_duration_between_spans")
for idx in range(1, len(self.consecutive_http_spans))
)
if not subceeds_duration_between_spans_threshold:
return
self._store_performance_problem()
def _calculate_time_saved(self) -> float:
total_time = get_total_span_duration(self.consecutive_http_spans)
max_span_duration = get_max_span_duration(self.consecutive_http_spans)
return total_time - max_span_duration
def _store_performance_problem(self) -> None:
# Skip creating a problem if the spans hit 3 or more distinct hosts
hosts = {self._extract_host_from_span(span) for span in self.consecutive_http_spans}
if len(hosts) >= 3:
self._reset_variables()
return
fingerprint = self._fingerprint()
offender_span_ids = [span["span_id"] for span in self.consecutive_http_spans]
desc: str = self.consecutive_http_spans[0].get("description", "")
self.stored_problems[fingerprint] = PerformanceProblem(
fingerprint,
"http",
desc=desc,
type=PerformanceConsecutiveHTTPQueriesGroupType,
cause_span_ids=[],
parent_span_ids=None,
offender_span_ids=offender_span_ids,
evidence_display=[
IssueEvidence(
name="Offending Spans",
value=get_notification_attachment_body(
"http",
desc,
),
# Has to be marked important to be displayed in the notifications
important=True,
)
],
evidence_data={
"parent_span_ids": [],
"cause_span_ids": [],
"offender_span_ids": offender_span_ids,
"op": "http",
"transaction_name": self._event.get("transaction", ""),
"repeating_spans": get_span_evidence_value(self.consecutive_http_spans[0]),
"repeating_spans_compact": get_span_evidence_value(
self.consecutive_http_spans[0], include_op=False
),
"num_repeating_spans": str(len(self.consecutive_http_spans)),
},
)
self._reset_variables()
def _overlaps_last_span(self, span: Span) -> bool:
if len(self.consecutive_http_spans) == 0:
return False
last_span = self.consecutive_http_spans[-1]
return does_overlap_previous_span(last_span, span)
def _reset_variables(self) -> None:
self.consecutive_http_spans = []
def _is_eligible_http_span(self, span: Span) -> bool:
span_id = span.get("span_id", None)
op: str = span.get("op", "") or ""
hash = span.get("hash", None)
description: str = span.get("description", "") or ""
if not span_id or not op or not hash or not description:
return False
if not op.startswith("http.client"):
return False
if span.get("parent_span_id") in self.gen_ai_chat_spans:
return False
if (
not description.strip().upper().startswith(("GET", "POST", "DELETE", "PUT", "PATCH"))
): # Just using all methods to see if anything interesting pops up
return False
if any([x in description for x in ["_next/static/", "_next/data/", "googleapis.com"]]):
return False
url = get_url_from_span(span)
if is_filtered_url(url):
return False
return True
def _extract_host_from_span(self, span: Span) -> str:
"""Extract the host from a span's URL."""
url = get_url_from_span(span)
return urlparse(url).netloc
def _fingerprint(self) -> str:
hashed_url_paths = fingerprint_http_spans(self.consecutive_http_spans)
return f"1-{PerformanceConsecutiveHTTPQueriesGroupType.type_id}-{hashed_url_paths}"
def on_complete(self) -> None:
self._validate_and_store_performance_problem()
def is_creation_allowed_for_organization(self, organization: Organization) -> bool:
return True
def is_creation_allowed_for_project(self, project: Project) -> bool:
return self.settings["detection_enabled"]
|
ConsecutiveHTTPSpanDetector
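# Worked example of _calculate_time_saved above: if three sequential HTTP spans
# take 100 ms, 200 ms and 300 ms, running them concurrently would take only as
# long as the slowest one, so the potential saving is 600 - 300 = 300 ms.
durations_ms = [100, 200, 300]
time_saved = sum(durations_ms) - max(durations_ms)
assert time_saved == 300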
|
python
|
modin-project__modin
|
modin/core/dataframe/algebra/default2pandas/rolling.py
|
{
"start": 2474,
"end": 4477
}
|
class ____(DefaultMethod):
"""Builder for default-to-pandas aggregation on an expanding window functions."""
OBJECT_TYPE = "Expanding"
@classmethod
def _build_expanding(cls, func, squeeze_self):
"""
Build function that creates an expanding window and executes `func` on it.
Parameters
----------
func : callable
Function to execute on an expanding window.
squeeze_self : bool
Whether or not to squeeze frame before executing the window function.
Returns
-------
callable
Function that takes a pandas DataFrame and applies `func` on an expanding window.
"""
def fn(df, rolling_args, *args, **kwargs):
"""Create rolling window for the passed frame and execute specified `func` on it."""
if squeeze_self:
df = df.squeeze(axis=1)
roller = df.expanding(*rolling_args)
if type(func) is property:
return func.fget(roller)
return func(roller, *args, **kwargs)
return fn
@classmethod
def register(cls, func, squeeze_self=False, **kwargs):
"""
Build function that falls back to pandas to apply `func` on an expanding window.
Parameters
----------
func : callable
Function to execute on an expanding window.
squeeze_self : bool, default: False
Whether or not to squeeze frame before executing the window function.
**kwargs : kwargs
Additional arguments that will be passed to function builder.
Returns
-------
callable
Function that takes a query compiler and defaults to pandas to apply aggregation
`func` on an expanding window.
"""
return super().register(
cls._build_expanding(func, squeeze_self=squeeze_self),
fn_name=func.__name__,
**kwargs
)
|
ExpandingDefault
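# For reference, the pandas behavior this builder defaults to: an expanding
# window grows from the first row, so each aggregate is cumulative over
# everything seen so far.
import pandas as pd

s = pd.Series([1, 2, 3, 4])
print(s.expanding().sum().tolist())  # [1.0, 3.0, 6.0, 10.0]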
|
python
|
google__pytype
|
pytype/ast/visitor_test.py
|
{
"start": 120,
"end": 365
}
|
class ____(visitor.BaseVisitor):
"""Tests visit order."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.funcs = []
def visit_FunctionDef(self, node):
self.funcs.append(node.name)
|
_VisitOrderVisitor
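# The same pattern with the standard library (pytype's visitor.BaseVisitor is
# not reproduced here): ast.NodeVisitor dispatches to visit_FunctionDef once
# per function definition it encounters.
import ast

class FuncCollector(ast.NodeVisitor):
    def __init__(self):
        self.funcs = []

    def visit_FunctionDef(self, node):
        self.funcs.append(node.name)
        self.generic_visit(node)  # keep walking into nested functions

collector = FuncCollector()
collector.visit(ast.parse("def a():\n    def b(): pass"))
assert collector.funcs == ["a", "b"]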
|
python
|
spyder-ide__spyder
|
spyder/plugins/ipythonconsole/widgets/debugging.py
|
{
"start": 6632,
"end": 27026
}
|
class ____(DebuggingHistoryWidget, SpyderConfigurationAccessor):
"""
Widget with the necessary attributes and methods to handle
communications between a console in debugging mode and
Spyder
"""
CONF_SECTION = 'ipython_console'
def __init__(self, *args, **kwargs):
# Communication state
self._pdb_recursion_level = 0 # Number of debugging loops we are in
self._pdb_input_ready = False # Can we send a command now
self._waiting_pdb_input = False # Are we waiting on the user
# Other state
self._pdb_prompt = None # prompt
self._pdb_prompt_input = False # whether pdb waits for input or comm
self._pdb_last_cmd = '' # last command sent to pdb
self._pdb_frame_loc = (None, None) # fname, lineno
self._pdb_take_focus = True # Focus to shell after command execution
# Command queue
self._pdb_input_queue = [] # List of (code, hidden, echo_stack_entry)
# Temporary flags
self._tmp_reading = False
# super init
super().__init__(*args, **kwargs)
# Adapted from qtconsole/frontend_widget.py
# This adds IPdb as a prompt that self._highlighter recognises
self._highlighter._ipy_prompt_re = re.compile(
r'^({})?('.format(re.escape(self.other_output_prefix)) +
r'[ \t]*\(*IPdb \[\d+\]\)*: |' +
r'[ \t]*In \[\d+\]: |[ \t]*\ \ \ \.\.\.+: )')
# Reset debug state when debugging is done
self.sig_prompt_ready.connect(self.reset_debug_state)
# --- Public API --------------------------------------------------
def shutdown(self):
"""
Close the save thread and database file.
"""
try:
# Make sure the database will not be called after closing
self.sig_prompt_ready.disconnect(self.reset_debug_state)
except (TypeError, RuntimeError):
# Already disconnected
pass
if self._pdb_history_file is not None:
try:
self._pdb_history_file.save_thread.stop()
# Now that it was called, no need to call it at exit
atexit.unregister(self._pdb_history_file.save_thread.stop)
except AttributeError:
pass
try:
self._pdb_history_file.db.close()
except AttributeError:
pass
# --- Comm API --------------------------------------------------
def set_debug_state(self, recursion_level):
"""Update the debug state."""
if recursion_level == self._pdb_recursion_level:
# Nothing to change
return
if recursion_level > self._pdb_recursion_level:
# Start debugging
if self._pdb_recursion_level > 0:
# Recursive debugging, save state
self._saved_pdb_history_input_number[
self._pdb_recursion_level] = self._pdb_history_input_number
self.end_history_session()
self.new_history_session()
elif recursion_level < self._pdb_recursion_level:
# Stop debugging
self.end_history_session()
if recursion_level > 0:
# Still debugging, restore state
self.new_history_session()
self._pdb_history_input_number = (
self._saved_pdb_history_input_number.pop(
recursion_level, 0))
# If debugging starts or stops, clear the input queue.
self._pdb_recursion_level = recursion_level
self._pdb_input_queue = []
self._pdb_frame_loc = (None, None)
def _pdb_cmd_prefix(self):
"""Return the command prefix"""
prefix = ''
if self.spyder_kernel_ready and self.is_pdb_using_exclamantion_mark():
prefix = '!'
return prefix
def pdb_execute_command(self, command):
"""
Execute a pdb command
"""
self._pdb_take_focus = False
self.pdb_execute(
self._pdb_cmd_prefix() + command, hidden=False,
echo_stack_entry=False, add_history=False)
def _handle_input_request(self, msg):
"""Process an input request."""
if not self.is_spyder_kernel and "ipdb>" in msg['content']['prompt']:
# Check if we can guess a path from the shell content:
self._flush_pending_stream()
cursor = self._get_end_cursor()
cursor.setPosition(self._prompt_pos, QTextCursor.KeepAnchor)
text = cursor.selection().toPlainText()
match = re.search(r"> (.*\.py)\((\d+)\)", text)
state = None
if match:
fname, lineno = match.groups()
state = {
'step': {
'fname': fname,
'lineno': int(lineno)
}
}
prompt = msg['content']['prompt']
password = msg['content']['password']
self.pdb_input(prompt, password, state, from_input=True)
return
return super()._handle_input_request(msg)
def pdb_execute(self, line, hidden=False, echo_stack_entry=True,
add_history=True):
"""
Send line to the pdb kernel if possible.
Parameters
----------
line: str
the line to execute
hidden: bool
If the line should be hidden
echo_stack_entry: bool
If not hidden, whether the stack entry should be printed
add_history: bool
If not hidden, whether the line should be added to history
"""
if not self.is_debugging():
return
if not line.strip():
# Must get the last genuine command
line = self._pdb_last_cmd
if hidden:
# Don't show stack entry if hidden
echo_stack_entry = False
else:
if not self.is_waiting_pdb_input():
# We can't execute this if we are not waiting for pdb input
self._pdb_input_queue.append(
(line, hidden, echo_stack_entry, add_history))
return
if line.strip():
self._pdb_last_cmd = line
# Print the text if it is programmatically added.
if line.strip() != self.input_buffer.strip():
self.input_buffer = line
self._append_plain_text('\n')
if add_history:
# Save history to browse it later
self.add_to_pdb_history(line)
# Set executing to true and save the input buffer
self._input_buffer_executing = self.input_buffer
self._executing = True
self._waiting_pdb_input = False
# Disable the console
self._tmp_reading = False
self._finalize_input_request()
hidden = True
# Emit executing
self.executing.emit(line)
self.sig_pdb_state_changed.emit(False)
if self._pdb_input_ready:
# Print the string to the console
self._pdb_input_ready = False
self.pdb_input_reply(line, echo_stack_entry)
return
self._pdb_input_queue.append(
(line, hidden, echo_stack_entry, add_history))
def reset_debug_state(self):
"""Reset debug state if the debugger crashed."""
self.set_debug_state(0)
# --- To Sort --------------------------------------------------
def stop_debugging(self):
"""Stop debugging."""
if self.spyder_kernel_ready and not self.is_waiting_pdb_input():
self.interrupt_kernel()
self.pdb_execute_command("exit")
def is_pdb_using_exclamantion_mark(self):
return self.get_conf('pdb_use_exclamation_mark', section='debugger')
def refresh_from_pdb(self, pdb_state):
"""
Refresh Variable Explorer and Editor from a Pdb session,
after running any pdb command.
See publish_pdb_state and notify_spyder in spyder_kernels
"""
pdb_step = pdb_state.pop('step', None)
if pdb_step and 'fname' in pdb_step:
fname = pdb_step['fname']
lineno = pdb_step['lineno']
last_pdb_loc = self._pdb_frame_loc
self._pdb_frame_loc = (fname, lineno)
# Only step if the location changed
if (fname, lineno) != last_pdb_loc:
self.sig_pdb_step.emit(fname, lineno)
if "do_where" in pdb_state:
fname, lineno = self._pdb_frame_loc
if fname:
self.sig_pdb_step.emit(fname, lineno)
pdb_stack = pdb_state.pop('stack', None)
if pdb_stack:
pdb_stack, pdb_index = pdb_stack
self.sig_pdb_stack.emit(pdb_stack, pdb_index)
request_pdb_input = pdb_state.pop('request_pdb_input', None)
if request_pdb_input:
self.pdb_execute(request_pdb_input)
self.update_state(pdb_state)
def show_pdb_output(self, text):
"""Show Pdb output."""
self._append_plain_text(self.output_sep, before_prompt=True)
prompt = self._current_out_prompt()
self._append_html(
'<span class="out-prompt">%s</span>' % prompt,
before_prompt=True
)
# If the repr is multiline, make sure we start on a new line,
# so that its lines are aligned.
if "\n" in text and not self.output_sep.endswith("\n"):
self._append_plain_text('\n', before_prompt=True)
self._append_plain_text(text + self.output_sep2, before_prompt=True)
self._append_plain_text('\n', before_prompt=True)
def get_pdb_last_step(self):
"""Get last pdb step retrieved from a Pdb session."""
return self._pdb_frame_loc
def is_debugging(self):
"""Check if we are debugging."""
return self._pdb_recursion_level > 0
def debugging_depth(self):
"""Debugging depth"""
return self._pdb_recursion_level
def is_waiting_pdb_input(self):
"""Check if we are waiting a pdb input."""
# If the comm is not open, self._pdb_recursion_level can not be set
return self.is_debugging() and self._waiting_pdb_input
# ---- Public API (overridden by us) ----------------------------
def reset(self, clear=False):
"""
Resets the widget to its initial state if the ``clear`` parameter
is True
"""
super().reset(clear)
# Make sure the prompt is printed
if clear and self.is_waiting_pdb_input():
prompt = self._pdb_prompt
try:
# This is necessary to avoid an error when the iopub channel is
# closed.
# See jupyter/qtconsole#574
if not self.kernel_client.iopub_channel.closed():
self.kernel_client.iopub_channel.flush()
except AttributeError:
self.kernel_client.iopub_channel.flush()
self._reading = False
self._readline(prompt=prompt, callback=self.pdb_execute)
# --- Private API --------------------------------------------------
def _current_prompt(self):
prompt = "IPdb [{}]".format(self._pdb_history_input_number + 1)
for i in range(self._pdb_recursion_level - 1):
# Add recursive debugger prompt
prompt = "({})".format(prompt)
return prompt + ": "
def _current_out_prompt(self):
"""Get current out prompt."""
prompt = "Out\u00A0\u00A0[{}]".format(self._pdb_history_input_number)
for i in range(self._pdb_recursion_level - 1):
# Add recursive debugger prompt
prompt = "({})".format(prompt)
return prompt + ": "
def _handle_kernel_info_reply(self, rep):
"""Handle kernel info replies."""
super()._handle_kernel_info_reply(rep)
pygments_lexer = rep['content']['language_info'].get(
'pygments_lexer', '')
try:
# add custom lexer
if pygments_lexer == 'ipython3':
lexer = SpyderIPy3Lexer()
else:
return
self._highlighter._lexer = lexer
except ClassNotFound:
pass
def _redefine_complete_for_dbg(self, client):
"""Redefine kernel client's complete method to work while debugging."""
original_complete = client.complete
def complete(code, cursor_pos=None):
if self.is_waiting_pdb_input():
shell_channel = client.shell_channel
client._shell_channel = client.control_channel
try:
return original_complete(code, cursor_pos)
finally:
client._shell_channel = shell_channel
else:
return original_complete(code, cursor_pos)
client.complete = complete
def _update_pdb_prompt(self, prompt):
"""Update the prompt that is recognised as a pdb prompt."""
if prompt == self._pdb_prompt:
# Nothing to do
return
self._pdb_prompt = prompt
# Update the continuation prompt to reflect the (possibly) new prompt length.
self._set_continuation_prompt(
self._make_continuation_prompt(prompt), html=True)
def _is_pdb_complete(self, source):
"""
Check if the pdb input is ready to be executed.
"""
if source and source[0] == '!':
source = source[1:]
tm = TransformerManager()
complete, indent = tm.check_complete(source)
if indent is not None:
indent = indent * ' '
return complete != 'incomplete', indent
def execute(self, source=None, hidden=False, interactive=False):
"""
Executes source or the input buffer, possibly prompting for more
input.
Do not use to run pdb commands (such as `continue`).
Use pdb_execute instead. This will add a '!' in front of the code.
"""
if self.is_waiting_pdb_input():
if source is None:
if hidden:
# Nothing to execute
return
else:
source = self.input_buffer
else:
if not self.is_pdb_using_exclamantion_mark():
source = '!' + source
if not hidden:
self.input_buffer = source
if interactive:
# Add a continuation prompt if not complete
complete, indent = self._is_pdb_complete(source)
if not complete:
self.do_execute(source, complete, indent)
return
if hidden:
self.pdb_execute(source, hidden)
else:
if self._reading_callback:
self._reading_callback()
return
return super().execute(source, hidden, interactive)
def pdb_input(self, prompt, password=None, state=None, from_input=False):
"""Get input for a command."""
self.set_debug_state(1 + prompt.count("("))
if state is not None and isinstance(state, dict):
self.refresh_from_pdb(state)
# Replace with numbered prompt
prompt = self._current_prompt()
self._update_pdb_prompt(prompt)
self._pdb_prompt_input = from_input
# The prompt should be printed unless:
# 1. The prompt is already printed (self._reading is True)
# 2. A hidden command is in the queue
print_prompt = (not self._reading
and (len(self._pdb_input_queue) == 0
or not self._pdb_input_queue[0][1]))
if print_prompt:
# Make sure that all output from the SUB channel has been processed
# before writing a new prompt.
try:
# This is necessary to avoid an error when the iopub channel is
# closed.
# See jupyter/qtconsole#574
if not self.kernel_client.iopub_channel.closed():
self.kernel_client.iopub_channel.flush()
except AttributeError:
self.kernel_client.iopub_channel.flush()
self._waiting_pdb_input = True
self._readline(prompt=prompt,
callback=self.pdb_execute,
password=password)
self._executing = False
self._highlighter.highlighting_on = True
# The previous code finished executing
self.executed.emit(self._pdb_prompt)
self.sig_pdb_prompt_ready.emit()
self.sig_pdb_state_changed.emit(True)
self._pdb_input_ready = True
start_line = self.get_conf(
'startup/pdb_run_lines', default='', section='debugger'
)
# Only run these lines when printing a new prompt
if start_line and print_prompt and self.is_waiting_pdb_input():
# Send a few commands
self.pdb_execute(start_line, hidden=True)
return
# While the widget thinks only one input is going on,
# other functions can be sending messages to the kernel.
# This must be properly processed to avoid dropping messages.
# If the kernel was not ready, the messages are queued.
if len(self._pdb_input_queue) > 0:
args = self._pdb_input_queue.pop(0)
self.pdb_execute(*args)
return
def pdb_input_reply(self, line, echo_stack_entry):
"""Send a pdb input to the kernel."""
if self._pdb_prompt_input:
# Send line to input
self.kernel_client.input(line)
return
self.call_kernel(interrupt=True).pdb_input_reply(
line, echo_stack_entry=echo_stack_entry)
# --- Private API (overridden by us) ----------------------------------------
def _show_prompt(self, prompt=None, html=False, newline=True,
separator=True):
"""
Writes a new prompt at the end of the buffer.
"""
if prompt == self._pdb_prompt:
html = True
prompt = '<span class="in-prompt">%s</span>' % prompt
super()._show_prompt(prompt, html, newline, separator)
def _event_filter_console_keypress(self, event):
"""Handle Key_Up/Key_Down while debugging."""
if self.is_waiting_pdb_input():
self._control.current_prompt_pos = self._prompt_pos
# Pretend this is a regular prompt
self._tmp_reading = self._reading
self._reading = False
try:
ret = super(DebuggingWidget,
self)._event_filter_console_keypress(event)
return ret
finally:
self._reading = self._tmp_reading
else:
return super(DebuggingWidget,
self)._event_filter_console_keypress(event)
def _register_is_complete_callback(self, source, callback):
"""Call the callback with the result of is_complete."""
# Add a continuation prompt if not complete
if self.is_waiting_pdb_input():
# As the work is done on this side, check synchronously.
complete, indent = self._is_pdb_complete(source)
callback(complete, indent)
else:
return super()._register_is_complete_callback(source, callback)
# ---- Qt methods ---------------------------------------------------------
def eventFilter(self, obj, event):
# When using PySide, it can happen that "event" is of type QWidgetItem
# (reason unknown). This causes an exception in eventFilter() in
# console_widget.py in the QtConsole package: Therein event.type() is
# accessed which fails due to an AttributeError. Catch this here and
# ignore the event.
if not isinstance(event, QEvent):
# Note for debugging: event.layout() or event.widget() SEGFAULTs
return True
return super().eventFilter(obj, event)
|
DebuggingWidget
|
python
|
mlflow__mlflow
|
examples/llama_index/workflow/workflow/workflow.py
|
{
"start": 771,
"end": 6387
}
|
class ____(Workflow):
VALID_RETRIEVERS = {"vector_search", "bm25", "web_search"}
def __init__(self, retrievers=None, **kwargs):
super().__init__(**kwargs)
self.llm = Settings.llm
self.retrievers = retrievers or []
if invalid_retrievers := set(self.retrievers) - self.VALID_RETRIEVERS:
raise ValueError(f"Invalid retrievers specified: {invalid_retrievers}")
self._use_vs_retriever = "vector_search" in self.retrievers
self._use_bm25_retriever = "bm25" in self.retrievers
self._use_web_search = "web_search" in self.retrievers
if self._use_vs_retriever:
qd_client = qdrant_client.QdrantClient(host=_QDRANT_HOST, port=_QDRANT_PORT)
vector_store = QdrantVectorStore(
client=qd_client, collection_name=_QDRANT_COLLECTION_NAME
)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
self.vs_retriever = index.as_retriever()
if self._use_bm25_retriever:
self.bm25_retriever = BM25Retriever.from_persist_dir(_BM25_PERSIST_DIR)
if self._use_web_search:
self.tavily_tool = TavilyToolSpec(api_key=os.environ.get("TAVILY_AI_API_KEY"))
@step
async def route_retrieval(
self, ctx: Context, ev: StartEvent
) -> VectorSearchRetrieveEvent | BM25RetrieveEvent | TransformQueryEvent | QueryEvent | None:
"""Route query to the retrieval steps based on the model config."""
query = ev.get("query")
if query is None:
return None
# Setting the query in the Context object to access it globally
await ctx.set("query", query)
        # If no retriever is specified, direct to the final query step with an empty context
if len(self.retrievers) == 0:
return QueryEvent(context="")
# Trigger the retrieval steps based on the model config
if self._use_vs_retriever:
ctx.send_event(VectorSearchRetrieveEvent(query=query))
if self._use_bm25_retriever:
ctx.send_event(BM25RetrieveEvent(query=query))
if self._use_web_search:
ctx.send_event(TransformQueryEvent(query=query))
@step
async def query_vector_store(self, ev: VectorSearchRetrieveEvent) -> RetrievalResultEvent:
"""Perform retrieval using the vector store."""
nodes = self.vs_retriever.retrieve(ev.query)
return RetrievalResultEvent(nodes=nodes, retriever="vector_search")
@step
async def query_bm25(self, ev: BM25RetrieveEvent) -> RetrievalResultEvent:
"""Perform retrieval using the BM25 retriever."""
nodes = self.bm25_retriever.retrieve(ev.query)
return RetrievalResultEvent(nodes=nodes, retriever="bm25")
@step
async def transform_query(self, ev: TransformQueryEvent) -> WebsearchEvent:
"""Transform the user query into a search query."""
prompt = TRANSFORM_QUERY_TEMPLATE.format(query=ev.query)
transformed_query = self.llm.complete(prompt).text
return WebsearchEvent(search_query=transformed_query)
@step
async def query_web_search(self, ev: WebsearchEvent) -> RetrievalResultEvent:
"""Perform web search with the transformed query string"""
search_results = self.tavily_tool.search(ev.search_query, max_results=5)
nodes = [NodeWithScore(node=document, score=None) for document in search_results]
return RetrievalResultEvent(nodes=nodes, retriever="web_search")
@step
async def gather_retrieval_results(
self, ctx: Context, ev: RetrievalResultEvent
) -> RerankEvent | QueryEvent | None:
"""Gather the retrieved texts and send them to the reranking step."""
# Wait for results from all retrievers
results = ctx.collect_events(ev, [RetrievalResultEvent] * len(self.retrievers))
# Llama Index workflow polls for results until all retrievers have responded.
# If any retriever has not responded, collect_events will return None and we
# should return None to wait for the next poll.
if results is None:
return None
# If only one retriever is used, we can skip reranking
if len(results) == 1:
context = "\n".join(node.text for node in results[0].nodes)
return QueryEvent(context=context)
# Combine the nodes from all retrievers for reranking
all_nodes = []
for result in results:
# Record the source of the retrieved nodes
for node in result.nodes:
node.node.metadata["retriever"] = result.retriever
all_nodes.extend(result.nodes)
return RerankEvent(nodes=all_nodes)
@step
async def rerank(self, ctx: Context, ev: RerankEvent) -> QueryEvent:
"""Evaluate relevancy of retrieved documents with the query."""
query = await ctx.get("query")
# Rerank the nodes using LLM (RankGPT based)
reranker = RankGPTRerank(llm=self.llm, top_n=5)
reranked_nodes = reranker.postprocess_nodes(ev.nodes, query_str=query)
reranked_context = "\n".join(node.text for node in reranked_nodes)
return QueryEvent(context=reranked_context)
@step
async def query_result(self, ctx: Context, ev: QueryEvent) -> StopEvent:
"""Get result with relevant text."""
query = await ctx.get("query")
prompt = FINAL_QUERY_TEMPLATE.format(context=ev.context, query=query)
response = self.llm.complete(prompt).text
return StopEvent(result=response)
|
HybridRAGWorkflow
|
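A minimal usage sketch for the workflow above, assuming the standard llama_index Workflow API (`run()` feeds its keyword arguments into the StartEvent and resolves to the StopEvent result) and that `Settings.llm` plus the persisted BM25 index are already configured; the query string is hypothetical:

import asyncio

async def main():
    # With a single retriever, route_retrieval emits one BM25RetrieveEvent,
    # gather_retrieval_results skips reranking, and query_result answers.
    workflow = HybridRAGWorkflow(retrievers=["bm25"], timeout=60)
    answer = await workflow.run(query="What does MLflow Tracking record?")
    print(answer)

asyncio.run(main())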
python
|
django__django
|
tests/gis_tests/geos_tests/test_mutable_list.py
|
{
"start": 17053,
"end": 17120
}
|
class ____(ListMixinTest):
listType = UserListB
|
ListMixinTestSingle
|
python
|
cython__cython
|
Cython/Debugger/Tests/TestLibCython.py
|
{
"start": 5382,
"end": 7501
}
|
class ____(DebuggerTestCase):
def setUp(self):
if not test_gdb():
return
super().setUp()
prefix_code = textwrap.dedent('''\
python
import os
import sys
import traceback
def excepthook(type, value, tb):
traceback.print_exception(type, value, tb)
sys.stderr.flush()
sys.stdout.flush()
os._exit(1)
sys.excepthook = excepthook
# Have tracebacks end up on sys.stderr (gdb replaces sys.stderr
# with an object that calls gdb.write())
sys.stderr = sys.__stderr__
end
''')
code = textwrap.dedent('''\
python
from Cython.Debugger.Tests import test_libcython_in_gdb
test_libcython_in_gdb.main(version=%r)
end
''' % (sys.version_info[:2],))
self.gdb_command_file = cygdb.make_command_file(self.tempdir,
prefix_code)
with open(self.gdb_command_file, 'a') as f:
f.write(code)
args = ['gdb', '-batch', '-x', self.gdb_command_file, '-n', '--args',
sys.executable, '-c', 'import codefile']
paths = []
path = os.environ.get('PYTHONPATH')
if path:
paths.append(path)
paths.append(os.path.dirname(os.path.dirname(
os.path.abspath(Cython.__file__))))
env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))
self.p = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
def tearDown(self):
if not test_gdb():
return
try:
super().tearDown()
if self.p:
try: self.p.stdout.close()
except: pass
try: self.p.stderr.close()
except: pass
self.p.wait()
finally:
os.remove(self.gdb_command_file)
|
GdbDebuggerTestCase
|
python
|
coleifer__peewee
|
tests/sql.py
|
{
"start": 93486,
"end": 95013
}
|
class ____(BaseTestCase):
def _test_sql_to_string(self, _param):
class FakeDB(SqliteDatabase):
param = _param
db = FakeDB(None)
T = Table('tbl', ('id', 'val')).bind(db)
query = (T.select()
.where((T.val == 'foo') |
(T.val == b'bar') |
(T.val == True) | (T.val == False) |
(T.val == 2) |
(T.val == -3.14) |
(T.val == datetime.datetime(2018, 1, 1)) |
(T.val == datetime.date(2018, 1, 2)) |
T.val.is_null() |
T.val.is_null(False) |
T.val.in_(['aa', 'bb', 'cc'])))
self.assertEqual(query_to_string(query), (
'SELECT "t1"."id", "t1"."val" FROM "tbl" AS "t1" WHERE ((((((((((('
'"t1"."val" = \'foo\') OR '
'("t1"."val" = \'bar\')) OR '
'("t1"."val" = 1)) OR '
'("t1"."val" = 0)) OR '
'("t1"."val" = 2)) OR '
'("t1"."val" = -3.14)) OR '
'("t1"."val" = \'2018-01-01 00:00:00\')) OR '
'("t1"."val" = \'2018-01-02\')) OR '
'("t1"."val" IS NULL)) OR '
'("t1"."val" IS NOT NULL)) OR '
'("t1"."val" IN (\'aa\', \'bb\', \'cc\')))'))
def test_sql_to_string_qmark(self):
self._test_sql_to_string('?')
def test_sql_to_string_default(self):
self._test_sql_to_string('%s')
|
TestSqlToString
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/dashboard.py
|
{
"start": 15490,
"end": 15628
}
|
class ____(TypedDict, total=False):
period: str
utc: str
expired: bool
start: datetime
end: datetime
|
PageFiltersOptional
|
python
|
huggingface__transformers
|
src/transformers/quantizers/auto.py
|
{
"start": 3678,
"end": 5770
}
|
class ____:
"""
The Auto-HF quantization config class that takes care of automatically dispatching to the correct
quantization config given a quantization config stored in a dictionary.
"""
@classmethod
def from_dict(cls, quantization_config_dict: dict):
quant_method = quantization_config_dict.get("quant_method")
        # We need special care for bnb models to make sure everything is backward-compatible.
if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
elif quant_method is None:
raise ValueError(
"The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
)
if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING:
raise ValueError(
f"Unknown quantization type, got {quant_method} - supported types are:"
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
)
target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
return target_cls.from_dict(quantization_config_dict)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if getattr(model_config, "quantization_config", None) is None:
raise ValueError(
f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
)
quantization_config_dict = model_config.quantization_config
quantization_config = cls.from_dict(quantization_config_dict)
# Update with potential kwargs that are passed through from_pretrained.
quantization_config.update(**kwargs)
return quantization_config
|
AutoQuantizationConfig
|
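A short sketch of how the two classmethods above are typically driven; the model id is hypothetical, and the dict path assumes the bitsandbytes config class is registered under the derived "bitsandbytes_4bit" key, as the suffix logic in `from_dict` implies:

from transformers.quantizers.auto import AutoQuantizationConfig

# Dispatch from a raw dict (e.g. the `quantization_config` block of a
# checkpoint's config.json); load_in_4bit triggers the bnb special case.
cfg = AutoQuantizationConfig.from_dict({"load_in_4bit": True})

# Or resolve straight from a quantized checkpoint on the Hub.
cfg = AutoQuantizationConfig.from_pretrained("some-org/some-4bit-model")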
python
|
wandb__wandb
|
wandb/integration/lightning/fabric/logger.py
|
{
"start": 1328,
"end": 27233
}
|
class ____(Logger):
r"""Log using `Weights and Biases <https://docs.wandb.ai/integrations/lightning>`_.
**Installation and set-up**
Install with pip:
.. code-block:: bash
pip install wandb
Create a `WandbLogger` instance:
.. code-block:: python
from lightning.fabric.loggers import WandbLogger
wandb_logger = WandbLogger(project="MNIST")
Pass the logger instance to the `Trainer`:
.. code-block:: python
trainer = Trainer(logger=wandb_logger)
A new W&B run will be created when training starts if you have not created one manually before with `wandb.init()`.
**Log metrics**
Log from :class:`~lightning.pytorch.core.LightningModule`:
.. code-block:: python
class LitModule(LightningModule):
def training_step(self, batch, batch_idx):
self.log("train/loss", loss)
    Use the wandb module directly:
.. code-block:: python
wandb.log({"train/loss": loss})
**Log hyper-parameters**
Save :class:`~lightning.pytorch.core.LightningModule` parameters:
.. code-block:: python
class LitModule(LightningModule):
def __init__(self, *args, **kwarg):
self.save_hyperparameters()
Add other config parameters:
.. code-block:: python
# add one parameter
wandb_logger.experiment.config["key"] = value
# add multiple parameters
wandb_logger.experiment.config.update({key1: val1, key2: val2})
        # use the wandb module directly
wandb.config["key"] = value
wandb.config.update()
**Log gradients, parameters and model topology**
Call the `watch` method for automatically tracking gradients:
.. code-block:: python
# log gradients and model topology
wandb_logger.watch(model)
# log gradients, parameter histogram and model topology
wandb_logger.watch(model, log="all")
# change log frequency of gradients and parameters (100 steps by default)
wandb_logger.watch(model, log_freq=500)
# do not log graph (in case of errors)
wandb_logger.watch(model, log_graph=False)
The `watch` method adds hooks to the model which can be removed at the end of training:
.. code-block:: python
wandb_logger.experiment.unwatch(model)
**Log model checkpoints**
Log model checkpoints at the end of training:
.. code-block:: python
wandb_logger = WandbLogger(log_model=True)
Log model checkpoints as they get created during training:
.. code-block:: python
wandb_logger = WandbLogger(log_model="all")
Custom checkpointing can be set up through :class:`~lightning.pytorch.callbacks.ModelCheckpoint`:
.. code-block:: python
# log model only if `val_accuracy` increases
wandb_logger = WandbLogger(log_model="all")
checkpoint_callback = ModelCheckpoint(monitor="val_accuracy", mode="max")
trainer = Trainer(logger=wandb_logger, callbacks=[checkpoint_callback])
`latest` and `best` aliases are automatically set to easily retrieve a model checkpoint:
.. code-block:: python
# reference can be retrieved in artifacts panel
# "VERSION" can be a version (ex: "v2") or an alias ("latest or "best")
checkpoint_reference = "USER/PROJECT/MODEL-RUN_ID:VERSION"
# download checkpoint locally (if not already cached)
run = wandb.init(project="MNIST")
artifact = run.use_artifact(checkpoint_reference, type="model")
artifact_dir = artifact.download()
# load checkpoint
model = LitModule.load_from_checkpoint(Path(artifact_dir) / "model.ckpt")
**Log media**
Log text with:
.. code-block:: python
# using columns and data
columns = ["input", "label", "prediction"]
data = [["cheese", "english", "english"], ["fromage", "french", "spanish"]]
wandb_logger.log_text(key="samples", columns=columns, data=data)
# using a pandas DataFrame
wandb_logger.log_text(key="samples", dataframe=my_dataframe)
Log images with:
.. code-block:: python
# using tensors, numpy arrays or PIL images
wandb_logger.log_image(key="samples", images=[img1, img2])
# adding captions
wandb_logger.log_image(
key="samples", images=[img1, img2], caption=["tree", "person"]
)
# using file path
wandb_logger.log_image(key="samples", images=["img_1.jpg", "img_2.jpg"])
More arguments can be passed for logging segmentation masks and bounding boxes. Refer to
`Image Overlays documentation <https://docs.wandb.ai/guides/track/log/media#image-overlays>`_.
**Log Tables**
`W&B Tables <https://docs.wandb.ai/guides/tables/visualize-tables>`_ can be used to log,
query and analyze tabular data.
They support any type of media (text, image, video, audio, molecule, html, etc) and are great for storing,
understanding and sharing any form of data, from datasets to model predictions.
.. code-block:: python
columns = ["caption", "image", "sound"]
data = [
["cheese", wandb.Image(img_1), wandb.Audio(snd_1)],
["wine", wandb.Image(img_2), wandb.Audio(snd_2)],
]
wandb_logger.log_table(key="samples", columns=columns, data=data)
**Downloading and Using Artifacts**
To download an artifact without starting a run, call the ``download_artifact``
function on the class:
.. code-block:: python
artifact_dir = wandb_logger.download_artifact(artifact="path/to/artifact")
To download an artifact and link it to an ongoing run call the ``download_artifact``
function on the logger instance:
.. code-block:: python
class MyModule(LightningModule):
def any_lightning_module_function_or_hook(self):
self.logger.download_artifact(artifact="path/to/artifact")
To link an artifact from a previous run you can use ``use_artifact`` function:
.. code-block:: python
wandb_logger.use_artifact(artifact="path/to/artifact")
See Also:
- `Demo in Google Colab <http://wandb.me/lightning>`__ with hyperparameter search and model logging
- `W&B Documentation <https://docs.wandb.ai/integrations/lightning>`__
Args:
name: Display name for the run.
save_dir: Path where data is saved.
version: Sets the version, mainly used to resume a previous run.
offline: Run offline (data can be streamed later to wandb servers).
dir: Same as save_dir.
id: Same as version.
anonymous: Enables or explicitly disables anonymous logging.
project: The name of the project to which this run will belong. If not set, the environment variable
            `WANDB_PROJECT` will be used as a fallback. If both are not set, it defaults to ``'lightning_fabric_logs'``.
log_model: Log checkpoints created by :class:`~lightning.pytorch.callbacks.ModelCheckpoint`
as W&B artifacts. `latest` and `best` aliases are automatically set.
* if ``log_model == 'all'``, checkpoints are logged during training.
* if ``log_model == True``, checkpoints are logged at the end of training, except when
`~lightning.pytorch.callbacks.ModelCheckpoint.save_top_k` ``== -1``
which also logs every checkpoint during training.
* if ``log_model == False`` (default), no checkpoint is logged.
prefix: A string to put at the beginning of metric keys.
experiment: WandB experiment object. Automatically set when creating a run.
checkpoint_name: Name of the model checkpoint artifact being logged.
log_checkpoint_on: When to log model checkpoints as W&B artifacts. Only used if ``log_model`` is ``True``.
Options: ``"success"``, ``"all"``. Default: ``"success"``.
\**kwargs: Arguments passed to :func:`wandb.init` like `entity`, `group`, `tags`, etc.
Raises:
ModuleNotFoundError:
If required WandB package is not installed on the device.
MisconfigurationException:
            If both ``log_model`` and ``offline`` are set to ``True``.
"""
LOGGER_JOIN_CHAR = "-"
def __init__(
self,
name: Optional[str] = None,
save_dir: _PATH = ".",
version: Optional[str] = None,
offline: bool = False,
dir: Optional[_PATH] = None,
id: Optional[str] = None,
anonymous: Optional[bool] = None,
project: Optional[str] = None,
log_model: Union[Literal["all"], bool] = False,
experiment: Optional["wandb.Run"] = None,
prefix: str = "",
checkpoint_name: Optional[str] = None,
log_checkpoint_on: Union[Literal["success"], Literal["all"]] = "success",
**kwargs: Any,
) -> None:
if offline and log_model:
raise MisconfigurationException(
f"Providing log_model={log_model} and offline={offline} is an invalid configuration"
" since model checkpoints cannot be uploaded in offline mode.\n"
"Hint: Set `offline=False` to log your model."
)
super().__init__()
self._offline = offline
self._log_model = log_model
self._prefix = prefix
self._experiment = experiment
self._logged_model_time: Dict[str, float] = {}
self._checkpoint_callback: Optional[ModelCheckpoint] = None
# paths are processed as strings
if save_dir is not None:
save_dir = os.fspath(save_dir)
elif dir is not None:
dir = os.fspath(dir)
project = project or os.environ.get("WANDB_PROJECT", "lightning_fabric_logs")
# set wandb init arguments
self._wandb_init: Dict[str, Any] = {
"name": name,
"project": project,
"dir": save_dir or dir,
"id": version or id,
"resume": "allow",
"anonymous": ("allow" if anonymous else None),
}
self._wandb_init.update(**kwargs)
# extract parameters
self._project = self._wandb_init.get("project")
self._save_dir = self._wandb_init.get("dir")
self._name = self._wandb_init.get("name")
self._id = self._wandb_init.get("id")
self._checkpoint_name = checkpoint_name
self._log_checkpoint_on = log_checkpoint_on
def __getstate__(self) -> Dict[str, Any]:
# Hack: If the 'spawn' launch method is used, the logger will get pickled and this `__getstate__` gets called.
# We create an experiment here in the main process, and attach to it in the worker process.
# Using wandb-service, we persist the same experiment even if multiple `Trainer.fit/test/validate` calls
# are made.
_ = self.experiment
state = self.__dict__.copy()
# args needed to reload correct experiment
if self._experiment is not None:
state["_id"] = getattr(self._experiment, "id", None)
state["_attach_id"] = getattr(self._experiment, "_attach_id", None)
state["_name"] = self._experiment.name
# cannot be pickled
state["_experiment"] = None
return state
@property
@rank_zero_experiment
def experiment(self) -> "wandb.Run":
r"""Actual wandb object.
To use wandb features in your :class:`~lightning.pytorch.core.LightningModule`, do the
following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
attach_id = getattr(self, "_attach_id", None)
if wandb.run is not None:
# wandb process already created in this instance
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
elif attach_id is not None and hasattr(wandb, "_attach"):
# attach to wandb process referenced
self._experiment = wandb._attach(attach_id)
else:
# create new wandb process
self._experiment = wandb.init(**self._wandb_init)
# define default x-axis
if isinstance(self._experiment, wandb.Run) and getattr(
self._experiment, "define_metric", None
):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric(
"*", step_metric="trainer/global_step", step_sync=True
)
self._experiment._label(repo="lightning_fabric_logger") # pylint: disable=protected-access
with telemetry.context(run=self._experiment) as tel:
tel.feature.lightning_fabric_logger = True
return self._experiment
def watch(
self,
model: nn.Module,
log: str = "gradients",
log_freq: int = 100,
log_graph: bool = True,
) -> None:
self.experiment.watch(model, log=log, log_freq=log_freq, log_graph=log_graph)
@override
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: # type: ignore[override]
params = _convert_params(params)
params = _sanitize_callable_params(params)
self.experiment.config.update(params, allow_val_change=True)
@override
@rank_zero_only
def log_metrics(
self, metrics: Mapping[str, float], step: Optional[int] = None
) -> None:
assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0"
metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)
if step is not None:
self.experiment.log(dict(metrics, **{"trainer/global_step": step}))
else:
self.experiment.log(metrics)
@rank_zero_only
def log_table(
self,
key: str,
columns: Optional[List[str]] = None,
data: Optional[List[List[Any]]] = None,
dataframe: Any = None,
step: Optional[int] = None,
) -> None:
"""Log a Table containing any object type (text, image, audio, video, molecule, html, etc).
Can be defined either with `columns` and `data` or with `dataframe`.
"""
metrics = {key: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
self.log_metrics(metrics, step)
@rank_zero_only
def log_text(
self,
key: str,
columns: Optional[List[str]] = None,
data: Optional[List[List[str]]] = None,
dataframe: Any = None,
step: Optional[int] = None,
) -> None:
"""Log text as a Table.
Can be defined either with `columns` and `data` or with `dataframe`.
"""
self.log_table(key, columns, data, dataframe, step)
@rank_zero_only
def log_html(
self, key: str, htmls: List[Any], step: Optional[int] = None, **kwargs: Any
) -> None:
"""Log html files.
Optional kwargs are lists passed to each html (ex: inject).
"""
if not isinstance(htmls, list):
raise TypeError(f'Expected a list as "htmls", found {type(htmls)}')
n = len(htmls)
for k, v in kwargs.items():
if len(v) != n:
raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)]
metrics = {
key: [wandb.Html(html, **kwarg) for html, kwarg in zip(htmls, kwarg_list)]
}
self.log_metrics(metrics, step) # type: ignore[arg-type]
@rank_zero_only
def log_image(
self, key: str, images: List[Any], step: Optional[int] = None, **kwargs: Any
) -> None:
"""Log images (tensors, numpy arrays, PIL Images or file paths).
Optional kwargs are lists passed to each image (ex: caption, masks, boxes).
"""
if not isinstance(images, list):
raise TypeError(f'Expected a list as "images", found {type(images)}')
n = len(images)
for k, v in kwargs.items():
if len(v) != n:
raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)]
metrics = {
key: [wandb.Image(img, **kwarg) for img, kwarg in zip(images, kwarg_list)]
}
self.log_metrics(metrics, step) # type: ignore[arg-type]
@rank_zero_only
def log_audio(
self, key: str, audios: List[Any], step: Optional[int] = None, **kwargs: Any
) -> None:
r"""Log audios (numpy arrays, or file paths).
Args:
key: The key to be used for logging the audio files
audios: The list of audio file paths, or numpy arrays to be logged
step: The step number to be used for logging the audio files
            \**kwargs: Optional kwargs are lists passed to each ``wandb.Audio`` instance (ex: caption, sample_rate).
"""
if not isinstance(audios, list):
raise TypeError(f'Expected a list as "audios", found {type(audios)}')
n = len(audios)
for k, v in kwargs.items():
if len(v) != n:
raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)]
metrics = {
key: [
wandb.Audio(audio, **kwarg) for audio, kwarg in zip(audios, kwarg_list)
]
}
self.log_metrics(metrics, step) # type: ignore[arg-type]
@rank_zero_only
def log_video(
self, key: str, videos: List[Any], step: Optional[int] = None, **kwargs: Any
) -> None:
"""Log videos (numpy arrays, or file paths).
Args:
key: The key to be used for logging the video files
videos: The list of video file paths, or numpy arrays to be logged
step: The step number to be used for logging the video files
            **kwargs: Optional kwargs are lists passed to each ``wandb.Video`` instance (ex: caption, fps, format).
"""
if not isinstance(videos, list):
raise TypeError(f'Expected a list as "videos", found {type(videos)}')
n = len(videos)
for k, v in kwargs.items():
if len(v) != n:
raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)]
metrics = {
key: [
wandb.Video(video, **kwarg) for video, kwarg in zip(videos, kwarg_list)
]
}
self.log_metrics(metrics, step) # type: ignore[arg-type]
@property
@override
def save_dir(self) -> Optional[str]:
"""Gets the save directory.
Returns:
The path to the save directory.
"""
return self._save_dir
@property
@override
def name(self) -> Optional[str]:
"""The project name of this experiment.
Returns:
The name of the project the current experiment belongs to. This name is not the same as `wandb.Run`'s
name. To access wandb's internal experiment name, use ``logger.experiment.name`` instead.
"""
return self._project
@property
@override
def version(self) -> Optional[str]:
"""Gets the id of the experiment.
Returns:
            The id of the experiment if it exists, otherwise the id given to the constructor.
"""
# don't create an experiment if we don't have one
return self._experiment.id if self._experiment else self._id
@property
def log_dir(self) -> Optional[str]:
"""Gets the save directory.
Returns:
The path to the save directory.
"""
return self.save_dir
@property
def group_separator(self) -> str:
"""Return the default separator used by the logger to group the data into subfolders."""
return self.LOGGER_JOIN_CHAR
@property
def root_dir(self) -> Optional[str]:
"""Return the root directory.
Return the root directory where all versions of an experiment get saved, or `None` if the logger does not
save data locally.
"""
        # save_dir is stored as a plain string, so go through Path to get its parent
        return str(Path(self.save_dir).parent) if self.save_dir else None
def log_graph(self, model: Module, input_array: Optional[Tensor] = None) -> None:
"""Record model graph.
Args:
model: the model with an implementation of ``forward``.
input_array: input passes to `model.forward`
This is a noop function and does not perform any operation.
"""
return
@override
def after_save_checkpoint(self, checkpoint_callback: "ModelCheckpoint") -> None:
# log checkpoints as artifacts
        if (
            self._log_model == "all"
            or (self._log_model is True and checkpoint_callback.save_top_k == -1)
        ):
# TODO: Replace with new Fabric Checkpoints system
self._scan_and_log_pytorch_checkpoints(checkpoint_callback)
elif self._log_model is True:
self._checkpoint_callback = checkpoint_callback
@staticmethod
@rank_zero_only
def download_artifact(
artifact: str,
save_dir: Optional[_PATH] = None,
artifact_type: Optional[str] = None,
use_artifact: Optional[bool] = True,
) -> str:
"""Downloads an artifact from the wandb server.
Args:
artifact: The path of the artifact to download.
save_dir: The directory to save the artifact to.
artifact_type: The type of artifact to download.
            use_artifact: Whether to declare the artifact as an input to the current run (adds an edge in the artifact graph).
Returns:
The path to the downloaded artifact.
"""
if wandb.run is not None and use_artifact:
artifact = wandb.run.use_artifact(artifact)
else:
api = wandb.Api()
artifact = api.artifact(artifact, type=artifact_type)
save_dir = None if save_dir is None else os.fspath(save_dir)
return artifact.download(root=save_dir)
def use_artifact(
self, artifact: str, artifact_type: Optional[str] = None
) -> "Artifact":
"""Logs to the wandb dashboard that the mentioned artifact is used by the run.
Args:
artifact: The path of the artifact.
artifact_type: The type of artifact being used.
Returns:
wandb Artifact object for the artifact.
"""
return self.experiment.use_artifact(artifact, type=artifact_type)
@override
@rank_zero_only
def save(self) -> None:
"""Save log data."""
self.experiment.log({}, commit=True)
@override
@rank_zero_only
def finalize(self, status: str) -> None:
if self._log_checkpoint_on == "success" and status != "success":
# Currently, checkpoints only get logged on success
return
# log checkpoints as artifacts
if (
self._checkpoint_callback
and self._experiment is not None
and self._log_checkpoint_on in ["success", "all"]
):
self._scan_and_log_pytorch_checkpoints(self._checkpoint_callback)
def _scan_and_log_pytorch_checkpoints(
self, checkpoint_callback: "ModelCheckpoint"
) -> None:
from lightning.pytorch.loggers.utilities import _scan_checkpoints
# get checkpoints to be saved with associated score
checkpoints = _scan_checkpoints(checkpoint_callback, self._logged_model_time)
# log iteratively all new checkpoints
for t, p, s, _ in checkpoints:
metadata = {
"score": s.item() if isinstance(s, Tensor) else s,
"original_filename": Path(p).name,
checkpoint_callback.__class__.__name__: {
k: getattr(checkpoint_callback, k)
for k in [
"monitor",
"mode",
"save_last",
"save_top_k",
"save_weights_only",
"_every_n_train_steps",
]
# ensure it does not break if `ModelCheckpoint` args change
if hasattr(checkpoint_callback, k)
},
}
if not self._checkpoint_name:
self._checkpoint_name = f"model-{self.experiment.id}"
artifact = wandb.Artifact(
name=self._checkpoint_name, type="model", metadata=metadata
)
artifact.add_file(p, name="model.ckpt")
aliases = (
["latest", "best"]
if p == checkpoint_callback.best_model_path
else ["latest"]
)
self.experiment.log_model(artifact, aliases=aliases)
            # remember logged models - timestamp needed in case filename didn't change (last.ckpt or custom name)
self._logged_model_time[p] = t
|
WandbLogger
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/layout/mouse_handlers.py
|
{
"start": 385,
"end": 1589
}
|
class ____:
"""
Two dimensional raster of callbacks for mouse events.
"""
def __init__(self) -> None:
def dummy_callback(mouse_event: MouseEvent) -> NotImplementedOrNone:
"""
:param mouse_event: `MouseEvent` instance.
"""
return NotImplemented
# NOTE: Previously, the data structure was a dictionary mapping (x,y)
        # to the handlers. That, however, was less efficient when copying
# over the mouse handlers of the visible region in the scrollable pane.
# Map y (row) to x (column) to handlers.
self.mouse_handlers: defaultdict[int, defaultdict[int, MouseHandler]] = (
defaultdict(lambda: defaultdict(lambda: dummy_callback))
)
def set_mouse_handler_for_range(
self,
x_min: int,
x_max: int,
y_min: int,
y_max: int,
handler: Callable[[MouseEvent], NotImplementedOrNone],
) -> None:
"""
Set mouse handler for a region.
"""
for y in range(y_min, y_max):
row = self.mouse_handlers[y]
for x in range(x_min, x_max):
row[x] = handler
|
MouseHandlers
|
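A small sketch of how the raster above is filled and queried; thanks to the nested defaultdicts, lookups outside any registered region fall back to the dummy callback instead of raising:

handlers = MouseHandlers()
handlers.set_mouse_handler_for_range(0, 10, 0, 2, lambda event: None)

cb = handlers.mouse_handlers[1][5]          # inside the region -> our lambda
fallback = handlers.mouse_handlers[99][99]  # outside -> dummy_callback, whose
# NotImplemented return lets the event bubble up to other handlers.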
python
|
huggingface__transformers
|
src/transformers/models/roc_bert/configuration_roc_bert.py
|
{
"start": 797,
"end": 7779
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`RoCBertModel`]. It is used to instantiate a
RoCBert model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the RoCBert
[weiweishi/roc-bert-base-zh](https://huggingface.co/weiweishi/roc-bert-base-zh) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`RoCBertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`RoCBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
enable_pronunciation (`bool`, *optional*, defaults to `True`):
Whether or not the model use pronunciation embed when training.
enable_shape (`bool`, *optional*, defaults to `True`):
Whether or not the model use shape embed when training.
pronunciation_embed_dim (`int`, *optional*, defaults to 768):
Dimension of the pronunciation_embed.
pronunciation_vocab_size (`int`, *optional*, defaults to 910):
Pronunciation Vocabulary size of the RoCBert model. Defines the number of different tokens that can be
represented by the `input_pronunciation_ids` passed when calling [`RoCBertModel`].
shape_embed_dim (`int`, *optional*, defaults to 512):
Dimension of the shape_embed.
shape_vocab_size (`int`, *optional*, defaults to 24858):
Shape Vocabulary size of the RoCBert model. Defines the number of different tokens that can be represented
by the `input_shape_ids` passed when calling [`RoCBertModel`].
concat_input (`bool`, *optional*, defaults to `True`):
Defines the way of merging the shape_embed, pronunciation_embed and word_embed, if the value is true,
output_embed = torch.cat((word_embed, shape_embed, pronunciation_embed), -1), else output_embed =
(word_embed + shape_embed + pronunciation_embed) / 3
Example:
```python
>>> from transformers import RoCBertModel, RoCBertConfig
>>> # Initializing a RoCBert weiweishi/roc-bert-base-zh style configuration
>>> configuration = RoCBertConfig()
>>> # Initializing a model from the weiweishi/roc-bert-base-zh style configuration
>>> model = RoCBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "roc_bert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
use_cache=True,
pad_token_id=0,
classifier_dropout=None,
enable_pronunciation=True,
enable_shape=True,
pronunciation_embed_dim=768,
pronunciation_vocab_size=910,
shape_embed_dim=512,
shape_vocab_size=24858,
concat_input=True,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.enable_pronunciation = enable_pronunciation
self.enable_shape = enable_shape
self.pronunciation_embed_dim = pronunciation_embed_dim
self.pronunciation_vocab_size = pronunciation_vocab_size
self.shape_embed_dim = shape_embed_dim
self.shape_vocab_size = shape_vocab_size
self.concat_input = concat_input
self.classifier_dropout = classifier_dropout
super().__init__(pad_token_id=pad_token_id, **kwargs)
__all__ = ["RoCBertConfig"]
|
RoCBertConfig
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_blas.py
|
{
"start": 28713,
"end": 29997
}
|
class ____:
def setup_method(self):
self.a = np.array([[1., 0.],
[0., -2.],
[2., 3.]])
self.t = np.array([[1., 0., 2.],
[0., 4., -6.],
[2., -6., 13.]])
self.tt = np.array([[5., 6.],
[6., 13.]])
@parametrize_blas(fblas, "syrk", "sdcz")
def test_syrk(self, f, dtype):
c = f(a=self.a, alpha=1.)
assert_array_almost_equal(np.triu(c), np.triu(self.t))
c = f(a=self.a, alpha=1., lower=1)
assert_array_almost_equal(np.tril(c), np.tril(self.t))
c0 = np.ones(self.t.shape)
c = f(a=self.a, alpha=1., beta=1., c=c0)
assert_array_almost_equal(np.triu(c), np.triu(self.t+c0))
c = f(a=self.a, alpha=1., trans=1)
assert_array_almost_equal(np.triu(c), np.triu(self.tt))
# prints '0-th dimension must be fixed to 3 but got 5',
# FIXME: suppress?
@parametrize_blas(fblas, "syrk", "sdcz")
def test_syrk_wrong_c(self, f, dtype):
# FIXME narrow down to _fblas.error
with pytest.raises(Exception):
f(a=self.a, alpha=1., c=np.ones((5, 8)))
# if C is supplied, it must have compatible dimensions
|
TestBLAS3Syrk
|
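The fixture matrices above are just `a @ a.T` (for `t`) and `a.T @ a` (for `tt`), which is what syrk computes; a quick standalone check using the public scipy.linalg.blas wrappers rather than the test's fblas fixture:

import numpy as np
from scipy.linalg import blas

a = np.array([[1., 0.], [0., -2.], [2., 3.]])
c = blas.dsyrk(alpha=1.0, a=a)             # fills the upper triangle of a @ a.T
assert np.allclose(np.triu(c), np.triu(a @ a.T))
ct = blas.dsyrk(alpha=1.0, a=a, trans=1)   # upper triangle of a.T @ a
assert np.allclose(np.triu(ct), np.triu(a.T @ a))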
python
|
fluentpython__example-code-2e
|
05-data-classes/dataclass/resource.py
|
{
"start": 1787,
"end": 2810
}
|
class ____(TypedDict):
identifier: str
title: str
creators: list[str]
date: Optional[date]
type: ResourceType
description: str
language: str
subjects: list[str]
if __name__ == '__main__':
r = Resource('0')
description = 'Improving the design of existing code'
book = Resource('978-0-13-475759-9', 'Refactoring, 2nd Edition',
['Martin Fowler', 'Kent Beck'], date(2018, 11, 19),
ResourceType.BOOK, description,
'EN', ['computer programming', 'OOP'])
print(book)
book_dict: ResourceDict = {
'identifier': '978-0-13-475759-9',
'title': 'Refactoring, 2nd Edition',
'creators': ['Martin Fowler', 'Kent Beck'],
'date': date(2018, 11, 19),
'type': ResourceType.BOOK,
'description': 'Improving the design of existing code',
'language': 'EN',
'subjects': ['computer programming', 'OOP']}
book2 = Resource(**book_dict)
print(book == book2)
|
ResourceDict
|
python
|
walkccc__LeetCode
|
solutions/343. Integer Break/343.py
|
{
"start": 0,
"end": 559
}
|
class ____:
def integerBreak(self, n: int) -> int:
# If an optimal product contains a factor f >= 4, then we can replace it
# with 2 and f - 2 without losing optimality. As 2(f - 2) = 2f - 4 >= f,
# we never need a factor >= 4, meaning we only need factors 1, 2, and 3
# (and 1 is wasteful).
# Also, 3 * 3 is better than 2 * 2 * 2, so we never use 2 more than twice.
if n == 2: # 1 * 1
return 1
if n == 3: # 1 * 2
return 2
ans = 1
while n > 4:
n -= 3
ans *= 3
ans *= n
return ans
|
Solution
|
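To see the factor argument in action: for n = 10 the loop peels off two 3s and multiplies by the remaining 4, giving 3 * 3 * 4 = 36. A brute-force cross-check over all splits (a throwaway helper, not part of the solution):

from functools import lru_cache

@lru_cache(maxsize=None)
def brute(n: int) -> int:
    # Best product over all ways to split n into at least two positive parts.
    if n == 2:
        return 1
    return max(i * max(n - i, brute(n - i)) for i in range(1, n - 1))

assert Solution().integerBreak(10) == brute(10) == 36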
python
|
sympy__sympy
|
sympy/core/tests/test_priority.py
|
{
"start": 287,
"end": 2000
}
|
class ____(Integer):
'''
    Integer of value 1 with _op_priority 20.
    Operations handled by this class return 1; reverse operations return 2.
'''
_op_priority = 20.0
result: Expr = S.One
def __new__(cls):
obj = Expr.__new__(cls)
obj.p = 1
return obj
@call_highest_priority('__rmul__')
def __mul__(self, other):
return self.result
@call_highest_priority('__mul__')
def __rmul__(self, other):
return 2*self.result
@call_highest_priority('__radd__')
def __add__(self, other):
return self.result
@call_highest_priority('__add__')
def __radd__(self, other):
return 2*self.result
@call_highest_priority('__rsub__')
def __sub__(self, other):
return self.result
@call_highest_priority('__sub__')
def __rsub__(self, other):
return 2*self.result
@call_highest_priority('__rpow__')
def __pow__(self, other):
return self.result
@call_highest_priority('__pow__')
def __rpow__(self, other):
return 2*self.result
@call_highest_priority('__rtruediv__')
def __truediv__(self, other):
return self.result
@call_highest_priority('__truediv__')
def __rtruediv__(self, other):
return 2*self.result
@call_highest_priority('__rmod__')
def __mod__(self, other):
return self.result
@call_highest_priority('__mod__')
def __rmod__(self, other):
return 2*self.result
@call_highest_priority('__rfloordiv__')
def __floordiv__(self, other):
return self.result
@call_highest_priority('__floordiv__')
def __rfloordiv__(self, other):
return 2*self.result
|
Higher
|
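A quick demonstration of the dispatch this class encodes, relying on sympy's rule that the operand with the highest `_op_priority` handles the operation, so `Higher` wins from either side:

from sympy import Integer

h = Higher()
assert h * Integer(3) == 1   # Higher.__mul__ handles it directly
assert Integer(3) * h == 2   # Integer defers to Higher.__rmul__
assert h + Integer(3) == 1
assert Integer(3) + h == 2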
python
|
readthedocs__readthedocs.org
|
readthedocs/builds/migrations/0043_add_cancelled_state.py
|
{
"start": 149,
"end": 976
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0042_version_state"),
]
operations = [
migrations.AlterField(
model_name="build",
name="state",
field=models.CharField(
choices=[
("triggered", "Triggered"),
("cloning", "Cloning"),
("installing", "Installing"),
("building", "Building"),
("uploading", "Uploading"),
("finished", "Finished"),
("cancelled", "Cancelled"),
],
db_index=True,
default="finished",
max_length=55,
verbose_name="State",
),
),
]
|
Migration
|
python
|
doocs__leetcode
|
solution/3000-3099/3069.Distribute Elements Into Two Arrays I/Solution.py
|
{
"start": 0,
"end": 293
}
|
class ____:
def resultArray(self, nums: List[int]) -> List[int]:
arr1 = [nums[0]]
arr2 = [nums[1]]
for x in nums[2:]:
if arr1[-1] > arr2[-1]:
arr1.append(x)
else:
arr2.append(x)
return arr1 + arr2
|
Solution
|
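A worked trace of the greedy rule: with nums = [5, 4, 3, 8], arr1 starts as [5] and arr2 as [4]; 3 joins arr1 (5 > 4), then 8 joins arr2 (3 > 4 is false), so the concatenation is [5, 3, 4, 8]:

assert Solution().resultArray([5, 4, 3, 8]) == [5, 3, 4, 8]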
python
|
huggingface__transformers
|
src/transformers/models/poolformer/modeling_poolformer.py
|
{
"start": 4579,
"end": 6844
}
|
class ____(nn.Module):
"""This corresponds to the 'PoolFormerBlock' class in the original implementation."""
def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
super().__init__()
self.pooling = PoolFormerPooling(pool_size)
self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
self.before_norm = PoolFormerGroupNorm(num_channels)
self.after_norm = PoolFormerGroupNorm(num_channels)
        # Stochastic depth ("drop path") regularization, useful when training deep nets
self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = config.use_layer_scale
if config.use_layer_scale:
self.layer_scale_1 = nn.Parameter(
config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
)
self.layer_scale_2 = nn.Parameter(
config.layer_scale_init_value * torch.ones(num_channels), requires_grad=True
)
def forward(self, hidden_states):
if self.use_layer_scale:
pooling_output = self.pooling(self.before_norm(hidden_states))
scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
# First residual connection
hidden_states = hidden_states + self.drop_path(scaled_op)
outputs = ()
layer_output = self.output(self.after_norm(hidden_states))
scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
# Second residual connection
output = hidden_states + self.drop_path(scaled_op)
outputs = (output,) + outputs
return outputs
else:
pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
# First residual connection
hidden_states = pooling_output + hidden_states
outputs = ()
# Second residual connection inside the PoolFormerOutput block
layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
output = hidden_states + layer_output
outputs = (output,) + outputs
return outputs
|
PoolFormerLayer
|
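The layer-scale branch above multiplies each channel of a sub-block's output by a learnable per-channel factor before the residual add; the two `unsqueeze(-1)` calls broadcast a (C,) parameter over (N, C, H, W). A standalone sketch of that pattern with hypothetical values:

import torch

x = torch.randn(2, 64, 8, 8)            # (N, C, H, W)
scale = 1e-5 * torch.ones(64)           # per-channel layer scale
branch = torch.tanh(x)                  # stand-in for the pooling/MLP output
out = x + scale.unsqueeze(-1).unsqueeze(-1) * branch
assert out.shape == x.shape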
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/langhelpers.py
|
{
"start": 12588,
"end": 33341
}
|
class ____:
def __init__(
self, group: str, auto_fn: Optional[Callable[..., Any]] = None
):
self.group = group
self.impls: Dict[str, Any] = {}
self.auto_fn = auto_fn
def clear(self):
self.impls.clear()
def load(self, name: str) -> Any:
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
for impl in compat.importlib_metadata_get(self.group):
if impl.name == name:
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" % (self.group, name)
)
def register(self, name: str, modulepath: str, objname: str) -> None:
def load():
mod = __import__(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def deregister(self, name: str) -> None:
del self.impls[name]
def _inspect_func_args(fn):
try:
co_varkeywords = inspect.CO_VARKEYWORDS
except AttributeError:
# https://docs.python.org/3/library/inspect.html
# The flags are specific to CPython, and may not be defined in other
# Python implementations. Furthermore, the flags are an implementation
# detail, and can be removed or deprecated in future Python releases.
spec = compat.inspect_getfullargspec(fn)
return spec[0], bool(spec[2])
else:
# use fn.__code__ plus flags to reduce method call overhead
co = fn.__code__
nargs = co.co_argcount
return (
list(co.co_varnames[:nargs]),
bool(co.co_flags & co_varkeywords),
)
@overload
def get_cls_kwargs(
cls: type,
*,
_set: Optional[Set[str]] = None,
raiseerr: Literal[True] = ...,
) -> Set[str]: ...
@overload
def get_cls_kwargs(
cls: type, *, _set: Optional[Set[str]] = None, raiseerr: bool = False
) -> Optional[Set[str]]: ...
def get_cls_kwargs(
cls: type, *, _set: Optional[Set[str]] = None, raiseerr: bool = False
) -> Optional[Set[str]]:
r"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getfullargspec() to cut down on method overhead,
as this is used within the Core typing system to create copies of type
objects which is a performance-sensitive operation.
    No anonymous tuple arguments, please!
"""
toplevel = _set is None
if toplevel:
_set = set()
assert _set is not None
ctr = cls.__dict__.get("__init__", False)
has_init = (
ctr
and isinstance(ctr, types.FunctionType)
and isinstance(ctr.__code__, types.CodeType)
)
if has_init:
names, has_kw = _inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
if raiseerr:
raise TypeError(
f"given cls {cls} doesn't have an __init__ method"
)
else:
return None
else:
has_kw = False
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set=_set) is None:
break
_set.discard("self")
return _set
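# A hypothetical illustration (not part of this module) of the recursion
# performed by get_cls_kwargs:
#
#     class Base:
#         def __init__(self, a, **kw): ...
#
#     class Sub(Base):
#         def __init__(self, b, **kw): ...
#
# get_cls_kwargs(Sub) collects {"b"} from Sub.__init__, sees the **kw
# catch-all, recurses into Base to add {"a"}, discards "self", and
# returns {"a", "b"}.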
def get_func_kwargs(func: Callable[..., Any]) -> List[str]:
"""Return the set of legal kwargs for the given `func`.
    Uses getargspec, so it is safe to call for methods, functions,
etc.
"""
return compat.inspect_getfullargspec(func)[0]
def get_callable_argspec(
fn: Callable[..., Any], no_self: bool = False, _is_init: bool = False
) -> compat.FullArgSpec:
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getfullargspec(fn)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getfullargspec(fn.__func__)
return compat.FullArgSpec(
spec.args[1:],
spec.varargs,
spec.varkw,
spec.defaults,
spec.kwonlyargs,
spec.kwonlydefaults,
spec.annotations,
)
else:
return compat.inspect_getfullargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True
)
elif hasattr(fn, "__func__"):
return compat.inspect_getfullargspec(fn.__func__)
elif hasattr(fn, "__call__"):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(
fn: Union[Callable[..., Any], compat.FullArgSpec], grouped: bool = True
) -> Dict[str, Optional[str]]:
"""Returns a dictionary of formatted, introspected function arguments.
    An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
apply_pos_proxied
Like apply_pos but omits the self/cls argument
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'grouped_args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
spec = fn
args = compat.inspect_formatargspec(*spec)
apply_pos = compat.inspect_formatargspec(
spec[0], spec[1], spec[2], None, spec[4]
)
if spec[0]:
self_arg = spec[0][0]
apply_pos_proxied = compat.inspect_formatargspec(
spec[0][1:], spec[1], spec[2], None, spec[4]
)
elif spec[1]:
# I'm not sure what this is
self_arg = "%s[0]" % spec[1]
apply_pos_proxied = apply_pos
else:
self_arg = None
apply_pos_proxied = apply_pos
num_defaults = 0
if spec[3]:
num_defaults += len(cast(Tuple[Any], spec[3]))
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
defaulted_vals: Union[List[str], Tuple[()]]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults :]
else:
defaulted_vals = ()
apply_kw = compat.inspect_formatargspec(
name_args,
spec[1],
spec[2],
defaulted_vals,
formatvalue=lambda x: "=" + str(x),
)
if spec[0]:
apply_kw_proxied = compat.inspect_formatargspec(
name_args[1:],
spec[1],
spec[2],
defaulted_vals,
formatvalue=lambda x: "=" + str(x),
)
else:
apply_kw_proxied = apply_kw
if grouped:
return dict(
grouped_args=args,
self_arg=self_arg,
apply_pos=apply_pos,
apply_kw=apply_kw,
apply_pos_proxied=apply_pos_proxied,
apply_kw_proxied=apply_kw_proxied,
)
else:
return dict(
grouped_args=args,
self_arg=self_arg,
apply_pos=apply_pos[1:-1],
apply_kw=apply_kw[1:-1],
apply_pos_proxied=apply_pos_proxied[1:-1],
apply_kw_proxied=apply_kw_proxied[1:-1],
)
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases:
.. sourcecode:: text
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
grouped_args = "(self)"
args = "(self)" if grouped else "self"
proxied = "()" if grouped else ""
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
grouped_args = "(self, *args, **kwargs)"
args = grouped_args if grouped else "self, *args, **kwargs"
proxied = "(*args, **kwargs)" if grouped else "*args, **kwargs"
return dict(
self_arg="self",
grouped_args=grouped_args,
apply_pos=args,
apply_kw=args,
apply_pos_proxied=proxied,
apply_kw_proxied=proxied,
)
def create_proxy_methods(
target_cls: Type[Any],
target_cls_sphinx_name: str,
proxy_cls_sphinx_name: str,
classmethods: Sequence[str] = (),
methods: Sequence[str] = (),
attributes: Sequence[str] = (),
use_intermediate_variable: Sequence[str] = (),
) -> Callable[[_T], _T]:
"""A class decorator indicating attributes should refer to a proxy
class.
This decorator is now a "marker" that does nothing at runtime. Instead,
it is consumed by the tools/generate_proxy_methods.py script to
statically generate proxy methods and attributes that are fully
recognized by typing tools such as mypy.
"""
def decorate(cls):
return cls
return decorate
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases:
.. sourcecode:: text
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return compat.inspect_getfullargspec(method)
except TypeError:
if method is object.__init__:
return (["self"], None, None, None)
else:
return (["self"], "args", "kwargs", None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(
obj: Any,
additional_kw: Sequence[Tuple[str, Any]] = (),
to_inspect: Optional[Union[object, List[object]]] = None,
omit_kwarg: Sequence[str] = (),
) -> str:
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args: _collections.OrderedDict[str, Any] = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
spec = compat.inspect_getfullargspec(insp.__init__)
except TypeError:
continue
else:
default_len = len(spec.defaults) if spec.defaults else 0
if i == 0:
if spec.varargs:
vargs = spec.varargs
if default_len:
pos_args.extend(spec.args[1:-default_len])
else:
pos_args.extend(spec.args[1:])
else:
kw_args.update(
[(arg, missing) for arg in spec.args[1:-default_len]]
)
if default_len:
assert spec.defaults
kw_args.update(
[
(arg, default)
for arg, default in zip(
spec.args[-default_len:], spec.defaults
)
]
)
output: List[str] = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
if arg in omit_kwarg:
continue
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append("%s=%r" % (arg, val))
except Exception:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
"""
hier = {cls}
process = list(cls.__mro__)
while process:
c = process.pop()
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if c.__module__ == "builtins" or not hasattr(c, "__subclasses__"):
continue
for s in [
_
for _ in (
c.__subclasses__()
if not issubclass(c, type)
else c.__subclasses__(c)
)
if _ not in hier
]:
process.append(s)
hier.add(s)
return list(hier)
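# Quick illustration of the diamond traversal (hypothetical classes):
#
#   class A: ...
#   class B(A): ...
#   class C(A): ...
#   class D(B, C): ...
#
#   sorted(c.__name__ for c in class_hierarchy(B))
#   -> ['A', 'B', 'C', 'D', 'object']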
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
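# Example: descriptors are yielded as objects rather than invoked, so a
# property getter with side effects is never triggered:
#
#   class Lazy:
#       @property
#       def value(self):
#           raise RuntimeError("never called")
#
#   dict(iterate_attributes(Lazy))["value"]  # the property object itself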
def monkeypatch_proxied_specials(
into_cls,
from_cls,
skip=None,
only=None,
name="self.proxy",
from_instance=None,
):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = (
"__slots__",
"__del__",
"__getattribute__",
"__metaclass__",
"__getstate__",
"__setstate__",
)
dunders = [
m
for m in dir(from_cls)
if (
m.startswith("__")
and m.endswith("__")
and not hasattr(into_cls, m)
and m not in skip
)
]
for method in dunders:
try:
maybe_fn = getattr(from_cls, method)
if not hasattr(maybe_fn, "__call__"):
continue
maybe_fn = getattr(maybe_fn, "__func__", maybe_fn)
fn = cast(types.FunctionType, maybe_fn)
except AttributeError:
continue
try:
spec = compat.inspect_getfullargspec(fn)
fn_args = compat.inspect_formatargspec(spec[0])
d_args = compat.inspect_formatargspec(spec[0][1:])
except TypeError:
fn_args = "(self, *args, **kw)"
d_args = "(*args, **kw)"
py = (
"def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals()
)
env: Dict[str, types.FunctionType] = (
{name: from_instance} if from_instance is not None else {}
)
exec(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
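# Hedged sketch (hypothetical classes) of delegating container dunders from a
# wrapped target stored on the default "self.proxy" attribute:
#
#   class Target(list): ...
#
#   class Proxy:
#       def __init__(self, target):
#           self.proxy = target
#
#   monkeypatch_proxied_specials(Proxy, Target, only=["__len__", "__contains__"])
#   p = Proxy(Target([1, 2, 3]))
#   len(p)  -> 3
#   2 in p  -> True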
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, "__func__", meth1) is getattr(
meth2, "__func__", meth2
)
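# Example: a bound method and the plain function it wraps compare equal:
#
#   class T:
#       def m(self): ...
#
#   methods_equivalent(T.m, T().m)  -> True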
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError("a class or collection of method names are required")
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith("_")])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = "any of" if complies is operator.gt else "all of"
raise TypeError(
"%r does not implement %s: %s"
% (obj, qualifier, ", ".join(interface))
)
class AnonymousInterface:
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = "Anonymous" + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError(
"dictionary does not contain required keys %s"
% ", ".join(required - found)
)
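# Example of the dict-processing branch: a dict of callables is wrapped in an
# anonymous shell class that exposes them as static methods.
#
#   iface = as_interface({"ping": lambda: "pong"}, methods=("ping", "pong"))
#   iface.ping()  -> "pong"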
_GFD = TypeVar("_GFD", bound="generic_fn_descriptor[Any]")
|
PluginLoader
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/tests/test_privacy_urls.py
|
{
"start": 1822,
"end": 2983
}
|
class ____(OrganizationMixin, TestCase):
"""All views are available for the owner of the organization."""
response_data = {
# Places where we 302 on success.
"/organizations/choose/{next_name}/": {"status_code": 302},
"/organizations/invite/{hash}/redeem/": {"status_code": 302},
# 405's where we should be POST'ing
"/organizations/{slug}/delete/": {"status_code": 405},
"/organizations/{slug}/teams/{team}/delete/": {"status_code": 405},
"/organizations/{slug}/owners/{owner}/delete/": {"status_code": 405},
"/organizations/{slug}/teams/{team}/members/{member}/revoke/": {
"status_code": 405
},
# Placeholder URL.
"/organizations/{slug}/authorization/": {"status_code": 404},
}
def login(self):
self.client.force_login(self.user)
def test_public_urls(self):
from readthedocs.organizations.urls.public import urlpatterns
self._test_url(urlpatterns)
def test_private_urls(self):
from readthedocs.organizations.urls.private import urlpatterns
self._test_url(urlpatterns)
|
AuthUserOrganizationsTest
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py
|
{
"start": 11318,
"end": 11860
}
|
class ____(SearchParams):
ef_search: Annotated[PositiveInt | None, Field(le=1_000)] = None
iterative_scan: HNSWIterativeScanMode | None = None
max_scan_tuples: PositiveInt | None = None
scan_mem_multiplier: Annotated[PositiveFloat | None, Field(le=1_000)] = None
@override
def search_settings(self, exclude_none=True):
return {
f"hnsw.{key}": value
for key, value in self.model_dump(
mode="json", exclude_none=exclude_none
).items()
}
|
HNSWSearchParams
|
python
|
prakhar1989__Algorithms
|
tests/singly_linked_list_test.py
|
{
"start": 143,
"end": 668
}
|
class ____(unittest.TestCase):
def setUp(self):
self.tens = SinglyLinkedList(range(0, 100, 10))
self.blankList = SinglyLinkedList()
def test_length_method(self):
self.assertEqual(len(self.tens), 10)
self.assertEqual(len(self.blankList), 0)
def test_add_method(self):
self.blankList.append(50)
self.tens.append(110)
self.assertEqual(len(self.blankList), 1)
self.assertEqual(len(self.tens), 11)
if __name__ == "__main__":
unittest.main()
|
test_graph
|
python
|
django__django
|
django/contrib/postgres/fields/citext.py
|
{
"start": 122,
"end": 533
}
|
class ____(CharField):
system_check_removed_details = {
"msg": (
"django.contrib.postgres.fields.CICharField is removed except for support "
"in historical migrations."
),
"hint": (
'Use CharField(db_collation="…") with a case-insensitive non-deterministic '
"collation instead."
),
"id": "fields.E905",
}
|
CICharField
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/outbox/base.py
|
{
"start": 896,
"end": 2937
}
|
class ____(Model):
"""
overrides model save, update, and delete methods such that, within an atomic transaction,
an outbox returned from outbox_for_update is saved. Furthermore, using this mixin causes get_protected_operations
to protect any updates/deletes/inserts of this model that do not go through the model methods (such as querysets
or raw sql). See `get_protected_operations` for info on working around this.
Models that subclass from this or its descendents should consider using RegionOutboxProducingManager
to support bulk operations that respect outbox creation.
"""
class Meta:
abstract = True
default_flush: bool | None = None
replication_version: int = 1
@contextlib.contextmanager
def prepare_outboxes(
self, *, outbox_before_super: bool, flush: bool | None = None
) -> Generator[None]:
from sentry.hybridcloud.models.outbox import outbox_context
if flush is None:
flush = self.default_flush
with outbox_context(
transaction.atomic(router.db_for_write(type(self))),
flush=flush,
):
if not outbox_before_super:
yield
self.outbox_for_update().save()
if outbox_before_super:
yield
def save(self, *args: Any, **kwds: Any) -> None:
with self.prepare_outboxes(outbox_before_super=False):
super().save(*args, **kwds)
def update(self, *args: Any, **kwds: Any) -> int:
with self.prepare_outboxes(outbox_before_super=False):
return super().update(*args, **kwds)
def delete(self, *args: Any, **kwds: Any) -> tuple[int, dict[str, Any]]:
with self.prepare_outboxes(outbox_before_super=True, flush=False):
return super().delete(*args, **kwds)
def outbox_for_update(self, shard_identifier: int | None = None) -> RegionOutboxBase:
raise NotImplementedError
_RM = TypeVar("_RM", bound=RegionOutboxProducingModel)
|
RegionOutboxProducingModel
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/marketing_platform/operators/search_ads.py
|
{
"start": 9772,
"end": 10959
}
|
class ____(_GoogleSearchAdsBaseOperator):
"""
List all custom columns.
.. seealso::
For API documentation check:
https://developers.google.com/search-ads/reporting/api/reference/rest/v0/customers.customColumns/list
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleSearchAdsListCustomColumnsOperator`
:param customer_id: The customer ID for the custom column.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: The version of the API that will be requested for example 'v0'.
"""
def __init__(
self,
*,
customer_id: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.customer_id = customer_id
def execute(self, context: Context):
self.log.info("Listing the custom columns for %s", self.customer_id)
response = self.hook.list_custom_columns(customer_id=self.customer_id)
self.log.info("Num of retrieved custom column: %d", len(response.get("customColumns")))
return response
|
GoogleSearchAdsListCustomColumnsOperator
|
python
|
pandas-dev__pandas
|
pandas/tests/frame/methods/test_astype.py
|
{
"start": 31099,
"end": 32565
}
|
class ____(pd.Int16Dtype):
# GH 42501
def construct_array_type(self):
return IntegerArrayNoCopy
def test_frame_astype_no_copy():
# GH 42501
df = DataFrame({"a": [1, 4, None, 5], "b": [6, 7, 8, 9]}, dtype=object)
result = df.astype({"a": Int16DtypeNoCopy()})
assert result.a.dtype == pd.Int16Dtype()
assert np.shares_memory(df.b.values, result.b.values)
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
def test_astype_copies(dtype):
# GH#50984
pytest.importorskip("pyarrow")
df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
result = df.astype("int64[pyarrow]")
df.iloc[0, 0] = 100
expected = DataFrame({"a": [1, 2, 3]}, dtype="int64[pyarrow]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("val", [None, 1, 1.5, np.nan, NaT])
def test_astype_to_string_not_modifying_input(string_storage, val):
# GH#51073
df = DataFrame({"a": ["a", "b", val]})
expected = df.copy()
with option_context("mode.string_storage", string_storage):
df.astype("string")
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [None, 1, 1.5, np.nan, NaT])
def test_astype_to_string_dtype_not_modifying_input(any_string_dtype, val):
# GH#51073 - variant of the above test with explicit dtype instances
df = DataFrame({"a": ["a", "b", val]})
expected = df.copy()
df.astype(any_string_dtype)
tm.assert_frame_equal(df, expected)
|
Int16DtypeNoCopy
|
python
|
euske__pdfminer
|
pdfminer/ccitt.py
|
{
"start": 23474,
"end": 25463
}
|
class ____(CCITTG4Parser):
def __init__(self, width, bytealign=False, reversed=False):
CCITTG4Parser.__init__(self, width, bytealign=bytealign)
self.reversed = reversed
self._buf = b''
return
def close(self):
return self._buf
def output_line(self, y, bits):
# avoid shadowing the builtin 'bytes'; array.tostring() was removed in
# Python 3.9 in favor of tobytes()
row = array.array('B', [0]*((len(bits)+7)//8))
if self.reversed:
bits = [1-b for b in bits]
for (i, b) in enumerate(bits):
if b:
row[i//8] += (128, 64, 32, 16, 8, 4, 2, 1)[i % 8]
self._buf += row.tobytes()
return
def ccittfaxdecode(data, params):
K = params.get('K')
cols = params.get('Columns')
bytealign = params.get('EncodedByteAlign')
reversed = params.get('BlackIs1')
if K == -1:
parser = CCITTFaxDecoder(cols, bytealign=bytealign, reversed=reversed)
else:
raise ValueError(K)
parser.feedbytes(data)
return parser.close()
# test
def main(argv):
if not argv[1:]:
return unittest.main()
class Parser(CCITTG4Parser):
def __init__(self, width, bytealign=False):
import pygame
CCITTG4Parser.__init__(self, width, bytealign=bytealign)
self.img = pygame.Surface((self.width, 1000))
return
def output_line(self, y, bits):
for (x, b) in enumerate(bits):
if b:
self.img.set_at((x, y), (255, 255, 255))
else:
self.img.set_at((x, y), (0, 0, 0))
return
def close(self):
import pygame
pygame.image.save(self.img, 'out.bmp')
return
for path in argv[1:]:
with open(path, 'rb') as fp:
(_, _, k, w, h, _) = path.split('.')
parser = Parser(int(w))
parser.feedbytes(fp.read())
parser.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv))
|
CCITTFaxDecoder
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultTypeAlias3.py
|
{
"start": 3332,
"end": 3802
}
|
class ____[T1, *Ts2]: ...
type TA_TC[T1 = str, *Ts2 = Unpack[tuple[T1, ...]]] = ClassTC[T1, *Ts2]
def func6(
tc1: TA_TC,
tc2: TA_TC[int],
tc3: TA_TC[int, *tuple[()]],
tc4: TA_TC[int, *tuple[None]],
):
reveal_type(tc1, expected_text="ClassTC[str, *tuple[str, ...]]")
reveal_type(tc2, expected_text="ClassTC[int, *tuple[int, ...]]")
reveal_type(tc3, expected_text="ClassTC[int]")
reveal_type(tc4, expected_text="ClassTC[int, None]")
|
ClassTC
|
python
|
great-expectations__great_expectations
|
docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/column_pair_map_expectation_template.py
|
{
"start": 1051,
"end": 2783
}
|
class ____(ColumnPairMapMetricProvider):
# </snippet>
# This is the id string that will be used to reference your metric.
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/column_pair_map_expectation_template.py metric_name">
condition_metric_name = "METRIC NAME GOES HERE"
# </snippet>
# These point your metric at the provided keys to facilitate calculation
condition_domain_keys = (
"column_A",
"column_B",
)
condition_value_keys = ()
# This method implements the core logic for the PandasExecutionEngine
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/column_pair_map_expectation_template.py pandas">
@column_pair_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_A, column_B, **kwargs):
raise NotImplementedError
# </snippet>
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column_A, column_B, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_pair_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column_A, column_B, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/column_pair_map_expectation_template.py ExpectColumnPairValuesToMatchSomeCriteria class_def">
|
ColumnPairValuesMatchSomeCriteria
|
python
|
catalyst-team__catalyst
|
catalyst/metrics/_cmc_score.py
|
{
"start": 8205,
"end": 13511
}
|
class ____(AccumulativeMetric):
"""Cumulative Matching Characteristics for Reid case
Args:
embeddings_key: key of embedding tensor in batch
pids_key: key of pids tensor in batch
cids_key: key of cids tensor in batch
is_query_key: key of query flag tensor in batch
topk: list of k, specifies which cmc@k should be calculated
compute_on_call: if True, allows compute metric's value on call
prefix: metric prefix
suffix: metric suffix
Examples:
.. code-block:: python
import torch
from catalyst.metrics import ReidCMCMetric
batch = {
"embeddings": torch.tensor(
[
[1, 1, 0, 0],
[1, 0, 0, 0],
[0, 1, 1, 1],
[0, 0, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1],
[0, 1, 1, 0],
]
).float(),
"pids": torch.Tensor([0, 0, 1, 1, 0, 1, 1]).long(),
"cids": torch.Tensor([0, 1, 1, 2, 0, 1, 3]).long(),
"is_query": torch.Tensor([1, 1, 1, 1, 0, 0, 0]).bool(),
}
topk = (1, 3)
metric = ReidCMCMetric(
embeddings_key="embeddings",
pids_key="pids",
cids_key="cids",
is_query_key="is_query",
topk=topk,
)
metric.reset(num_batches=1, num_samples=len(batch["embeddings"]))
metric.update(**batch)
metric.compute()
# [0.75, 1.0] # CMC@01, CMC@03
metric.compute_key_value()
# {'cmc01': 0.75, 'cmc03': 1.0}
"""
def __init__(
self,
embeddings_key: str,
pids_key: str,
cids_key: str,
is_query_key: str,
topk: Iterable[int] = None,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
) -> None:
"""Init CMCMetric"""
super().__init__(
compute_on_call=compute_on_call,
prefix=prefix,
suffix=suffix,
keys=[embeddings_key, pids_key, cids_key, is_query_key],
)
self.embeddings_key = embeddings_key
self.pids_key = pids_key
self.cids_key = cids_key
self.is_query_key = is_query_key
self.topk = topk or (1,)
def reset(self, num_batches: int, num_samples: int) -> None:
"""
Reset metrics fields
Args:
num_batches: expected number of batches
num_samples: expected number of samples to accumulate
"""
super().reset(num_batches, num_samples)
assert get_rank() < 0, "No DDP support implemented yet"
def compute(self) -> List[float]:
"""
Compute cmc@k metrics with all the accumulated data for all k.
Returns:
list of metrics values
Raises:
ValueError: if there are samples in query
that have no relevant samples in gallery
"""
query_mask = (self.storage[self.is_query_key] == 1).to(torch.bool)
embeddings = self.storage[self.embeddings_key].float()
pids = self.storage[self.pids_key]
cids = self.storage[self.cids_key]
query_embeddings = embeddings[query_mask]
query_pids = pids[query_mask]
query_cids = cids[query_mask]
gallery_embeddings = embeddings[~query_mask]
gallery_pids = pids[~query_mask]
gallery_cids = cids[~query_mask]
pid_conformity_matrix = (gallery_pids == query_pids.reshape(-1, 1)).bool()
cid_conformity_matrix = (gallery_cids == query_cids.reshape(-1, 1)).bool()
# Now we are going to generate a mask that should show if
# a sample from gallery can be used during model scoring on the query
# sample.
# There is only one case when the label shouldn't be used for:
# if query sample is a photo of the person pid_i taken from camera
# cam_j and the gallery sample is a photo of the same person pid_i
# from the same camera cam_j. All other cases are available.
available_samples = ~(pid_conformity_matrix * cid_conformity_matrix).bool()
if (available_samples.max(dim=1).values == 0).any():
raise ValueError(
"There is a sample in query that has no relevant samples in gallery."
)
metrics = []
for k in self.topk:
value = masked_cmc_score(
query_embeddings=query_embeddings,
gallery_embeddings=gallery_embeddings,
conformity_matrix=pid_conformity_matrix,
available_samples=available_samples,
topk=k,
)
metrics.append(value)
return metrics
def compute_key_value(self) -> Dict[str, float]:
"""
Compute cmc@k metrics with all the accumulated data for all k.
Returns:
metrics values in key-value format
"""
values = self.compute()
kv_metrics = {
f"{self.prefix}cmc{k:02d}{self.suffix}": value
for k, value in zip(self.topk, values)
}
return kv_metrics
__all__ = ["CMCMetric", "ReidCMCMetric"]
|
ReidCMCMetric
|
python
|
django__django
|
tests/admin_changelist/admin.py
|
{
"start": 1989,
"end": 2055
}
|
class ____(admin.ModelAdmin):
list_filter = ["genres"]
|
BandAdmin
|
python
|
django__django
|
tests/admin_changelist/admin.py
|
{
"start": 2698,
"end": 2766
}
|
class ____(admin.ModelAdmin):
list_filter = ["members"]
|
GroupAdmin
|
python
|
pytorch__pytorch
|
test/torch_np/test_ndarray_methods.py
|
{
"start": 17205,
"end": 21653
}
|
class ____(TestCase):
usg_data = [
([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], 8),
([3, 3, 3, 3, 2, 2, 2, 2], 4),
([0, 1, 2, 3, 4, 5, 6, 7], 0),
([7, 6, 5, 4, 3, 2, 1, 0], 7),
]
sg_data = usg_data + [
([1, 2, 3, 4, -4, -3, -2, -1], 4),
([1, 2, 3, 4, -1, -2, -3, -4], 7),
]
darr = [
(np.array(d[0], dtype=t), d[1])
for d, t in (itertools.product(usg_data, (np.uint8,)))
]
darr += [
(np.array(d[0], dtype=t), d[1])
for d, t in (
itertools.product(
sg_data, (np.int8, np.int16, np.int32, np.int64, np.float32, np.float64)
)
)
]
darr += [
(np.array(d[0], dtype=t), d[1])
for d, t in (
itertools.product(
(
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
# To hit the tail of SIMD multi-level(x4, x1) inner loops
# on variant SIMD widths
([1] * (2 * 5 - 1) + [np.nan], 2 * 5 - 1),
([1] * (4 * 5 - 1) + [np.nan], 4 * 5 - 1),
([1] * (8 * 5 - 1) + [np.nan], 8 * 5 - 1),
([1] * (16 * 5 - 1) + [np.nan], 16 * 5 - 1),
([1] * (32 * 5 - 1) + [np.nan], 32 * 5 - 1),
),
(np.float32, np.float64),
)
)
]
nan_arr = darr + [
subtest(([0, 1, 2, 3, complex(0, np.nan)], 4), decorators=[xfail]),
subtest(([0, 1, 2, 3, complex(np.nan, 0)], 4), decorators=[xfail]),
subtest(([0, 1, 2, complex(np.nan, 0), 3], 3), decorators=[xfail]),
subtest(([0, 1, 2, complex(0, np.nan), 3], 3), decorators=[xfail]),
subtest(([complex(0, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]),
subtest(([complex(np.nan, np.nan), 0, 1, 2, 3], 0), decorators=[xfail]),
subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xfail],
),
subtest(
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
decorators=[xfail],
),
subtest(
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
decorators=[xfail],
),
subtest(([complex(0, 0), complex(0, 2), complex(0, 1)], 0), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(0, 1)], 2), decorators=[xfail]),
subtest(([complex(1, 0), complex(0, 2), complex(1, 1)], 1), decorators=[xfail]),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
@parametrize("data", nan_arr)
def test_combinations(self, data):
arr, pos = data
if np.asarray(arr).dtype.kind in "c":
pytest.xfail(reason="'min_values_cpu' not implemented for 'ComplexDouble'")
# with suppress_warnings() as sup:
# sup.filter(RuntimeWarning, "invalid value encountered in reduce")
min_val = np.min(arr)
assert_equal(np.argmin(arr), pos, err_msg=f"{arr!r}")
assert_equal(arr[np.argmin(arr)], min_val, err_msg=f"{arr!r}")
# add padding to test SIMD loops
rarr = np.repeat(arr, 129)
rpos = pos * 129
assert_equal(np.argmin(rarr), rpos, err_msg=f"{rarr!r}")
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg=f"{rarr!r}")
padding = np.repeat(np.max(arr), 513)
rarr = np.concatenate((arr, padding))
rpos = pos
assert_equal(np.argmin(rarr), rpos, err_msg=f"{rarr!r}")
assert_equal(rarr[np.argmin(rarr)], min_val, err_msg=f"{rarr!r}")
def test_minimum_signed_integers(self):
a = np.array([1, -(2**7), -(2**7) + 1, 2**7 - 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -(2**15), -(2**15) + 1, 2**15 - 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -(2**31), -(2**31) + 1, 2**31 - 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -(2**63), -(2**63) + 1, 2**63 - 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
|
TestArgmin
|
python
|
facebookresearch__faiss
|
faiss/gpu/test/test_gpu_basics.py
|
{
"start": 8149,
"end": 11548
}
|
class ____(unittest.TestCase):
def test_input_types(self):
self.do_test_input_types(0, 0)
def test_input_types_tiling(self):
self.do_test_input_types(0, 500)
self.do_test_input_types(1000, 0)
self.do_test_input_types(1000, 500)
def do_test_input_types(self, vectorsMemoryLimit, queriesMemoryLimit):
d = 33
k = 5
nb = 1000
nq = 10
xs = make_t(nb, d)
qs = make_t(nq, d)
res = faiss.StandardGpuResources()
# Get ground truth using IndexFlat
index = faiss.IndexFlatL2(d)
index.add(xs)
ref_d, ref_i = index.search(qs, k)
out_d = np.empty((nq, k), dtype=np.float32)
out_i = np.empty((nq, k), dtype=np.int64)
gpu_id = random.randrange(0, faiss.get_num_gpus())
# Try f32 data/queries, i64 out indices
params = faiss.GpuDistanceParams()
params.k = k
params.dims = d
params.vectors = faiss.swig_ptr(xs)
params.numVectors = nb
params.queries = faiss.swig_ptr(qs)
params.numQueries = nq
params.outDistances = faiss.swig_ptr(out_d)
params.outIndices = faiss.swig_ptr(out_i)
params.device = gpu_id
if vectorsMemoryLimit > 0 or queriesMemoryLimit > 0:
faiss.bfKnn_tiling(
res,
params,
vectorsMemoryLimit,
queriesMemoryLimit)
else:
faiss.bfKnn(res, params)
np.testing.assert_allclose(ref_d, out_d, atol=1e-5)
np.testing.assert_array_equal(out_i, ref_i)
faiss.knn_gpu(
res, qs, xs, k, out_d, out_i, device=gpu_id,
vectorsMemoryLimit=vectorsMemoryLimit,
queriesMemoryLimit=queriesMemoryLimit)
np.testing.assert_allclose(ref_d, out_d, atol=1e-5)
np.testing.assert_array_equal(out_i, ref_i)
# Try int32 out indices
out_i32 = np.empty((nq, k), dtype=np.int32)
params.outIndices = faiss.swig_ptr(out_i32)
params.outIndicesType = faiss.IndicesDataType_I32
faiss.bfKnn(res, params)
np.testing.assert_allclose(ref_d, out_d, atol=1e-5)
np.testing.assert_array_equal(out_i32, ref_i)
# Try float16 data/queries, i64 out indices
xs_f16 = xs.astype(np.float16)
qs_f16 = qs.astype(np.float16)
xs_f16_f32 = xs_f16.astype(np.float32)
qs_f16_f32 = qs_f16.astype(np.float32)
index.reset()
index.add(xs_f16_f32)
ref_d_f16, ref_i_f16 = index.search(qs_f16_f32, k)
params.vectors = faiss.swig_ptr(xs_f16)
params.vectorType = faiss.DistanceDataType_F16
params.queries = faiss.swig_ptr(qs_f16)
params.queryType = faiss.DistanceDataType_F16
params.device = random.randrange(0, faiss.get_num_gpus())
out_d_f16 = np.empty((nq, k), dtype=np.float32)
out_i_f16 = np.empty((nq, k), dtype=np.int64)
params.outDistances = faiss.swig_ptr(out_d_f16)
params.outIndices = faiss.swig_ptr(out_i_f16)
params.outIndicesType = faiss.IndicesDataType_I64
params.device = random.randrange(0, faiss.get_num_gpus())
faiss.bfKnn(res, params)
self.assertGreaterEqual((out_i_f16 == ref_i_f16).sum(), ref_i_f16.size - 5)
np.testing.assert_allclose(ref_d_f16, out_d_f16, atol = 2e-3)
|
TestKnn
|
python
|
walkccc__LeetCode
|
solutions/3509. Maximum Product of Subsequences With an Alternating Sum Equal to K/3509.py
|
{
"start": 24,
"end": 240
}
|
class ____(Enum):
FIRST = 0 # first element - add to sum and start product
SUBTRACT = 1 # second element - subtract from sum and multiply product
ADD = 2 # third element - add to sum and multiply product
|
State
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/engine/keras_tensor.py
|
{
"start": 22463,
"end": 25178
}
|
class ____(object):
"""Iterates over the leading dim of a KerasTensor. Performs 0 error checks."""
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
# Specify the mappings of tensor class to KerasTensor class.
# This is specifically a list instead of a dict for now because
# 1. we do a check w/ isinstance because a key lookup based on class
# would miss subclasses
# 2. a list allows us to control lookup ordering
# We include tensor.Tensor -> KerasTensor in the first position as a fastpath,
# *and* include object -> KerasTensor at the end as a catch-all.
# We can re-visit these choices in the future as needed.
keras_tensor_classes = [
(tensor_lib.Tensor, KerasTensor),
(sparse_tensor.SparseTensor, SparseKerasTensor),
(ragged_tensor.RaggedTensor, RaggedKerasTensor),
(object, KerasTensor)
]
def register_keras_tensor_specialization(cls, keras_tensor_subclass):
"""Register a specialized KerasTensor subclass for a Tensor type."""
# We always leave (object, KerasTensor) at the end as a generic fallback
keras_tensor_classes.insert(-1, (cls, keras_tensor_subclass))
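# Illustration (hypothetical subclass names): a new registration is consulted
# after the earlier, more specific entries but before the (object, KerasTensor)
# catch-all:
#   register_keras_tensor_specialization(MyCompositeTensor, MyKerasTensor)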
def keras_tensor_to_placeholder(x):
"""Construct a graph placeholder to represent a KerasTensor when tracing."""
if isinstance(x, KerasTensor):
return x._to_placeholder() # pylint: disable=protected-access
else:
return x
def keras_tensor_from_tensor(tensor):
"""Convert a traced (composite)tensor to a representative KerasTensor."""
# Create a specialized KerasTensor that supports instance methods,
# operators, and additional value inference if possible
keras_tensor_cls = None
for tensor_type, cls in keras_tensor_classes:
if isinstance(tensor, tensor_type):
keras_tensor_cls = cls
break
out = keras_tensor_cls.from_tensor(tensor)
if hasattr(tensor, '_keras_mask'):
out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask) # pylint: disable=protected-access
return out
def keras_tensor_from_type_spec(type_spec, name=None):
"""Convert a TypeSpec to a representative KerasTensor."""
# Create a specialized KerasTensor that supports instance methods,
# operators, and additional value inference if possible
keras_tensor_cls = None
value_type = type_spec.value_type
for tensor_type, cls in keras_tensor_classes:
if issubclass(value_type, tensor_type):
keras_tensor_cls = cls
break
return keras_tensor_cls.from_type_spec(type_spec, name=name)
|
_KerasTensorIterator
|
python
|
tiangolo__fastapi
|
fastapi/openapi/models.py
|
{
"start": 2179,
"end": 2301
}
|
class ____(BaseModelWithConfig):
name: str
identifier: Optional[str] = None
url: Optional[AnyUrl] = None
|
License
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/endpoints/backend/main.py
|
{
"start": 4752,
"end": 5305
}
|
class ____(remote.Service):
@endpoints.method(
message_types.VoidMessage,
Greeting,
path="greet",
http_method="POST",
name="greet",
)
def greet(self, request):
user = endpoints.get_current_user()
user_name = user.email() if user else "Anonymous"
return Greeting(message="Hello, {}".format(user_name))
# [END endpoints_authed_greeting_api]
# [START endpoints_api_server]
api = endpoints.api_server([GreetingApi, AuthedGreetingApi])
# [END endpoints_api_server]
|
AuthedGreetingApi
|
python
|
cython__cython
|
tests/run/posonly.py
|
{
"start": 11203,
"end": 18330
}
|
class ____(object):
"""
>>> TestMangling().f()
42
>>> TestMangling().f2()
42
#>>> TestMangling().f3()
#(42, 43)
#>>> TestMangling().f4()
#(42, 43, 44)
>>> TestMangling().f2(1)
1
#>>> TestMangling().f3(1, _TestMangling__b=2)
#(1, 2)
#>>> TestMangling().f4(1, _TestMangling__b=2, _TestMangling__c=3)
#(1, 2, 3)
"""
def f(self, *, __a=42):
return __a
def f2(self, __a=42, /):
return __a
# FIXME: https://github.com/cython/cython/issues/1382
# def f3(self, __a=42, /, __b=43):
# return (__a, __b)
# def f4(self, __a=42, /, __b=43, *, __c=44):
# return (__a, __b, __c)
def test_module_function(a, b, /):
"""
>>> test_module_function(1, 2)
>>> test_module_function() # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_module_function() ... positional arguments...
"""
def test_closures1(x,y):
"""
>>> test_closures1(1,2)(3,4)
10
>>> test_closures1(1,2)(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...g() ... positional argument...
>>> test_closures1(1,2)(3,4,5) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...g() ... positional argument...
"""
def g(x2, /, y2):
return x + y + x2 + y2
return g
def test_closures2(x, /, y):
"""
>>> test_closures2(1,2)(3,4)
10
"""
def g(x2,y2):
return x + y + x2 + y2
return g
def test_closures3(x, /, y):
"""
>>> test_closures3(1,2)(3,4)
10
>>> test_closures3(1,2)(3) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...g() ... positional argument...
>>> test_closures3(1,2)(3,4,5) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: ...g() ... positional argument...
"""
def g(x2, /, y2):
return x + y + x2 + y2
return g
def test_same_keyword_as_positional_with_kwargs(something, /, **kwargs):
"""
>>> test_same_keyword_as_positional_with_kwargs(42, something=42)
(42, {'something': 42})
>>> test_same_keyword_as_positional_with_kwargs(something=42) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_same_keyword_as_positional_with_kwargs() ... positional argument...
>>> test_same_keyword_as_positional_with_kwargs(42)
(42, {})
"""
return (something, kwargs)
def test_serialization1(a, b, /):
"""
>>> pickled_posonly = pickle.dumps(test_serialization1)
>>> unpickled_posonly = pickle.loads(pickled_posonly)
>>> unpickled_posonly(1, 2)
(1, 2)
>>> unpickled_posonly(a=1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_serialization1() got ... keyword argument...
"""
return (a, b)
def test_serialization2(a, /, b):
"""
>>> pickled_optional = pickle.dumps(test_serialization2)
>>> unpickled_optional = pickle.loads(pickled_optional)
>>> unpickled_optional(1, 2)
(1, 2)
>>> unpickled_optional(a=1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_serialization2() ... positional... argument...
"""
return (a, b)
def test_serialization3(a=1, /, b=2):
"""
>>> pickled_defaults = pickle.dumps(test_serialization3)
>>> unpickled_defaults = pickle.loads(pickled_defaults)
>>> unpickled_defaults(1, 2)
(1, 2)
>>> unpickled_defaults(a=1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_serialization3() got ... keyword argument... 'a'
"""
return (a, b)
async def test_async(a=1, /, b=2):
"""
>>> test_async(a=1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_async() got ... keyword argument... 'a'
"""
return a, b
def test_async_call(*args, **kwargs):
"""
>>> test_async_call(1, 2)
>>> test_async_call(1, b=2)
>>> test_async_call(1)
>>> test_async_call()
"""
try:
coro = test_async(*args, **kwargs)
coro.send(None)
except StopIteration as e:
result = e.value
assert result == (1, 2), result
def test_generator(a=1, /, b=2):
"""
>>> test_generator(a=1, b=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
TypeError: test_generator() got ... keyword argument... 'a'
>>> gen = test_generator(1, 2)
>>> next(gen)
(1, 2)
>>> gen = test_generator(1, b=2)
>>> next(gen)
(1, 2)
>>> gen = test_generator(1)
>>> next(gen)
(1, 2)
>>> gen = test_generator()
>>> next(gen)
(1, 2)
"""
yield a, b
def f_call_1_0_0(a,/):
"""
>>> f_call_1_0_0(1)
(1,)
"""
return (a,)
def f_call_1_1_0(a, /, b):
"""
>>> f_call_1_1_0(1,2)
(1, 2)
"""
return (a,b)
def f_call_1_1_1(a, /, b, *, c):
"""
>>> f_call_1_1_1(1,2,c=3)
(1, 2, 3)
"""
return (a,b,c)
def f_call_1_1_1_star(a, /, b, *args, c):
"""
>>> f_call_1_1_1_star(1,2,c=3)
(1, 2, (), 3)
>>> f_call_1_1_1_star(1,2,3,4,5,6,7,8,c=9)
(1, 2, (3, 4, 5, 6, 7, 8), 9)
"""
return (a,b,args,c)
def f_call_1_1_1_kwds(a, /, b, *, c, **kwds):
"""
>>> f_call_1_1_1_kwds(1,2,c=3)
(1, 2, 3, {})
>>> f_call_1_1_1_kwds(1,2,c=3,d=4,e=5) == (1, 2, 3, {'d': 4, 'e': 5})
True
"""
return (a,b,c,kwds)
def f_call_1_1_1_star_kwds(a, /, b, *args, c, **kwds):
"""
>>> f_call_1_1_1_star_kwds(1,2,c=3,d=4,e=5) == (1, 2, (), 3, {'d': 4, 'e': 5})
True
>>> f_call_1_1_1_star_kwds(1,2,3,4,c=5,d=6,e=7) == (1, 2, (3, 4), 5, {'d': 6, 'e': 7})
True
"""
return (a,b,args,c,kwds)
def f_call_one_optional_kwd(a, /, *, b=2):
"""
>>> f_call_one_optional_kwd(1)
(1, 2)
>>> f_call_one_optional_kwd(1, b=3)
(1, 3)
"""
return (a,b)
def f_call_posonly_stararg(a, /, *args):
"""
>>> f_call_posonly_stararg(1)
(1, ())
>>> f_call_posonly_stararg(1, 2, 3, 4)
(1, (2, 3, 4))
"""
return (a,args)
def f_call_posonly_kwarg(a, /, **kw):
"""
>>> f_call_posonly_kwarg(1)
(1, {})
>>> all_args = f_call_posonly_kwarg(1, b=2, c=3, d=4)
>>> all_args == (1, {'b': 2, 'c': 3, 'd': 4}) or all_args
True
"""
return (a,kw)
def f_call_posonly_stararg_kwarg(a, /, *args, **kw):
"""
>>> f_call_posonly_stararg_kwarg(1)
(1, (), {})
>>> f_call_posonly_stararg_kwarg(1, 2)
(1, (2,), {})
>>> all_args = f_call_posonly_stararg_kwarg(1, b=3, c=4)
>>> all_args == (1, (), {'b': 3, 'c': 4}) or all_args
True
>>> all_args = f_call_posonly_stararg_kwarg(1, 2, b=3, c=4)
>>> all_args == (1, (2,), {'b': 3, 'c': 4}) or all_args
True
"""
return (a,args,kw)
def test_empty_kwargs(a, b, /):
"""
>>> test_empty_kwargs(1, 2)
(1, 2)
>>> test_empty_kwargs(1, 2, **{})
(1, 2)
>>> test_empty_kwargs(1, 2, **{'c': 3})
Traceback (most recent call last):
TypeError: test_empty_kwargs() got an unexpected keyword argument 'c'
"""
return (a,b)
@cython.cclass
|
TestMangling
|