sample_id
string
instruction
string
completion
string
category
string
label
int64
metadata
string
exo-explore/exo:src/exo/worker/engines/image/models/qwen/adapter.py:QwenModelAdapter.get_joint_block_wrappers
# Context: from typing import Any import mlx.core as mx from exo.worker.engines.image.models.qwen.wrappers import QwenJointBlockWrapper from exo.worker.engines.image.pipeline.block_wrapper import ( JointBlockWrapper, SingleBlockWrapper, ) class QwenPromptData(PromptData): ... class QwenModelAdapter(ModelAdapter[QwenImage, QwenTransformer]): def __init__( self, config: ImageModelConfig, model_id: str, local_path: Path, quantize: int | None = None, ): self._config = config self._model = QwenImage( model_config=ModelConfig.from_name(model_name=model_id, base_model=None), model_path=str(local_path), quantize=quantize, ) self._transformer = self._model.transformer def hidden_dim(self) -> int: ... def needs_cfg(self) -> bool: ... def _get_latent_creator(self) -> type: ... def get_single_block_wrappers(self, text_seq_len: int) -> list[SingleBlockWrapper[Any]]: ... def slice_transformer_blocks(self, start_layer: int, end_layer: int): ... def encode_prompt(self, prompt: str, negative_prompt: str | None) -> QwenPromptData: ... def compute_embeddings(self, hidden_states: mx.array, prompt_embeds: mx.array) -> tuple[mx.array, mx.array]: ... def compute_text_embeddings(self, t: int, runtime_config: Config, pooled_prompt_embeds: mx.array | None, hidden_states: mx.array | None) -> mx.array: ... def compute_rotary_embeddings(self, prompt_embeds: mx.array, runtime_config: Config, encoder_hidden_states_mask: mx.array | None, cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] | None, kontext_image_ids: mx.array | None) -> RotaryEmbeddings: ... def apply_guidance(self, noise_positive: mx.array, noise_negative: mx.array, guidance_scale: float) -> mx.array: ... # Task: Write a Python method `get_joint_block_wrappers` for the class `QwenModelAdapter` to create wrapped joint blocks for Qwen. Parameters: text_seq_len: int, encoder_hidden_states_mask: mx.array | None Returns: list[JointBlockWrapper[Any]]
def get_joint_block_wrappers( self, text_seq_len: int, encoder_hidden_states_mask: mx.array | None = None, ) -> list[JointBlockWrapper[Any]]: """Create wrapped joint blocks for Qwen.""" return [ QwenJointBlockWrapper(block, text_seq_len, encoder_hidden_states_mask) for block in self._transformer.transformer_blocks ]
function_simple
0
{"cognitive_complexity": 0, "loc": 10, "code_loc": 4, "docstring_loc": 1, "function_name": "get_joint_block_wrappers", "class_name": "QwenModelAdapter", "qualname": "QwenModelAdapter.get_joint_block_wrappers", "file_path": "src/exo/worker/engines/image/models/qwen/adapter.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/llava_onevision/test_video_processing_llava_onevision.py:LlavaOnevisionVideoProcessingTest.test_video_processor_properties
# Context: class LlavaOnevisionVideoProcessingTester: ... class LlavaOnevisionVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = LlavaOnevisionVideoProcessor if is_torchvision_available() else None def setUp(self): ... def video_processor_dict(self): ... def test_video_processor_from_dict_with_kwargs(self): ... # Task: Write a Python test method `test_video_processor_properties` in test class `LlavaOnevisionVideoProcessingTest` to verify the behavior of `video_processor_properties`. Module under test: transformers.image_utils, transformers.testing_utils, transformers.utils
def test_video_processor_properties(self): video_processing = self.fast_video_processing_class(**self.video_processor_dict) self.assertTrue(hasattr(video_processing, "do_resize")) self.assertTrue(hasattr(video_processing, "size")) self.assertTrue(hasattr(video_processing, "do_normalize")) self.assertTrue(hasattr(video_processing, "image_mean")) self.assertTrue(hasattr(video_processing, "image_std")) self.assertTrue(hasattr(video_processing, "do_convert_rgb"))
test
0
{"function_name": "test_video_processor_properties", "class_name": "LlavaOnevisionVideoProcessingTest", "qualname": "LlavaOnevisionVideoProcessingTest.test_video_processor_properties", "file_path": "tests/models/llava_onevision/test_video_processing_llava_onevision.py", "repo_id": "huggingface/transformers", "loc": 8, "tested_modules": ["transformers.image_utils", "transformers.testing_utils", "transformers.utils", "test_video_processing_common", "transformers"], "has_docstring": false, "runnable_level": "class_runnable"}
ray-project/ray:python/ray/tests/test_util_helpers.py:test_map_unordered_chunk_size_1
# Context: import pytest import ray from ray._common.test_utils import SignalActor from ray.util import as_completed, map_unordered def ray_init_4_cpu_shared(): ... def test_as_completed_chunk_size_1(ray_init_4_cpu_shared, yield_obj_refs): ... def test_as_completed_chunk_size_2(ray_init_4_cpu_shared, yield_obj_refs): ... def test_map_unordered_chunk_size_2(ray_init_4_cpu_shared, yield_obj_refs): ... # Task: Write a Python test function `test_map_unordered_chunk_size_1` to test map_unordered with chunk_size=1. Module under test: ray._common.test_utils, ray.util
def test_map_unordered_chunk_size_1(ray_init_4_cpu_shared, yield_obj_refs): """Test map_unordered with chunk_size=1. Use SignalActor to control task completion order and mimic time.sleep(x) behavior. """ inputs = [10, 8, 6, 4, 2] # Create signals for each task signals = [SignalActor.remote() for _ in range(len(inputs))] # Create tasks @ray.remote def f(args): x, signal = args ray.get(signal.wait.remote()) return x # Submit tasks with their corresponding signals in the original order it = map_unordered( f, zip(inputs, signals), chunk_size=1, yield_obj_refs=yield_obj_refs ) # Send signal in reverse order to mimic time.sleep(x), i.e., # smallest value releases first. At the same time, collect results results = [] for signal in reversed(signals): ray.get(signal.send.remote()) results.append(next(it)) if yield_obj_refs: results = ray.get(results) assert results == [2, 4, 6, 8, 10]
test
0
{"function_name": "test_map_unordered_chunk_size_1", "class_name": null, "qualname": "test_map_unordered_chunk_size_1", "file_path": "python/ray/tests/test_util_helpers.py", "repo_id": "ray-project/ray", "loc": 35, "tested_modules": ["ray._common.test_utils", "ray.util"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/integrations/mxfp4.py:load_and_swizzle_mxfp4
# Context: import torch from ..integrations.tensor_parallel import shard_and_distribute_module def on_device(dev): ... class Mxfp4Quantize(ConversionOps): ... class Mxfp4Dequantize(ConversionOps): ... class Mxfp4Deserialize(ConversionOps): ... class Mxfp4ReverseDeserialize(ConversionOps): ... def quantize_to_mxfp4(w, triton_kernels_hub): ... def swizzle_mxfp4(w, w_scale, triton_kernels_hub): ... def _convert_moe_packed_tensors(blocks, scales, dtype: torch.dtype, rows_per_chunk: int) -> torch.Tensor: ... def convert_moe_packed_tensors(blocks, scales, dtype: torch.dtype, rows_per_chunk: int) -> torch.Tensor: ... class Mxfp4GptOssExperts(nn.Module): ... def routing_torch_dist(logits, n_expts_act): ... def mlp_forward(self, hidden_states): ... def dequantize(module, param_name, param_value, target_device, dq_param_name, **kwargs): ... def dequantize_convertops(blocks, scales): ... def swizzle_mxfp4_convertops(blocks, scales, module, proj, target_device, triton_kernels_hub): ... def replace_with_mxfp4_linear(model, quantization_config, modules_to_not_convert: list[str] | None): ... # Task: Write a Python function `load_and_swizzle_mxfp4` to this transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`. Parameters: module, param_name, param_value, target_device, triton_kernels_hub
def load_and_swizzle_mxfp4(module, param_name, param_value, target_device, triton_kernels_hub, **kwargs): """ This transforms the weights obtained using `convert_gpt_oss.py` to load them into `Mxfp4GptOssExperts`. """ PrecisionConfig, FlexCtx, InFlexData = ( triton_kernels_hub.matmul_ogs.PrecisionConfig, triton_kernels_hub.matmul_ogs.FlexCtx, triton_kernels_hub.matmul_ogs.InFlexData, ) from ..integrations.tensor_parallel import shard_and_distribute_module model = kwargs.get("model") empty_param = kwargs.get("empty_param") casting_dtype = kwargs.get("casting_dtype") to_contiguous = kwargs.get("to_contiguous") rank = kwargs.get("rank") device_mesh = kwargs.get("device_mesh") if "blocks" in param_name: proj = param_name.split(".")[-1].split("_blocks")[0] if "scales" in param_name: proj = param_name.split(".")[-1].split("_scales")[0] if device_mesh is not None: shard_and_distribute_module( model, param_value, empty_param, param_name, casting_dtype, to_contiguous, rank, device_mesh ) else: setattr(module, param_name.rsplit(".", 1)[1], torch.nn.Parameter(param_value, requires_grad=False)) blocks_attr = f"{proj}_blocks" scales_attr = f"{proj}_scales" blocks = getattr(module, blocks_attr) # at this point values were loaded from ckpt scales = getattr(module, scales_attr) # Check if both blocks and scales both not on meta device if blocks.device.type != "meta" and scales.device.type != "meta": local_experts = blocks.size(0) if proj == "gate_up_proj": blocks = blocks.reshape(local_experts, module.intermediate_size * 2, -1) else: blocks = blocks.reshape(local_experts, -1, module.intermediate_size // 2) if ( getattr(target_device, "type", target_device) == "cpu" and hasattr(torch, "accelerator") and torch.accelerator.current_accelerator() is not None ): target_device = torch.accelerator.current_accelerator().type blocks = blocks.to(target_device).contiguous() scales = scales.to(target_device).contiguous() with on_device(target_device): triton_weight_tensor, weight_scale = 
swizzle_mxfp4( blocks.transpose(-2, -1), scales.transpose(-2, -1), triton_kernels_hub ) # need to overwrite the shapes for the kernels if proj == "gate_up_proj": triton_weight_tensor.shape = torch.Size([local_experts, module.hidden_size, module.intermediate_size * 2]) else: triton_weight_tensor.shape = torch.Size([local_experts, module.intermediate_size, module.hidden_size]) # triton_weight_tensor is what needs to be passed in oai kernels. It stores the data, the shapes and any more objects. It is like a subtensor setattr(module, proj, triton_weight_tensor) setattr( module, f"{proj}_precision_config", PrecisionConfig(weight_scale=weight_scale, flex_ctx=FlexCtx(rhs_data=InFlexData())), ) # delete blocks and scales delattr(module, scales_attr) delattr(module, blocks_attr) del blocks
function_complex
0
{"cognitive_complexity": 15, "loc": 69, "code_loc": 57, "docstring_loc": 3, "function_name": "load_and_swizzle_mxfp4", "class_name": null, "qualname": "load_and_swizzle_mxfp4", "file_path": "src/transformers/integrations/mxfp4.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}
google/langextract:tests/factory_schema_test.py:FactorySchemaIntegrationTest.test_no_examples_no_schema
# Context: from unittest import mock from langextract import factory class SchemaApplicationTest(absltest.TestCase): ... class FactorySchemaIntegrationTest(absltest.TestCase): def setUp(self): ... def test_gemini_with_schema_returns_false_fence(self): ... def test_ollama_with_schema_returns_false_fence(self): ... def test_explicit_fence_output_respected(self): ... def test_no_schema_defaults_to_true_fence(self): ... def test_schema_disabled_returns_true_fence(self): ... def test_caller_overrides_schema_config(self): ... # Task: Write a Python test method `test_no_examples_no_schema` in test class `FactorySchemaIntegrationTest` to test that no examples means no schema is created. Module under test: absl.testing, langextract, langextract
def test_no_examples_no_schema(self): """Test that no examples means no schema is created.""" config = factory.ModelConfig( model_id="gemini-2.5-flash", provider_kwargs={"api_key": "test_key"} ) with mock.patch( "langextract.providers.gemini.GeminiLanguageModel.__init__", return_value=None, ) as mock_init: model = factory._create_model_with_schema( config=config, examples=None, use_schema_constraints=True, fence_output=None, ) call_kwargs = mock_init.call_args[1] self.assertNotIn("response_schema", call_kwargs) self.assertTrue(model.requires_fence_output)
test
1
{"function_name": "test_no_examples_no_schema", "class_name": "FactorySchemaIntegrationTest", "qualname": "FactorySchemaIntegrationTest.test_no_examples_no_schema", "file_path": "tests/factory_schema_test.py", "repo_id": "google/langextract", "loc": 21, "tested_modules": ["absl.testing", "langextract", "langextract", "langextract.core", "langextract.core"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py:LMCacheMPWorkerAdapter.batched_submit_store_requests
# Context: import torch from lmcache.v1.multiprocess.custom_types import ( CudaIPCWrapper, IPCCacheEngineKey, KVCache, ) from lmcache.v1.multiprocess.protocol import RequestType, get_response_class def wrap_kv_caches(kv_caches: dict[str, torch.Tensor]) -> KVCache: ... def striding_block_hashes(block_hashes: list[bytes], blocks_in_chunk: int) -> Iterable[bytes]: ... def send_lmcache_request(mq_client: MessageQueueClient, request_type: RequestType, payloads: list[Any]) -> MessagingFuture[Any]: ... def get_lmcache_chunk_size(mq_client: MessageQueueClient) -> int: ... class LoadStoreOp: ... class LMCacheMPSchedulerAdapter: ... class LMCacheMPWorkerAdapter: def __init__( self, server_url: str, context: zmq.Context, model_name: str, world_size: int, kv_rank: int, vllm_block_size: int, ): self.mq_client = MessageQueueClient(server_url, context) # Instance id for GPU worker self.instance_id = os.getpid() # Registered kv caches from vLLM self.kv_caches: dict[str, torch.Tensor] = {} # Request futures # request_id -> (future, other merged requests) self.store_futures: dict[ str, tuple[MessagingFuture[StoreResult], list[str]] ] = {} self.retrieve_futures: dict[ str, tuple[MessagingFuture[RetrieveResult], list[str]] ] = {} # The store requests that have finished execution in LMCache self.finished_stores: set[str] = set() # The finished request ids that are passed via vLLM and also # have corresponding store requests submitted to LMCache before self.previously_finished: set[str] = set() self.model_name = model_name self.world_size = world_size self.worker_id = kv_rank # Read chunk size from lmcache chunk_size = get_lmcache_chunk_size(self.mq_client) assert chunk_size % vllm_block_size == 0, ( "LMCache chunk size should be a multiple of vLLM block size" ) self.blocks_in_chunk = chunk_size // vllm_block_size def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): ... def submit_store_request(self, request_id: str, op: LoadStoreOp, event: torch.cuda.Event): ... 
def submit_retrieve_request(self, request_id: str, op: LoadStoreOp, event: torch.cuda.Event): ... def batched_submit_retrieve_requests(self, request_ids: list[str], ops: list[LoadStoreOp], event: torch.cuda.Event): ... def get_finished(self, finished_req_ids_from_engine: set[str]) -> tuple[set[str] | None, set[str] | None]: ... def num_blocks_per_chunk(self) -> int: ... def shutdown(self): ... def _update_and_get_finished_store(self) -> set[str]: ... def _create_key(self, token_ids: list[int], start: int, end: int, request_id: str | None) -> IPCCacheEngineKey: ... def _create_hash_key(self, chunk_hash: bytes, request_id: str | None) -> IPCCacheEngineKey: ... # Task: Write a Python method `batched_submit_store_requests` for the class `LMCacheMPWorkerAdapter` to submit a batched store request to LMCache. Parameters: request_ids: list[str], ops: list[LoadStoreOp], event: torch.cuda.Event
def batched_submit_store_requests( self, request_ids: list[str], ops: list[LoadStoreOp], event: torch.cuda.Event, ): """ Submit a batched store request to LMCache Args: request_ids: The IDs of the requests ops: The LoadStoreOps describing the store operations. Should have the same length as request_ids event: The CUDA event that is recorded after the current model inference step """ all_keys: list[IPCCacheEngineKey] = [] block_ids: list[int] = [] for request_id, op in zip(request_ids, ops, strict=False): if op.block_hashes is not None: chunk_hashes = list( striding_block_hashes(op.block_hashes, self.blocks_in_chunk) ) keys = [ self._create_hash_key(ch, request_id=request_id) for ch in chunk_hashes ] all_keys.extend(keys) else: assert op.token_ids is not None all_keys.append( self._create_key( op.token_ids, op.start, op.end, request_id=request_id ) ) block_ids.extend(op.block_ids) future = send_lmcache_request( self.mq_client, RequestType.STORE, [ all_keys, self.instance_id, block_ids, event.ipc_handle(), ], ).to_cuda_future() self.store_futures[request_ids[0]] = (future, list(request_ids[1:]))
function_simple
1
{"cognitive_complexity": 4, "loc": 47, "code_loc": 31, "docstring_loc": 10, "function_name": "batched_submit_store_requests", "class_name": "LMCacheMPWorkerAdapter", "qualname": "LMCacheMPWorkerAdapter.batched_submit_store_requests", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py:verify_tpt_utility_on_remote_host
# Context: import logging from paramiko import SSHClient class TPTConfig: ... def execute_remote_command(ssh_client: SSHClient, command: str) -> tuple[int, str, str]: ... def write_file(path: str, content: str) -> None: ... def secure_delete(file_path: str, logger: logging.Logger | None) -> None: ... def remote_secure_delete(ssh_client: SSHClient, remote_files: list[str], logger: logging.Logger | None) -> None: ... def terminate_subprocess(sp: subprocess.Popen | None, logger: logging.Logger | None) -> None: ... def get_remote_os(ssh_client: SSHClient, logger: logging.Logger | None) -> str: ... def set_local_file_permissions(local_file_path: str, logger: logging.Logger | None) -> None: ... def _set_windows_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger) -> None: ... def _set_unix_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger) -> None: ... def set_remote_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger | None) -> None: ... def get_remote_temp_directory(ssh_client: SSHClient, logger: logging.Logger | None) -> str: ... def is_valid_file(file_path: str) -> bool: ... def verify_tpt_utility_installed(utility: str) -> None: ... def prepare_tpt_ddl_script(sql: list[str], error_list: list[int] | None, source_conn: dict[str, Any], job_name: str | None) -> str: ... def prepare_tdload_job_var_file(mode: str, source_table: str | None, select_stmt: str | None, insert_stmt: str | None, target_table: str | None, source_file_name: str | None, target_file_name: str | None, source_format: str, target_format: str, source_text_delimiter: str, target_text_delimiter: str, source_conn: dict[str, Any], target_conn: dict[str, Any] | None) -> str: ... def is_valid_remote_job_var_file(ssh_client: SSHClient, remote_job_var_file_path: str, logger: logging.Logger | None) -> bool: ... def read_file(file_path: str, encoding: str) -> str: ... 
def decrypt_remote_file(ssh_client: SSHClient, remote_enc_file: str, remote_dec_file: str, password: str, logger: logging.Logger | None) -> int: ... def transfer_file_sftp(ssh_client: SSHClient, local_path: str, remote_path: str, logger: logging.Logger | None) -> None: ... # Task: Write a Python function `verify_tpt_utility_on_remote_host` to verify if a TPT utility (tbuild) is installed on the remote host via SSH. Parameters: ssh_client: SSHClient, utility: str, logger: logging.Logger | None Returns: None
def verify_tpt_utility_on_remote_host( ssh_client: SSHClient, utility: str, logger: logging.Logger | None = None ) -> None: """ Verify if a TPT utility (tbuild) is installed on the remote host via SSH. :param ssh_client: SSH client connection :param utility: Name of the utility to verify :param logger: Optional logger instance :raises FileNotFoundError: If utility is not found on remote host :raises RuntimeError: If verification fails unexpectedly """ logger = logger or logging.getLogger(__name__) try: # Detect remote OS once remote_os = get_remote_os(ssh_client, logger) if remote_os == "windows": command = f"where {utility}" else: command = f"which {utility}" exit_status, output, error = execute_remote_command(ssh_client, command) if exit_status != 0 or not output: raise FileNotFoundError( f"TPT utility '{utility}' is not installed or not available in PATH on the remote host. " f"Command: {command}, Exit status: {exit_status}, " f"stderr: {error if error else 'N/A'}" ) logger.info("TPT utility '%s' found at: %s", utility, output.split("\n")[0]) except (FileNotFoundError, RuntimeError): raise except Exception as e: raise RuntimeError(f"Failed to verify TPT utility '{utility}' on remote host: {e}") from e
function_complex
1
{"cognitive_complexity": 12, "loc": 38, "code_loc": 19, "docstring_loc": 9, "function_name": "verify_tpt_utility_on_remote_host", "class_name": null, "qualname": "verify_tpt_utility_on_remote_host", "file_path": "providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "file_runnable"}
PaddlePaddle/PaddleOCR:langchain-paddleocr/langchain_paddleocr/document_loaders/paddleocr_vl.py:PaddleOCRVLLoader.__init__
# Context: import os from collections.abc import Iterable, Iterator from typing import Any, Literal from pydantic import SecretStr def _snake_to_camel(name: str) -> str: ... def _snake_keys_to_camel(params: dict[str, Any]) -> dict[str, Any]: ... def _normalize_file_type(file_type: FileTypeInput) -> FileType | None: ... def _infer_file_type_from_path(path: Path) -> FileType | None: ... class PaddleOCRVLLoader(BaseLoader): def _read_file_bytes(self, path: Path) -> str: ... def _call_api(self, data: str, file_type: FileType) -> tuple[str, dict[str, Any]]: ... def lazy_load(self) -> Iterator[Document]: ... # Task: Write a Python method `__init__` for the class `PaddleOCRVLLoader` to initialize the PaddleOCR-VL loader. Parameters: file_path: str | Iterable[str] Returns: None
def __init__( self, file_path: str | Iterable[str], *, api_url: str, access_token: SecretStr | None = None, file_type: FileTypeInput = None, use_doc_orientation_classify: bool | None = False, use_doc_unwarping: bool | None = False, use_layout_detection: bool | None = None, use_chart_recognition: bool | None = None, use_seal_recognition: bool | None = None, use_ocr_for_image_block: bool | None = None, layout_threshold: float | dict[int, float] | None = None, layout_nms: bool | None = None, layout_unclip_ratio: ( tuple[float, float] | dict[int, tuple[float, float]] | float | None ) = None, layout_merge_bboxes_mode: str | dict[str, float] | None = None, layout_shape_mode: str | None = None, prompt_label: str | None = None, format_block_content: bool | None = None, repetition_penalty: float | None = None, temperature: float | None = None, top_p: float | None = None, min_pixels: int | None = None, max_pixels: int | None = None, max_new_tokens: int | None = None, merge_layout_blocks: bool | None = None, markdown_ignore_labels: list[str] | None = None, vlm_extra_args: dict[str, Any] | None = None, prettify_markdown: bool | None = None, show_formula_number: bool | None = None, restructure_pages: bool | None = None, merge_tables: bool | None = None, relevel_titles: bool | None = None, visualize: bool | None = None, additional_params: dict[str, Any] | None = None, timeout: int = 300, ) -> None: """Initialize the PaddleOCR-VL loader. Args: file_path: Single path/URL or an iterable of paths/URLs to PDF or image files. api_url: URL of the PaddleOCR-VL API endpoint. access_token: Optional access token as ``SecretStr``. If ``None``, the value from the ``PADDLEOCR_ACCESS_TOKEN`` environment variable will be used, if present. file_type: File type hint. ``"pdf"`` for PDFs, ``"image"`` for images, or ``None`` to infer from the file extension. use_doc_orientation_classify: Whether to enable document orientation classification. use_doc_unwarping: Whether to enable document unwarping. 
use_layout_detection: Whether to enable layout detection. use_chart_recognition: Whether to enable chart recognition. use_seal_recognition: Whether to enable seal recognition. use_ocr_for_image_block: Whether to run OCR on image blocks. layout_threshold: Layout detection threshold (float or page-specific dict). layout_nms: Whether to apply non-maximum suppression for layout detection. layout_unclip_ratio: Layout unclip ratio (float, (min, max) tuple, or dict). layout_merge_bboxes_mode: Mode for merging layout bounding boxes. layout_shape_mode: Layout shape mode. prompt_label: Prompt label for the VLM (for example, ``"ocr"`` or ``"table"``). format_block_content: Whether to format block content. repetition_penalty: Repetition penalty for VLM sampling. temperature: Temperature for VLM sampling. top_p: Top-p sampling value for VLM. min_pixels: Minimum number of pixels allowed in preprocessing. max_pixels: Maximum number of pixels allowed in preprocessing. max_new_tokens: Maximum number of tokens generated by the VLM. merge_layout_blocks: Whether to merge layout blocks across columns. markdown_ignore_labels: Layout labels to ignore when generating Markdown. vlm_extra_args: Additional configuration parameters for the VLM. prettify_markdown: Whether to prettify the Markdown output. show_formula_number: Whether to include formula numbers in Markdown. restructure_pages: Whether to restructure results across pages. merge_tables: Whether to merge tables across pages. relevel_titles: Whether to relevel titles. visualize: Whether to include visualization results. additional_params: Additional parameters to pass directly to the API (keys are treated as snake_case and converted to camelCase). timeout: Request timeout in seconds. 
""" self._file_paths = ( file_path if isinstance(file_path, Iterable) and not isinstance(file_path, str) else [file_path] ) self.api_url = api_url self.timeout = timeout if access_token is None: env_value = os.getenv("PADDLEOCR_ACCESS_TOKEN") self.access_token = SecretStr(env_value) if env_value else None else: self.access_token = access_token self.file_type = _normalize_file_type(file_type) base_params: dict[str, Any] = { "use_doc_orientation_classify": use_doc_orientation_classify, "use_doc_unwarping": use_doc_unwarping, "use_layout_detection": use_layout_detection, "use_chart_recognition": use_chart_recognition, "use_seal_recognition": use_seal_recognition, "use_ocr_for_image_block": use_ocr_for_image_block, "layout_threshold": layout_threshold, "layout_nms": layout_nms, "layout_unclip_ratio": layout_unclip_ratio, "layout_merge_bboxes_mode": layout_merge_bboxes_mode, "layout_shape_mode": layout_shape_mode, "prompt_label": prompt_label, "format_block_content": format_block_content, "repetition_penalty": repetition_penalty, "temperature": temperature, "top_p": top_p, "min_pixels": min_pixels, "max_pixels": max_pixels, "max_new_tokens": max_new_tokens, "merge_layout_blocks": merge_layout_blocks, "markdown_ignore_labels": markdown_ignore_labels, "vlm_extra_args": vlm_extra_args, "prettify_markdown": prettify_markdown, "show_formula_number": show_formula_number, "restructure_pages": restructure_pages, "merge_tables": merge_tables, "relevel_titles": relevel_titles, "visualize": visualize, } # Convert all known service parameters from snake_case to camelCase. service_params = _snake_keys_to_camel(base_params) if additional_params: for key, value in additional_params.items(): if value is None: continue service_params[key] = value self._service_params = service_params
function_complex
0
{"cognitive_complexity": 12, "loc": 143, "code_loc": 50, "docstring_loc": 45, "function_name": "__init__", "class_name": "PaddleOCRVLLoader", "qualname": "PaddleOCRVLLoader.__init__", "file_path": "langchain-paddleocr/langchain_paddleocr/document_loaders/paddleocr_vl.py", "repo_id": "PaddlePaddle/PaddleOCR", "has_docstring": true, "runnable_level": "file_runnable"}
apache/airflow:devel-common/tests/unit/tests_common/test_utils/test_stream_capture_manager.py:test_handler_restoration
# Context: import logging import sys def test_stdout_only(stdout_capture): ... def test_stderr_only(stderr_capture): ... def test_combined(combined_capture): ... def test_configurable(stream_capture): ... def test_stdout_logging_isolation(stdout_capture): ... def test_stderr_logging_isolation(stderr_capture): ... def test_combined_logging_isolation(combined_capture): ... def test_root_logger_isolation(stdout_capture): ... def test_mixed_output_ordering(stdout_capture): ... def test_multiple_loggers_isolation(stream_capture): ... def test_exception_during_capture_preserves_isolation(stdout_capture): ... def test_exception_during_capture_with_pytest_raises(stdout_capture): ... # Task: Write a Python test function `test_handler_restoration` to test that logging handlers are properly restored after capture. Module under test: __future__
def test_handler_restoration(stdout_capture): """Test that logging handlers are properly restored after capture.""" root_logger = logging.getLogger() # Add a test handler to root logger to ensure we have something to test test_root_handler = logging.StreamHandler(sys.stdout) test_root_handler.setFormatter(logging.Formatter("ROOT: %(message)s")) root_logger.addHandler(test_root_handler) # Also create a non-root logger with its own handler logger = logging.getLogger("test_restoration") logger.setLevel(logging.INFO) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("TEST: %(message)s")) logger.addHandler(handler) try: # Record initial state initial_root_handlers = list(root_logger.handlers) assert test_root_handler in initial_root_handlers # Use the capture context with stdout_capture: print("Inside capture") logger.info("Log inside capture") # During capture, our test handler should be removed from root current_root_handlers = list(root_logger.handlers) assert test_root_handler not in current_root_handlers, ( "Test handler should be removed during capture" ) # The non-root logger's handler should still exist assert handler in logger.handlers # After capture, root logger handlers should be restored final_root_handlers = list(root_logger.handlers) assert test_root_handler in final_root_handlers, "Test handler should be restored after capture" assert len(final_root_handlers) == len(initial_root_handlers), ( f"Handler count mismatch. Initial: {len(initial_root_handlers)}, Final: {len(final_root_handlers)}" ) finally: # Clean up root_logger.removeHandler(test_root_handler) logger.removeHandler(handler)
test
1
{"function_name": "test_handler_restoration", "class_name": null, "qualname": "test_handler_restoration", "file_path": "devel-common/tests/unit/tests_common/test_utils/test_stream_capture_manager.py", "repo_id": "apache/airflow", "loc": 47, "tested_modules": ["__future__"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/cuda/test_cuda_compatibility_path.py:TestCudaCompatibilityLdPathManipulation.test_prepends_to_existing_ld_path
# Context: import os from vllm.env_override import ( _get_torch_cuda_version, _maybe_set_cuda_compatibility_path, ) class TestCudaCompatibilityEnvParsing: ... class TestCudaCompatibilityPathDetection: ... class TestGetTorchCudaVersion: ... class TestCudaCompatibilityLdPathManipulation: def test_prepends_to_empty_ld_path(self, monkeypatch, tmp_path): ... def test_deduplicates_existing_compat_path(self, monkeypatch, tmp_path): ... def test_already_at_front_is_noop(self, monkeypatch, tmp_path): ... # Task: Write a Python test method `test_prepends_to_existing_ld_path` in test class `TestCudaCompatibilityLdPathManipulation` to compat path is prepended before existing entries. Module under test: vllm.env_override
def test_prepends_to_existing_ld_path(self, monkeypatch, tmp_path): """Compat path is prepended before existing entries.""" compat_dir = tmp_path / "compat" compat_dir.mkdir() monkeypatch.setenv("VLLM_ENABLE_CUDA_COMPATIBILITY", "1") monkeypatch.setenv("VLLM_CUDA_COMPATIBILITY_PATH", str(compat_dir)) monkeypatch.setenv("LD_LIBRARY_PATH", "/usr/lib:/other/lib") _maybe_set_cuda_compatibility_path() ld_path = os.environ["LD_LIBRARY_PATH"] parts = ld_path.split(os.pathsep) assert parts[0] == str(compat_dir) assert "/usr/lib" in parts assert "/other/lib" in parts
test
1
{"function_name": "test_prepends_to_existing_ld_path", "class_name": "TestCudaCompatibilityLdPathManipulation", "qualname": "TestCudaCompatibilityLdPathManipulation.test_prepends_to_existing_ld_path", "file_path": "tests/cuda/test_cuda_compatibility_path.py", "repo_id": "vllm-project/vllm", "loc": 13, "tested_modules": ["vllm.env_override"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py:TestComponentRegistry.test_double_registration_raises
# Context: import pytest from ray.llm._internal.serve.utils.registry import ComponentRegistry, get_registry class TestComponentRegistry: def test_register_and_get_direct_class(self): ... def test_register_and_get_module_path(self): ... def test_get_nonexistent_component_raises(self): ... def test_invalid_string_format_raises(self): ... def test_reregister_after_unregister(self): ... def test_get_registry_singleton(self): ... def test_get_registry_different_categories(self): ... def test_unregister(self): ... # Task: Write a Python test method `test_double_registration_raises` in test class `TestComponentRegistry` to test that double registration raises ValueError. Module under test: ray.llm._internal.serve.utils.registry
def test_double_registration_raises(self): """Test that double registration raises ValueError.""" registry = ComponentRegistry("test_category") test_class1 = type("TestClass1", (), {}) test_class2 = type("TestClass2", (), {}) registry.register("test_component", test_class1) with pytest.raises(ValueError, match="already registered"): registry.register("test_component", test_class2) # Verify original registration is still intact assert registry.get("test_component") == test_class1
test
0
{"function_name": "test_double_registration_raises", "class_name": "TestComponentRegistry", "qualname": "TestComponentRegistry.test_double_registration_raises", "file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/vllm/kv_transfer_backends/test_registry.py", "repo_id": "ray-project/ray", "loc": 13, "tested_modules": ["ray.llm._internal.serve.utils.registry"], "has_docstring": true, "runnable_level": "plib_runnable"}
streamlit/streamlit:lib/tests/streamlit/components/v2/test_component_manager.py:test_manager_uses_lock_for_api_inputs
# Context: from pathlib import Path def _setup_manager_with_manifest(tmp_path: Path) -> tuple[BidiComponentManager, str]: ... def test_concurrent_record_and_change_no_exceptions(tmp_path: Path) -> None: ... def test_get_component_path_prefers_asset_dir_when_present() -> None: ... def test_register_from_manifest_does_not_require_asset_dir() -> None: ... def test_on_components_changed_preserves_html_and_resolves_assets(tmp_path: Path) -> None: ... def _make_manifest(pkg_name: str, comp_name: str, asset_dir: str) -> ComponentManifest: ... def _write(path: Path, content: str) -> None: ... def test_re_resolves_js_glob_on_change(monkeypatch: pytest.MonkeyPatch) -> None: ... def test_re_resolution_no_match_keeps_previous(monkeypatch: pytest.MonkeyPatch) -> None: ... # Task: Write a Python test function `test_manager_uses_lock_for_api_inputs` to verify manager exposes a lock and handles inputs safely during updates. Module under test: __future__, pathlib, typing
def test_manager_uses_lock_for_api_inputs(tmp_path: Path) -> None: """Verify manager exposes a lock and handles inputs safely during updates.""" manager, comp_name = _setup_manager_with_manifest(tmp_path) # The manager is expected to expose an _api_inputs_lock used to guard access. assert hasattr(manager, "_api_inputs_lock"), ( "BidiComponentManager must define _api_inputs_lock to guard _api_inputs" ) # Exercise calls to ensure no exceptions and that state updates correctly manager.record_api_inputs(comp_name, css="*.css", js="js/*.js") manager._on_components_changed([comp_name]) d = manager.get(comp_name) assert d is not None assert d.html_content == "<div>ok</div>"
test
1
{"function_name": "test_manager_uses_lock_for_api_inputs", "class_name": null, "qualname": "test_manager_uses_lock_for_api_inputs", "file_path": "lib/tests/streamlit/components/v2/test_component_manager.py", "repo_id": "streamlit/streamlit", "loc": 16, "tested_modules": ["__future__", "pathlib", "typing", "streamlit.components.v2", "streamlit.components.v2.component_manager"], "has_docstring": true, "runnable_level": "file_runnable"}
mitmproxy/mitmproxy:test/mitmproxy/contentviews/test__view_zip.py:test_view_zip
# Context: import io import zipfile def meta(content_type: str) -> Metadata: ... def test_view_zip_empty(): ... def test_render_priority(): ... # Task: Write a Python test function `test_view_zip` to verify the behavior of `view_zip`. Module under test: mitmproxy, mitmproxy.contentviews, mitmproxy.contentviews._view_zip
def test_view_zip(): buffer = io.BytesIO() with zipfile.ZipFile(buffer, "w") as zf: for name in [ "normal.txt", "with spaces.txt", "dir/nested.txt", "file\nwith\nnewlines.txt", "unicode_文件.txt", "café.txt", ]: zf.writestr(name, b"content") result = zip.prettify(buffer.getvalue(), meta("application/zip")) for name in [ "normal.txt", "with spaces.txt", "dir/nested.txt", "newlines", "文件", "café", ]: assert name in result assert zip.syntax_highlight == "yaml"
test
0
{"function_name": "test_view_zip", "class_name": null, "qualname": "test_view_zip", "file_path": "test/mitmproxy/contentviews/test__view_zip.py", "repo_id": "mitmproxy/mitmproxy", "loc": 23, "tested_modules": ["mitmproxy", "mitmproxy.contentviews", "mitmproxy.contentviews._view_zip"], "has_docstring": false, "runnable_level": "file_runnable"}
huggingface/transformers:tests/quantization/mxfp4/test_mxfp4.py:Mxfp4ModelTest.test_gpt_oss_model_loading_quantized_with_device_map
# Context: from transformers import AutoTokenizer, GptOssForCausalLM, Mxfp4Config import torch def _empty_accelerator_cache(): ... def _patch_no_accelerator(): ... class Mxfp4ConfigTest(unittest.TestCase): ... class Mxfp4QuantizerTest(unittest.TestCase): ... class Mxfp4IntegrationTest(unittest.TestCase): ... class Mxfp4ModelTest(unittest.TestCase): model_name = "openai/gpt-oss-20b" input_text = "Once upon a time" EXPECTED_OUTPUTS = set() def setUp(self): ... def tearDown(self): ... def check_inference_correctness_quantized(self, model, tokenizer): ... def test_gpt_oss_model_loading_dequantized_with_device_map(self): ... def test_model_device_map_validation(self): ... def test_memory_footprint_comparison(self): ... def test_save_mxfp4(self): ... def test_save_mxfp4_non_quantized(self): ... def test_compute_module_sizes(self): ... # Task: Write a Python test method `test_gpt_oss_model_loading_quantized_with_device_map` in test class `Mxfp4ModelTest` to test loading OpenAI MoE model with mxfp4 quantization and device_map. Module under test: contextlib, transformers, transformers.testing_utils
def test_gpt_oss_model_loading_quantized_with_device_map(self): """Test loading OpenAI MoE model with mxfp4 quantization and device_map""" model = GptOssForCausalLM.from_pretrained( self.model_name, dtype=torch.bfloat16, device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.model_name) self.check_inference_correctness_quantized(model, tokenizer)
test
0
{"function_name": "test_gpt_oss_model_loading_quantized_with_device_map", "class_name": "Mxfp4ModelTest", "qualname": "Mxfp4ModelTest.test_gpt_oss_model_loading_quantized_with_device_map", "file_path": "tests/quantization/mxfp4/test_mxfp4.py", "repo_id": "huggingface/transformers", "loc": 10, "tested_modules": ["contextlib", "transformers", "transformers.testing_utils", "transformers.utils", "transformers.quantizers.quantizer_mxfp4"], "has_docstring": true, "runnable_level": "class_runnable"}
vllm-project/vllm:vllm/benchmarks/mm_processor.py:benchmark_multimodal_processor
# Context: import argparse import dataclasses import time from typing import TYPE_CHECKING, Any, Literal import numpy as np from vllm.benchmarks.throughput import get_requests from vllm.engine.arg_utils import EngineArgs from vllm.utils.gc_utils import freeze_gc_heap from vllm import LLM, SamplingParams def get_timing_stats_from_engine(llm_engine: LLMEngine) -> dict[str, dict[str, float]]: ... def collect_mm_processor_stats(llm_engine: LLMEngine) -> dict[str, list[float]]: ... def calculate_mm_processor_metrics(stats_by_stage: dict[str, list[float]], selected_percentiles: list[float], unit: Literal['us', 'ms', 's']) -> dict[str, dict[str, float]]: ... def validate_args(args): ... def add_cli_args(parser: argparse.ArgumentParser) -> None: ... def main(args: argparse.Namespace) -> None: ... # Task: Write a Python function `benchmark_multimodal_processor` to run the multimodal processor benchmark. Parameters: args: argparse.Namespace Returns: dict[str, Any]
def benchmark_multimodal_processor( args: argparse.Namespace, ) -> dict[str, Any]: """ Run the multimodal processor benchmark. """ from vllm import LLM, SamplingParams validate_args(args) if args.seed is None: args.seed = 0 engine_args = EngineArgs.from_cli_args(args) llm = LLM(**dataclasses.asdict(engine_args)) tokenizer = llm.get_tokenizer() requests = get_requests(args, tokenizer) assert all( llm.llm_engine.model_config.max_model_len >= (request.prompt_len + request.expected_output_len) for request in requests ), ( "Please ensure that max_model_len is greater than the sum of " "prompt_len and expected_output_len for all requests." ) prompts = [request.prompt for request in requests] expected_output_lens = [request.expected_output_len for request in requests] sampling_params = [ SamplingParams( n=1, temperature=0.0, max_tokens=output_len, detokenize=True, ) for output_len in expected_output_lens ] selected_percentiles = [ float(p) for p in getattr(args, "metric_percentiles", "99").split(",") ] freeze_gc_heap() num_warmups = getattr(args, "num_warmups", 0) if num_warmups > 0: print(f"Processing {num_warmups} warmup requests...") # Create a temporary args object for warmup requests warmup_args = argparse.Namespace(**vars(args)) warmup_args.num_prompts = num_warmups warmup_args.seed += 1 warmup_requests = get_requests(warmup_args, tokenizer) warmup_prompts = [req.prompt for req in warmup_requests] warmup_output_lens = [req.expected_output_len for req in warmup_requests] warmup_sampling_params = [ SamplingParams(max_tokens=output_len) for output_len in warmup_output_lens ] llm.chat( warmup_prompts, warmup_sampling_params, use_tqdm=not getattr(args, "disable_tqdm", False), ) # Clear stats from warmup requests collect_mm_processor_stats(llm.llm_engine) print(f"Processing {len(prompts)} requests...") start_time = time.perf_counter() outputs = llm.chat( prompts, sampling_params, use_tqdm=not getattr(args, "disable_tqdm", False) ) end_time = time.perf_counter() total_time 
= end_time - start_time mm_stats_by_stage = collect_mm_processor_stats(llm.llm_engine) if not any(mm_stats_by_stage.values()): print( "\n⚠️ Warning: No MM processor stats found in registry.\n" " This may indicate that:\n" " - No multimodal requests were processed\n" " - Stats were already retrieved (registry is cleared after retrieval)\n" ) mm_processor_metrics = calculate_mm_processor_metrics( mm_stats_by_stage, selected_percentiles ) completed = len([o for o in outputs if o.finished]) failed = len(outputs) - completed e2el_times = [] for output in outputs: if not output.finished or output.metrics is None: continue metrics = output.metrics # Calculate E2E latency as: TTFT + (last_token_ts - first_token_ts) if ( getattr(metrics, "first_token_latency", None) is not None and getattr(metrics, "last_token_ts", None) is not None and getattr(metrics, "first_token_ts", None) is not None ): ttft = metrics.first_token_latency # Decode time is the duration between the first and last token generation decode_time = max(0.0, metrics.last_token_ts - metrics.first_token_ts) e2el_times.append((ttft + decode_time) * 1000) if not e2el_times and completed > 0: print( "\n⚠️ Warning: Detailed end-to-end latency metrics not available.\n" " Falling back to average request latency " "(total_time / num_completed_requests).\n" ) avg_time_per_request = total_time / completed e2el_times = [avg_time_per_request * 1000] * completed if e2el_times: mean_e2el_ms = float(np.mean(e2el_times)) median_e2el_ms = float(np.median(e2el_times)) std_e2el_ms = float(np.std(e2el_times)) percentiles_e2el_ms = [ (p, float(np.percentile(e2el_times, p))) for p in selected_percentiles ] else: mean_e2el_ms = 0.0 median_e2el_ms = 0.0 std_e2el_ms = 0.0 percentiles_e2el_ms = [(p, 0.0) for p in selected_percentiles] encoder_summary = {} if ( "num_encoder_calls" in mm_stats_by_stage and mm_stats_by_stage["num_encoder_calls"] ): encoder_calls = mm_stats_by_stage["num_encoder_calls"] encoder_summary = { 
"total_encoder_calls": int(sum(encoder_calls)), "num_requests_with_encoder_calls": len(encoder_calls), } benchmark_result = { "completed": completed, "failed": failed, "mean_e2el_ms": mean_e2el_ms, "median_e2el_ms": median_e2el_ms, "std_e2el_ms": std_e2el_ms, "percentiles_e2el_ms": percentiles_e2el_ms, "mm_processor_stats": mm_processor_metrics, "encoder_summary": encoder_summary, } return benchmark_result
function_complex
1
{"cognitive_complexity": 16, "loc": 157, "code_loc": 123, "docstring_loc": 3, "function_name": "benchmark_multimodal_processor", "class_name": null, "qualname": "benchmark_multimodal_processor", "file_path": "vllm/benchmarks/mm_processor.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
streamlit/streamlit:e2e_playwright/theming/theme_metric_value_style_test.py:test_metric_value_font_size_with_rem
# Context: import pytest from playwright.sync_api import Page, expect from e2e_playwright.shared.app_utils import expect_no_skeletons, get_metric def configure_metric_value_style(): ... def test_metric_value_style_snapshot(app: Page, assert_snapshot: ImageCompareFunction): ... # Task: Write a Python test function `test_metric_value_font_size_with_rem` to test that metricValueFontSize accepts rem values and applies them correctly. Module under test: playwright.sync_api, e2e_playwright.conftest, e2e_playwright.shared.app_utils
def test_metric_value_font_size_with_rem(app: Page): """Test that metricValueFontSize accepts rem values and applies them correctly.""" expect_no_skeletons(app, timeout=25000) metric = get_metric(app, "Revenue") metric_value = metric.get_by_test_id("stMetricValue") # Verify rem value is converted and applied correctly (3rem = 48px with 16px base) expect(metric_value).to_have_css("font-size", "48px") # Verify custom font weight is applied (300) expect(metric_value).to_have_css("font-weight", "300") # Verify it's NOT the default size (2.25rem = 36px) expect(metric_value).not_to_have_css("font-size", "36px")
test
1
{"function_name": "test_metric_value_font_size_with_rem", "class_name": null, "qualname": "test_metric_value_font_size_with_rem", "file_path": "e2e_playwright/theming/theme_metric_value_style_test.py", "repo_id": "streamlit/streamlit", "loc": 15, "tested_modules": ["playwright.sync_api", "e2e_playwright.conftest", "e2e_playwright.shared.app_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/unit/test_transform_pyarrow.py:test_arrow_concat_with_objects
# Context: import pyarrow as pa from ray.data._internal.arrow_ops.transform_pyarrow import ( MIN_PYARROW_VERSION_TYPE_PROMOTION, _align_struct_fields, concat, hash_partition, shuffle, try_combine_chunked_columns, unify_schemas, ) def test_try_defragment_table(): ... def test_hash_partitioning(): ... def test_shuffle(): ... def test_arrow_concat_empty(simple_concat_data): ... def test_arrow_concat_single_block(simple_concat_data): ... def test_arrow_concat_basic(basic_concat_blocks, basic_concat_expected): ... def test_arrow_concat_null_promotion(null_promotion_blocks, null_promotion_expected): ... def test_arrow_concat_tensor_extension_uniform(uniform_tensor_blocks, uniform_tensor_expected): ... def test_arrow_concat_tensor_extension_variable_shaped(variable_shaped_tensor_blocks, variable_shaped_tensor_expected): ... def test_arrow_concat_tensor_extension_uniform_and_variable_shaped(mixed_tensor_blocks, mixed_tensor_expected): ... def test_arrow_concat_tensor_extension_uniform_but_different(different_shape_tensor_blocks, different_shape_tensor_expected): ... def test_struct_with_different_field_names(struct_different_field_names_blocks, struct_different_field_names_expected): ... def test_nested_structs(nested_structs_blocks, nested_structs_expected): ... def test_struct_with_null_values(struct_null_values_blocks, struct_null_values_expected): ... def test_struct_with_mismatched_lengths(struct_mismatched_lengths_blocks, struct_mismatched_lengths_expected): ... def test_struct_with_empty_arrays(struct_empty_arrays_blocks, struct_empty_arrays_expected): ... def test_struct_with_arrow_variable_shaped_tensor_type(struct_variable_shaped_tensor_blocks, struct_variable_shaped_tensor_expected): ... def test_struct_with_diverging_primitive_types(): ... def test_arrow_concat_object_with_tensor_fails(object_with_tensor_fails_blocks): ... def test_unify_schemas(unify_schemas_basic_schemas, unify_schemas_multicol_schemas): ... 
def test_unify_schemas_object_types(unify_schemas_object_types_schemas): ... def test_unify_schemas_incompatible_tensor_dtypes(unify_schemas_incompatible_tensor_schemas): ... def test_unify_schemas_objects_and_tensors(unify_schemas_objects_and_tensors_schemas): ... def test_unify_schemas_missing_tensor_fields(unify_schemas_missing_tensor_fields_schemas): ... def test_unify_schemas_nested_struct_tensors(unify_schemas_nested_struct_tensors_schemas): ... def test_unify_schemas_edge_cases(unify_schemas_edge_cases_data): ... def test_unify_schemas_mixed_tensor_types(unify_schemas_mixed_tensor_data): ... def test_unify_schemas_type_promotion(unify_schemas_type_promotion_data): ... def test_arrow_block_select(block_select_data): ... def test_arrow_block_slice_copy(block_slice_data): ... def test_arrow_block_slice_copy_empty(block_slice_data): ... def test_mixed_tensor_types_same_dtype(mixed_tensor_types_same_dtype_blocks, mixed_tensor_types_same_dtype_expected): ... def test_mixed_tensor_types_fixed_shape_different(mixed_tensor_types_fixed_shape_blocks, mixed_tensor_types_fixed_shape_expected): ... def test_mixed_tensor_types_variable_shaped(mixed_tensor_types_variable_shaped_blocks, mixed_tensor_types_variable_shaped_expected): ... def test_mixed_tensor_types_in_struct(struct_with_mixed_tensor_types_blocks, struct_with_mixed_tensor_types_expected): ... def test_nested_struct_with_mixed_tensor_types(nested_struct_with_mixed_tensor_types_blocks, nested_struct_with_mixed_tensor_types_expected): ... def test_multiple_tensor_fields_in_struct(multiple_tensor_fields_struct_blocks, multiple_tensor_fields_struct_expected): ... def test_struct_with_incompatible_tensor_dtypes_fails(): ... def test_struct_with_additional_fields(struct_with_additional_fields_blocks, struct_with_additional_fields_expected): ... def test_struct_with_null_tensor_values(struct_with_null_tensor_values_blocks, struct_with_null_tensor_values_expected): ... def simple_struct_blocks(): ... 
def simple_struct_schema(): ... def nested_struct_blocks(): ... def nested_struct_schema(): ... def missing_column_blocks(): ... def missing_column_schema(): ... def multiple_struct_blocks(): ... def multiple_struct_schema(): ... def mixed_column_blocks(): ... def mixed_column_schema(): ... def empty_block_blocks(): ... def empty_block_schema(): ... def already_aligned_blocks(): ... def already_aligned_schema(): ... def no_struct_blocks(): ... def no_struct_schema(): ... def deep_nesting_blocks(): ... def deep_nesting_schema(): ... def test_align_struct_fields_simple(simple_struct_blocks, simple_struct_schema): ... def test_align_struct_fields_nested(nested_struct_blocks, nested_struct_schema): ... def test_align_struct_fields_missing_column(missing_column_blocks, missing_column_schema): ... def test_align_struct_fields_multiple_structs(multiple_struct_blocks, multiple_struct_schema): ... def test_align_struct_fields_non_struct_columns(mixed_column_blocks, mixed_column_schema): ... def test_align_struct_fields_empty_blocks(empty_block_blocks, empty_block_schema): ... def test_align_struct_fields_already_aligned(already_aligned_blocks, already_aligned_schema): ... def test_align_struct_fields_no_struct_columns(no_struct_blocks, no_struct_schema): ... def test_align_struct_fields_deep_nesting(deep_nesting_blocks, deep_nesting_schema): ... def uniform_tensor_blocks(): ... def uniform_tensor_expected(): ... def variable_shaped_tensor_blocks(): ... def variable_shaped_tensor_expected(): ... def mixed_tensor_blocks(): ... def mixed_tensor_expected(): ... def different_shape_tensor_blocks(): ... def different_shape_tensor_expected(): ... def mixed_tensor_types_same_dtype_blocks(): ... def mixed_tensor_types_same_dtype_expected(): ... def mixed_tensor_types_fixed_shape_blocks(): ... def mixed_tensor_types_fixed_shape_expected(): ... def mixed_tensor_types_variable_shaped_blocks(): ... def mixed_tensor_types_variable_shaped_expected(): ... 
def struct_with_mixed_tensor_types_blocks(): ... def struct_with_mixed_tensor_types_expected(): ... def nested_struct_with_mixed_tensor_types_blocks(): ... def nested_struct_with_mixed_tensor_types_expected(): ... def multiple_tensor_fields_struct_blocks(): ... def multiple_tensor_fields_struct_expected(): ... def struct_with_additional_fields_blocks(): ... def struct_with_additional_fields_expected(): ... def struct_with_null_tensor_values_blocks(): ... def struct_with_null_tensor_values_expected(): ... def basic_concat_blocks(): ... def basic_concat_expected(): ... def null_promotion_blocks(): ... def null_promotion_expected(): ... def struct_different_field_names_blocks(): ... def struct_different_field_names_expected(): ... def nested_structs_blocks(): ... def nested_structs_expected(): ... def struct_null_values_blocks(): ... def struct_null_values_expected(): ... def struct_mismatched_lengths_blocks(): ... def struct_mismatched_lengths_expected(): ... def struct_empty_arrays_blocks(): ... def struct_empty_arrays_expected(): ... def unify_schemas_basic_schemas(): ... def unify_schemas_multicol_schemas(): ... def object_concat_blocks(): ... def object_concat_expected(): ... def struct_variable_shaped_tensor_blocks(): ... def struct_variable_shaped_tensor_expected(): ... def unify_schemas_object_types_schemas(): ... def unify_schemas_incompatible_tensor_schemas(): ... def unify_schemas_objects_and_tensors_schemas(): ... def unify_schemas_missing_tensor_fields_schemas(): ... def unify_schemas_nested_struct_tensors_schemas(): ... def object_with_tensor_fails_blocks(): ... def simple_concat_data(): ... def _create_tensor_array(data, tensor_type): ... def _create_expected_result(schema, length, **kwargs): ... def _create_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, id_data1, id_data2, column_name): ... 
def _create_struct_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, value_data1, value_data2, extra_data2, struct_name, id_data1, id_data2): ... def _create_tensor_schema(tensor_type, dtype, ndim, include_id, struct_name, include_extra): ... def _create_basic_struct_blocks(struct_data1, struct_data2, column_name, id_data1, id_data2, other_columns): ... def _create_struct_schema(struct_fields, include_id, other_fields): ... def _create_struct_blocks_with_columns(struct_data1, struct_data2, struct_type1, struct_type2, additional_columns1, additional_columns2, struct_column): ... def _create_struct_expected_result(schema, length, content): ... def _create_simple_struct_blocks(struct_data1, struct_data2, field_names, field_types, additional_columns1, additional_columns2, struct_column): ... def _create_simple_struct_schema(field_names, field_types, additional_fields): ... def unify_schemas_edge_cases_data(): ... def unify_schemas_mixed_tensor_data(): ... def unify_schemas_type_promotion_data(): ... def block_select_data(): ... def block_slice_data(): ... # Task: Write a Python test function `test_arrow_concat_with_objects` to verify the behavior of `arrow_concat_with_objects`. Module under test: typing, ray.data._internal.arrow_ops.transform_pyarrow, ray.data._internal.tensor_extensions.arrow
def test_arrow_concat_with_objects(object_concat_blocks, object_concat_expected): t3 = concat(object_concat_blocks) assert isinstance(t3, pa.Table) assert len(t3) == object_concat_expected["length"] assert isinstance(t3.schema.field("a").type, object_concat_expected["a_type"]) assert object_concat_expected["b_type"](t3.schema.field("b").type) assert t3.column("a").to_pylist() == object_concat_expected["content"]["a"] assert t3.column("b").to_pylist() == object_concat_expected["content"]["b"]
test
0
{"function_name": "test_arrow_concat_with_objects", "class_name": null, "qualname": "test_arrow_concat_with_objects", "file_path": "python/ray/data/tests/unit/test_transform_pyarrow.py", "repo_id": "ray-project/ray", "loc": 8, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarrow", "ray.data._internal.tensor_extensions.arrow", "ray.data._internal.utils.arrow_utils", "ray.data.block"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py:Qwen3VLMoePreTrainedModel._init_weights
# Context: import torch from ... import initialization as init from ...modeling_utils import PreTrainedModel class Qwen3VLMoeTextConfig(PreTrainedConfig): ... class Qwen3VLMoeVisionConfig(Qwen3VLVisionConfig): ... class Qwen3VLMoeConfig(Qwen3VLConfig): ... class Qwen3VLMoeTextRMSNorm(Qwen3MoeRMSNorm): ... class Qwen3VLMoeTextExperts(Qwen3MoeExperts): ... class Qwen3VLMoeTextTopKRouter(nn.Module): ... class Qwen3VLMoeTextSparseMoeBlock(Qwen3MoeSparseMoeBlock): ... class Qwen3VLMoeTextAttention(Qwen3VLTextAttention): ... class Qwen3VLMoeTextDecoderLayer(Qwen3MoeDecoderLayer): ... class Qwen3VLMoeVisionRotaryEmbedding(Qwen3VLVisionRotaryEmbedding): ... class Qwen3VLMoeVisionAttention(Qwen3VLVisionAttention): ... class Qwen3VLMoeVisionBlock(Qwen3VLVisionBlock): ... class Qwen3VLMoeVisionModel(Qwen3VLVisionModel): ... class Qwen3VLMoeTextModel(Qwen3VLTextModel): ... class Qwen3VLMoeModelOutputWithPast(Qwen3VLModelOutputWithPast): ... class Qwen3VLMoeCausalLMOutputWithPast(Qwen3VLCausalLMOutputWithPast): ... class Qwen3VLMoeForConditionalGeneration(Qwen3VLForConditionalGeneration): ... class Qwen3VLMoePreTrainedModel(Qwen3MoePreTrainedModel): input_modalities = ("text", "image", "video") _no_split_modules = ["Qwen3VLMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"] # Task: Write a Python method `_init_weights` for the class `Qwen3VLMoePreTrainedModel` to initialize the weights. Parameters: module
def _init_weights(self, module): """Initialize the weights.""" PreTrainedModel._init_weights(self, module) if hasattr(self.config, "initializer_range"): std = self.config.initializer_range else: std = getattr(self.config.get_text_config(), "initializer_range", 0.02) if isinstance(module, Qwen3VLMoeTextExperts): init.normal_(module.gate_up_proj, mean=0.0, std=std) init.normal_(module.down_proj, mean=0.0, std=std) elif isinstance(module, Qwen3VLMoeTextTopKRouter): init.normal_(module.weight, mean=0.0, std=std) elif isinstance(module, Qwen3VLMoeVisionRotaryEmbedding): inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim)) init.copy_(module.inv_freq, inv_freq)
function_simple
0
{"cognitive_complexity": 5, "loc": 15, "code_loc": 13, "docstring_loc": 1, "function_name": "_init_weights", "class_name": "Qwen3VLMoePreTrainedModel", "qualname": "Qwen3VLMoePreTrainedModel._init_weights", "file_path": "src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}
keras-team/keras:keras/src/layers/pooling/adaptive_pooling1d_test.py:AdaptivePooling1DLayerTest.test_max_pooling_compute_output_shape
# Context: from keras.src import layers class AdaptivePooling1DLayerTest(testing.TestCase): def _run_layer_test(self, layer_class, x_np, output_size, data_format): ... def test_average_pooling_basic_shapes(self): ... def test_max_pooling_basic_shapes(self): ... def test_average_pooling_channels_last(self): ... def test_max_pooling_channels_last(self): ... def test_average_pooling_compute_output_shape(self): ... def test_average_pooling_get_config(self): ... def test_max_pooling_get_config(self): ... def test_average_pooling_numerical(self): ... def test_max_pooling_numerical(self): ... # Task: Write a Python test method `test_max_pooling_compute_output_shape` in test class `AdaptivePooling1DLayerTest` to test compute_output_shape() for AdaptiveMaxPooling1D. Module under test: keras.src, keras.src, keras.src
def test_max_pooling_compute_output_shape(self): """Test compute_output_shape() for AdaptiveMaxPooling1D.""" layer = layers.AdaptiveMaxPooling1D( output_size=16, data_format="channels_first" ) input_shape = (2, 3, 64) output_shape = layer.compute_output_shape(input_shape) self.assertEqual(output_shape, (2, 3, 16))
test
1
{"function_name": "test_max_pooling_compute_output_shape", "class_name": "AdaptivePooling1DLayerTest", "qualname": "AdaptivePooling1DLayerTest.test_max_pooling_compute_output_shape", "file_path": "keras/src/layers/pooling/adaptive_pooling1d_test.py", "repo_id": "keras-team/keras", "loc": 8, "tested_modules": ["keras.src", "keras.src", "keras.src"], "has_docstring": true, "runnable_level": "project_runnable"}
ManimCommunity/manim:tests/module/mobject/test_matrix.py:TestIntegerMatrix.test_integer_matrix_init
# Context: import pytest from manim.mobject.matrix import ( DecimalMatrix, IntegerMatrix, Matrix, ) class TestMatrix: ... class TestDecimalMatrix: ... class TestIntegerMatrix: # Task: Write a Python test method `test_integer_matrix_init` in test class `TestIntegerMatrix` to verify the behavior of `integer_matrix_init`. Module under test: __future__, manim.mobject.matrix, manim.mobject.text.tex_mobject
def test_integer_matrix_init(self, matrix_elements, expected_elements): matrix = IntegerMatrix(matrix_elements) assert isinstance(matrix, IntegerMatrix) for row_index, row in enumerate(matrix.get_rows()): for column_index, element in enumerate(row): assert element.number == expected_elements[row_index][column_index]
test
1
{"function_name": "test_integer_matrix_init", "class_name": "TestIntegerMatrix", "qualname": "TestIntegerMatrix.test_integer_matrix_init", "file_path": "tests/module/mobject/test_matrix.py", "repo_id": "ManimCommunity/manim", "loc": 7, "tested_modules": ["__future__", "manim.mobject.matrix", "manim.mobject.text.tex_mobject", "manim.mobject.types.vectorized_mobject"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/examples/data/video_processing/http_utils.py:HTTPConnection:class_doc
Write a class-level docstring for `HTTPConnection` which has methods: `__init__`, `get_sync_client`, `get_async_client`, `_validate_http_url`, `_headers`.
Small helper around ``requests``/``aiohttp`` for reuseable HTTP clients.
documentation
0
{"doc_type": "class", "class_name": "HTTPConnection", "file_path": "python/ray/data/examples/data/video_processing/http_utils.py", "repo_id": "ray-project/ray", "char_length": 72, "methods": ["__init__", "get_sync_client", "get_async_client", "_validate_http_url", "_headers", "get_response", "get_async_response", "get_bytes", "async_get_bytes", "download_file"]}
ray-project/ray:ci/raydepsets/tests/test_cli.py:TestCli.test_subset
# Context: import tempfile from pathlib import Path from ci.raydepsets.tests.utils import ( append_to_file, copy_data_to_tmpdir, replace_in_file, save_file_as, save_packages_to_file, write_to_config_file, ) def _create_test_manager(tmpdir: str, config_path: Optional[str], check: bool, build_all_configs: Optional[bool]) -> DependencySetManager: ... def _invoke_build(tmpdir: str, config_path: str, name: Optional[str]): ... class TestCli(unittest.TestCase): def test_cli_load_fail_no_config(self): ... def test_dependency_set_manager_init(self): ... def test_uv_binary_exists(self): ... def test_uv_version(self): ... def test_compile(self): ... def test_compile_update_package(self): ... def test_compile_with_append_and_override_flags(self, mock_stdout): ... def test_compile_by_depset_name(self): ... def test_subset_does_not_exist(self): ... def test_subset_with_expanded_depsettest_subset_with_expanded_depset(self): ... def test_check_if_subset_exists(self): ... def test_compile_bad_requirements(self): ... def test_get_path(self): ... def test_append_uv_flags_exist_in_output(self, mock_stdout): ... def test_append_uv_flags_with_space_in_flag(self, mock_stdout): ... def test_include_setuptools(self): ... def test_ignore_setuptools(self, mock_stdout): ... def test_override_uv_flag_single_flag(self): ... def test_override_uv_flag_multiple_flags(self): ... def test_flatten_flags(self): ... def test_build_graph(self): ... def test_build_graph_predecessors(self): ... def test_build_graph_bad_operation(self): ... def test_execute(self): ... def test_execute_single_depset(self): ... def test_execute_single_depset_that_does_not_exist(self): ... def test_expand(self): ... def test_expand_with_requirements(self): ... def test_get_depset_with_build_arg_set(self): ... def test_get_depset_without_build_arg_set(self): ... def test_execute_single_pre_hook(self): ... def test_execute_single_invalid_pre_hook(self): ... def test_copy_lock_files_to_temp_dir(self): ... 
def test_diff_lock_files_out_of_date(self): ... def test_diff_lock_files_up_to_date(self): ... def test_compile_with_packages(self): ... def test_compile_with_packages_and_requirements(self): ... def test_requirements_ordering(self, mock_stdout): ... def test_constraints_ordering(self, mock_stdout): ... def test_execute_pre_hook(self, mock_stdout): ... def test_get_expanded_depset_requirements(self): ... def test_build_all_configs(self): ... def test_parse_lock_file(self): ... def test_parse_lock_file_with_index_url(self): ... def test_parse_lock_file_empty(self): ... def test_parse_lock_file_comments_only(self): ... def test_write_lock_file(self): ... def test_write_lock_file_empty(self): ... def test_roundtrip_preserves_packages(self): ... def test_parse_large_lock_file(self): ... def test_relax(self): ... def test_relax_multiple_packages(self): ... def test_relax_package_not_found(self): ... def test_relax_preserves_options(self): ... def test_relax_large_lock_file(self): ... # Task: Write a Python test method `test_subset` in test class `TestCli` to verify the behavior of `subset`. Module under test: pathlib, typing, click.testing
def test_subset(self): with tempfile.TemporaryDirectory() as tmpdir: copy_data_to_tmpdir(tmpdir) # Add six to requirements_test_subset.txt save_packages_to_file( Path(tmpdir) / "requirements_test_subset.txt", ["six==1.16.0"], ) manager = _create_test_manager(tmpdir) # Compile general_depset with requirements_test.txt and requirements_test_subset.txt manager.compile( constraints=["requirement_constraints_test.txt"], requirements=["requirements_test.txt", "requirements_test_subset.txt"], append_flags=["--no-annotate"], name="general_depset__py311_cpu", output="requirements_compiled_general.txt", ) # Subset general_depset with requirements_test.txt (should lock emoji & pyperclip) manager.subset( source_depset="general_depset__py311_cpu", requirements=["requirements_test.txt"], append_flags=["--no-annotate"], name="subset_general_depset__py311_cpu", output="requirements_compiled_subset_general.txt", ) output_file = Path(tmpdir) / "requirements_compiled_subset_general.txt" output_text = output_file.read_text() output_file_valid = Path(tmpdir) / "requirements_compiled_test.txt" output_text_valid = output_file_valid.read_text() assert output_text == output_text_valid
test
0
{"function_name": "test_subset", "class_name": "TestCli", "qualname": "TestCli.test_subset", "file_path": "ci/raydepsets/tests/test_cli.py", "repo_id": "ray-project/ray", "loc": 31, "tested_modules": ["pathlib", "typing", "click.testing", "networkx", "ci.raydepsets.cli"], "has_docstring": false, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/browser/profile.py:BrowserLaunchArgs.validate_devtools_headless
# Context: from typing import Annotated, Any, Literal, Self from pydantic import AfterValidator, AliasChoices, BaseModel, ConfigDict, Field, field_validator, model_validator def _get_enable_default_extensions_default() -> bool: ... class ViewportSize(BaseModel): ... def get_display_size() -> ViewportSize | None: ... def get_window_adjustments() -> tuple[int, int]: ... def validate_url(url: str, schemes: Iterable[str]) -> str: ... def validate_float_range(value: float, min_val: float, max_val: float) -> float: ... def validate_cli_arg(arg: str) -> str: ... class RecordHarContent(str, Enum): ... class RecordHarMode(str, Enum): ... class BrowserChannel(str, Enum): ... class BrowserContextArgs(BaseModel): ... class BrowserConnectArgs(BaseModel): ... class BrowserNewContextArgs(BrowserContextArgs): ... class BrowserLaunchPersistentContextArgs(BrowserLaunchArgs, BrowserContextArgs): ... class ProxySettings(BaseModel): ... class BrowserProfile(BrowserConnectArgs, BrowserLaunchPersistentContextArgs, BrowserLaunchArgs, BrowserNewContextArgs): ... class BrowserLaunchArgs(BaseModel): model_config = ConfigDict( def set_default_downloads_path(self) -> Self: ... def args_as_dict(args: list[str]) -> dict[str, str]: ... def args_as_list(args: dict[str, str]) -> list[str]: ... # Task: Write a Python method `validate_devtools_headless` for the class `BrowserLaunchArgs` to cannot open devtools when headless is True. Returns: Self
def validate_devtools_headless(self) -> Self: """Cannot open devtools when headless is True""" assert not (self.headless and self.devtools), 'headless=True and devtools=True cannot both be set at the same time' return self
function_simple
0
{"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "validate_devtools_headless", "class_name": "BrowserLaunchArgs", "qualname": "BrowserLaunchArgs.validate_devtools_headless", "file_path": "browser_use/browser/profile.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/a2a_events.py:A2AServerTaskFailedEvent:class_doc
Write a class-level docstring for `A2AServerTaskFailedEvent` (inherits from A2AEventBase) which has methods: various methods.
Event emitted when an A2A server task execution fails. Attributes: task_id: A2A task ID for this execution. context_id: A2A context ID grouping related tasks. error: Error message describing the failure. metadata: Custom A2A metadata key-value pairs.
documentation
0
{"doc_type": "class", "class_name": "A2AServerTaskFailedEvent", "file_path": "lib/crewai/src/crewai/events/types/a2a_events.py", "repo_id": "crewAIInc/crewAI", "char_length": 267, "methods": []}
huggingface/transformers:tests/models/d_fine/test_modeling_d_fine.py:DFineModelTest.test_retain_grad_hidden_states_attentions
# Context: from transformers.testing_utils import ( require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) class DFineModelTester: ... def prepare_img(): ... class DFineModelIntegrationTest(unittest.TestCase): ... class DFineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (DFineModel, DFineForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( is_encoder_decoder = True test_missing_keys = False def _prepare_for_class(self, inputs_dict, model_class, return_labels): ... def setUp(self): ... def test_config(self): ... def test_d_fine_model(self): ... def test_d_fine_object_detection_head_model(self): ... def test_multi_gpu_data_parallel_forward(self): ... def test_inputs_embeds(self): ... def test_inputs_embeds_matches_input_ids(self): ... def test_model_get_set_embeddings(self): ... def test_model_common_attributes(self): ... def test_resize_tokens_embeddings(self): ... def test_feed_forward_chunking(self): ... def test_load_save_without_tied_weights(self): ... def test_attention_outputs(self): ... def test_hidden_states_output(self): ... def test_forward_signature(self): ... def test_backbone_selection(self): ... def test_inference_with_different_dtypes(self, dtype_str): ... def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str): ... # Task: Write a Python test method `test_retain_grad_hidden_states_attentions` in test class `DFineModelTest` to verify the behavior of `retain_grad_hidden_states_attentions`. Module under test: functools, parameterized, transformers
def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the first output since last_hidden_state is the first item output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad)
test
0
{"function_name": "test_retain_grad_hidden_states_attentions", "class_name": "DFineModelTest", "qualname": "DFineModelTest.test_retain_grad_hidden_states_attentions", "file_path": "tests/models/d_fine/test_modeling_d_fine.py", "repo_id": "huggingface/transformers", "loc": 33, "tested_modules": ["functools", "parameterized", "transformers", "transformers.testing_utils", "transformers"], "has_docstring": false, "runnable_level": "class_runnable"}
ray-project/ray:doc/source/serve/tutorials/video-analysis/autoscaling_policy.py:_find_deployment
# Context: from typing import Dict, Tuple from ray.serve._private.common import DeploymentID from ray.serve.config import AutoscalingContext def _get_error_ratio(ctx: AutoscalingContext) -> float: ... def _scale_by_error_ratio(ctx: AutoscalingContext, floor: int) -> int: ... def coordinated_scaling_policy(contexts: Dict[DeploymentID, AutoscalingContext]) -> Tuple[Dict[DeploymentID, int], Dict]: ... # Task: Write a Python function `_find_deployment` to find deployment by name. Parameters: contexts: Dict[DeploymentID, AutoscalingContext], name: str Returns: Tuple[DeploymentID, AutoscalingContext]
def _find_deployment( contexts: Dict[DeploymentID, AutoscalingContext], name: str, ) -> Tuple[DeploymentID, AutoscalingContext]: """Find deployment by name.""" for dep_id, ctx in contexts.items(): if dep_id.name == name: return dep_id, ctx raise KeyError(f"Deployment '{name}' not found")
function_simple
0
{"cognitive_complexity": 3, "loc": 9, "code_loc": 4, "docstring_loc": 1, "function_name": "_find_deployment", "class_name": null, "qualname": "_find_deployment", "file_path": "doc/source/serve/tutorials/video-analysis/autoscaling_policy.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
browser-use/browser-use:browser_use/browser/watchdogs/dom_watchdog.py:DOMWatchdog:class_doc
Write a class-level docstring for `DOMWatchdog` (inherits from BaseWatchdog) which has methods: `on_TabCreatedEvent`, `_get_recent_events_str`, `_get_pending_network_requests`, `on_BrowserStateRequestEvent`, `_build_dom_tree_without_highlights`.
Handles DOM tree building, serialization, and element access via CDP. This watchdog acts as a bridge between the event-driven browser session and the DomService implementation, maintaining cached state and providing helper methods for other watchdogs.
documentation
0
{"doc_type": "class", "class_name": "DOMWatchdog", "file_path": "browser_use/browser/watchdogs/dom_watchdog.py", "repo_id": "browser-use/browser-use", "char_length": 252, "methods": ["on_TabCreatedEvent", "_get_recent_events_str", "_get_pending_network_requests", "on_BrowserStateRequestEvent", "_build_dom_tree_without_highlights", "_capture_clean_screenshot", "_detect_pagination_buttons", "_get_page_info", "get_element_by_index", "clear_cache"]}
vllm-project/vllm:vllm/model_executor/models/funasr.py:FunASRAudioInputs:class_doc
Write a class-level docstring for `FunASRAudioInputs` (inherits from TensorSchema) which has methods: various methods.
Dimensions: - b: Batch size - nmb: Number of mel bins - t: Time frames (M)
documentation
1
{"doc_type": "class", "class_name": "FunASRAudioInputs", "file_path": "vllm/model_executor/models/funasr.py", "repo_id": "vllm-project/vllm", "char_length": 86, "methods": []}
infiniflow/ragflow:test/unit_test/common/test_token_utils.py:TestTotalTokenCountFromResponse.test_dict_with_usage_total_tokens
# Context: from common.token_utils import num_tokens_from_string, total_token_count_from_response, truncate, encoder class TestNumTokensFromString: ... def test_token_count_ranges(input_string, expected_min_tokens): ... def test_consistency(): ... class TestTruncate: ... class TestTotalTokenCountFromResponse: def test_dict_with_usage_input_output_tokens(self): ... def test_dict_with_meta_tokens_input_output(self): ... def test_priority_order_dict_usage_total_tokens_third(self): ... def test_priority_order_dict_usage_input_output_fourth(self): ... def test_priority_order_meta_tokens_last(self): ... def test_no_token_info_returns_zero(self): ... def test_partial_dict_usage_missing_output_tokens(self): ... def test_partial_meta_tokens_missing_input_tokens(self): ... def test_none_response(self): ... def test_invalid_response_type(self): ... # Task: Write a Python test method `test_dict_with_usage_total_tokens` in test class `TestTotalTokenCountFromResponse` to test dictionary response with usage['total_tokens']. Module under test: common.token_utils
def test_dict_with_usage_total_tokens(self): """Test dictionary response with usage['total_tokens']""" resp_dict = { 'usage': { 'total_tokens': 175 } } result = total_token_count_from_response(resp_dict) assert result == 175
test
1
{"function_name": "test_dict_with_usage_total_tokens", "class_name": "TestTotalTokenCountFromResponse", "qualname": "TestTotalTokenCountFromResponse.test_dict_with_usage_total_tokens", "file_path": "test/unit_test/common/test_token_utils.py", "repo_id": "infiniflow/ragflow", "loc": 10, "tested_modules": ["common.token_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/kernels/moe/test_silu_mul_per_token_group_quant_fp8_colmajor.py:test_silu_mul_fp8_quant_deep_gemm
# Context: import pytest import torch from vllm.model_executor.layers.quantization.utils.fp8_utils import ( _per_token_group_quant_fp8_colmajor, silu_mul_per_token_group_quant_fp8_colmajor, ) from vllm.platforms import current_platform from vllm.utils.deep_gemm import is_deep_gemm_e8m0_used from vllm.utils.torch_utils import set_random_seed def reference_quant(x: torch.Tensor, use_ue8m0: bool): ... def reference(x: torch.Tensor, use_ue8m0: bool) -> tuple[torch.Tensor, torch.Tensor]: ... # Task: Write a Python test function `test_silu_mul_fp8_quant_deep_gemm` to verify the behavior of `silu_mul_fp8_quant_deep_gemm`. Module under test: vllm.model_executor.layers.quantization.utils.fp8_utils, vllm.platforms, vllm.triton_utils
def test_silu_mul_fp8_quant_deep_gemm(T: int, N: int): set_random_seed(42) input = torch.rand((T, N), dtype=torch.bfloat16, device="cuda") use_ue8m0 = is_deep_gemm_e8m0_used() # Test output, output_scales = silu_mul_per_token_group_quant_fp8_colmajor( input, use_ue8m0=use_ue8m0 ) # Reference ref_output, ref_output_scales = reference(input, use_ue8m0) torch.testing.assert_close(output.to(torch.float32), ref_output.to(torch.float32)) torch.testing.assert_close(output_scales, ref_output_scales)
test
1
{"function_name": "test_silu_mul_fp8_quant_deep_gemm", "class_name": null, "qualname": "test_silu_mul_fp8_quant_deep_gemm", "file_path": "tests/kernels/moe/test_silu_mul_per_token_group_quant_fp8_colmajor.py", "repo_id": "vllm-project/vllm", "loc": 17, "tested_modules": ["vllm.model_executor.layers.quantization.utils.fp8_utils", "vllm.platforms", "vllm.triton_utils", "vllm.utils.deep_gemm", "vllm.utils.torch_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/calculator_agent.py:demonstrate_calculator
# Context: from agents import Agent, Runner, function_tool def add_numbers(a: float, b: float) -> float: ... def subtract_numbers(a: float, b: float) -> float: ... def multiply_numbers(a: float, b: float) -> float: ... def divide_numbers(a: float, b: float) -> float: ... def calculate_compound_interest(principal: float, rate: float, time: int, compounds_per_year: int) -> str: ... def calculate_circle_area(radius: float) -> str: ... def calculate_triangle_area(base: float, height: float) -> str: ... def convert_temperature(temperature: float, from_unit: str, to_unit: str) -> str: ... def interactive_mode(): ... def main(): ... # Task: Write a Python function `demonstrate_calculator` to demonstrate the calculator agent with various examples.
def demonstrate_calculator(): """Demonstrate the calculator agent with various examples""" print("🎯 OpenAI Agents SDK - Tutorial 3: Calculator Agent") print("=" * 60) print() # Test cases test_cases = [ "Calculate 15 + 27", "What's the compound interest on $5000 at 3.5% for 8 years?", "Find the area of a circle with radius 12", "Convert 100 degrees Fahrenheit to Celsius", "What's 144 divided by 12?", "Calculate the area of a triangle with base 8 and height 6" ] for i, question in enumerate(test_cases, 1): print(f"=== Calculation {i} ===") print(f"Question: {question}") try: result = Runner.run_sync(calculator_agent, question) print(f"Answer: {result.final_output}") except Exception as e: print(f"❌ Error: {e}") print() print("-" * 40) print()
function_simple
0
{"cognitive_complexity": 2, "loc": 29, "code_loc": 22, "docstring_loc": 1, "function_name": "demonstrate_calculator", "class_name": null, "qualname": "demonstrate_calculator", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/3_tool_using_agent/calculator_agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"}
ocrmypdf/OCRmyPDF:tests/test_multi_font_manager.py:test_select_font_arabic_text_without_language_hint
# Context: import pytest def font_dir(): ... def multi_font_manager(font_dir): ... def has_cjk_font(manager: MultiFontManager) -> bool: ... def has_arabic_font(manager: MultiFontManager) -> bool: ... def has_devanagari_font(manager: MultiFontManager) -> bool: ... def test_init_loads_builtin_fonts(multi_font_manager): ... def test_missing_font_directory(): ... def test_select_font_for_arabic_language(multi_font_manager): ... def test_select_font_for_persian_language(multi_font_manager): ... def test_select_font_for_urdu_language(multi_font_manager): ... def test_farsi_language_code(multi_font_manager): ... def test_select_font_for_hindi_language(multi_font_manager): ... def test_select_font_for_sanskrit_language(multi_font_manager): ... def test_select_font_for_marathi_language(multi_font_manager): ... def test_select_font_for_nepali_language(multi_font_manager): ... def test_select_font_for_chinese_language(multi_font_manager): ... def test_select_font_for_chinese_generic(multi_font_manager): ... def test_select_font_for_chinese_simplified(multi_font_manager): ... def test_select_font_for_chinese_traditional(multi_font_manager): ... def test_select_font_for_japanese_language(multi_font_manager): ... def test_select_font_for_korean_language(multi_font_manager): ... def test_select_font_for_english_text(multi_font_manager): ... def test_select_font_without_language_hint(multi_font_manager): ... def test_devanagari_text_without_language_hint(multi_font_manager): ... def test_cjk_text_without_language_hint(multi_font_manager): ... def test_fallback_to_occulta_font(multi_font_manager): ... def test_fallback_fonts_constant(multi_font_manager): ... def test_has_all_glyphs_for_english(multi_font_manager): ... def test_has_all_glyphs_for_arabic(multi_font_manager): ... def test_has_all_glyphs_for_devanagari(multi_font_manager): ... def test_has_all_glyphs_for_cjk(multi_font_manager): ... def test_empty_text_has_all_glyphs(multi_font_manager): ... 
def test_has_all_glyphs_missing_font(multi_font_manager): ... def test_font_selection_caching(multi_font_manager): ... def test_language_font_map_coverage(): ... def test_get_all_fonts(multi_font_manager): ... class MockFontProvider: ... def test_custom_font_provider(font_dir): ... def test_missing_font_uses_fallback(font_dir): ... def test_builtin_font_provider_loads_expected_fonts(font_dir): ... def test_builtin_font_provider_get_font(font_dir): ... def test_builtin_font_provider_get_fallback(font_dir): ... def test_builtin_font_provider_missing_font_logs_warning(tmp_path, font_dir, caplog): ... def test_builtin_font_provider_missing_occulta_raises(tmp_path): ... # Task: Write a Python test function `test_select_font_arabic_text_without_language_hint` to test that Arabic text is handled via fallback without language hint. Module under test: __future__, pathlib, ocrmypdf.font
def test_select_font_arabic_text_without_language_hint(multi_font_manager): """Test that Arabic text is handled via fallback without language hint.""" if not has_arabic_font(multi_font_manager): pytest.skip("Arabic font not available") font_manager = multi_font_manager.select_font_for_word("مرحبا", None) # Should get NotoSansArabic-Regular via fallback chain glyph checking assert font_manager == multi_font_manager.fonts['NotoSansArabic-Regular']
test
1
{"function_name": "test_select_font_arabic_text_without_language_hint", "class_name": null, "qualname": "test_select_font_arabic_text_without_language_hint", "file_path": "tests/test_multi_font_manager.py", "repo_id": "ocrmypdf/OCRmyPDF", "loc": 7, "tested_modules": ["__future__", "pathlib", "ocrmypdf.font"], "has_docstring": true, "runnable_level": "file_runnable"}
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_glm_tool_parsing.py:TestGlmToolParsingEdgeCases.test_empty_value_with_closing
# Context: def _parse_args(text: str) -> list[tuple[str, str]]: ... def _parse_func_name(text: str) -> str: ... class TestGlmToolParsingWithClosingTags: ... class TestGlmToolParsingMissingClosingTags: ... class TestGlmToolParsingEdgeCases: def test_value_with_json_content(self): ... def test_value_with_json_no_closing(self): ... # Task: Write a Python test method `test_empty_value_with_closing` in test class `TestGlmToolParsingEdgeCases` to verify the behavior of `empty_value_with_closing`.
def test_empty_value_with_closing(self): text = "fn<arg_key>empty</arg_key><arg_value></arg_value>" pairs = _parse_args(text) assert pairs == [("empty", "")]
test
0
{"function_name": "test_empty_value_with_closing", "class_name": "TestGlmToolParsingEdgeCases", "qualname": "TestGlmToolParsingEdgeCases.test_empty_value_with_closing", "file_path": "src/exo/worker/tests/unittests/test_runner/test_glm_tool_parsing.py", "repo_id": "exo-explore/exo", "loc": 4, "tested_modules": [], "has_docstring": false, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py:PyArrowFileSystem.upload_files
# Context: import pyarrow.fs as pa_fs class PyArrowFileSystem(BaseCloudFileSystem): def get_fs_and_path(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ... def _create_azure_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ... def _create_abfss_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ... def _filter_files(fs: pa_fs.FileSystem, source_path: str, destination_path: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]]) -> List[Tuple[str, str]]: ... def get_file(object_uri: str, decode_as_utf_8: bool) -> Optional[Union[str, bytes]]: ... def list_subfolders(folder_uri: str) -> List[str]: ... def download_files(path: str, bucket_uri: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]], max_concurrency: int, chunk_size: int) -> None: ... # Task: Write a Python method `upload_files` for the class `PyArrowFileSystem` to upload files to cloud storage. Parameters: local_path: str, bucket_uri: str Returns: None
def upload_files( local_path: str, bucket_uri: str, ) -> None: """Upload files to cloud storage. Args: local_path: The local path of the files to upload. bucket_uri: The bucket uri to upload the files to, must start with `s3://`, `gs://`, `abfss://`, or `azure://`. """ try: fs, dest_path = PyArrowFileSystem.get_fs_and_path(bucket_uri) pa_fs.copy_files( source=local_path, destination=dest_path, source_filesystem=pa_fs.LocalFileSystem(), destination_filesystem=fs, ) except Exception as e: logger.exception(f"Error uploading files to {bucket_uri}: {e}") raise
function_simple
0
{"cognitive_complexity": 1, "loc": 23, "code_loc": 11, "docstring_loc": 7, "function_name": "upload_files", "class_name": "PyArrowFileSystem", "qualname": "PyArrowFileSystem.upload_files", "file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
binary-husky/gpt_academic:crazy_functions/doc_fns/conversation_doc/html_doc.py:HtmlFormatter.create_document
# Context: class HtmlFormatter: def __init__(self, chatbot, history): self.chatbot = chatbot self.history = history self.css_styles = """ :root { --primary-color: #2563eb; --primary-light: #eff6ff; --secondary-color: #1e293b; --background-color: #f8fafc; --text-color: #334155; --border-color: #e2e8f0; --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1); } body { font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; line-height: 1.8; margin: 0; padding: 2rem; color: var(--text-color); background-color: var(--background-color); } .container { max-width: 1200px; margin: 0 auto; background: white; padding: 2rem; border-radius: 16px; box-shadow: var(--card-shadow); } ::selection { background: var(--primary-light); color: var(--primary-color); } @keyframes fadeIn { from { opacity: 0; transform: translateY(20px); } to { opacity: 1; transform: translateY(0); } } @keyframes slideIn { from { transform: translateX(-20px); opacity: 0; } to { transform: translateX(0); opacity: 1; } } .container { animation: fadeIn 0.6s ease-out; } .QaBox { animation: slideIn 0.5s ease-out; transition: all 0.3s ease; } .QaBox:hover { transform: translateX(5px); } .Question, .Answer, .historyBox { transition: all 0.3s ease; } .chat-title { color: var(--primary-color); font-size: 2em; text-align: center; margin: 1rem 0 2rem; padding-bottom: 1rem; border-bottom: 2px solid var(--primary-color); } .chat-body { display: flex; flex-direction: column; gap: 1.5rem; margin: 2rem 0; } .QaBox { background: white; padding: 1.5rem; border-radius: 8px; border-left: 4px solid var(--primary-color); box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); margin-bottom: 1.5rem; } .Question { color: var(--secondary-color); font-weight: 500; margin-bottom: 1rem; } .Answer { color: var(--text-color); background: var(--primary-light); padding: 1rem; border-radius: 6px; } .history-section { margin-top: 3rem; padding-top: 2rem; border-top: 2px solid var(--border-color); 
} .history-title { color: var(--secondary-color); font-size: 1.5em; margin-bottom: 1.5rem; text-align: center; } .historyBox { background: white; padding: 1rem; margin: 0.5rem 0; border-radius: 6px; border: 1px solid var(--border-color); } @media (prefers-color-scheme: dark) { :root { --background-color: #0f172a; --text-color: #e2e8f0; --border-color: #1e293b; } .container, .QaBox { background: #1e293b; } } """ def format_chat_content(self) -> str: ... def format_history_content(self) -> str: ... # Task: Write a Python method `create_document` for the class `HtmlFormatter` to 生成完整的HTML文档. Returns: str
def create_document(self) -> str: """生成完整的HTML文档 Returns: str: 完整的HTML文档字符串 """ return f""" <!DOCTYPE html> <html lang="zh-CN"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>对话存档</title> <style>{self.css_styles}</style> </head> <body> <div class="container"> <h1 class="chat-title">对话存档</h1> <div class="chat-body"> {self.format_chat_content()} </div> </div> </body> </html> """
function_simple
1
{"cognitive_complexity": 0, "loc": 25, "code_loc": 19, "docstring_loc": 5, "function_name": "create_document", "class_name": "HtmlFormatter", "qualname": "HtmlFormatter.create_document", "file_path": "crazy_functions/doc_fns/conversation_doc/html_doc.py", "repo_id": "binary-husky/gpt_academic", "has_docstring": true, "runnable_level": "class_runnable"}
huggingface/pytorch-image-models:tests/test_layers_drop.py:TestDropBlock2d.test_drop_block_2d_scale_by_keep
# Context: import torch from timm.layers.drop import drop_block_2d, DropBlock2d, drop_path, DropPath class TestDropBlock2dModule: ... class TestDropPath: ... class TestDropPathModule: ... class TestDropBlock2d: def test_drop_block_2d_output_shape(self): ... def test_drop_block_2d_no_drop_when_prob_zero(self): ... def test_drop_block_2d_approximate_keep_ratio(self): ... def test_drop_block_2d_inplace(self): ... def test_drop_block_2d_couple_channels_true(self): ... def test_drop_block_2d_couple_channels_false(self): ... def test_drop_block_2d_with_noise(self): ... def test_drop_block_2d_even_block_size(self): ... def test_drop_block_2d_asymmetric_input(self): ... # Task: Write a Python test method `test_drop_block_2d_scale_by_keep` in test class `TestDropBlock2d` to test scale_by_keep parameter. Module under test: timm.layers.drop
def test_drop_block_2d_scale_by_keep(self): """Test scale_by_keep parameter.""" torch.manual_seed(42) x = torch.ones((2, 3, 16, 16)) # With scale_by_keep=True (default), kept values are scaled up result_scaled = drop_block_2d(x.clone(), drop_prob=0.3, block_size=5, scale_by_keep=True) kept_vals_scaled = result_scaled[result_scaled > 0] # Scaled values should be > 1.0 (scaled up to compensate for drops) assert kept_vals_scaled.min() > 1.0, "Scaled values should be > 1.0" # With scale_by_keep=False, kept values stay at original torch.manual_seed(42) result_unscaled = drop_block_2d(x.clone(), drop_prob=0.3, block_size=5, scale_by_keep=False) kept_vals_unscaled = result_unscaled[result_unscaled > 0] # Unscaled values should be exactly 1.0 assert torch.allclose(kept_vals_unscaled, torch.ones_like(kept_vals_unscaled)), \ "Unscaled values should be 1.0"
test
1
{"function_name": "test_drop_block_2d_scale_by_keep", "class_name": "TestDropBlock2d", "qualname": "TestDropBlock2d.test_drop_block_2d_scale_by_keep", "file_path": "tests/test_layers_drop.py", "repo_id": "huggingface/pytorch-image-models", "loc": 18, "tested_modules": ["timm.layers.drop"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:benchmarks/benchmark_topk_topp.py:benchmark_function
# Context: import torch class BenchmarkConfig: ... def calculate_ops_pct(k_values: torch.Tensor | None, p_values: torch.Tensor | None, vocab_size: int, batch_size: int) -> float: ... def create_logits(batch_size: int, vocab_size: int, device: str) -> torch.Tensor: ... def measure_memory() -> tuple[int, int]: ... def reset_memory_stats(): ... def create_benchmark_configs(batch_sizes: list[int], vocab_sizes: list[int], device: str) -> list[BenchmarkConfig]: ... def format_memory(bytes_val: int) -> str: ... def run_benchmark(configs: list[BenchmarkConfig], warmup_iters: int, benchmark_iters: int, verbose: bool): ... def print_summary_table(results: list[dict]): ... def main(): ... # Task: Write a Python function `benchmark_function` to benchmark a function and return (avg_time_ms, peak_memory_bytes). Parameters: func, logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, warmup_iters: int, benchmark_iters: int Returns: tuple[float, int]
def benchmark_function( func, logits: torch.Tensor, k: torch.Tensor | None, p: torch.Tensor | None, warmup_iters: int = 5, benchmark_iters: int = 20, ) -> tuple[float, int]: """ Benchmark a function and return (avg_time_ms, peak_memory_bytes). Returns average time in milliseconds and peak memory usage. """ # Warmup for _ in range(warmup_iters): logits_copy = logits.clone() func(logits_copy, k, p) torch.cuda.synchronize() # Reset memory stats before benchmark reset_memory_stats() # Benchmark start_events = [ torch.cuda.Event(enable_timing=True) for _ in range(benchmark_iters) ] end_events = [torch.cuda.Event(enable_timing=True) for _ in range(benchmark_iters)] for i in range(benchmark_iters): logits_copy = logits.clone() start_events[i].record() func(logits_copy, k, p) end_events[i].record() torch.cuda.synchronize() # Calculate timing times = [ start_events[i].elapsed_time(end_events[i]) for i in range(benchmark_iters) ] avg_time = sum(times) / len(times) # Get peak memory _, peak_memory = measure_memory() return avg_time, peak_memory
function_simple
1
{"cognitive_complexity": 2, "loc": 46, "code_loc": 21, "docstring_loc": 5, "function_name": "benchmark_function", "class_name": null, "qualname": "benchmark_function", "file_path": "benchmarks/benchmark_topk_topp.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py:Ernie4_5_VLMoeImageProcessor:class_doc
Write a class-level docstring for `Ernie4_5_VLMoeImageProcessor` (inherits from Glm4vImageProcessor) which has methods: `__init__`, `_preprocess`, `get_number_of_image_patches`.
Constructs a Ernie 4.5 VL image processor that dynamically resizes images based on the original images. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 6177}`): Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. patch_size (`int`, *optional*, defaults to 14): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*): The temporal patch size of the vision encoder. Unused in the image processor, only used for videos. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder.
documentation
0
{"doc_type": "class", "class_name": "Ernie4_5_VLMoeImageProcessor", "file_path": "src/transformers/models/ernie4_5_vl_moe/modular_ernie4_5_vl_moe.py", "repo_id": "huggingface/transformers", "char_length": 1878, "methods": ["__init__", "_preprocess", "get_number_of_image_patches"]}
streamlit/streamlit:e2e_playwright/window_config_test.py:test_window_config_captured_at_preload
# Context: from playwright.sync_api import Page, Request, expect from e2e_playwright.conftest import ( ImageCompareFunction, wait_for_app_loaded, wait_until, ) def test_window_theme_config_immutable_after_load(app: Page, assert_snapshot: ImageCompareFunction): ... def test_window_config_backend_base_url_immutable(app: Page): ... def test_window_config_main_page_url(app: Page): ... def test_window_config_direct_property_modification(app: Page): ... def test_window_config_download_url(app: Page): ... # Task: Write a Python test function `test_window_config_captured_at_preload` to test that window.__streamlit configuration works when set before load. Module under test: playwright.sync_api, e2e_playwright.conftest
def test_window_config_captured_at_preload( app: Page, assert_snapshot: ImageCompareFunction ): """Test that window.__streamlit configuration works when set before load. This verifies the normal use case where embedding environments set configuration before the Streamlit bundle loads. Tests multiple config property types: themes, URLs, client IDs, and boolean flags. """ # Inject comprehensive configuration BEFORE the page loads # This includes theme, URL, client ID, and boolean flag configs app.add_init_script(""" window.__streamlit = { LIGHT_THEME: { base: "light", primaryColor: "#1f2578", backgroundColor: "#c8ccf7", secondaryBackgroundColor: "#ebecf5", textColor: "#1A1A1A", }, MAIN_PAGE_BASE_URL: "https://host.example.com/my-app", CUSTOM_COMPONENT_CLIENT_ID: "preload-test-client-id", ENABLE_RELOAD_BASED_ON_HARDCODED_STREAMLIT_VERSION: false } """) # Reload to apply the injected script app.reload() wait_for_app_loaded(app) # Verify theme config was captured (visual verification) assert_snapshot(app, name="window_config_preload_applied") # Verify URL config was captured wait_until( app, lambda: ( app.evaluate("() => window.__streamlit?.MAIN_PAGE_BASE_URL") == "https://host.example.com/my-app" ), ) # Verify client ID config was captured wait_until( app, lambda: ( app.evaluate("() => window.__streamlit?.CUSTOM_COMPONENT_CLIENT_ID") == "preload-test-client-id" ), ) # Verify boolean flag config was captured wait_until( app, lambda: ( app.evaluate( "() => window.__streamlit?.ENABLE_RELOAD_BASED_ON_HARDCODED_STREAMLIT_VERSION" ) is False ), ) # Verify app is functional with all configs applied button = app.get_by_role("button", name="Click me") expect(button).to_be_visible() button.click()
test
1
{"function_name": "test_window_config_captured_at_preload", "class_name": null, "qualname": "test_window_config_captured_at_preload", "file_path": "e2e_playwright/window_config_test.py", "repo_id": "streamlit/streamlit", "loc": 66, "tested_modules": ["playwright.sync_api", "e2e_playwright.conftest"], "has_docstring": true, "runnable_level": "project_runnable"}
sansan0/TrendRadar:mcp_server/tools/system.py:check_single_version
# Context: from typing import Dict, List, Optional import requests class SystemManagementTools: ... # Task: Write a Python function `check_single_version` to 检查单个组件的版本. Parameters: name: str, local_version: str, remote_url: str, proxies: Optional[Dict], headers: Dict Returns: Dict
def check_single_version( name: str, local_version: str, remote_url: str, proxies: Optional[Dict], headers: Dict ) -> Dict: """检查单个组件的版本""" try: response = requests.get( remote_url, proxies=proxies, headers=headers, timeout=10 ) response.raise_for_status() remote_version = response.text.strip() local_tuple = parse_version(local_version) remote_tuple = parse_version(remote_version) need_update = local_tuple < remote_tuple if need_update: message = f"发现新版本 {remote_version},当前版本 {local_version},建议更新" elif local_tuple > remote_tuple: message = f"当前版本 {local_version} 高于远程版本 {remote_version}(可能是开发版本)" else: message = f"当前版本 {local_version} 已是最新版本" return { "success": True, "name": name, "current_version": local_version, "remote_version": remote_version, "need_update": need_update, "current_parsed": list(local_tuple), "remote_parsed": list(remote_tuple), "message": message } except requests.exceptions.Timeout: return { "success": False, "name": name, "current_version": local_version, "error": "获取远程版本超时" } except requests.exceptions.RequestException as e: return { "success": False, "name": name, "current_version": local_version, "error": f"网络请求失败: {str(e)}" } except Exception as e: return { "success": False, "name": name, "current_version": local_version, "error": str(e) }
function_complex
1
{"cognitive_complexity": 7, "loc": 57, "code_loc": 46, "docstring_loc": 1, "function_name": "check_single_version", "class_name": null, "qualname": "check_single_version", "file_path": "mcp_server/tools/system.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:release/ray_release/kuberay_util.py:convert_cluster_compute_to_kuberay_compute_config
Write a Python function `convert_cluster_compute_to_kuberay_compute_config` to convert cluster compute config to KubeRay compute config format. Parameters: compute_config: dict Returns: dict
def convert_cluster_compute_to_kuberay_compute_config(compute_config: dict) -> dict: """Convert cluster compute config to KubeRay compute config format. Args: compute_config: Original cluster compute configuration dict. Returns: Dict containing KubeRay-formatted compute configuration. """ worker_node_types = compute_config["worker_node_types"] head_node_resources = compute_config.get("head_node_type", {}).get("resources", {}) kuberay_worker_nodes = [] for worker_node_type in worker_node_types: worker_node_config = { "group_name": worker_node_type.get("name"), "min_nodes": worker_node_type.get("min_workers"), "max_nodes": worker_node_type.get("max_workers"), } if worker_node_type.get("resources", {}): worker_node_config["resources"] = worker_node_type.get("resources", {}) kuberay_worker_nodes.append(worker_node_config) config = { "head_node": {}, "worker_nodes": kuberay_worker_nodes, } if head_node_resources: config["head_node"]["resources"] = head_node_resources return config
function_simple
0
{"cognitive_complexity": 4, "loc": 28, "code_loc": 19, "docstring_loc": 6, "function_name": "convert_cluster_compute_to_kuberay_compute_config", "class_name": null, "qualname": "convert_cluster_compute_to_kuberay_compute_config", "file_path": "release/ray_release/kuberay_util.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"}
langflow-ai/langflow:src/backend/base/langflow/utils/template_validation.py:validate_flow_execution
# Context: import asyncio from typing import Any def validate_template_structure(template_data: dict[str, Any], filename: str) -> list[str]: ... def validate_flow_can_build(template_data: dict[str, Any], filename: str) -> list[str]: ... def validate_flow_code(template_data: dict[str, Any], filename: str) -> list[str]: ... async def _validate_event_stream(response, job_id: str, filename: str, errors: list[str]) -> None: ... # Task: Write a Python async function `validate_flow_execution` to validate flow execution by building and running the flow. Parameters: client, template_data: dict[str, Any], filename: str, headers: dict[str, str] Returns: list[str]
async def validate_flow_execution( client, template_data: dict[str, Any], filename: str, headers: dict[str, str] ) -> list[str]: """Validate flow execution by building and running the flow. Args: client: AsyncClient for API requests template_data: The template data to validate filename: Name of the template file for error reporting headers: Authorization headers for API requests Returns: List of execution errors, empty if execution succeeds """ errors = [] try: # Create a flow from the template with timeout create_response = await client.post("api/v1/flows/", json=template_data, headers=headers, timeout=10) if create_response.status_code != 201: # noqa: PLR2004 errors.append(f"(unknown): Failed to create flow: {create_response.status_code}") return errors flow_id = create_response.json()["id"] try: # Build the flow with timeout build_response = await client.post(f"api/v1/build/{flow_id}/flow", json={}, headers=headers, timeout=10) if build_response.status_code != 200: # noqa: PLR2004 errors.append(f"(unknown): Failed to build flow: {build_response.status_code}") return errors job_id = build_response.json()["job_id"] # Get build events to validate execution events_headers = {**headers, "Accept": "application/x-ndjson"} events_response = await client.get(f"api/v1/build/{job_id}/events", headers=events_headers, timeout=10) if events_response.status_code != 200: # noqa: PLR2004 errors.append(f"(unknown): Failed to get build events: {events_response.status_code}") return errors # Validate the event stream await _validate_event_stream(events_response, job_id, filename, errors) finally: # Clean up the flow with timeout try: # noqa: SIM105 await client.delete(f"api/v1/flows/{flow_id}", headers=headers, timeout=10) except asyncio.TimeoutError: # Log but don't fail if cleanup times out pass except asyncio.TimeoutError: errors.append(f"(unknown): Flow execution timed out") except (ValueError, TypeError, KeyError, AttributeError) as e: errors.append(f"(unknown): Flow execution validation failed: {e!s}") return errors
function_complex
1
{"cognitive_complexity": 11, "loc": 61, "code_loc": 29, "docstring_loc": 11, "function_name": "validate_flow_execution", "class_name": null, "qualname": "validate_flow_execution", "file_path": "src/backend/base/langflow/utils/template_validation.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "file_runnable"}
apache/airflow:providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py:Worker:class_doc
Write a class-level docstring for `Worker` (inherits from WorkerStateBody) which has methods: various methods.
Details of the worker state sent to the scheduler.
documentation
1
{"doc_type": "class", "class_name": "Worker", "file_path": "providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py", "repo_id": "apache/airflow", "char_length": 50, "methods": []}
ocrmypdf/OCRmyPDF:tests/test_ocr_engine_selection.py:TestOcrEnginePluginSelection:class_doc
Write a class-level docstring for `TestOcrEnginePluginSelection` which has methods: `test_tesseract_selected_when_auto`, `test_tesseract_selected_when_tesseract`, `test_null_selected_when_none`, `test_null_returns_none_when_auto`.
Test that get_ocr_engine() hook selects correct engine based on options.
documentation
1
{"doc_type": "class", "class_name": "TestOcrEnginePluginSelection", "file_path": "tests/test_ocr_engine_selection.py", "repo_id": "ocrmypdf/OCRmyPDF", "char_length": 72, "methods": ["test_tesseract_selected_when_auto", "test_tesseract_selected_when_tesseract", "test_null_selected_when_none", "test_null_returns_none_when_auto"]}
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py:TestAsyncSupport.test_sync_method_remains_sync
# Context: import asyncio from crewai.flow import Flow, human_feedback, listen, start class TestHumanFeedbackValidation: ... class TestHumanFeedbackConfig: ... class TestHumanFeedbackResult: ... class TestDecoratorAttributePreservation: ... class TestHumanFeedbackExecution: ... class TestHumanFeedbackHistory: ... class TestCollapseToOutcome: ... class TestHumanFeedbackLearn: ... class TestAsyncSupport: def test_async_method_detection(self): ... # Task: Write a Python test method `test_sync_method_remains_sync` in test class `TestAsyncSupport` to test that sync methods remain synchronous. Module under test: __future__, datetime, typing
def test_sync_method_remains_sync(self): """Test that sync methods remain synchronous.""" @human_feedback(message="Review:") def sync_method(self): return "sync output" assert not asyncio.iscoroutinefunction(sync_method)
test
0
{"function_name": "test_sync_method_remains_sync", "class_name": "TestAsyncSupport", "qualname": "TestAsyncSupport.test_sync_method_remains_sync", "file_path": "lib/crewai/tests/test_human_feedback_decorator.py", "repo_id": "crewAIInc/crewAI", "loc": 8, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.human_feedback"], "has_docstring": true, "runnable_level": "project_runnable"}
exo-explore/exo:src/exo/worker/tests/unittests/test_mlx/test_kv_prefix_cache.py:TestGetPrefixLength.test_empty_prompt
# Context: import mlx.core as mx from exo.worker.engines.mlx.cache import ( KVPrefixCache, cache_length, encode_prompt, get_prefix_length, make_kv_cache, ) def _check_model_exists() -> bool: ... class TestKVPrefix: ... def _load_gpt_oss() -> tuple[Model, object]: ... class TestKVPrefixCacheWithModel: ... class TestGetPrefixLength: def test_identical_arrays(self): ... def test_no_common_prefix(self): ... def test_partial_prefix(self): ... def test_prompt_longer_than_cached(self): ... def test_cached_longer_than_prompt(self): ... def test_single_token_match(self): ... def test_empty_cached(self): ... def test_both_empty(self): ... # Task: Write a Python test method `test_empty_prompt` in test class `TestGetPrefixLength` to verify the behavior of `empty_prompt`. Module under test: typing, mlx_lm.models.cache, mlx_lm.sample_utils
def test_empty_prompt(self): a = mx.array([]).astype(mx.int32) b = mx.array([1, 2, 3]) assert get_prefix_length(a, b) == 0
test
0
{"function_name": "test_empty_prompt", "class_name": "TestGetPrefixLength", "qualname": "TestGetPrefixLength.test_empty_prompt", "file_path": "src/exo/worker/tests/unittests/test_mlx/test_kv_prefix_cache.py", "repo_id": "exo-explore/exo", "loc": 4, "tested_modules": ["typing", "mlx_lm.models.cache", "mlx_lm.sample_utils", "exo.shared.types.common", "exo.shared.types.mlx"], "has_docstring": false, "runnable_level": "project_runnable"}
apache/airflow:providers/google/tests/unit/google/cloud/fs/test_gcs.py:TestGCSFilesystem.test_get_fs_with_anonymous_credentials
# Context: from unittest.mock import MagicMock, patch from airflow.providers.google.cloud.fs.gcs import get_fs from google.auth.credentials import AnonymousCredentials def _setup_connections(): ... class TestGCSFilesystem: def test_get_fs_passes_credentials_object(self, mock_gcsfs, mock_hook): ... def test_get_fs_no_conn_id(self, mock_gcsfs): ... # Task: Write a Python test method `test_get_fs_with_anonymous_credentials` in test class `TestGCSFilesystem` to test that get_fs works with anonymous credentials. Module under test: __future__, airflow.providers.google.cloud.fs.gcs, airflow.providers.google.cloud.fs.gcs
def test_get_fs_with_anonymous_credentials(self, mock_gcsfs, mock_hook): """Test that get_fs works with anonymous credentials.""" from google.auth.credentials import AnonymousCredentials from airflow.providers.google.cloud.fs.gcs import get_fs anonymous_creds = AnonymousCredentials() mock_hook_instance = MagicMock() mock_hook_instance.get_credentials.return_value = anonymous_creds mock_hook_instance.project_id = None mock_hook_instance.extras = {} mock_hook.return_value = mock_hook_instance get_fs(conn_id=TEST_CONN) call_kwargs = mock_gcsfs.call_args.kwargs assert isinstance(call_kwargs["token"], AnonymousCredentials)
test
1
{"function_name": "test_get_fs_with_anonymous_credentials", "class_name": "TestGCSFilesystem", "qualname": "TestGCSFilesystem.test_get_fs_with_anonymous_credentials", "file_path": "providers/google/tests/unit/google/cloud/fs/test_gcs.py", "repo_id": "apache/airflow", "loc": 17, "tested_modules": ["__future__", "airflow.providers.google.cloud.fs.gcs", "airflow.providers.google.cloud.fs.gcs", "google.auth.credentials", "airflow.providers.google.cloud.fs.gcs"], "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_backwards_compat.py:test_old_pattern_1_unparameterized
# Context: from langchain_core.language_models.fake_chat_models import GenericFakeChatModel from langchain_core.messages import AIMessage, HumanMessage from langchain.agents import create_agent class OldStyleMiddleware1(AgentMiddleware): ... class OldStyleMiddleware2(AgentMiddleware[AgentState[Any], ContextT]): ... class OldStyleMiddleware3(AgentMiddleware[AgentState[Any], None]): ... class MyContext(TypedDict): ... class OldStyleMiddleware4(AgentMiddleware[AgentState[Any], MyContext]): ... def old_style_decorator(state: AgentState[Any], runtime: Runtime[None]) -> dict[str, Any] | None: ... class OldStyleAsyncMiddleware(AgentMiddleware[AgentState[Any], ContextT]): ... class OldStyleModelResponseMiddleware(AgentMiddleware): ... def fake_model() -> GenericFakeChatModel: ... def test_old_pattern_2_two_params(fake_model: GenericFakeChatModel) -> None: ... def test_old_pattern_3_explicit_none(fake_model: GenericFakeChatModel) -> None: ... def test_old_pattern_4_specific_context(fake_model: GenericFakeChatModel) -> None: ... def test_old_pattern_5_decorator(fake_model: GenericFakeChatModel) -> None: ... async def test_old_pattern_6_async(fake_model: GenericFakeChatModel) -> None: ... def test_old_pattern_7_model_response_unparameterized(fake_model: GenericFakeChatModel) -> None: ... def test_multiple_old_style_middlewares(fake_model: GenericFakeChatModel) -> None: ... def test_model_response_backwards_compat() -> None: ... def test_model_request_backwards_compat() -> None: ... # Task: Write a Python test function `test_old_pattern_1_unparameterized` to old pattern 1: Completely unparameterized middleware. Module under test: __future__, typing, langchain_core.language_models.fake_chat_models
def test_old_pattern_1_unparameterized(fake_model: GenericFakeChatModel) -> None: """Old pattern 1: Completely unparameterized middleware.""" agent = create_agent( model=fake_model, middleware=[OldStyleMiddleware1()], ) result = agent.invoke({"messages": [HumanMessage(content="hi")]}) assert "messages" in result assert len(result["messages"]) >= 1
test
1
{"function_name": "test_old_pattern_1_unparameterized", "class_name": null, "qualname": "test_old_pattern_1_unparameterized", "file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_backwards_compat.py", "repo_id": "langchain-ai/langchain", "loc": 9, "tested_modules": ["__future__", "typing", "langchain_core.language_models.fake_chat_models", "langchain_core.messages", "typing_extensions"], "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/utils.py:validate_model_slug
# Context: class Model(BaseModel): ... def is_supported_model_slug(model_id: str) -> bool: ... def get_supported_models() -> List[str]: ... def get_available_models_dynamic(client) -> List[Model]: ... def validate_model_dynamic(client, model_name: str) -> None: ... # Task: Write a Python function `validate_model_slug` to validate that the model_id is a supported model slug for Baseten Model APIs. Parameters: model_id: str Returns: None
def validate_model_slug(model_id: str) -> None: """ Validate that the model_id is a supported model slug for Baseten Model APIs. Args: model_id: The model ID to validate Raises: ValueError: If the model_id is not a supported model slug """ if model_id not in SUPPORTED_MODEL_SLUGS: raise ValueError( f"Model '{model_id}' is not supported by Baseten Model APIs. " f"Supported models are: {', '.join(SUPPORTED_MODEL_SLUGS)}" )
function_simple
1
{"cognitive_complexity": 1, "loc": 16, "code_loc": 5, "docstring_loc": 10, "function_name": "validate_model_slug", "class_name": null, "qualname": "validate_model_slug", "file_path": "llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/utils.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "file_runnable"}
commaai/openpilot:system/ui/lib/tests/test_handle_state_change.py:TestDisconnected.test_new_activation_is_noop
# Context: from openpilot.system.ui.lib.networkmanager import NMDeviceState, NMDeviceStateReason from openpilot.system.ui.lib.wifi_manager import WifiManager, WifiState, ConnectStatus def _make_wm(mocker: MockerFixture, connections): ... def fire(wm: WifiManager, new_state: int, prev_state: int, reason: int) -> None: ... def fire_wpa_connect(wm: WifiManager) -> None: ... class TestDeactivating: ... class TestPrepareConfig: ... class TestNeedAuth: ... class TestPassthroughStates: ... class TestActivated: ... class TestThreadRaces: ... class TestFullSequences: ... class TestWorkerErrorRecovery: ... class TestDisconnected: def test_generic_disconnect_clears_state(self, mocker): ... def test_connection_removed_keeps_other_connecting(self, mocker): ... def test_connection_removed_clears_when_forgotten(self, mocker): ... # Task: Write a Python test method `test_new_activation_is_noop` in test class `TestDisconnected` to verify that NEW_ACTIVATION means NM is about to connect to another network — don't clear. Module under test: jeepney.low_level, openpilot.system.ui.lib.networkmanager, openpilot.system.ui.lib.wifi_manager
def test_new_activation_is_noop(self, mocker): """NEW_ACTIVATION means NM is about to connect to another network — don't clear.""" wm = _make_wm(mocker) wm._wifi_state = WifiState(ssid="OldNet", status=ConnectStatus.CONNECTED) fire(wm, NMDeviceState.DISCONNECTED, reason=NMDeviceStateReason.NEW_ACTIVATION) assert wm._wifi_state.ssid == "OldNet" assert wm._wifi_state.status == ConnectStatus.CONNECTED
test
0
{"function_name": "test_new_activation_is_noop", "class_name": "TestDisconnected", "qualname": "TestDisconnected.test_new_activation_is_noop", "file_path": "system/ui/lib/tests/test_handle_state_change.py", "repo_id": "commaai/openpilot", "loc": 9, "tested_modules": ["jeepney.low_level", "openpilot.system.ui.lib.networkmanager", "openpilot.system.ui.lib.wifi_manager"], "has_docstring": true, "runnable_level": "project_runnable"}
unclecode/crawl4ai:docs/examples/demo_multi_config_clean.py:print_section
Write a Python function `print_section` to print a formatted section header. Parameters: title
def print_section(title): """Print a formatted section header""" print(f"\n{'=' * 60}") print(f"{title}") print(f"{'=' * 60}\n")
function_simple
1
{"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "print_section", "class_name": null, "qualname": "print_section", "file_path": "docs/examples/demo_multi_config_clean.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "self_contained"}
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py:TestPrefixTreeInitialization.test_add_existing_tenant_noop
# Context: from ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree import ( Node, PrefixTree, PrefixTreeActor, ) def tree() -> PrefixTree: ... def tree_actor(): ... def get_lru_texts_from_tree(tree: PrefixTree, tenant_id: str) -> List[str]: ... async def get_lru_texts_from_tree_actor(tree_actor: PrefixTreeActor, tenant_id: str) -> List[str]: ... class TestPrefixTreeInsert: ... class TestPrefixTreeMatch: ... class TestPrefixTreeRemove: ... class TestPrefixTreeEviction: ... class TestPrefixTreeGetSmallestTenants: ... class TestPrefixTreeComprehensive: ... class TestPrefixTreeActorComprehensive: ... class TestPrefixTreeActorEvictionLoop: ... class TestPrefixTreeInitialization: def test_initial_state(self, tree: PrefixTree) -> None: ... def test_add_tenant(self, tree: PrefixTree) -> None: ... def test_add_multiple_tenants(self, tree: PrefixTree) -> None: ... def test_add_multiple_tenants_with_existing(self, tree: PrefixTree) -> None: ... # Task: Write a Python test method `test_add_existing_tenant_noop` in test class `TestPrefixTreeInitialization` to test that adding an existing tenant via add_tenants is a no-op. Module under test: typing, ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree
def test_add_existing_tenant_noop(self, tree: PrefixTree) -> None: """Test that adding an existing tenant via add_tenants is a no-op.""" tree.add_tenants(["tenant_1"], 0) assert tree.tenant_to_char_count == {"tenant_1": 0} assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root assert tree.root.tenant_to_last_access_time == {"tenant_1": 0} assert get_lru_texts_from_tree(tree, "tenant_1") == [""] tree.add_tenants(["tenant_1"], 0) # Add again assert tree.tenant_to_char_count == {"tenant_1": 0} assert tree.tenant_to_lru_tail.get("tenant_1") == tree.root assert tree.root.tenant_to_last_access_time == {"tenant_1": 0} assert get_lru_texts_from_tree(tree, "tenant_1") == [""]
test
0
{"function_name": "test_add_existing_tenant_noop", "class_name": "TestPrefixTreeInitialization", "qualname": "TestPrefixTreeInitialization.test_add_existing_tenant_noop", "file_path": "python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py", "repo_id": "ray-project/ray", "loc": 14, "tested_modules": ["typing", "ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree"], "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai-files/tests/test_resolved.py:TestInlineBase64.test_create_inline_base64
# Context: from crewai_files.core.resolved import ( FileReference, InlineBase64, InlineBytes, ResolvedFile, UrlReference, ) class TestInlineBytes: ... class TestFileReference: ... class TestUrlReference: ... class TestInlineBase64: def test_inline_base64_is_resolved_file(self): ... def test_inline_base64_frozen(self): ... # Task: Write a Python test method `test_create_inline_base64` in test class `TestInlineBase64` to test creating InlineBase64 instance. Module under test: datetime, crewai_files.core.resolved
def test_create_inline_base64(self): """Test creating InlineBase64 instance.""" resolved = InlineBase64( content_type="image/png", data="iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==", ) assert resolved.content_type == "image/png" assert len(resolved.data) > 0
test
0
{"function_name": "test_create_inline_base64", "class_name": "TestInlineBase64", "qualname": "TestInlineBase64.test_create_inline_base64", "file_path": "lib/crewai-files/tests/test_resolved.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["datetime", "crewai_files.core.resolved"], "has_docstring": true, "runnable_level": "project_runnable"}
unslothai/unsloth:unsloth/kernels/moe/grouped_gemm/reference/moe_ops.py:permute
# Context: import torch def unpermute(X: torch.Tensor, gather_indices: torch.Tensor): ... def calculate_topk(gating_output: torch.Tensor, top_k: int, use_sigmoid: bool, renormalize: bool, pre_act: bool, post_act: bool): ... def get_routing_indices(selected_experts, num_experts, return_scatter_indices: bool): ... def torch_grouped_gemm(X, W, m_sizes, transpose): ... # Task: Write a Python function `permute` to scatter X to a new tensor with shape [total_tokens, hidden_dim] where total_tokens is num_tokens * topk. Parameters: X: torch.Tensor, gather_indices: torch.Tensor, topk: int
def permute(X: torch.Tensor, gather_indices: torch.Tensor, topk: int): """ Scatters X to a new tensor with shape [total_tokens, hidden_dim] where total_tokens is num_tokens * topk, permuting the tokens according to sorted_token_idx. Helper for grouped gemm where hidden states need be ordered by expert. X: [num_tokens, hidden_dim] sorted_token_idx: [num_tokens * topk] topk: int Returns: [total_tokens, hidden_dim] """ assert gather_indices.ndim == 1 X = X.view(-1, X.shape[-1]) # Shortcut for topk == 1 if topk == 1: return X[gather_indices] return X[gather_indices // topk]
function_simple
0
{"cognitive_complexity": 1, "loc": 20, "code_loc": 5, "docstring_loc": 12, "function_name": "permute", "class_name": null, "qualname": "permute", "file_path": "unsloth/kernels/moe/grouped_gemm/reference/moe_ops.py", "repo_id": "unslothai/unsloth", "has_docstring": true, "runnable_level": "plib_runnable"}
apache/airflow:providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py:TestCamelizeDictKeys.test_camelize_dict_keys_with_mock
# Context: from unittest import mock from airflow.providers.amazon.aws.executors.ecs.utils import ( AllEcsConfigKeys, EcsExecutorException, EcsExecutorTask, EcsQueuedTask, EcsTaskCollection, EcsTaskInfo, RunTaskKwargsConfigKeys, _recursive_flatten_dict, camelize_dict_keys, parse_assign_public_ip, ) class TestEcsQueuedTask: ... class TestEcsTaskInfo: ... class TestRunTaskKwargsConfigKeys: ... class TestAllEcsConfigKeys: ... class TestEcsExecutorException: ... class TestEcsExecutorTask: ... class TestEcsTaskCollection: ... class TestRecursiveFlattenDict: ... class TestParseAssignPublicIp: ... class TestCamelizeDictKeys: def test_camelize_flat_dict(self): ... def test_camelize_nested_dict(self): ... def test_camelize_dict_with_tags(self): ... def test_camelize_dict_with_tags_uppercase(self): ... def test_camelize_dict_with_mixed_case_tags(self): ... def test_camelize_empty_dict(self): ... def test_camelize_dict_with_non_dict_values(self): ... # Task: Write a Python test method `test_camelize_dict_keys_with_mock` in test class `TestCamelizeDictKeys` to test camelize_dict_keys with mocked camelize function. Module under test: __future__, airflow.models.taskinstance, airflow.providers.amazon.aws.executors.ecs.utils
def test_camelize_dict_keys_with_mock(self, mock_camelize): """Test camelize_dict_keys with mocked camelize function.""" mock_camelize.side_effect = lambda x, uppercase_first_letter=False: f"camelized_{x}" input_dict = {"test_key": {"nested_key": "value"}} result = camelize_dict_keys(input_dict) expected = {"camelized_test_key": {"camelized_nested_key": "value"}} assert result == expected mock_camelize.assert_called()
test
1
{"function_name": "test_camelize_dict_keys_with_mock", "class_name": "TestCamelizeDictKeys", "qualname": "TestCamelizeDictKeys.test_camelize_dict_keys_with_mock", "file_path": "providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py", "repo_id": "apache/airflow", "loc": 10, "tested_modules": ["__future__", "airflow.models.taskinstance", "airflow.providers.amazon.aws.executors.ecs.utils", "airflow.utils.state"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/teradata/tests/unit/teradata/utils/test_tpt_util.py:TestTptUtil.test_verify_tpt_utility_installed_not_found
# Context: from unittest.mock import Mock, patch import pytest from airflow.providers.teradata.utils.tpt_util import ( TPTConfig, decrypt_remote_file, execute_remote_command, get_remote_os, get_remote_temp_directory, is_valid_file, is_valid_remote_job_var_file, prepare_tdload_job_var_file, prepare_tpt_ddl_script, read_file, remote_secure_delete, secure_delete, set_local_file_permissions, set_remote_file_permissions, terminate_subprocess, transfer_file_sftp, verify_tpt_utility_installed, verify_tpt_utility_on_remote_host, write_file, ) class TestTptUtil: def test_write_file(self): ... def test_secure_delete_file_not_exists(self, mock_exists): ... def test_secure_delete_with_shred(self, mock_exists, mock_subprocess, mock_which): ... def test_secure_delete_without_shred(self, mock_exists, mock_remove, mock_which): ... def test_secure_delete_os_error(self, mock_exists, mock_remove, mock_which): ... def test_remote_secure_delete_no_ssh_client(self): ... def test_remote_secure_delete_no_files(self): ... def test_remote_secure_delete_with_shred(self, mock_execute_cmd, mock_get_remote_os): ... def test_remote_secure_delete_without_shred(self, mock_execute_cmd, mock_get_remote_os): ... def test_remote_secure_delete_windows(self, mock_execute_cmd, mock_get_remote_os): ... def test_remote_secure_delete_exception(self, mock_get_remote_os): ... def test_terminate_subprocess_none(self): ... def test_terminate_subprocess_running(self): ... def test_terminate_subprocess_timeout(self): ... def test_terminate_subprocess_kill_error(self): ... def test_terminate_subprocess_not_running(self): ... def test_terminate_subprocess_terminate_error(self): ... def test_verify_tpt_utility_installed_success(self, mock_which): ... def test_verify_tpt_utility_on_remote_host_success(self, mock_execute_cmd, mock_get_remote_os): ... def test_verify_tpt_utility_on_remote_host_windows(self, mock_execute_cmd, mock_get_remote_os): ... 
def test_verify_tpt_utility_on_remote_host_exception(self, mock_execute_cmd, mock_get_remote_os): ... def test_prepare_tpt_ddl_script_basic(self): ... def test_prepare_tpt_ddl_script_auto_job_name(self): ... def test_prepare_tpt_ddl_script_empty_sql(self): ... def test_prepare_tpt_ddl_script_invalid_sql(self): ... def test_prepare_tpt_ddl_script_empty_statements(self): ... def test_prepare_tpt_ddl_script_sql_escaping(self): ... def test_decrypt_remote_file_success(self, mock_execute_cmd, mock_get_remote_os): ... def test_decrypt_remote_file_with_quotes_in_password(self, mock_execute_cmd, mock_get_remote_os): ... def test_decrypt_remote_file_windows(self, mock_execute_cmd, mock_get_remote_os): ... def test_decrypt_remote_file_failure(self, mock_execute_cmd, mock_get_remote_os): ... def test_tpt_config_constants(self): ... def test_execute_remote_command_success(self): ... def test_get_remote_os_windows(self): ... def test_get_remote_os_unix(self): ... def test_get_remote_os_exception(self): ... def test_set_local_file_permissions_success(self): ... def test_set_local_file_permissions_file_not_exists(self): ... def test_set_local_file_permissions_empty_path(self): ... def test_set_remote_file_permissions_unix(self, mock_execute_cmd, mock_get_remote_os): ... def test_set_remote_file_permissions_windows(self, mock_execute_cmd, mock_get_remote_os): ... def test_set_remote_file_permissions_failure(self, mock_execute_cmd, mock_get_remote_os): ... def test_get_remote_temp_directory_windows(self, mock_execute_cmd, mock_get_remote_os): ... def test_get_remote_temp_directory_unix(self, mock_get_remote_os): ... def test_get_remote_temp_directory_windows_fallback(self, mock_execute_cmd, mock_get_remote_os): ... def test_transfer_file_sftp_success(self): ... def test_transfer_file_sftp_local_file_not_exists(self): ... def test_transfer_file_sftp_transfer_error(self): ... def test_verify_tpt_utility_on_remote_host_not_found(self, mock_execute_cmd, mock_get_remote_os): ... 
def test_is_valid_file_true(self): ... def test_is_valid_file_false(self): ... def test_is_valid_file_directory(self): ... def test_prepare_tdload_job_var_file_file_to_table(self): ... def test_prepare_tdload_job_var_file_table_to_file(self): ... def test_prepare_tdload_job_var_file_table_to_file_with_select(self): ... def test_prepare_tdload_job_var_file_table_to_table(self): ... def test_prepare_tdload_job_var_file_table_to_table_no_target_conn(self): ... def test_is_valid_remote_job_var_file_success(self): ... def test_is_valid_remote_job_var_file_not_regular_file(self): ... def test_is_valid_remote_job_var_file_not_found(self): ... def test_is_valid_remote_job_var_file_empty_path(self): ... def test_is_valid_remote_job_var_file_none_path(self): ... def test_read_file_success(self): ... def test_read_file_with_encoding(self): ... def test_read_file_not_found(self): ... # Task: Write a Python test method `test_verify_tpt_utility_installed_not_found` in test class `TestTptUtil` to test verify_tpt_utility_installed when utility is not found. Module under test: __future__, airflow.providers.teradata.utils.tpt_util
def test_verify_tpt_utility_installed_not_found(self, mock_which): """Test verify_tpt_utility_installed when utility is not found.""" mock_which.return_value = None with pytest.raises(FileNotFoundError, match="TPT utility 'tdload' is not installed"): verify_tpt_utility_installed("tdload")
test
1
{"function_name": "test_verify_tpt_utility_installed_not_found", "class_name": "TestTptUtil", "qualname": "TestTptUtil.test_verify_tpt_utility_installed_not_found", "file_path": "providers/teradata/tests/unit/teradata/utils/test_tpt_util.py", "repo_id": "apache/airflow", "loc": 6, "tested_modules": ["__future__", "airflow.providers.teradata.utils.tpt_util"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:scripts/ci/prek/check_notice_files.py:license_header
Add a Apache-2.0 license header comment for the project 'airflow', authored by Unknown, year Unknown.
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # /// script # requires-python = ">=3.10, <3.11" # dependencies = [] # ///
license
1
{"license_type": "Apache-2.0", "author": "Unknown", "year": "Unknown", "source": "header", "repo_id": "apache/airflow"}
vllm-project/vllm:vllm/model_executor/models/arcee.py:ArceeModel.load_weights
# Context: from collections.abc import Iterable import torch from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, maybe_remap_kv_scale_name, ) from .utils import ( AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, ) class ArceeMLP(nn.Module): ... class ArceeDecoderLayer(nn.Module): ... class ArceeForCausalLM(nn.Module, SupportsLoRA, SupportsPP): ... class ArceeModel(nn.Module): def __init__( self, *, vllm_config, prefix: str = "", layer_type: type[nn.Module] = ArceeDecoderLayer, ) -> None: super().__init__() config: LlamaConfig = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config self.quant_config = quant_config self.config = config self.vocab_size = config.vocab_size # Word embeddings (parallelized if using pipeline parallel) if get_pp_group().is_first_rank or ( config.tie_word_embeddings and get_pp_group().is_last_rank ): self.embed_tokens = VocabParallelEmbedding( self.vocab_size, config.hidden_size, quant_config=quant_config, ) else: self.embed_tokens = PPMissingLayer() # placeholder on non-embedding ranks # Build decoder layers across pipeline ranks self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: layer_type( config=config, cache_config=cache_config, quant_config=quant_config, prefix=prefix, ), prefix=f"{prefix}.layers", ) # Final RMSNorm on the last pipeline stage if get_pp_group().is_last_rank: self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) else: self.norm = PPMissingLayer() # For optional capturing of intermediate hidden states # (not used by default) self.aux_hidden_state_layers: tuple[int, ...] 
= tuple() # Prepare factory for empty intermediate tensors # (for pipeline scheduling) self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size ) def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor: ... def forward(self, input_ids: torch.Tensor | None, positions: torch.Tensor, intermediate_tensors: IntermediateTensors | None, inputs_embeds: torch.Tensor | None) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]: ... # Task: Write a Python method `load_weights` for the class `ArceeModel` to load weights, mapping q/k/v projections to fused qkv_proj. Parameters: weights: Iterable[tuple[str, torch.Tensor]] Returns: set[str]
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: """Load weights, mapping q/k/v projections to fused qkv_proj.""" stacked_params_mapping = [ (".qkv_proj", ".q_proj", "q"), (".qkv_proj", ".k_proj", "k"), (".qkv_proj", ".v_proj", "v"), ] params_dict = dict(self.named_parameters()) loaded_params: set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name: continue if self.quant_config is not None and ( scale_name := self.quant_config.get_cache_scale(name) ): param = params_dict[scale_name] weight_loader = getattr(param, "weight_loader", default_weight_loader) loaded_weight = ( loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0] ) weight_loader(param, loaded_weight) loaded_params.add(scale_name) continue if "scale" in name or "zero_point" in name: remapped_name = maybe_remap_kv_scale_name(name, params_dict) if remapped_name is None: continue name = remapped_name mapped = False for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue name = name.replace(weight_name, param_name) if name.endswith(".bias") and name not in params_dict: mapped = True break if is_pp_missing_parameter(name, self): mapped = True break param = params_dict[name] weight_loader = param.weight_loader # type: ignore[attr-defined] weight_loader(param, loaded_weight, shard_id) loaded_params.add(name) mapped = True break if mapped: continue if name.endswith(".bias") and name not in params_dict: continue if is_pp_missing_parameter(name, self): continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) return loaded_params
function_complex
1
{"cognitive_complexity": 37, "loc": 72, "code_loc": 56, "docstring_loc": 1, "function_name": "load_weights", "class_name": "ArceeModel", "qualname": "ArceeModel.load_weights", "file_path": "vllm/model_executor/models/arcee.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/paddleocr_vl/modular_paddleocr_vl.py:PaddleOCRVLImageProcessor:class_doc
Write a class-level docstring for `PaddleOCRVLImageProcessor` (inherits from Qwen2VLImageProcessor) which has methods: `__init__`, `_preprocess`.
Constructs a PaddleOCRVL image processor that dynamically resizes images based on the original images. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions. size (`dict[str, int]`, *optional*): Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*): Mean to use if normalizing the image. This is a float or list of floats for each channel in the image. image_std (`float` or `list[float]`, *optional*): Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image. do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. min_pixels (`int`, *optional*, defaults to `384 * 384`): The min pixels of the image to resize the image. max_pixels (`int`, *optional*, defaults to `1536 * 1536`): The max pixels of the image to resize the image. patch_size (`int`, *optional*, defaults to 14): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to 1): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder.
documentation
0
{"doc_type": "class", "class_name": "PaddleOCRVLImageProcessor", "file_path": "src/transformers/models/paddleocr_vl/modular_paddleocr_vl.py", "repo_id": "huggingface/transformers", "char_length": 1892, "methods": ["__init__", "_preprocess"]}
browser-use/browser-use:tests/ci/test_action_loop_detection.py:test_scroll_different_elements_different_hash
# Context: from browser_use.agent.views import ( ActionLoopDetector, PageFingerprint, compute_action_hash, ) def _get_context_messages(agent: Agent) -> list[str]: ... def test_search_normalization_ignores_keyword_order(): ... def test_search_normalization_ignores_case(): ... def test_search_normalization_ignores_punctuation(): ... def test_search_normalization_deduplicates_tokens(): ... def test_search_different_queries_produce_different_hashes(): ... def test_click_same_index_same_hash(): ... def test_click_different_index_different_hash(): ... def test_input_same_element_same_text(): ... def test_input_different_text_different_hash(): ... def test_navigate_same_url_same_hash(): ... def test_navigate_different_paths_different_hash(): ... def test_navigate_different_domain_different_hash(): ... def test_scroll_direction_matters(): ... def test_scroll_same_element_same_hash(): ... def test_different_action_types_different_hashes(): ... def test_detector_no_nudge_for_diverse_actions(): ... def test_detector_nudge_at_5_repeats(): ... def test_detector_no_nudge_at_4_repeats(): ... def test_detector_nudge_escalates_at_8_repeats(): ... def test_detector_nudge_escalates_at_12_repeats(): ... def test_detector_critical_message_no_done_directive(): ... def test_detector_first_nudge_no_cannot_complete(): ... def test_detector_window_slides(): ... def test_detector_search_variations_detected_as_same(): ... def test_page_stagnation_no_nudge_when_pages_change(): ... def test_page_stagnation_nudge_at_5_identical_pages(): ... def test_page_stagnation_no_nudge_at_4_identical_pages(): ... def test_page_stagnation_resets_on_change(): ... def test_combined_loop_and_stagnation(): ... def test_page_fingerprint_same_content_equal(): ... def test_page_fingerprint_different_content_not_equal(): ... def test_page_fingerprint_different_url_not_equal(): ... def test_page_fingerprint_different_element_count_not_equal(): ... async def test_loop_nudge_injected_into_context(): ... 
async def test_no_loop_nudge_when_disabled(): ... async def test_no_loop_nudge_for_diverse_actions(): ... async def test_loop_detector_initialized_from_settings(): ... async def test_loop_detector_default_window_size(): ... # Task: Write a Python test function `test_scroll_different_elements_different_hash` to scrolling different elements produces different hashes. Module under test: browser_use.agent.service, browser_use.agent.views, browser_use.llm.messages
def test_scroll_different_elements_different_hash(): """Scrolling different elements produces different hashes.""" h1 = compute_action_hash('scroll', {'down': True, 'index': 5}) h2 = compute_action_hash('scroll', {'down': True, 'index': 10}) assert h1 != h2
test
0
{"function_name": "test_scroll_different_elements_different_hash", "class_name": null, "qualname": "test_scroll_different_elements_different_hash", "file_path": "tests/ci/test_action_loop_detection.py", "repo_id": "browser-use/browser-use", "loc": 5, "tested_modules": ["browser_use.agent.service", "browser_use.agent.views", "browser_use.llm.messages", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"}
karpathy/nanochat:nanochat/report.py:module_doc
Write a module-level docstring for the Python module `report` which contains function `run_command`, function `get_git_info`, function `get_gpu_info`, function `get_system_info`, function `estimate_cost`.
Utilities for generating training report cards. More messy code than usual, will fix.
documentation
0
{"doc_type": "module", "module_name": "report", "file_path": "nanochat/report.py", "repo_id": "karpathy/nanochat", "char_length": 85}
TheAlgorithms/Python:sorts/cyclic_sort.py:cyclic_sort
Write a Python function `cyclic_sort` to sorts the input list of n integers from 1 to n in-place. Parameters: nums: list[int] Returns: list[int]
def cyclic_sort(nums: list[int]) -> list[int]: """ Sorts the input list of n integers from 1 to n in-place using the Cyclic Sort algorithm. :param nums: List of n integers from 1 to n to be sorted. :return: The same list sorted in ascending order. Time complexity: O(n), where n is the number of integers in the list. Examples: >>> cyclic_sort([]) [] >>> cyclic_sort([3, 5, 2, 1, 4]) [1, 2, 3, 4, 5] """ # Perform cyclic sort index = 0 while index < len(nums): # Calculate the correct index for the current element correct_index = nums[index] - 1 # If the current element is not at its correct position, # swap it with the element at its correct index if index != correct_index: nums[index], nums[correct_index] = nums[correct_index], nums[index] else: # If the current element is already in its correct position, # move to the next element index += 1 return nums
function_simple
1
{"cognitive_complexity": 4, "loc": 32, "code_loc": 8, "docstring_loc": 15, "function_name": "cyclic_sort", "class_name": null, "qualname": "cyclic_sort", "file_path": "sorts/cyclic_sort.py", "repo_id": "TheAlgorithms/Python", "has_docstring": true, "runnable_level": "self_contained"}
ray-project/ray:ci/raydepsets/cli.py:DependencySetManager.relax
# Context: from typing import List, Optional import click def cli(): ... def build(config_path: str, workspace_dir: Optional[str], name: Optional[str], uv_cache_dir: Optional[str], check: Optional[bool], all_configs: Optional[bool]): ... def _get_bytes(packages: List[str]) -> bytes: ... def _get_depset(depsets: List[Depset], name: str) -> Depset: ... def _flatten_flags(flags: List[str]) -> List[str]: ... def _override_uv_flags(flags: List[str], args: List[str]) -> List[str]: ... def parse_lock_file(lock_file_path: str) -> RequirementsFile: ... def write_lock_file(requirements_file: RequirementsFile, lock_file_path: str): ... def _uv_binary(): ... class DependencySetManager: def __init__( self, config_path: str = None, workspace_dir: Optional[str] = None, uv_cache_dir: Optional[str] = None, check: Optional[bool] = False, build_all_configs: Optional[bool] = False, ): """Initialize the dependency set manager. Args: config_path: Path to the depsets config file. workspace_dir: Path to the workspace directory. uv_cache_dir: Directory to cache uv dependencies. check: Whether to check if lock files are up to date. build_all_configs: Whether to build all configs or just the specified one. """ self.workspace = Workspace(workspace_dir) self.config = self.workspace.load_configs(config_path) self.config_name = os.path.basename(config_path) self.build_graph = DiGraph() self._build(build_all_configs) self._uv_binary = _uv_binary() self._uv_cache_dir = uv_cache_dir if check: self.temp_dir = tempfile.mkdtemp() self.output_paths = self.get_output_paths() self.copy_to_temp_dir() def get_output_paths(self) -> List[Path]: ... def copy_to_temp_dir(self): ... def get_diffs(self) -> List[str]: ... def diff_lock_files(self): ... def get_source_and_dest(self, output_path: str) -> tuple[Path, Path]: ... def _build(self, build_all_configs: Optional[bool]): ... def subgraph_dependency_nodes(self, depset_name: str): ... def subgraph_config_nodes(self): ... 
def execute(self, single_depset_name: Optional[str]): ... def exec_uv_cmd(self, cmd: str, args: List[str], stdin: Optional[bytes]) -> str: ... def execute_pre_hook(self, pre_hook: str): ... def execute_depset(self, depset: Depset): ... def compile(self, constraints: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], packages: Optional[List[str]], requirements: Optional[List[str]], include_setuptools: Optional[bool]): ... def subset(self, source_depset: str, requirements: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], include_setuptools: Optional[bool]): ... def expand(self, depsets: List[str], requirements: List[str], constraints: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], include_setuptools: Optional[bool]): ... def read_lock_file(self, file_path: Path) -> List[str]: ... def get_path(self, path: str) -> Path: ... def check_subset_exists(self, source_depset: Depset, requirements: List[str]): ... def get_expanded_depset_requirements(self, depset_name: str, requirements_list: List[str]) -> List[str]: ... def cleanup(self): ... # Task: Write a Python method `relax` for the class `DependencySetManager` to relax a dependency set by removing specified packages from the lock file. Parameters: source_depset: str, packages: List[str], name: str, output: str
def relax( self, source_depset: str, packages: List[str], name: str, output: str = None, ): """Relax a dependency set by removing specified packages from the lock file.""" source_depset = _get_depset(self.config.depsets, source_depset) lock_file_path = self.get_path(source_depset.output) requirements_file = parse_lock_file(str(lock_file_path)) requirements_list = [req.name for req in requirements_file.requirements] for package in packages: if package not in requirements_list: raise RuntimeError( f"Package {package} not found in lock file {source_depset.output}" ) # Remove specified packages from requirements requirements_file.requirements = [ req for req in requirements_file.requirements if req.name not in packages ] # Write the modified lock file output_path = self.get_path(output) if output else lock_file_path write_lock_file(requirements_file, str(output_path)) click.echo( f"Relaxed {source_depset.name} by removing packages {packages} and wrote to {output_path}" )
function_simple
0
{"cognitive_complexity": 4, "loc": 30, "code_loc": 17, "docstring_loc": 1, "function_name": "relax", "class_name": "DependencySetManager", "qualname": "DependencySetManager.relax", "file_path": "ci/raydepsets/cli.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/train/tests/test_iter_torch_batches_gpu.py:BasePandasBatchCollateFn:class_doc
Write a class-level docstring for `BasePandasBatchCollateFn` (inherits from PandasBatchCollateFn) which has methods: `__init__`, `_process_batch`, `_get_tensors`.
Base class for Pandas batch collate functions that process and convert to tensors. This class provides common functionality for processing Pandas DataFrames and converting them to PyTorch tensors. It handles device placement and dtype conversion. Attributes: device: Optional device to place tensors on. Can be a string (e.g. "cpu", "cuda:0") or a torch.device object.
documentation
0
{"doc_type": "class", "class_name": "BasePandasBatchCollateFn", "file_path": "python/ray/train/tests/test_iter_torch_batches_gpu.py", "repo_id": "ray-project/ray", "char_length": 382, "methods": ["__init__", "_process_batch", "_get_tensors"]}
unclecode/crawl4ai:tests/cache_validation/test_head_fingerprint.py:TestHeadFingerprint.test_extracts_og_tags
# Context: from crawl4ai.utils import compute_head_fingerprint class TestHeadFingerprint: def test_same_content_same_fingerprint(self): ... def test_different_title_different_fingerprint(self): ... def test_empty_head_returns_empty_string(self): ... def test_head_without_signals_returns_empty(self): ... def test_extracts_title(self): ... def test_extracts_meta_description(self): ... def test_extracts_og_image(self): ... def test_extracts_article_modified_time(self): ... def test_case_insensitive(self): ... def test_handles_attribute_order(self): ... def test_real_world_head(self): ... # Task: Write a Python test method `test_extracts_og_tags` in test class `TestHeadFingerprint` to open Graph tags are extracted. Module under test: crawl4ai.utils
def test_extracts_og_tags(self): """Open Graph tags are extracted.""" head1 = '<head><meta property="og:title" content="OG Title"></head>' head2 = '<head><meta property="og:title" content="Different OG Title"></head>' assert compute_head_fingerprint(head1) != compute_head_fingerprint(head2)
test
1
{"function_name": "test_extracts_og_tags", "class_name": "TestHeadFingerprint", "qualname": "TestHeadFingerprint.test_extracts_og_tags", "file_path": "tests/cache_validation/test_head_fingerprint.py", "repo_id": "unclecode/crawl4ai", "loc": 5, "tested_modules": ["crawl4ai.utils"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/agents/test_native_tool_calling.py:TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling
# Context: import pytest from crewai import Agent, Crew, Task from crewai.llm import LLM class CalculatorInput(BaseModel): ... class CalculatorTool(BaseTool): ... class WeatherInput(BaseModel): ... class WeatherTool(BaseTool): ... class FailingTool(BaseTool): ... class LocalSearchInput(BaseModel): ... class ParallelProbe: ... def _parallel_prompt() -> str: ... def _max_concurrency(windows: list[tuple[str, float, float]]) -> int: ... def _assert_tools_overlapped() -> None: ... def calculator_tool() -> CalculatorTool: ... def weather_tool() -> WeatherTool: ... def failing_tool() -> BaseTool: ... def parallel_tools() -> list[BaseTool]: ... def _attach_parallel_probe_handler() -> None: ... class TestAnthropicNativeToolCalling: ... class TestGeminiNativeToolCalling: ... class TestAzureNativeToolCalling: ... class TestBedrockNativeToolCalling: ... class TestNativeToolCallingBehavior: ... class TestNativeToolCallingTokenUsage: ... def test_native_tool_calling_error_handling(failing_tool: FailingTool): ... class CountingInput(BaseModel): ... class CountingTool(BaseTool): ... class TestMaxUsageCountWithNativeToolCalling: ... class TestNativeToolCallingJsonParseError: ... class TestOpenAINativeToolCalling: def test_openai_agent_kickoff_with_tools_mocked(self, calculator_tool: CalculatorTool) -> None: ... def test_openai_parallel_native_tool_calling_test_crew(self, parallel_tools: list[BaseTool]) -> None: ... def test_openai_parallel_native_tool_calling_test_agent_kickoff(self, parallel_tools: list[BaseTool]) -> None: ... def test_openai_parallel_native_tool_calling_tool_hook_parity_crew(self, parallel_tools: list[BaseTool]) -> None: ... def test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff(self, parallel_tools: list[BaseTool]) -> None: ... # Task: Write a Python test method `test_openai_agent_with_native_tool_calling` in test class `TestOpenAINativeToolCalling` to test OpenAI agent can use native tool calling. 
Module under test: __future__, collections.abc, collections
def test_openai_agent_with_native_tool_calling( self, calculator_tool: CalculatorTool ) -> None: """Test OpenAI agent can use native tool calling.""" agent = Agent( role="Math Assistant", goal="Help users with mathematical calculations", backstory="You are a helpful math assistant.", tools=[calculator_tool], llm=LLM(model="gpt-4o-mini"), verbose=False, max_iter=3, ) task = Task( description="Calculate what is 15 * 8", expected_output="The result of the calculation", agent=agent, ) crew = Crew(agents=[agent], tasks=[task]) result = crew.kickoff() assert result is not None assert result.raw is not None assert "120" in str(result.raw)
test
0
{"function_name": "test_openai_agent_with_native_tool_calling", "class_name": "TestOpenAINativeToolCalling", "qualname": "TestOpenAINativeToolCalling.test_openai_agent_with_native_tool_calling", "file_path": "lib/crewai/tests/agents/test_native_tool_calling.py", "repo_id": "crewAIInc/crewAI", "loc": 26, "tested_modules": ["__future__", "collections.abc", "collections", "pydantic", "crewai"], "has_docstring": true, "runnable_level": "project_runnable"}
ocrmypdf/OCRmyPDF:src/ocrmypdf/builtin_plugins/null_ocr.py:get_ocr_engine
# Context: class NullOcrEngine(OcrEngine): ... # Task: Write a Python function `get_ocr_engine` to return NullOcrEngine when --ocr-engine none is selected. Parameters: options
def get_ocr_engine(options): """Return NullOcrEngine when --ocr-engine none is selected.""" if options is not None: ocr_engine = getattr(options, 'ocr_engine', 'auto') if ocr_engine != 'none': return None return NullOcrEngine()
function_simple
1
{"cognitive_complexity": 3, "loc": 7, "code_loc": 5, "docstring_loc": 1, "function_name": "get_ocr_engine", "class_name": null, "qualname": "get_ocr_engine", "file_path": "src/ocrmypdf/builtin_plugins/null_ocr.py", "repo_id": "ocrmypdf/OCRmyPDF", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py:TestKeycloakProvider.test_get_issuer_with_different_domain
# Context: from crewai.cli.authentication.main import Oauth2Settings from crewai.cli.authentication.providers.keycloak import KeycloakProvider class TestKeycloakProvider: def setup_method(self): ... def test_initialization_with_valid_settings(self): ... def test_get_authorize_url(self): ... def test_get_authorize_url_with_different_domain(self): ... def test_get_token_url(self): ... def test_get_token_url_with_different_domain(self): ... def test_get_jwks_url(self): ... def test_get_jwks_url_with_different_domain(self): ... def test_get_issuer(self): ... def test_get_audience(self): ... def test_get_client_id(self): ... def test_get_required_fields(self): ... def test_oauth2_base_url(self): ... def test_oauth2_base_url_strips_https_prefix(self): ... def test_oauth2_base_url_strips_http_prefix(self): ... # Task: Write a Python test method `test_get_issuer_with_different_domain` in test class `TestKeycloakProvider` to verify the behavior of `get_issuer_with_different_domain`. Module under test: crewai.cli.authentication.main, crewai.cli.authentication.providers.keycloak
def test_get_issuer_with_different_domain(self): settings = Oauth2Settings( provider="keycloak", domain="login.myapp.io", client_id="test-client", audience="test-audience", extra={ "realm": "app-realm" } ) provider = KeycloakProvider(settings) expected_issuer = "https://login.myapp.io/realms/app-realm" assert provider.get_issuer() == expected_issuer
test
0
{"function_name": "test_get_issuer_with_different_domain", "class_name": "TestKeycloakProvider", "qualname": "TestKeycloakProvider.test_get_issuer_with_different_domain", "file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py", "repo_id": "crewAIInc/crewAI", "loc": 13, "tested_modules": ["crewai.cli.authentication.main", "crewai.cli.authentication.providers.keycloak"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py:GraniteMoeHybridIntegrationTest.test_model_logits
# Context: from parameterized import parameterized import torch from transformers import ( GraniteMoeHybridForCausalLM, GraniteMoeHybridModel, ) class GraniteMoeHybridModelTester(BambaModelTester): ... class GraniteMoeHybridModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): ... class GraniteMoeHybridIntegrationTest(unittest.TestCase): def test_model_generation(self, device): ... # Task: Write a Python test method `test_model_logits` in test class `GraniteMoeHybridIntegrationTest` to verify the behavior of `model_logits`. Module under test: parameterized, transformers, transformers.testing_utils
def test_model_logits(self, device): input_ids = [31390, 631, 4162, 30, 322, 25342, 432, 1875, 43826, 10066, 688, 225] model = GraniteMoeHybridForCausalLM.from_pretrained("ibm-granite/granite-4.0-h-tiny", device_map=device) with torch.no_grad(): out = model(torch.tensor([input_ids]).to(device)) # fmt: off # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([ [-0.3543, -1.0066, -0.5338, -0.8816, -0.7438, 0.0500, -1.3644, -0.0742, -1.7746, -1.6326, -1.4802, -0.4961] ], device=device) torch.testing.assert_close(EXPECTED_MEAN, out.logits.float().mean(-1), rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:15] EXPECTED_SLICE = torch.tensor([ [6.5938, 7.2500, 1.6484, 5.2188, 3.5781, 2.5469, 6.1250, 5.1875, 9.5000, 4.6875, 4.7188, 10.7500, 10.3125, 7.8438, 5.5312] ], device=device) # fmt: on self.assertTrue( torch.allclose( EXPECTED_SLICE, out.logits[0, 0, :15].float(), atol=1e-3, rtol=1e-3, ) )
test
0
{"function_name": "test_model_logits", "class_name": "GraniteMoeHybridIntegrationTest", "qualname": "GraniteMoeHybridIntegrationTest.test_model_logits", "file_path": "tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py", "repo_id": "huggingface/transformers", "loc": 30, "tested_modules": ["parameterized", "transformers", "transformers.testing_utils", "generation.test_utils", "models.bamba.test_modeling_bamba"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/llm/_internal/batch/constants.py:TaskType.values
# Context: class vLLMTaskType(TaskType): ... class SGLangTaskType(TaskType): ... class TaskType: # Task: Write a Python method `values` for the class `TaskType` to return a set of all valid task type values.
def values(cls): """Return a set of all valid task type values.""" return { value for key, value in vars(cls).items() if not key.startswith("_") and isinstance(value, str) }
function_simple
0
{"cognitive_complexity": 0, "loc": 7, "code_loc": 5, "docstring_loc": 1, "function_name": "values", "class_name": "TaskType", "qualname": "TaskType.values", "file_path": "python/ray/llm/_internal/batch/constants.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/util/torch_utils.py:tensorize
# Context: import numpy as np import pandas as pd import torch def convert_pandas_to_torch_tensor(data_batch: pd.DataFrame, columns: Optional[Union[List[str], List[List[str]]]], column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]], unsqueeze: bool) -> Union[torch.Tensor, List[torch.Tensor]]: ... def convert_ndarray_to_torch_tensor(ndarray: np.ndarray, dtype: Optional[torch.dtype], device: Optional[Union[str, 'torch.device']], pin_memory: bool) -> torch.Tensor: ... def convert_ndarray_batch_to_torch_tensor_batch(ndarrays: Union[np.ndarray, Dict[str, np.ndarray]], dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]], device: Optional[Union[str, 'torch.device']], pin_memory: bool) -> Union[torch.Tensor, Dict[str, torch.Tensor]]: ... def convert_ndarray_list_to_torch_tensor_list(ndarrays: Dict[str, List[np.ndarray]], dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]], device: Optional[Union[str, 'torch.device']], pin_memory: bool) -> Dict[str, List[torch.Tensor]]: ... def arrow_batch_to_tensors(batch: pyarrow.Table, dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]], combine_chunks: bool, pin_memory: bool, threadpool: Optional[ThreadPoolExecutor]) -> Union[Dict[str, torch.Tensor], Dict[str, List[torch.Tensor]]]: ... def concat_tensors_to_device(tensor_sequence: Sequence[torch.Tensor], device: Optional[Union[str, 'torch.device']], non_blocking: bool) -> torch.Tensor: ... def _get_type_str(batch: Any) -> str: ... def move_tensors_to_device(batch: TensorBatchType, device: Optional[Union[str, 'torch.device']], non_blocking: bool) -> TensorBatchReturnType: ... # Task: Write a Python function `tensorize` to this recursive function allows to convert pyarrow List dtypes. Parameters: vals, dtype
def tensorize(vals, dtype): """This recursive function allows to convert pyarrow List dtypes to multi-dimensional tensors.""" if isinstance(vals, pd.api.extensions.ExtensionArray): # torch.as_tensor() does not yet support the __array__ protocol, so we need # to convert extension arrays to ndarrays manually before converting to a # Torch tensor. # See https://github.com/pytorch/pytorch/issues/51156. vals = vals.to_numpy() if vals.dtype.type is np.object_: # Column has an object dtype which Torch can't handle, so we try to # tensorize each column element and then stack the resulting tensors. tensors = [tensorize(x, dtype) for x in vals] try: return torch.stack(tensors) except RuntimeError: # NOTE: RuntimeError is raised when trying to stack ragged tensors. # Try to coerce the tensor to a nested tensor, if possible. # If this fails, the exception will be propagated up to the caller. return torch.nested_tensor(tensors) else: return torch.as_tensor(vals, dtype=dtype)
function_simple
0
{"cognitive_complexity": 4, "loc": 23, "code_loc": 10, "docstring_loc": 2, "function_name": "tensorize", "class_name": null, "qualname": "tensorize", "file_path": "python/ray/data/util/torch_utils.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "plib_runnable"}
huggingface/transformers:src/transformers/models/qwen3_vl/modular_qwen3_vl.py:Qwen3VLTextModel.forward
# Context: import torch from ...cache_utils import Cache, DynamicCache from ...masking_utils import create_causal_mask from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...modeling_outputs import BaseModelOutputWithPast, BaseModelOutputWithPooling from ...processing_utils import ProcessingKwargs, Unpack class BaseModelOutputWithDeepstackFeatures(BaseModelOutputWithPooling): ... class Qwen3VLVisionConfig(PreTrainedConfig): ... class Qwen3VLTextConfig(PreTrainedConfig): ... class Qwen3VLConfig(PreTrainedConfig): ... class Qwen3VLVisionMLP(nn.Module): ... class Qwen3VLVisionPatchEmbed(PatchEmbed): ... class Qwen3VLVisionRotaryEmbedding(VisionRotaryEmbedding): ... class Qwen3VLVisionPatchMerger(nn.Module): ... class Qwen3VLVisionAttention(VisionAttention): ... class Qwen3VLVisionBlock(Qwen2_5_VLVisionBlock): ... class Qwen3VLTextRotaryEmbedding(LlamaRotaryEmbedding): ... class Qwen3VLTextAttention(Qwen3Attention): ... class Qwen3VLTextDecoderLayer(Qwen3DecoderLayer): ... class Qwen3VLModelOutputWithPast(Qwen2VLModelOutputWithPast): ... class Qwen3VLPreTrainedModel(Qwen2VLPreTrainedModel): ... class Qwen3VLVisionModel(Qwen3VLPreTrainedModel): ... class Qwen3VLModel(Qwen2VLModel): ... class Qwen3VLCausalLMOutputWithPast(Qwen2_5_VLCausalLMOutputWithPast): ... class Qwen3VLForConditionalGeneration(Qwen2_5_VLForConditionalGeneration): ... class Qwen3VLProcessorKwargs(ProcessingKwargs): ... class Qwen3VLProcessor(Qwen2VLProcessor): ... class Qwen3VLTextModel(Qwen3VLPreTrainedModel, Qwen3Model): input_modalities = ("text",) _no_split_modules = ["Qwen3VLTextDecoderLayer"] def __init__(self, config: Qwen3VLTextConfig): super().__init__(config) del self.has_sliding_layers def _deepstack_process(self, hidden_states: torch.Tensor, visual_pos_masks: torch.Tensor, visual_embeds: torch.Tensor): ... # Task: Write a Python method `forward` for the class `Qwen3VLTextModel` to visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*):. 
Parameters: input_ids: torch.LongTensor | None, attention_mask: torch.Tensor | None, position_ids: torch.LongTensor | None, past_key_values: Cache | None, inputs_embeds: torch.FloatTensor | None, use_cache: bool | None, cache_position: torch.LongTensor | None, visual_pos_masks: torch.Tensor | None, deepstack_visual_embeds: list[torch.Tensor] | None Returns: tuple | BaseModelOutputWithPast
def forward( self, input_ids: torch.LongTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, use_cache: bool | None = None, cache_position: torch.LongTensor | None = None, # args for deepstack visual_pos_masks: torch.Tensor | None = None, deepstack_visual_embeds: list[torch.Tensor] | None = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple | BaseModelOutputWithPast: r""" visual_pos_masks (`torch.Tensor` of shape `(batch_size, seqlen)`, *optional*): The mask of the visual positions. deepstack_visual_embeds (`list[torch.Tensor]`, *optional*): The deepstack visual embeddings. The shape is (num_layers, visual_seqlen, embed_dim). The feature is extracted from the different visual encoder layers, and fed to the decoder hidden states. It's from the paper DeepStack(https://arxiv.org/abs/2406.04334). """ if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") # torch.jit.trace() doesn't support cache objects in the output if use_cache and past_key_values is None and not torch.jit.is_tracing(): past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) # the hard coded `4` is for text, temporal, height and width. 
if position_ids is None: position_ids = cache_position.view(1, 1, -1).expand(4, inputs_embeds.shape[0], -1) elif position_ids.ndim == 2: position_ids = position_ids[None, ...].expand(4, position_ids.shape[0], -1) if position_ids.ndim == 3 and position_ids.shape[0] == 4: text_position_ids = position_ids[0] position_ids = position_ids[1:] else: text_position_ids = None attention_mask = create_causal_mask( config=self.config, inputs_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=text_position_ids, ) hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) # decoder layers for layer_idx, decoder_layer in enumerate(self.layers): layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=text_position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = layer_outputs # add visual features to the hidden states of first several layers if deepstack_visual_embeds is not None and layer_idx in range(len(deepstack_visual_embeds)): hidden_states = self._deepstack_process( hidden_states, visual_pos_masks, deepstack_visual_embeds[layer_idx], ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, )
function_complex
0
{"cognitive_complexity": 16, "loc": 91, "code_loc": 52, "docstring_loc": 8, "function_name": "forward", "class_name": "Qwen3VLTextModel", "qualname": "Qwen3VLTextModel.forward", "file_path": "src/transformers/models/qwen3_vl/modular_qwen3_vl.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/browser/session.py:BrowserSession.cdp_client_for_frame
# Context: class Target(BaseModel): ... class CDPSession(BaseModel): ... class BrowserSession(BaseModel): model_config = ConfigDict( def __init__( self, *, # Cloud browser params - use these for cloud mode cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, # Common params that work with cloud id: str | None = None, headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, captcha_solver: bool | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, ) -> None: ... 
def __init__( self, *, # Core configuration for local id: str | None = None, cdp_url: str | None = None, browser_profile: BrowserProfile | None = None, # Local browser launch params executable_path: str | Path | None = None, headless: bool | None = None, user_data_dir: str | Path | None = None, args: list[str] | None = None, downloads_path: str | Path | None = None, # Common params headers: dict[str, str] | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, auto_download_pdfs: bool | None = None, cookie_whitelist_domains: list[str] | None = None, cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, max_iframes: int | None = None, max_iframe_depth: int | None = None, # All other local params env: dict[str, str | float | bool] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, traces_dir: str | Path | None = None, accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, storage_state: str | Path | dict[str, Any] | None = None, disable_security: bool | None = None, deterministic_rendering: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = 
None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = None, filter_highlight_ids: bool | None = None, profile_directory: str | None = None, ) -> None: ... def __init__( self, # Core configuration id: str | None = None, cdp_url: str | None = None, is_local: bool = False, browser_profile: BrowserProfile | None = None, # Cloud browser params (don't mix with local browser params) cloud_profile_id: UUID | str | None = None, cloud_proxy_country_code: ProxyCountryCode | None = None, cloud_timeout: int | None = None, # Backward compatibility aliases for cloud params profile_id: UUID | str | None = None, proxy_country_code: ProxyCountryCode | None = None, timeout: int | None = None, # BrowserProfile fields that can be passed directly # From BrowserConnectArgs headers: dict[str, str] | None = None, # From BrowserLaunchArgs env: dict[str, str | float | bool] | None = None, executable_path: str | Path | None = None, headless: bool | None = None, args: list[str] | None = None, ignore_default_args: list[str] | Literal[True] | None = None, channel: str | None = None, chromium_sandbox: bool | None = None, devtools: bool | None = None, downloads_path: str | Path | None = None, traces_dir: str | Path | None = None, # From BrowserContextArgs accept_downloads: bool | None = None, permissions: list[str] | None = None, user_agent: str | None = None, screen: dict | None = None, viewport: dict | None = None, no_viewport: bool | None = None, device_scale_factor: float | None = None, record_har_content: str | None = None, record_har_mode: str | None = None, record_har_path: str | Path | None = None, record_video_dir: str | Path | None = None, record_video_framerate: int | None = None, record_video_size: dict | None = None, # From BrowserLaunchPersistentContextArgs user_data_dir: str | Path | None = None, # From BrowserNewContextArgs storage_state: str | Path | dict[str, Any] | None = None, # BrowserProfile specific fields ## Cloud Browser 
Fields use_cloud: bool | None = None, cloud_browser: bool | None = None, # Backward compatibility alias cloud_browser_params: CloudBrowserParams | None = None, ## Other params disable_security: bool | None = None, deterministic_rendering: bool | None = None, allowed_domains: list[str] | None = None, prohibited_domains: list[str] | None = None, keep_alive: bool | None = None, proxy: ProxySettings | None = None, enable_default_extensions: bool | None = None, captcha_solver: bool | None = None, window_size: dict | None = None, window_position: dict | None = None, minimum_wait_page_load_time: float | None = None, wait_for_network_idle_page_load_time: float | None = None, wait_between_actions: float | None = None, filter_highlight_ids: bool | None = None, auto_download_pdfs: bool | None = None, profile_directory: str | None = None, cookie_whitelist_domains: list[str] | None = None, # DOM extraction layer configuration cross_origin_iframes: bool | None = None, highlight_elements: bool | None = None, dom_highlight_elements: bool | None = None, paint_order_filtering: bool | None = None, # Iframe processing limits max_iframes: int | None = None, max_iframe_depth: int | None = None, ): # Following the same pattern as AgentSettings in service.py # Only pass non-None values to avoid validation errors profile_kwargs = { k: v for k, v in locals().items() if k not in [ 'self', 'browser_profile', 'id', 'cloud_profile_id', 'cloud_proxy_country_code', 'cloud_timeout', 'profile_id', 'proxy_country_code', 'timeout', ] and v is not None } # Handle backward compatibility: prefer cloud_* params over old names final_profile_id = cloud_profile_id if cloud_profile_id is not None else profile_id final_proxy_country_code = cloud_proxy_country_code if cloud_proxy_country_code is not None else proxy_country_code final_timeout = cloud_timeout if cloud_timeout is not None else timeout # If any cloud params are provided, create cloud_browser_params if final_profile_id is not None or 
final_proxy_country_code is not None or final_timeout is not None: cloud_params = CreateBrowserRequest( cloud_profile_id=final_profile_id, cloud_proxy_country_code=final_proxy_country_code, cloud_timeout=final_timeout, ) profile_kwargs['cloud_browser_params'] = cloud_params profile_kwargs['use_cloud'] = True # Handle backward compatibility: map cloud_browser to use_cloud if 'cloud_browser' in profile_kwargs: profile_kwargs['use_cloud'] = profile_kwargs.pop('cloud_browser') # If cloud_browser_params is set, force use_cloud=True if cloud_browser_params is not None: profile_kwargs['use_cloud'] = True # if is_local is False but executable_path is provided, set is_local to True if is_local is False and executable_path is not None: profile_kwargs['is_local'] = True # Only set is_local=True when cdp_url is missing if we're not using cloud browser # (cloud browser will provide cdp_url later) use_cloud = profile_kwargs.get('use_cloud') or profile_kwargs.get('cloud_browser') if not cdp_url and not use_cloud: profile_kwargs['is_local'] = True # Create browser profile from direct parameters or use provided one if browser_profile is not None: # Merge any direct kwargs into the provided browser_profile (direct kwargs take precedence) merged_kwargs = {**browser_profile.model_dump(exclude_unset=True), **profile_kwargs} resolved_browser_profile = BrowserProfile(**merged_kwargs) else: resolved_browser_profile = BrowserProfile(**profile_kwargs) # Initialize the Pydantic model super().__init__( id=id or str(uuid7str()), browser_profile=resolved_browser_profile, ) def from_system_chrome(cls, profile_directory: str | None, **kwargs) -> Self: ... def list_chrome_profiles(cls) -> list[dict[str, str]]: ... def cdp_url(self) -> str | None: ... def is_local(self) -> bool: ... def is_cdp_connected(self) -> bool: ... async def wait_if_captcha_solving(self, timeout: float | None) -> 'CaptchaWaitResult | None': ... def is_reconnecting(self) -> bool: ... def cloud_browser(self) -> bool: ... 
def demo_mode(self) -> 'DemoMode | None': ... def logger(self) -> Any: ... def _id_for_logs(self) -> str: ... def _tab_id_for_logs(self) -> str: ... def __repr__(self) -> str: ... def __str__(self) -> str: ... async def reset(self) -> None: ... def model_post_init(self, __context) -> None: ... async def start(self) -> None: ... async def kill(self) -> None: ... async def stop(self) -> None: ... async def on_BrowserStartEvent(self, event: BrowserStartEvent) -> dict[str, str]: ... async def on_NavigateToUrlEvent(self, event: NavigateToUrlEvent) -> None: ... async def _navigate_and_wait(self, url: str, target_id: str, timeout: float | None, wait_until: str) -> None: ... async def on_SwitchTabEvent(self, event: SwitchTabEvent) -> TargetID: ... async def on_CloseTabEvent(self, event: CloseTabEvent) -> None: ... async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ... async def on_TabClosedEvent(self, event: TabClosedEvent) -> None: ... async def on_AgentFocusChangedEvent(self, event: AgentFocusChangedEvent) -> None: ... async def on_FileDownloadedEvent(self, event: FileDownloadedEvent) -> None: ... async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None: ... def cdp_client(self) -> CDPClient: ... async def new_page(self, url: str | None) -> 'Page': ... async def get_current_page(self) -> 'Page | None': ... async def must_get_current_page(self) -> 'Page': ... async def get_pages(self) -> list['Page']: ... def get_focused_target(self) -> 'Target | None': ... def get_page_targets(self) -> list['Target']: ... async def close_page(self, page: 'Union[Page, str]') -> None: ... async def cookies(self) -> list['Cookie']: ... async def clear_cookies(self) -> None: ... async def export_storage_state(self, output_path: str | Path | None) -> dict[str, Any]: ... async def get_or_create_cdp_session(self, target_id: TargetID | None, focus: bool) -> CDPSession: ... 
async def set_extra_headers(self, headers: dict[str, str], target_id: TargetID | None) -> None: ... async def get_browser_state_summary(self, include_screenshot: bool, cached: bool, include_recent_events: bool) -> BrowserStateSummary: ... async def get_state_as_text(self) -> str: ... async def attach_all_watchdogs(self) -> None: ... async def connect(self, cdp_url: str | None) -> Self: ... async def _setup_proxy_auth(self) -> None: ... async def reconnect(self) -> None: ... async def _auto_reconnect(self, max_attempts: int) -> None: ... def _attach_ws_drop_callback(self) -> None: ... async def get_tabs(self) -> list[TabInfo]: ... async def get_current_target_info(self) -> TargetInfo | None: ... async def get_current_page_url(self) -> str: ... async def get_current_page_title(self) -> str: ... async def navigate_to(self, url: str, new_tab: bool) -> None: ... async def get_dom_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... def update_cached_selector_map(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> None: ... async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... async def get_dom_element_at_coordinates(self, x: int, y: int) -> EnhancedDOMTreeNode | None: ... async def get_target_id_from_tab_id(self, tab_id: str) -> TargetID: ... async def get_target_id_from_url(self, url: str) -> TargetID: ... async def get_most_recently_opened_target_id(self) -> TargetID: ... def is_file_input(self, element: Any) -> bool: ... async def get_selector_map(self) -> dict[int, EnhancedDOMTreeNode]: ... async def get_index_by_id(self, element_id: str) -> int | None: ... async def get_index_by_class(self, class_name: str) -> int | None: ... async def remove_highlights(self) -> None: ... async def get_element_coordinates(self, backend_node_id: int, cdp_session: CDPSession) -> DOMRect | None: ... async def highlight_interaction_element(self, node: 'EnhancedDOMTreeNode') -> None: ... 
async def highlight_coordinate_click(self, x: int, y: int) -> None: ... async def add_highlights(self, selector_map: dict[int, 'EnhancedDOMTreeNode']) -> None: ... async def _close_extension_options_pages(self) -> None: ... async def send_demo_mode_log(self, message: str, level: str, metadata: dict[str, Any] | None) -> None: ... def downloaded_files(self) -> list[str]: ... async def _cdp_get_all_pages(self, include_http: bool, include_about: bool, include_pages: bool, include_iframes: bool, include_workers: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool) -> list[TargetInfo]: ... async def _cdp_create_new_page(self, url: str, background: bool, new_window: bool) -> str: ... async def _cdp_close_page(self, target_id: TargetID) -> None: ... async def _cdp_get_cookies(self) -> list[Cookie]: ... async def _cdp_set_cookies(self, cookies: list[Cookie]) -> None: ... async def _cdp_clear_cookies(self) -> None: ... async def _cdp_grant_permissions(self, permissions: list[str], origin: str | None) -> None: ... async def _cdp_set_geolocation(self, latitude: float, longitude: float, accuracy: float) -> None: ... async def _cdp_clear_geolocation(self) -> None: ... async def _cdp_add_init_script(self, script: str) -> str: ... async def _cdp_remove_init_script(self, identifier: str) -> None: ... async def _cdp_set_viewport(self, width: int, height: int, device_scale_factor: float, mobile: bool, target_id: str | None) -> None: ... async def _cdp_get_origins(self) -> list[dict[str, Any]]: ... async def _cdp_get_storage_state(self) -> dict: ... async def _cdp_navigate(self, url: str, target_id: TargetID | None) -> None: ... def _is_valid_target(target_info: TargetInfo, include_http: bool, include_chrome: bool, include_chrome_extensions: bool, include_chrome_error: bool, include_about: bool, include_iframes: bool, include_pages: bool, include_workers: bool) -> bool: ... 
async def get_all_frames(self) -> tuple[dict[str, dict], dict[str, str]]: ... async def _populate_frame_metadata(self, all_frames: dict[str, dict], target_sessions: dict[str, str]) -> None: ... async def find_frame_target(self, frame_id: str, all_frames: dict[str, dict] | None) -> dict | None: ... async def cdp_client_for_target(self, target_id: TargetID) -> CDPSession: ... async def cdp_client_for_node(self, node: EnhancedDOMTreeNode) -> CDPSession: ... async def take_screenshot(self, path: str | None, full_page: bool, format: str, quality: int | None, clip: dict | None) -> bytes: ... async def screenshot_element(self, selector: str, path: str | None, format: str, quality: int | None) -> bytes: ... async def _get_element_bounds(self, selector: str) -> dict | None: ... # Task: Write a Python async method `cdp_client_for_frame` for the class `BrowserSession` to get a CDP client attached to the target containing the specified frame. Parameters: frame_id: str Returns: CDPSession
async def cdp_client_for_frame(self, frame_id: str) -> CDPSession: """Get a CDP client attached to the target containing the specified frame. Builds a unified frame hierarchy from all targets to find the correct target for any frame, including OOPIFs (Out-of-Process iframes). Args: frame_id: The frame ID to search for Returns: Tuple of (cdp_cdp_session, target_id) for the target containing the frame Raises: ValueError: If the frame is not found in any target """ # If cross-origin iframes are disabled, just use the main session if not self.browser_profile.cross_origin_iframes: return await self.get_or_create_cdp_session() # Get complete frame hierarchy all_frames, target_sessions = await self.get_all_frames() # Find the requested frame frame_info = await self.find_frame_target(frame_id, all_frames) if frame_info: target_id = frame_info.get('frameTargetId') if target_id in target_sessions: assert target_id is not None # Use existing session session_id = target_sessions[target_id] # Return the client with session attached (don't change focus) return await self.get_or_create_cdp_session(target_id, focus=False) # Frame not found raise ValueError(f"Frame with ID '{frame_id}' not found in any target")
function_simple
0
{"cognitive_complexity": 4, "loc": 37, "code_loc": 11, "docstring_loc": 14, "function_name": "cdp_client_for_frame", "class_name": "BrowserSession", "qualname": "BrowserSession.cdp_client_for_frame", "file_path": "browser_use/browser/session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/tests/test_join.py:test_join_with_unjoinable_non_key_columns
# Context: import numpy as np import pytest from packaging.version import parse as parse_version import ray from ray.data._internal.utils.arrow_utils import get_pyarrow_version def test_simple_inner_join(ray_start_regular_shared_2_cpus, num_rows_left: int, num_rows_right: int, partition_size_hint: Optional[int]): ... def test_simple_left_right_outer_semi_anti_join(ray_start_regular_shared_2_cpus, join_type, num_rows_left, num_rows_right): ... def test_simple_full_outer_join(ray_start_regular_shared_2_cpus, num_rows_left, num_rows_right): ... def test_simple_self_join(ray_start_regular_shared_2_cpus, left_suffix, right_suffix): ... def test_invalid_join_config(ray_start_regular_shared_2_cpus): ... def test_invalid_join_not_matching_key_columns(ray_start_regular_shared_2_cpus, join_type): ... def test_anti_join_no_matches(ray_start_regular_shared_2_cpus, join_type): ... def test_anti_join_all_matches(ray_start_regular_shared_2_cpus, join_type): ... def test_anti_join_multi_key(ray_start_regular_shared_2_cpus, join_type): ... def _assert_columns_match(result, expected_columns): ... def _assert_list_values(result_by_id, expected_values): ... def _assert_tensor_values(result_by_id, expected_values): ... def _assert_none_values(result_by_id, none_checks): ... def _assert_scalar_values(result_by_id, expected_values): ... def test_join_with_predicate_pushdown(ray_start_regular_shared_2_cpus, join_type, filter_side, should_push): ... def test_join_cross_side_column_comparison_no_pushdown(ray_start_regular_shared_2_cpus): ... # Task: Write a Python test function `test_join_with_unjoinable_non_key_columns` to test that joins work correctly when non-key columns have unjoinable types. Module under test: typing, packaging.version, ray.data._internal.logical.operators
def test_join_with_unjoinable_non_key_columns( ray_start_regular_shared_2_cpus, join_type ): """Test that joins work correctly when non-key columns have unjoinable types.""" # Left dataset with joinable key but unjoinable non-key columns # Create test data - centralized for clarity and maintainability list_data = [ [1, 2, 3], # list for id=0 [4, 5, 6], # list for id=1 [7, 8, 9], # list for id=2 ] tensor_data = [ np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32), # 2x2 tensor for id=0 np.array([[5.0, 6.0], [7.0, 8.0]], dtype=np.float32), # 2x2 tensor for id=1 np.array([[9.0, 10.0], [11.0, 12.0]], dtype=np.float32), # 2x2 tensor for id=2 ] scalar_data = ["a", "b", "c"] # scalar data for id=0,1,2 left_ds = ray.data.from_items( [ { "id": 0, "list_col": list_data[0], "tensor_col": tensor_data[0], "data": scalar_data[0], }, { "id": 1, "list_col": list_data[1], "tensor_col": tensor_data[1], "data": scalar_data[1], }, { "id": 2, "list_col": list_data[2], "tensor_col": tensor_data[2], "data": scalar_data[2], }, ] ) # Right dataset with joinable key and columns # ids: 0, 1, 3 (so id=2 from left won't match, id=3 from right won't match) right_ds = ray.data.from_items( [ {"id": 0, "value": "x", "score": 10}, {"id": 1, "value": "y", "score": 20}, {"id": 3, "value": "z", "score": 30}, ] ) # Verify the join worked and includes unjoinable columns joined = left_ds.join(right_ds, join_type=join_type, on=("id",), num_partitions=2) result = joined.take_all() result_by_id = {row["id"]: row for row in result} # Basic validation - join should succeed with unjoinable non-key columns if join_type == "inner": # Should have 2 rows (id=0 and id=1 match) assert len(result) == 2 # Verify unjoinable columns are preserved _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]}) _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]}) elif join_type == "left_outer": # Should have 3 rows (all from left: id=0, 1, 2) assert len(result) == 3 # All left unjoinable columns 
preserved _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1, 2]}) _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1, 2]}) # Unmatched left row (id=2) should have None for right columns _assert_none_values(result_by_id, {2: ["value"]}) elif join_type == "right_outer": # Should have 3 rows (all from right: id=0, 1, 3) assert len(result) == 3 # Matched rows should have unjoinable columns from left _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]}) _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]}) _assert_scalar_values(result_by_id, {3: {"value": "z"}}) # Unmatched right row (id=3) should have None for left unjoinable columns _assert_none_values(result_by_id, {3: ["list_col", "tensor_col"]}) elif join_type == "full_outer": # Should have 4 rows (all from both sides: id=0, 1, 2, 3) assert len(result) == 4 # Matched rows (id=0, 1) should have data from both sides _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1, 2]}) _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1, 2]}) _assert_scalar_values( result_by_id, { 0: {"value": "x"}, 1: {"value": "y"}, 2: {"data": scalar_data[2]}, 3: {"value": "z", "score": 30}, }, ) # Unmatched rows should have None for columns from the other side _assert_none_values( result_by_id, {2: ["value", "score"], 3: ["list_col", "tensor_col", "data"]} ) elif join_type == "left_semi": # Should return left rows that have matches in right (id=0, 1) assert len(result) == 2 _assert_columns_match(result, {"id", "list_col", "tensor_col", "data"}) _assert_list_values(result_by_id, {i: list_data[i] for i in [0, 1]}) _assert_tensor_values(result_by_id, {i: tensor_data[i] for i in [0, 1]}) elif join_type == "left_anti": # Should return left rows that DON'T have matches in right (id=2) assert len(result) == 1 _assert_columns_match(result, {"id", "list_col", "tensor_col", "data"}) _assert_list_values(result_by_id, {2: list_data[2]}) 
_assert_tensor_values(result_by_id, {2: tensor_data[2]}) _assert_scalar_values(result_by_id, {2: {"data": scalar_data[2]}}) elif join_type == "right_semi": # Should return right rows that have matches in left (id=0, 1) assert len(result) == 2 _assert_columns_match(result, {"id", "value", "score"}) _assert_scalar_values(result_by_id, {0: {"value": "x"}, 1: {"value": "y"}}) elif join_type == "right_anti": # Should return right rows that DON'T have matches in left (id=3) assert len(result) == 1 _assert_columns_match(result, {"id", "value", "score"}) _assert_scalar_values(result_by_id, {3: {"value": "z", "score": 30}}) # For outer joins, ensure unjoinable columns are present if join_type in ["inner", "left_outer", "right_outer", "full_outer"]: _assert_columns_match( result, {"id", "list_col", "tensor_col", "data", "value", "score"} )
test
0
{"function_name": "test_join_with_unjoinable_non_key_columns", "class_name": null, "qualname": "test_join_with_unjoinable_non_key_columns", "file_path": "python/ray/data/tests/test_join.py", "repo_id": "ray-project/ray", "loc": 138, "tested_modules": ["typing", "packaging.version", "ray.data._internal.logical.operators", "ray.data._internal.util", "ray.data._internal.utils.arrow_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/diffusers:tests/models/testing_utils/quantization.py:TorchAoConfigMixin:class_doc
Write a class-level docstring for `TorchAoConfigMixin` which has methods: `_create_quantized_model`, `_verify_if_layer_quantized`.
Base mixin providing TorchAO quantization config and model creation. Expected class attributes: - model_class: The model class to test - pretrained_model_name_or_path: Hub repository ID for the pretrained model - pretrained_model_kwargs: (Optional) Dict of kwargs to pass to from_pretrained
documentation
1
{"doc_type": "class", "class_name": "TorchAoConfigMixin", "file_path": "tests/models/testing_utils/quantization.py", "repo_id": "huggingface/diffusers", "char_length": 303, "methods": ["_create_quantized_model", "_verify_if_layer_quantized"]}
ray-project/ray:python/ray/serve/taskiq_task_processor.py:TaskiqTaskProcessorAdapter.initialize
# Context: from ray.serve._private.constants import ( DEFAULT_CONSUMER_CONCURRENCY, SERVE_LOGGER_NAME, ) from taskiq_redis import RedisAsyncResultBackend def _import_broker_class(broker_type: str): ... def _create_broker(broker_type: str, queue_name: str, broker_kwargs: Optional[Dict[str, Any]]): ... class TaskiqAdapterConfig(BaseModel): ... class TaskiqTaskProcessorAdapter(TaskProcessorAdapter): def __init__(self, config: TaskProcessorConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.adapter_config, TaskiqAdapterConfig): raise TypeError( "TaskProcessorConfig.adapter_config must be an instance of " "TaskiqAdapterConfig" ) self._config = config self._broker = None self._result_backend = None def register_task_handle(self, func: Callable, name: Optional[str]): ... def enqueue_task_sync(self, task_name: str, args, kwargs, **options) -> TaskResult: ... def get_task_status_sync(self, task_id: str) -> TaskResult: ... def start_consumer(self, **kwargs): ... def stop_consumer(self, timeout: float): ... def cancel_task_sync(self, task_id: str): ... def get_metrics_sync(self) -> Dict[str, Any]: ... def health_check_sync(self) -> List[Dict]: ... # Task: Write a Python method `initialize` for the class `TaskiqTaskProcessorAdapter` to initialize the taskiq broker and result backend. Parameters: consumer_concurrency: int
def initialize(self, consumer_concurrency: int = DEFAULT_CONSUMER_CONCURRENCY): """Initialize the taskiq broker and result backend.""" self._consumer_concurrency = consumer_concurrency adapter_config: TaskiqAdapterConfig = self._config.adapter_config # Create the broker using the factory function self._broker = _create_broker( broker_type=adapter_config.broker_type, queue_name=self._config.queue_name, broker_kwargs=adapter_config.broker_kwargs, ) # Create result backend only if explicitly configured if adapter_config.result_backend_url: from taskiq_redis import RedisAsyncResultBackend self._result_backend = RedisAsyncResultBackend( redis_url=adapter_config.result_backend_url, ) self._broker = self._broker.with_result_backend(self._result_backend) logger.info( f"Taskiq adapter initialized with broker_type={adapter_config.broker_type!r}, " f"queue: {self._config.queue_name}" )
function_simple
0
{"cognitive_complexity": 1, "loc": 25, "code_loc": 17, "docstring_loc": 1, "function_name": "initialize", "class_name": "TaskiqTaskProcessorAdapter", "qualname": "TaskiqTaskProcessorAdapter.initialize", "file_path": "python/ray/serve/taskiq_task_processor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/test_downstream_capacity_backpressure_policy.py:TestDownstreamCapacityBackpressurePolicy.test_no_backpressure_low_queue_ratio
# Context: from ray.data._internal.execution.backpressure_policy.downstream_capacity_backpressure_policy import ( DownstreamCapacityBackpressurePolicy, ) class TestDownstreamCapacityBackpressurePolicy: def setup_budget_fraction_mock(self): ... def _mock_operator(self, op_class: type, num_tasks_running: int, obj_store_mem_internal_inqueue: int, obj_store_mem_pending_task_inputs: int, throttling_disabled: bool, has_execution_finished: bool): ... def _mock_materializing_operator(self): ... def _mock_task_pool_map_operator(self, num_tasks_running: int, max_concurrency_limit: int, obj_store_mem_internal_inqueue: int, obj_store_mem_pending_task_inputs: int): ... def _mock_actor_pool_map_operator(self, num_tasks_running: int, max_size: int, max_tasks_in_flight_per_actor: int, obj_store_mem_internal_inqueue: int, obj_store_mem_pending_task_inputs: int): ... def _create_policy(self, topology, data_context, resource_manager): ... def _create_context(self, backpressure_ratio): ... def _mock_resource_manager(self, internal_usage, outputs_usage, external_bytes): ... def _set_utilized_budget_fraction(self, rm, fraction): ... def _set_queue_ratio(self, op, op_state, rm, queue_size, downstream_capacity): ... def test_backpressure_disabled_when_ratio_is_none(self): ... def test_backpressure_skipped_for_ineligible_op(self): ... def test_backpressure_skipped_for_materializing_downstream(self): ... def test_backpressure_skipped_for_low_utilization(self): ... def test_backpressure_skipped_at_threshold(self): ... def test_backpressure_triggered_high_utilization(self): ... def test_backpressure_triggered_high_queue_ratio(self): ... def test_no_backpressure_zero_downstream_capacity(self): ... def test_max_bytes_returns_none_when_backpressure_disabled(self): ... def test_max_bytes_returns_none_for_ineligible_op(self): ... def test_max_bytes_returns_none_for_low_utilization(self): ... def test_max_bytes_returns_zero_for_high_utilization(self): ... 
def test_max_bytes_returns_zero_for_high_queue_ratio(self): ... def test_max_bytes_returns_none_when_no_backpressure(self): ... def test_backpressure_applied_fast_producer_slow_consumer(self): ... # Task: Write a Python test method `test_no_backpressure_low_queue_ratio` in test class `TestDownstreamCapacityBackpressurePolicy` to test no backpressure when queue/capacity ratio is acceptable. Module under test: ray.data._internal.execution.backpressure_policy.downstream_capacity_backpressure_policy, ray.data._internal.execution.interfaces.physical_operator, ray.data._internal.execution.operators.actor_pool_map_operator
def test_no_backpressure_low_queue_ratio(self): """Test no backpressure when queue/capacity ratio is acceptable.""" op, op_state = self._mock_operator() downstream_op, downstream_op_state = self._mock_operator() op.output_dependencies = [downstream_op] topology = {op: op_state, downstream_op: downstream_op_state} context = self._create_context(backpressure_ratio=2.0) rm = self._mock_resource_manager() # Utilized budget fraction below threshold = skip backpressure threshold = ( DownstreamCapacityBackpressurePolicy.OBJECT_STORE_BUDGET_UTIL_THRESHOLD ) self._set_utilized_budget_fraction(rm, threshold - 0.1) # 0.8 # Queue ratio < 2.0: 500 / 1000 = 0.5 queue_ratio = self._set_queue_ratio( op, op_state, rm, queue_size=500, downstream_capacity=1000 ) assert queue_ratio < 2.0 policy = self._create_policy( topology, data_context=context, resource_manager=rm ) assert policy.can_add_input(op) is True
test
0
{"function_name": "test_no_backpressure_low_queue_ratio", "class_name": "TestDownstreamCapacityBackpressurePolicy", "qualname": "TestDownstreamCapacityBackpressurePolicy.test_no_backpressure_low_queue_ratio", "file_path": "python/ray/data/tests/test_downstream_capacity_backpressure_policy.py", "repo_id": "ray-project/ray", "loc": 25, "tested_modules": ["ray.data._internal.execution.backpressure_policy.downstream_capacity_backpressure_policy", "ray.data._internal.execution.interfaces.physical_operator", "ray.data._internal.execution.operators.actor_pool_map_operator", "ray.data._internal.execution.operators.base_physical_operator", "ray.data._internal.execution.operators.task_pool_map_operator"], "has_docstring": true, "runnable_level": "class_runnable"}
vllm-project/vllm:vllm/model_executor/models/isaac.py:process_vision_for_patches
# Context: import torch import torch.nn.functional as F def create_cumulative_seq_lengths(seq_sizes: torch.Tensor, device: torch.device) -> tuple[torch.Tensor, torch.Tensor]: ... class Siglip2VariableSequenceEmbeddings(nn.Module): ... def create_pixel_shuffle_index_map(seq_sizes: torch.Tensor, token_grids: torch.Tensor, scale_factor: int, device: torch.device | None) -> torch.Tensor: ... def pixel_shuffle_varlen(x: torch.Tensor, token_grids: torch.Tensor, scale_factor: int) -> torch.Tensor: ... def _make_writeable(arr: np.ndarray) -> np.ndarray: ... def extract_image_pil(image: PIL.Image.Image) -> torch.Tensor | None: ... def get_image_size_for_max_num_patches(image_height: int, image_width: int, patch_size: int, max_num_patches: int, min_num_patches: int | None, eps: float, pixel_shuffle_scale: int) -> tuple[int, int]: ... def _resolve_vision_token_id(model_config: ModelConfig, vision_token: str) -> int: ... def prepare_image_tensor(image: torch.Tensor, scale: float) -> torch.Tensor: ... def patchify_vision(image: torch.Tensor, patch_size: int) -> torch.Tensor: ... class IsaacImageProcessorKwargs(TypedDict): ... class IsaacImageProcessor: ... class IsaacProcessor: ... class IsaacProcessingInfo(BaseProcessingInfo): ... class IsaacDummyInputsBuilder(BaseDummyInputsBuilder[IsaacProcessingInfo]): ... class IsaacImagePixelInputs(TensorSchema): ... class IsaacMultiModalProcessor(BaseMultiModalProcessor): ... class Siglip2VisionAttention(nn.Module): ... class Siglip2EncoderLayer(nn.Module): ... class Siglip2Encoder(nn.Module): ... class Siglip2VisionTransformer(nn.Module): ... class IsaacVisionEmbedding(nn.Module): ... class IsaacForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP, SupportsMRoPE): ... # Task: Write a Python function `process_vision_for_patches` to resize, normalize, and patchify RGB images for the vision encoder. 
Parameters: images: torch.Tensor, patch_size: int, max_num_patches: int, min_num_patches: int | None, pixel_shuffle_scale: int Returns: tuple[torch.Tensor, list[int]]
def process_vision_for_patches( images: torch.Tensor, patch_size: int, max_num_patches: int, min_num_patches: int | None = None, pixel_shuffle_scale: int = 1, ) -> tuple[torch.Tensor, list[int]]: r"""Resize, normalize, and patchify RGB images for the vision encoder. Args: images (`torch.Tensor`): Either `(height, width, channels)` for a single image or `(num_images, height, width, channels)` for a batch. Channels are expected to be RGB. patch_size (`int`): Edge length of square patches; implicitly controls resize grid granularity. max_num_patches (`int`): Maximum number of patches allowed after resizing. min_num_patches (`int`, *optional*): Minimum number of patches. If provided, the routine upsamples images as needed to satisfy the lower bound. pixel_shuffle_scale (`int`, *optional*, defaults to 1): Pixel shuffle scale factor; influences the target grid that the function produces. Returns: `tuple[torch.Tensor, list[int]]`: A pair `(patches, dims_virtual)` where `patches` has shape `(num_images, target_h / patch_size, target_w / patch_size, channels * patch_size**2)` and `dims_virtual` encodes effective `(images, height, width)` dimensions after optional pixel shuffling. 
""" # Add batch dim if single image if images.dim() == 3: images = images.unsqueeze(0) # Permute to channel first for resize images = images.permute(0, 3, 1, 2) # Get target dimensions _, _, orig_height, orig_width = images.shape target_height, target_width = get_image_size_for_max_num_patches( orig_height, orig_width, patch_size, max_num_patches, min_num_patches=min_num_patches, pixel_shuffle_scale=pixel_shuffle_scale, ) # Resize images = F.interpolate( images, size=(target_height, target_width), mode="bilinear", align_corners=False, ) # Back to channel last images = images.permute(0, 2, 3, 1) # Normalize images = prepare_image_tensor(images) # Patchify patches = patchify_vision(images, patch_size=patch_size) # Calculate dimensions for the patches n_images, h_patches, w_patches, _ = patches.shape dims_virtual = ( [1, h_patches, w_patches] if pixel_shuffle_scale == 1 else [1, h_patches // pixel_shuffle_scale, w_patches // pixel_shuffle_scale] ) return patches, dims_virtual
function_simple
1
{"cognitive_complexity": 2, "loc": 76, "code_loc": 28, "docstring_loc": 25, "function_name": "process_vision_for_patches", "class_name": null, "qualname": "process_vision_for_patches", "file_path": "vllm/model_executor/models/isaac.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
666ghj/BettaFish:ReportEngine/flask_interface.py:_broadcast_event
# Context: from typing import Dict, Any, List, Optional from loguru import logger def _is_excluded_engine_log(record: Dict[str, Any]) -> bool: ... def _stream_log_to_task(message): ... def _setup_log_stream_forwarder(): ... def _register_stream(task_id: str) -> Queue: ... def _unregister_stream(task_id: str, queue: Queue): ... def _prune_task_history_locked(): ... def _get_task(task_id: str) -> Optional['ReportTask']: ... def _format_sse(event: Dict[str, Any]) -> str: ... def _safe_filename_segment(value: str, fallback: str) -> str: ... def initialize_report_engine(): ... class ReportTask: ... def check_engines_ready() -> Dict[str, Any]: ... def run_report_generation(task: ReportTask, query: str, custom_template: str): ... def get_status(): ... def generate_report(): ... def get_progress(task_id: str): ... def stream_task(task_id: str): ... def get_result(task_id: str): ... def get_result_json(task_id: str): ... def download_report(task_id: str): ... def cancel_task(task_id: str): ... def get_templates(): ... def not_found(error): ... def internal_error(error): ... def clear_report_log(): ... def get_report_log(): ... def clear_log(): ... def export_markdown(task_id: str): ... def export_pdf(task_id: str): ... def export_pdf_from_ir(): ... # Task: Write a Python function `_broadcast_event` to 将事件推送给所有监听者,失败时做好异常捕获。. Parameters: task_id: str, event: Dict[str, Any]
def _broadcast_event(task_id: str, event: Dict[str, Any]): """ 将事件推送给所有监听者,失败时做好异常捕获。 采用浅拷贝监听列表,防止并发移除导致遍历异常。 参数: task_id: 待推送的任务ID。 event: 结构化事件payload。 """ with stream_lock: listeners = list(stream_subscribers.get(task_id, [])) for queue in listeners: try: queue.put(event, timeout=0.1) except Exception: logger.exception("推送流式事件失败,跳过当前监听队列")
function_simple
1
{"cognitive_complexity": 2, "loc": 17, "code_loc": 7, "docstring_loc": 9, "function_name": "_broadcast_event", "class_name": null, "qualname": "_broadcast_event", "file_path": "ReportEngine/flask_interface.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/tests/unit/test_runtime_env.py:test_runtime_env_merge
# Context: from ray.runtime_env.runtime_env import ( RuntimeEnvConfig, _merge_runtime_env, ) def test_current_py_version_supported(): ... def test_compatible_with_dataclasses(): ... def test_container_option_serialize(runtime_env_class): ... class TestURICache: ... def enable_dev_mode(local_env_var_enabled, monkeypatch): ... def test_subprocess_error(): ... def test_subprocess_error_with_last_n_lines(): ... async def test_check_output_cmd(): ... def test_serialize_deserialize(option): ... def test_runtime_env_interface(): ... # Task: Write a Python test function `test_runtime_env_merge` to verify the behavior of `runtime_env_merge`. Module under test: dataclasses, typing, ray._private.runtime_env.uri_cache
def test_runtime_env_merge(): # Both are None. parent = None child = None assert _merge_runtime_env(parent, child) == {} parent = {} child = None assert _merge_runtime_env(parent, child) == {} parent = None child = {} assert _merge_runtime_env(parent, child) == {} parent = {} child = {} assert _merge_runtime_env(parent, child) == {} # Only parent is given. parent = {"conda": ["requests"], "env_vars": {"A": "1"}} child = None assert _merge_runtime_env(parent, child) == parent # Only child is given. parent = None child = {"conda": ["requests"], "env_vars": {"A": "1"}} assert _merge_runtime_env(parent, child) == child # Successful case. parent = {"conda": ["requests"], "env_vars": {"A": "1"}} child = {"pip": ["requests"], "env_vars": {"B": "2"}} assert _merge_runtime_env(parent, child) == { "conda": ["requests"], "pip": ["requests"], "env_vars": {"A": "1", "B": "2"}, } # Failure case parent = {"pip": ["requests"], "env_vars": {"A": "1"}} child = {"pip": ["colors"], "env_vars": {"B": "2"}} assert _merge_runtime_env(parent, child) is None # Failure case (env_vars) parent = {"pip": ["requests"], "env_vars": {"A": "1"}} child = {"conda": ["requests"], "env_vars": {"A": "2"}} assert _merge_runtime_env(parent, child) is None # override = True parent = {"pip": ["requests"], "env_vars": {"A": "1"}} child = {"pip": ["colors"], "env_vars": {"B": "2"}} assert _merge_runtime_env(parent, child, override=True) == { "pip": ["colors"], "env_vars": {"A": "1", "B": "2"}, } # override = True + env vars parent = {"pip": ["requests"], "env_vars": {"A": "1"}} child = {"pip": ["colors"], "conda": ["requests"], "env_vars": {"A": "2"}} assert _merge_runtime_env(parent, child, override=True) == { "pip": ["colors"], "env_vars": {"A": "2"}, "conda": ["requests"], }
test
0
{"function_name": "test_runtime_env_merge", "class_name": null, "qualname": "test_runtime_env_merge", "file_path": "python/ray/tests/unit/test_runtime_env.py", "repo_id": "ray-project/ray", "loc": 63, "tested_modules": ["dataclasses", "typing", "ray._private.runtime_env.uri_cache", "ray._private.runtime_env.utils", "ray._private.test_utils"], "has_docstring": false, "runnable_level": "plib_runnable"}
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/article_router.py:read_articles
# Context: from fastapi import APIRouter, Query from typing import List, Optional, Dict, Any from models.article_schemas import Article, PaginatedArticles from services.article_service import article_service async def read_article(article_id: int): ... async def read_sources(): ... async def read_categories(): ... # Task: Write a Python async function `read_articles` to get all articles with pagination and filtering. Parameters: page: int, per_page: int, source: Optional[str], category: Optional[str], date_from: Optional[str], date_to: Optional[str], search: Optional[str]
async def read_articles( page: int = Query(1, ge=1, description="Page number"), per_page: int = Query(10, ge=1, le=100, description="Items per page"), source: Optional[str] = Query(None, description="Filter by source name"), category: Optional[str] = Query(None, description="Filter by category"), date_from: Optional[str] = Query(None, description="Filter by start date (format: YYYY-MM-DD)"), date_to: Optional[str] = Query(None, description="Filter by end date (format: YYYY-MM-DD)"), search: Optional[str] = Query(None, description="Search in title and summary"), ): """ Get all articles with pagination and filtering. - **page**: Page number (starting from 1) - **per_page**: Number of items per page (max 100) - **source**: Filter by source name - **category**: Filter by category - **date_from**: Filter by start date (format: YYYY-MM-DD) - **date_to**: Filter by end date (format: YYYY-MM-DD) - **search**: Search in title and summary """ return await article_service.get_articles( page=page, per_page=per_page, source=source, category=category, date_from=date_from, date_to=date_to, search=search )
function_simple
0
{"cognitive_complexity": 0, "loc": 23, "code_loc": 3, "docstring_loc": 11, "function_name": "read_articles", "class_name": null, "qualname": "read_articles", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/article_router.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/reasoning/test_deepseekv3_reasoning_parser.py:test_parser_selection
# Context: import pytest from vllm.reasoning.deepseek_r1_reasoning_parser import DeepSeekR1ReasoningParser from vllm.reasoning.deepseek_v3_reasoning_parser import DeepSeekV3ReasoningParser from vllm.reasoning.identity_reasoning_parser import IdentityReasoningParser def tokenizer(): ... def test_identity_reasoning_parser_basic(tokenizer): ... # Task: Write a Python test function `test_parser_selection` to verify the behavior of `parser_selection`. Module under test: transformers, vllm.entrypoints.openai.chat_completion.protocol, vllm.entrypoints.openai.engine.protocol
def test_parser_selection(tokenizer, thinking, expected_parser_type): parser = DeepSeekV3ReasoningParser( tokenizer, chat_template_kwargs={"thinking": thinking} ) assert isinstance(parser._parser, expected_parser_type)
test
1
{"function_name": "test_parser_selection", "class_name": null, "qualname": "test_parser_selection", "file_path": "tests/reasoning/test_deepseekv3_reasoning_parser.py", "repo_id": "vllm-project/vllm", "loc": 6, "tested_modules": ["transformers", "vllm.entrypoints.openai.chat_completion.protocol", "vllm.entrypoints.openai.engine.protocol", "vllm.reasoning.deepseek_r1_reasoning_parser", "vllm.reasoning.deepseek_v3_reasoning_parser"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/trainer_optimizer.py:_get_bitsandbytes_optimizer
# Context: from typing import TYPE_CHECKING, Any from .utils import ( is_apollo_torch_available, is_bitsandbytes_available, is_galore_torch_available, is_grokadamw_available, is_lomo_available, is_schedulefree_available, is_torch_optimi_available, is_torchao_available, strtobool, ) from torch.optim import AdamW from bitsandbytes.optim import AdamW, Lion, RMSprop from torch_xla.amp.syncfree import AdamW from bitsandbytes.optim import AdEMAMix class OptimizerContext: ... def _parse_optim_args(optim_args_str: str | None) -> dict[str, str]: ... def is_optimizer_factory(optimizer_cls_or_factory: Any) -> bool: ... def _setup_low_rank_optimizer(args: TrainingArguments, model: PreTrainedModel, optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], optimizer_kwargs: dict[str, Any], is_layerwise_supported: bool) -> tuple[Any, dict[str, Any]]: ... def _get_adafactor(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adamw_torch(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adamw_torch_xla(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adamw_torch_npu_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adamw_apex_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adamw_anyprecision(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_sgd(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_adagrad(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_rmsprop(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_galore_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_apollo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_lomo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_grokadamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... 
def _get_torchao_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_schedule_free_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... def _get_stable_adamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ... # Task: Write a Python function `_get_bitsandbytes_optimizer` to get bitsandbytes optimizer (AdamW, Lion, RMSprop variants). Parameters: ctx: OptimizerContext Returns: tuple[Any, dict[str, Any]]
def _get_bitsandbytes_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: """Get bitsandbytes optimizer (AdamW, Lion, RMSprop variants).""" if not is_bitsandbytes_available(): raise ImportError( "You need to install `bitsandbytes` in order to use bitsandbytes optimizers: `pip install -U bitsandbytes`" ) from bitsandbytes.optim import AdamW, Lion, RMSprop optim_name = ctx.args.optim is_paged = "paged" in optim_name optim_bits = 8 if "8bit" in optim_name else 32 optimizer_cls = None additional_optim_kwargs = ctx.adam_kwargs if "adam" in optim_name: optimizer_cls = AdamW elif "lion" in optim_name: optimizer_cls = Lion additional_optim_kwargs = {"betas": (ctx.args.adam_beta1, ctx.args.adam_beta2)} elif "rmsprop" in optim_name: optimizer_cls = RMSprop additional_optim_kwargs = ctx.optim_args elif "ademamix" in optim_name: from bitsandbytes.optim import AdEMAMix optimizer_cls = AdEMAMix additional_optim_kwargs = { "betas": ( float(ctx.optim_args.get("beta1", ctx.args.adam_beta1)), float(ctx.optim_args.get("beta2", ctx.args.adam_beta2)), float(ctx.optim_args.get("beta3", 0.9999)), ), "alpha": float(ctx.optim_args.get("alpha", 5.0)), "eps": float(ctx.optim_args.get("eps", ctx.args.adam_epsilon)), } if "t_alpha" in ctx.optim_args: additional_optim_kwargs["t_alpha"] = int(ctx.optim_args["t_alpha"]) if "t_beta3" in ctx.optim_args: additional_optim_kwargs["t_beta3"] = int(ctx.optim_args["t_beta3"]) bnb_kwargs = {"optim_bits": optim_bits} if "rmsprop" not in optim_name: bnb_kwargs["is_paged"] = is_paged ctx.optimizer_kwargs.update(additional_optim_kwargs) ctx.optimizer_kwargs.update(bnb_kwargs) return optimizer_cls, ctx.optimizer_kwargs
function_complex
0
{"cognitive_complexity": 11, "loc": 48, "code_loc": 40, "docstring_loc": 1, "function_name": "_get_bitsandbytes_optimizer", "class_name": null, "qualname": "_get_bitsandbytes_optimizer", "file_path": "src/transformers/trainer_optimizer.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/tests/test_download_expression.py:TestDownloadExpressionErrors.test_download_expression_with_malformed_uris
# Context: import ray from ray.data.expressions import DownloadExpr, col, download class TestDownloadExpressionStructure: ... class TestDownloadExpressionFunctionality: ... class TestDownloadExpressionIntegration: ... class TestDownloadExpressionErrors: def test_download_expression_invalid_uri_column(self): ... def test_download_expression_with_null_uris(self): ... def test_download_expression_mixed_valid_and_invalid_uris(self, tmp_path): ... # Task: Write a Python test method `test_download_expression_with_malformed_uris` in test class `TestDownloadExpressionErrors` to test download expression with malformed URIs. Module under test: PIL, ray.data.expressions
def test_download_expression_with_malformed_uris(self, tmp_path): """Test download expression with malformed URIs. This tests that various malformed URIs are caught and return None instead of crashing. All of the URIs should be malformed in order to test the ZeroDivisionError described in https://github.com/ray-project/ray/issues/58462. """ malformed_uris = [ f"local://{tmp_path}/nonexistent.txt", # File doesn't exist "local:///this/path/does/not/exist/file.txt", # Invalid path "", # Empty URI "foobar", # Random string # TODO(xyuzh): Currently, using the below URIs raises an exception # in _resolve_paths_and_filesystem. We need to fix that issue and # add the tests in. # "file:///\x00/null/byte", # Null byte # "http://host/path\n\r", # Line breaks # "foo://bar", # Invalid scheme # "://no-scheme", # Missing scheme # "http://host/path?query=<script>", # Injection attempts ] ds = ray.data.from_items([{"uri": uri} for uri in malformed_uris]) ds_with_downloads = ds.with_column("bytes", download("uri")) results = ds_with_downloads.take_all() # All malformed URIs should return None assert len(results) == len(malformed_uris) for result in results: assert result["bytes"] is None
test
0
{"function_name": "test_download_expression_with_malformed_uris", "class_name": "TestDownloadExpressionErrors", "qualname": "TestDownloadExpressionErrors.test_download_expression_with_malformed_uris", "file_path": "python/ray/data/tests/test_download_expression.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["PIL", "ray.data.expressions"], "has_docstring": true, "runnable_level": "plib_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestEnsureTypeInSchemas.test_empty_schema_in_anyof_gets_type
# Context: from copy import deepcopy from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestRequiredOptional: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestBuildRichFieldDescription: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestEnsureTypeInSchemas: def test_empty_schema_in_oneof_gets_type(self) -> None: ... def test_non_empty_unchanged(self) -> None: ... # Task: Write a Python test method `test_empty_schema_in_anyof_gets_type` in test class `TestEnsureTypeInSchemas` to verify the behavior of `empty_schema_in_anyof_gets_type`. Module under test: __future__, copy, typing
def test_empty_schema_in_anyof_gets_type(self) -> None: schema = {"anyOf": [{}, {"type": "string"}]} result = ensure_type_in_schemas(deepcopy(schema)) assert result["anyOf"][0] == {"type": "object"}
test
0
{"function_name": "test_empty_schema_in_anyof_gets_type", "class_name": "TestEnsureTypeInSchemas", "qualname": "TestEnsureTypeInSchemas.test_empty_schema_in_anyof_gets_type", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py:read_engagement_stats
# Context: from fastapi import APIRouter, Query from typing import List, Optional, Dict, Any from services.social_media_service import social_media_service async def read_posts(page: int, per_page: int, platform: Optional[str], user_handle: Optional[str], sentiment: Optional[str], category: Optional[str], date_from: Optional[str], date_to: Optional[str], search: Optional[str]): ... async def read_post(post_id: str): ... async def read_platforms(): ... async def read_sentiments(date_from: Optional[str], date_to: Optional[str]): ... async def read_top_users(platform: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str]): ... async def read_categories(date_from: Optional[str], date_to: Optional[str]): ... async def read_user_sentiment(limit: int, platform: Optional[str], date_from: Optional[str], date_to: Optional[str]): ... async def read_category_sentiment(date_from: Optional[str], date_to: Optional[str]): ... async def read_trending_topics(limit: int, date_from: Optional[str], date_to: Optional[str]): ... async def read_sentiment_over_time(platform: Optional[str], date_from: Optional[str], date_to: Optional[str]): ... async def read_influential_posts(sentiment: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str]): ... def _run_browser_setup_background(sites: Optional[List[str]]): ... async def setup_browser_session(sites: Optional[List[str]]): ... # Task: Write a Python async function `read_engagement_stats` to get overall engagement statistics. Parameters: date_from: Optional[str], date_to: Optional[str]
async def read_engagement_stats( date_from: Optional[str] = Query(None, description="Filter by start date (format: YYYY-MM-DD)"), date_to: Optional[str] = Query(None, description="Filter by end date (format: YYYY-MM-DD)"), ): """Get overall engagement statistics.""" return await social_media_service.get_engagement_stats(date_from=date_from, date_to=date_to)
function_simple
0
{"cognitive_complexity": 0, "loc": 6, "code_loc": 1, "docstring_loc": 1, "function_name": "read_engagement_stats", "class_name": null, "qualname": "read_engagement_stats", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/social_media_router.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:ci/ray_ci/test_supported_images.py:TestRayImagesSchema.test_python_versions_are_strings
# Context: import pytest from ci.ray_ci.supported_images import get_image_config, load_supported_images class TestRayImagesSchema: def test_has_image_types(self): ... def test_required_keys(self, image_type): ... def test_required_defaults(self, image_type): ... def test_defaults_in_supported(self, image_type): ... def test_no_empty_lists(self, image_type): ... def test_platforms_are_strings(self, image_type): ... def test_architectures_are_strings(self, image_type): ... # Task: Write a Python test method `test_python_versions_are_strings` in test class `TestRayImagesSchema` to verify the behavior of `python_versions_are_strings`. Module under test: ci.ray_ci.supported_images
def test_python_versions_are_strings(self, image_type): for v in get_image_config(image_type)["python"]: assert isinstance(v, str), ( f"{image_type}: python version {v!r} is {type(v).__name__}, " f"not str (missing quotes in YAML?)" )
test
0
{"function_name": "test_python_versions_are_strings", "class_name": "TestRayImagesSchema", "qualname": "TestRayImagesSchema.test_python_versions_are_strings", "file_path": "ci/ray_ci/test_supported_images.py", "repo_id": "ray-project/ray", "loc": 6, "tested_modules": ["ci.ray_ci.supported_images"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/trainer/test_trainer_checkpointing.py:JITCheckpointTest.test_jit_checkpoint_callback_on_train_end
# Context: import signal from unittest.mock import Mock, patch from transformers.trainer_jit_checkpoint import CheckpointManager, JITCheckpointCallback class TrainerCheckpointSaveTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerResumeTrainingTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerAutoBatchSizeTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerCheckpointRotationTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerInterruptedTrainingTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerSavingTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerBestModelTest(TestCasePlus, TrainerIntegrationCommon): ... class TrainerIntegrationWithHubTester(unittest.TestCase): ... class JITCheckpointTest(unittest.TestCase): def setUp(self): ... def tearDown(self): ... def get_trainer(self, enable_jit): ... def test_checkpoint_manager_initialization(self): ... def test_signal_handler_setup(self): ... def test_sigterm_handler_flow(self, mock_timer): ... def test_toggle_checkpoint_flag(self): ... def test_execute_jit_checkpoint(self): ... def test_execute_jit_checkpoint_sentinel_file_cleanup(self): ... def test_execute_jit_checkpoint_with_exception(self): ... def test_jit_checkpoint_callback_initialization(self): ... def test_jit_checkpoint_callback_set_trainer_enabled(self): ... def test_jit_checkpoint_callback_set_trainer_disabled(self): ... def test_jit_checkpoint_callback_on_pre_optimizer_step(self): ... def test_jit_checkpoint_callback_on_step_begin(self): ... def test_jit_checkpoint_callback_on_step_end(self): ... def test_jit_checkpoint_callback_on_epoch_end(self): ... def test_kill_wait_period(self, mock_timer): ... def test_integration_with_trainer(self): ... # Task: Write a Python test method `test_jit_checkpoint_callback_on_train_end` in test class `JITCheckpointTest` to test signal handler restoration on training end. Module under test: pathlib, typing, huggingface_hub
def test_jit_checkpoint_callback_on_train_end(self): """Test signal handler restoration on training end.""" trainer = self.get_trainer() callback = JITCheckpointCallback() # Store original SIGTERM handler original_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL) try: callback.set_trainer(trainer) # Verify signal handler was set up self.assertIsNotNone(callback.jit_manager._original_sigterm_handler) # Mock control object control = Mock() # Call on_train_end callback.on_train_end(trainer.args, trainer.state, control) # Verify signal handler was restored current_handler = signal.signal(signal.SIGTERM, signal.SIG_DFL) self.assertEqual(current_handler, callback.jit_manager._original_sigterm_handler) finally: # Restore original handler for cleanup signal.signal(signal.SIGTERM, original_handler)
test
0
{"function_name": "test_jit_checkpoint_callback_on_train_end", "class_name": "JITCheckpointTest", "qualname": "JITCheckpointTest.test_jit_checkpoint_callback_on_train_end", "file_path": "tests/trainer/test_trainer_checkpointing.py", "repo_id": "huggingface/transformers", "loc": 27, "tested_modules": ["pathlib", "typing", "huggingface_hub", "torch", "transformers"], "has_docstring": true, "runnable_level": "class_runnable"}
huggingface/transformers:tests/models/eomt/test_image_processing_eomt.py:EomtImageProcessingTest.test_call_numpy
# Context: import numpy as np class EomtImageProcessingTester: ... def prepare_semantic_single_inputs(): ... def prepare_semantic_batch_inputs(): ... class EomtImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = EomtImageProcessor if is_vision_available() else None fast_image_processing_class = EomtImageProcessorFast if is_torchvision_available() else None def setUp(self): ... def image_processor_dict(self): ... def test_image_processor_properties(self): ... def test_image_processor_from_dict_with_kwargs(self): ... def test_call_numpy_4_channels(self): ... def test_call_pil(self): ... def test_call_pytorch(self): ... def test_slow_fast_equivalence(self): ... def test_slow_fast_equivalence_batched(self): ... def test_post_process_semantic_segmentation(self): ... def test_post_process_panoptic_segmentation(self): ... def test_post_process_instance_segmentation(self): ... # Task: Write a Python test method `test_call_numpy` in test class `EomtImageProcessingTest` to verify the behavior of `call_numpy`. Module under test: datasets, transformers.image_utils, transformers.testing_utils
def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = (1, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = (2, 3, 18, 18) self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
test
0
{"function_name": "test_call_numpy", "class_name": "EomtImageProcessingTest", "qualname": "EomtImageProcessingTest.test_call_numpy", "file_path": "tests/models/eomt/test_image_processing_eomt.py", "repo_id": "huggingface/transformers", "loc": 18, "tested_modules": ["datasets", "transformers.image_utils", "transformers.testing_utils", "transformers.utils", "test_image_processing_common"], "has_docstring": false, "runnable_level": "class_runnable"}
vllm-project/vllm:tests/kernels/helion/test_silu_mul_fp8.py:TestSiluMulFp8ConfigPicker.test_config_picker_default_ignored_when_valid_keys_exist
# Context: import torch from vllm.kernels.helion.ops.silu_mul_fp8 import ( pick_silu_mul_fp8_config, silu_mul_fp8, silu_mul_fp8_baseline, ) def skip_if_platform_unsupported(): ... def reset_config_manager_singleton(): ... class TestSiluMulFp8Correctness: ... def silu_mul_fp8_pytorch(input: torch.Tensor, scale: torch.Tensor) -> torch.Tensor: ... class TestSiluMulFp8PytorchReference: ... class TestSiluMulFp8Integration: ... class TestSiluMulFp8ConfigPicker: def test_config_picker_exact_match(self): ... def test_config_picker_closest_match(self): ... def test_config_picker_fallback_to_default(self): ... def test_config_picker_no_configs(self): ... def test_config_picker_different_sizes(self, intermediate_size): ... def test_config_picker_numtokens_ceiling(self): ... def test_config_picker_numtokens_exact(self): ... def test_config_picker_numtokens_fallback_to_largest(self): ... def test_config_picker_malformed_key_raises(self): ... # Task: Write a Python test method `test_config_picker_default_ignored_when_valid_keys_exist` in test class `TestSiluMulFp8ConfigPicker` to 'default' is skipped in favor of a real match. Module under test: vllm.utils.import_utils, vllm.kernels.helion.config_manager, vllm.kernels.helion.ops.silu_mul_fp8
def test_config_picker_default_ignored_when_valid_keys_exist(self): """'default' is skipped in favor of a real match.""" config_keys = [ "default", "intermediate_4096_numtokens_32", "intermediate_4096_numtokens_128", ] input_tensor = torch.randn(64, 8192, dtype=torch.bfloat16, device="cuda") scale = torch.tensor([0.5], dtype=torch.float32, device="cuda") selected_key = pick_silu_mul_fp8_config((input_tensor, scale), config_keys) assert selected_key == "intermediate_4096_numtokens_128"
test
1
{"function_name": "test_config_picker_default_ignored_when_valid_keys_exist", "class_name": "TestSiluMulFp8ConfigPicker", "qualname": "TestSiluMulFp8ConfigPicker.test_config_picker_default_ignored_when_valid_keys_exist", "file_path": "tests/kernels/helion/test_silu_mul_fp8.py", "repo_id": "vllm-project/vllm", "loc": 12, "tested_modules": ["vllm.utils.import_utils", "vllm.kernels.helion.config_manager", "vllm.kernels.helion.ops.silu_mul_fp8", "vllm.kernels.helion.utils", "vllm.kernels.helion.register"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/llm/ollama/serializer.py:OllamaMessageSerializer._serialize_tool_calls
# Context: import json from ollama._types import Image, Message from browser_use.llm.messages import ( AssistantMessage, BaseMessage, SystemMessage, ToolCall, UserMessage, ) class OllamaMessageSerializer: def _extract_text_content(content: Any) -> str: ... def _extract_images(content: Any) -> list[Image]: ... def serialize(message: UserMessage) -> Message: ... def serialize(message: SystemMessage) -> Message: ... def serialize(message: AssistantMessage) -> Message: ... def serialize(message: BaseMessage) -> Message: ... def serialize_messages(messages: list[BaseMessage]) -> list[Message]: ... # Task: Write a Python method `_serialize_tool_calls` for the class `OllamaMessageSerializer` to convert browser-use ToolCalls to Ollama ToolCalls. Parameters: tool_calls: list[ToolCall] Returns: list[Message.ToolCall]
def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[Message.ToolCall]: """Convert browser-use ToolCalls to Ollama ToolCalls.""" ollama_tool_calls: list[Message.ToolCall] = [] for tool_call in tool_calls: # Parse arguments from JSON string to dict for Ollama try: arguments_dict = json.loads(tool_call.function.arguments) except json.JSONDecodeError: # If parsing fails, wrap in a dict arguments_dict = {'arguments': tool_call.function.arguments} ollama_tool_call = Message.ToolCall( function=Message.ToolCall.Function(name=tool_call.function.name, arguments=arguments_dict) ) ollama_tool_calls.append(ollama_tool_call) return ollama_tool_calls
function_simple
0
{"cognitive_complexity": 2, "loc": 18, "code_loc": 11, "docstring_loc": 1, "function_name": "_serialize_tool_calls", "class_name": "OllamaMessageSerializer", "qualname": "OllamaMessageSerializer._serialize_tool_calls", "file_path": "browser_use/llm/ollama/serializer.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/tests/unit/cli/test_common.py:TestApiKey.test_get_api_key_not_set
# Context: import os from unittest.mock import MagicMock, Mock, patch import pytest from lfx.cli.common import ( create_verbose_printer, execute_graph_with_capture, extract_result_data, flow_id_from_path, get_api_key, get_best_access_host, get_free_port, is_port_in_use, load_graph_from_path, ) class TestVerbosePrinter: ... class TestPortUtilities: ... class TestHostUtilities: ... class TestFlowId: ... class TestLoadGraph: ... class TestGraphExecution: ... class TestResultExtraction: ... class TestApiKey: def test_get_api_key_success(self): ... def test_get_api_key_empty_string(self): ... # Task: Write a Python test method `test_get_api_key_not_set` in test class `TestApiKey` to test error when API key is not set. Module under test: pathlib, lfx.cli.common
def test_get_api_key_not_set(self): """Test error when API key is not set.""" with ( patch.dict(os.environ, {}, clear=True), pytest.raises(ValueError, match="LANGFLOW_API_KEY environment variable is not set"), ): get_api_key()
test
1
{"function_name": "test_get_api_key_not_set", "class_name": "TestApiKey", "qualname": "TestApiKey.test_get_api_key_not_set", "file_path": "src/lfx/tests/unit/cli/test_common.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["pathlib", "lfx.cli.common"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/serve/tests/test_metrics_haproxy.py:test_replica_metrics_fields
# Context: import httpx from ray import serve from ray._common.test_utils import ( SignalActor, fetch_prometheus_metrics, wait_for_condition, ) from ray.serve._private.test_utils import get_application_url, get_metric_dictionaries def metrics_start_shutdown(request): ... def extract_tags(line: str) -> Dict[str, str]: ... def contains_tags(line: str, expected_tags: Optional[Dict[str, str]]) -> bool: ... def get_metric_float(metric: str, expected_tags: Optional[Dict[str, str]]) -> float: ... def check_metric_float_eq(metric: str, expected: float, expected_tags: Optional[Dict[str, str]]) -> bool: ... def check_sum_metric_eq(metric_name: str, expected: float, tags: Optional[Dict[str, str]]) -> bool: ... def test_serve_metrics_for_successful_connection(metrics_start_shutdown): ... def test_http_replica_gauge_metrics(metrics_start_shutdown): ... def test_proxy_metrics_not_found(metrics_start_shutdown): ... def test_proxy_metrics_internal_error(metrics_start_shutdown): ... def test_proxy_metrics_fields_not_found(metrics_start_shutdown): ... def test_proxy_timeout_metrics(metrics_start_shutdown): ... def test_proxy_disconnect_http_metrics(metrics_start_shutdown): ... def test_no_499_misclassification_after_successful_response(metrics_start_shutdown): ... def test_proxy_metrics_fields_internal_error(metrics_start_shutdown): ... def test_proxy_metrics_http_status_code_is_error(metrics_start_shutdown): ... def test_multiplexed_metrics(metrics_start_shutdown): ... def test_long_poll_host_sends_counted(serve_instance): ... def test_actor_summary(serve_instance): ... # Task: Write a Python test function `test_replica_metrics_fields` to test replica metrics fields. Module under test: concurrent.futures, typing, fastapi
def test_replica_metrics_fields(metrics_start_shutdown): """Test replica metrics fields""" @serve.deployment def f(): return "hello" @serve.deployment def g(): return "world" serve.run(f.bind(), name="app1", route_prefix="/f") serve.run(g.bind(), name="app2", route_prefix="/g") url_f = get_application_url("HTTP", "app1") url_g = get_application_url("HTTP", "app2") assert "hello" == httpx.post(url_f).text assert "world" == httpx.post(url_g).text wait_for_condition( lambda: len( get_metric_dictionaries("ray_serve_deployment_request_counter_total") ) == 2, timeout=40, ) metrics = get_metric_dictionaries("ray_serve_deployment_request_counter_total") assert len(metrics) == 2 expected_output = { ("/f", "f", "app1"), ("/g", "g", "app2"), } assert { ( metric["route"], metric["deployment"], metric["application"], ) for metric in metrics } == expected_output start_metrics = get_metric_dictionaries("ray_serve_deployment_replica_starts_total") assert len(start_metrics) == 2 expected_output = {("f", "app1"), ("g", "app2")} assert { (start_metric["deployment"], start_metric["application"]) for start_metric in start_metrics } == expected_output # Latency metrics wait_for_condition( lambda: len( get_metric_dictionaries("ray_serve_deployment_processing_latency_ms_count") ) == 2, timeout=40, ) for metric_name in [ "ray_serve_deployment_processing_latency_ms_count", "ray_serve_deployment_processing_latency_ms_sum", ]: latency_metrics = get_metric_dictionaries(metric_name) print(f"checking metric {metric_name}, {latency_metrics}") assert len(latency_metrics) == 2 expected_output = {("f", "app1"), ("g", "app2")} assert { (latency_metric["deployment"], latency_metric["application"]) for latency_metric in latency_metrics } == expected_output wait_for_condition( lambda: len(get_metric_dictionaries("ray_serve_replica_processing_queries")) == 2 ) processing_queries = get_metric_dictionaries("ray_serve_replica_processing_queries") expected_output = {("f", "app1"), ("g", "app2")} assert { 
(processing_query["deployment"], processing_query["application"]) for processing_query in processing_queries } == expected_output @serve.deployment def h(): return 1 / 0 serve.run(h.bind(), name="app3", route_prefix="/h") url_h = get_application_url("HTTP", "app3") assert 500 == httpx.get(url_h).status_code wait_for_condition( lambda: len(get_metric_dictionaries("ray_serve_deployment_error_counter_total")) == 1, timeout=40, ) err_requests = get_metric_dictionaries("ray_serve_deployment_error_counter_total") assert len(err_requests) == 1 expected_output = ("/h", "h", "app3") assert ( err_requests[0]["route"], err_requests[0]["deployment"], err_requests[0]["application"], ) == expected_output wait_for_condition( lambda: len(get_metric_dictionaries("ray_serve_deployment_replica_healthy")) == 3, timeout=40, ) health_metrics = get_metric_dictionaries("ray_serve_deployment_replica_healthy") expected_output = { ("f", "app1"), ("g", "app2"), ("h", "app3"), } assert { (health_metric["deployment"], health_metric["application"]) for health_metric in health_metrics } == expected_output
test
0
{"function_name": "test_replica_metrics_fields", "class_name": null, "qualname": "test_replica_metrics_fields", "file_path": "python/ray/serve/tests/test_metrics_haproxy.py", "repo_id": "ray-project/ray", "loc": 118, "tested_modules": ["concurrent.futures", "typing", "fastapi", "starlette.requests", "starlette.responses"], "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:tests/models/pp_doclayout_v2/test_modeling_pp_doclayout_v2.py:PPDocLayoutV2ModelTest.test_forward_signature
# Context: import inspect class PPDocLayoutV2ModelTester: ... class PPDocLayoutV2ModelIntegrationTest(unittest.TestCase): ... class PPDocLayoutV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (PPDocLayoutV2ForObjectDetection,) if is_torch_available() else () pipeline_model_mapping = {"object-detection": PPDocLayoutV2ForObjectDetection} if is_torch_available() else {} is_encoder_decoder = True test_missing_keys = False test_inputs_embeds = False test_resize_embeddings = False def setUp(self): ... def test_config(self): ... def test_pp_doclayout_v2_object_detection_head_model(self): ... def test_model_get_set_embeddings(self): ... def test_model_common_attributes(self): ... def test_feed_forward_chunking(self): ... def test_retain_grad_hidden_states_attentions(self): ... def test_inference_with_different_dtypes(self, dtype_str): ... def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str): ... def test_hidden_states_output(self): ... def test_attention_outputs(self): ... # Task: Write a Python test method `test_forward_signature` in test class `PPDocLayoutV2ModelTest` to verify the behavior of `forward_signature`. Module under test: parameterized, transformers, transformers.testing_utils
def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names)
test
0
{"function_name": "test_forward_signature", "class_name": "PPDocLayoutV2ModelTest", "qualname": "PPDocLayoutV2ModelTest.test_forward_signature", "file_path": "tests/models/pp_doclayout_v2/test_modeling_pp_doclayout_v2.py", "repo_id": "huggingface/transformers", "loc": 9, "tested_modules": ["parameterized", "transformers", "transformers.testing_utils", "test_configuration_common", "test_modeling_common"], "has_docstring": false, "runnable_level": "class_runnable"}
browser-use/browser-use:tests/ci/test_search_find.py:TestFindElements.test_empty_page
# Context: from browser_use.agent.views import ActionResult def http_server(): ... def base_url(http_server): ... async def browser_session(): ... def tools(): ... async def _navigate_and_wait(tools, browser_session, url): ... class TestSearchPage: ... class TestRegistration: ... class TestFindElements: async def test_basic_selector(self, tools, browser_session, base_url): ... async def test_attribute_extraction(self, tools, browser_session, base_url): ... async def test_max_results_limiting(self, tools, browser_session, base_url): ... async def test_no_matching_elements(self, tools, browser_session, base_url): ... async def test_invalid_selector(self, tools, browser_session, base_url): ... async def test_include_text_false(self, tools, browser_session, base_url): ... async def test_nested_selectors(self, tools, browser_session, base_url): ... async def test_children_count(self, tools, browser_session, base_url): ... async def test_memory_set(self, tools, browser_session, base_url): ... # Task: Write a Python test method `test_empty_page` in test class `TestFindElements` to works on a nearly empty page without errors. Module under test: browser_use.agent.views, browser_use.browser, browser_use.tools.service
async def test_empty_page(self, tools, browser_session, base_url): """Works on a nearly empty page without errors.""" await _navigate_and_wait(tools, browser_session, f'{base_url}/empty') result = await tools.find_elements(selector='p', browser_session=browser_session) assert isinstance(result, ActionResult) assert result.error is None assert result.extracted_content is not None assert 'No elements found' in result.extracted_content
test
0
{"function_name": "test_empty_page", "class_name": "TestFindElements", "qualname": "TestFindElements.test_empty_page", "file_path": "tests/ci/test_search_find.py", "repo_id": "browser-use/browser-use", "loc": 10, "tested_modules": ["browser_use.agent.views", "browser_use.browser", "browser_use.tools.service"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/custom/utils.py:build_custom_components
# Context: from lfx.custom.directory_reader.utils import ( abuild_custom_component_list_from_path, build_custom_component_list_from_path, merge_nested_dicts_with_renaming, ) from lfx.log.logger import logger def _generate_code_hash(source_code: str, modname: str) -> str: ... class UpdateBuildConfigError(Exception): ... def add_output_types(frontend_node: CustomComponentFrontendNode, return_types: list[str]) -> None: ... def reorder_fields(frontend_node: CustomComponentFrontendNode, field_order: list[str]) -> None: ... def add_base_classes(frontend_node: CustomComponentFrontendNode, return_types: list[str]) -> None: ... def extract_type_from_optional(field_type): ... def get_field_properties(extra_field): ... def process_type(field_type: str): ... def add_new_custom_field(frontend_node: CustomComponentFrontendNode, field_name: str, field_type: str, field_value: Any, field_required: bool, field_config: dict): ... def add_extra_fields(frontend_node, field_config, function_args) -> None: ... def get_field_dict(field: Input | dict): ... def run_build_inputs(custom_component: Component): ... def get_component_instance(custom_component: CustomComponent | Component, user_id: str | UUID | None): ... def is_a_preimported_component(custom_component: CustomComponent): ... def run_build_config(custom_component: CustomComponent, user_id: str | UUID | None) -> tuple[dict, CustomComponent]: ... def add_code_field(frontend_node: CustomComponentFrontendNode, raw_code): ... def add_code_field_to_build_config(build_config: dict, raw_code: str): ... def get_module_name_from_display_name(display_name: str): ... def build_custom_component_template_from_inputs(custom_component: Component | CustomComponent, user_id: str | UUID | None, module_name: str | None): ... def build_component_metadata(frontend_node: CustomComponentFrontendNode, custom_component: CustomComponent, module_name: str, ctype_name: str): ... 
def build_custom_component_template(custom_component: CustomComponent, user_id: str | UUID | None, module_name: str | None) -> tuple[dict[str, Any], CustomComponent | Component]: ... def create_component_template(component: dict | None, component_extractor: Component | CustomComponent | None, module_name: str | None): ... async def abuild_custom_components(components_paths: list[str]): ... def sanitize_field_config(field_config: dict | Input): ... def build_component(component): ... def get_function(code): ... def get_instance_name(instance): ... async def update_component_build_config(component: CustomComponent, build_config: dotdict, field_value: Any, field_name: str | None): ... async def get_all_types_dict(components_paths: list[str]): ... async def get_single_component_dict(component_type: str, component_name: str, components_paths: list[str]): ... async def load_custom_component(component_name: str, components_paths: list[str]): ... # Task: Write a Python function `build_custom_components` to build custom components from the specified paths. Parameters: components_paths: list[str]
def build_custom_components(components_paths: list[str]):
    """Build custom components from the specified paths.

    Each path is scanned once (duplicates are skipped) and the per-path
    component dictionaries are merged into a single result, renaming on
    key collisions.
    """
    if not components_paths:
        return {}

    logger.info(f"Building custom components from {components_paths}")

    merged: dict = {}
    seen_paths: set[str] = set()
    for raw_path in components_paths:
        key = str(raw_path)
        # Skip paths we have already processed in this call.
        if key in seen_paths:
            continue
        seen_paths.add(key)

        component_dict = build_custom_component_list_from_path(key)
        if not component_dict:
            continue

        category = next(iter(component_dict))
        logger.debug(f"Loading {len(component_dict[category])} component(s) from category {category}")
        merged = merge_nested_dicts_with_renaming(merged, component_dict)

    return merged
function_complex
1
{"cognitive_complexity": 6, "loc": 24, "code_loc": 18, "docstring_loc": 1, "function_name": "build_custom_components", "class_name": null, "qualname": "build_custom_components", "file_path": "src/lfx/src/lfx/custom/utils.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:tests/kernels/test_kernels.py:TestHubKernels.test_kernelize
# Context: import copy from kernels import Device, Mode, kernelize class TestKernelsEnv(TestCasePlus): ... class TestKernelUtilities(TestCasePlus): ... class TestAttentionKernelRegistration(TestCasePlus): ... class TestUseKernelsLifecycle(TestCasePlus): ... class TestKernelMappingDeviceFiltering(TestCasePlus): ... class TestHubKernels(TestCasePlus): def setUpClass(cls): ... def tearDownClass(cls): ... def tearDown(self): ... def test_forward(self): ... def test_getter_use_kernels(self): ... def assert_kernelized_forward_is_different(self, kernelized_model, not_kernelized_model): ... def assert_kernelized_forward_is_the_same(self, model_1, model_2): ... def test_setter_use_kernels(self): ... def test_unkernelize(self): ... def test_kernels_mapping(self): ... def test_faulty_kernel_mapping_layer_name(self): ... def test_faulty_kernel_mapping_type(self): ... # Task: Write a Python test method `test_kernelize` in test class `TestHubKernels` to verify the behavior of `kernelize`. Module under test: transformers, transformers.integrations.hub_kernels, transformers.masking_utils
def test_kernelize(self):
    """kernelize() must change the forward path to match the reference kernelized model."""
    # Work on a fresh copy so the shared reference model stays untouched.
    candidate = copy.deepcopy(self.model_not_kernelized)
    target_device = Device(type=candidate.device.type)
    kernelize(candidate, mode=Mode.INFERENCE, device=target_device)  # type: ignore[arg-type]
    # After kernelization the copy must diverge from the eager model ...
    self.assert_kernelized_forward_is_different(candidate, self.model_not_kernelized)
    # ... while matching the pre-kernelized reference.
    self.assert_kernelized_forward_is_the_same(candidate, self.model_kernelized)
    del candidate
test
0
{"function_name": "test_kernelize", "class_name": "TestHubKernels", "qualname": "TestHubKernels.test_kernelize", "file_path": "tests/kernels/test_kernels.py", "repo_id": "huggingface/transformers", "loc": 6, "tested_modules": ["transformers", "transformers.integrations.hub_kernels", "transformers.masking_utils", "transformers.modeling_utils", "transformers.testing_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/auto/test_video_processing_auto.py:AutoVideoProcessorTest.test_video_processor_not_found
# Context: from transformers import ( CONFIG_MAPPING, VIDEO_PROCESSOR_MAPPING, AutoConfig, AutoVideoProcessor, LlavaOnevisionConfig, LlavaOnevisionVideoProcessor, ) class AutoVideoProcessorTest(unittest.TestCase): def setUp(self): ... def test_video_processor_from_model_shortcut(self): ... def test_video_processor_from_local_directory_from_key(self): ... def test_video_processor_from_local_directory_from_preprocessor_key(self): ... def test_video_processor_from_local_directory_from_config(self): ... def test_video_processor_from_local_file(self): ... def test_repo_not_found(self): ... def test_revision_not_found(self): ... def test_from_pretrained_dynamic_video_processor(self): ... def test_new_video_processor_registration(self): ... def test_from_pretrained_dynamic_video_processor_conflict(self): ... # Task: Write a Python test method `test_video_processor_not_found` in test class `AutoVideoProcessorTest` to verify the behavior of `video_processor_not_found`. Module under test: pathlib, transformers, transformers.testing_utils
def test_video_processor_not_found(self):
    """A repo with a config but no video processor files must raise EnvironmentError."""
    repo_id = "hf-internal-testing/config-no-model"
    expected_message = "Can't load video processor for 'hf-internal-testing/config-no-model'."
    with self.assertRaisesRegex(EnvironmentError, expected_message):
        AutoVideoProcessor.from_pretrained(repo_id)
test
0
{"function_name": "test_video_processor_not_found", "class_name": "AutoVideoProcessorTest", "qualname": "AutoVideoProcessorTest.test_video_processor_not_found", "file_path": "tests/models/auto/test_video_processing_auto.py", "repo_id": "huggingface/transformers", "loc": 6, "tested_modules": ["pathlib", "transformers", "transformers.testing_utils", "test_module.custom_configuration", "test_module.custom_video_processing"], "has_docstring": false, "runnable_level": "class_runnable"}
langflow-ai/langflow:src/backend/tests/unit/base/tools/test_run_flow.py:TestRunFlowBaseComponentInputOutputHandling.test_format_flow_outputs_creates_output_objects
# Context: from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch from lfx.base.tools.run_flow import RunFlowBaseComponent from lfx.graph.graph.base import Graph from lfx.template.field.base import Output def mock_shared_cache(): ... class TestRunFlowBaseComponentInitialization: ... class TestRunFlowBaseComponentFlowRetrieval: ... class TestRunFlowBaseComponentFlowCaching: ... class TestRunFlowBaseComponentOutputMethods: ... class TestRunFlowBaseComponentToolGeneration: ... class TestRunFlowBaseComponentTweakData: ... class TestRunFlowBaseComponentUpdateOutputs: ... class TestRunFlowBaseComponentTweaks: ... class TestRunFlowBaseComponentInputOutputHandling: def test_get_ioput_name_creates_unique_name(self): ... def test_get_ioput_name_raises_error_without_vertex_id(self): ... def test_get_ioput_name_raises_error_without_ioput_name(self): ... def test_extract_ioputs_from_keyed_values(self): ... def test_build_inputs_from_ioputs(self): ... def test_build_inputs_from_ioputs_handles_data_object(self): ... def test_format_flow_outputs_skips_vertices_with_successors(self): ... def test_delete_fields_with_list(self): ... def test_delete_fields_with_dict(self): ... def test_update_input_types_sets_empty_list_for_none(self): ... # Task: Write a Python test method `test_format_flow_outputs_creates_output_objects` in test class `TestRunFlowBaseComponentInputOutputHandling` to test that _format_flow_outputs creates Output objects from graph. Module under test: uuid, lfx.base.tools.run_flow, lfx.graph.graph.base
def test_format_flow_outputs_creates_output_objects(self):
    """Test that _format_flow_outputs creates Output objects from graph."""
    component = RunFlowBaseComponent()

    # One output vertex with two named outputs and no successors.
    vertex = MagicMock()
    vertex.id = "vertex_123"
    vertex.is_output = True
    vertex.outputs = [
        {"name": "output1", "display_name": "Output 1"},
        {"name": "output2", "display_name": "Output 2"},
    ]

    graph = MagicMock(spec=Graph)
    graph.vertices = [vertex]
    graph.successor_map = {}

    results = component._format_flow_outputs(graph)

    assert len(results) == 2
    for produced, out_name in zip(results, ("output1", "output2")):
        assert isinstance(produced, Output)
        assert produced.name == f"vertex_123{component.IOPUT_SEP}{out_name}"
        # Method name is generated from the sanitized vertex and output names.
        assert produced.method == f"_resolve_flow_output__vertex_123__{out_name}"
test
1
{"function_name": "test_format_flow_outputs_creates_output_objects", "class_name": "TestRunFlowBaseComponentInputOutputHandling", "qualname": "TestRunFlowBaseComponentInputOutputHandling.test_format_flow_outputs_creates_output_objects", "file_path": "src/backend/tests/unit/base/tools/test_run_flow.py", "repo_id": "langflow-ai/langflow", "loc": 25, "tested_modules": ["uuid", "lfx.base.tools.run_flow", "lfx.graph.graph.base", "lfx.graph.vertex.base", "lfx.schema.data"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/jetmoe/modular_jetmoe.py:JetMoeAttention.__init__
# Context: import torch from .configuration_jetmoe import JetMoeConfig class JetMoeRMSNorm(MixtralRMSNorm): ... class JetMoeRotaryEmbedding(MixtralRotaryEmbedding): ... class JetMoeParallelExperts(nn.Module): ... class JetMoeTopKGating(nn.Module): ... class JetMoeMoE(nn.Module): ... class JetMoeMoA(nn.Module): ... class JetMoeDecoderLayer(LlamaDecoderLayer): ... class JetMoePreTrainedModel(MixtralPreTrainedModel): ... class JetMoeModel(MixtralModel): ... class JetMoeForCausalLM(JetMoePreTrainedModel, GenerationMixin): ... class JetMoeForSequenceClassification(GenericForSequenceClassification, JetMoePreTrainedModel): ... class JetMoeAttention(nn.Module): def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor | None, position_embeddings: torch.LongTensor | None, past_key_values: Cache | None, cache_position: torch.LongTensor | None, **kwargs) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]: ... # Task: Write a Python method `__init__` for the class `JetMoeAttention` to initialize the JetMoeAttention module. Parameters: config: JetMoeConfig, layer_idx: int | None
def __init__(self, config: JetMoeConfig, layer_idx: int | None = None):
    """Build a JetMoe attention layer.

    Args:
        config: Model configuration holding the attention hyperparameters.
        layer_idx: Position of this layer within the model stack; required
            for correct cache handling during generation.
    """
    super().__init__()
    self.config = config
    self.layer_idx = layer_idx
    self.is_causal = True

    if layer_idx is None:
        logger.warning_once(
            f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
            "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
            "when creating this class."
        )

    # JetMoe handles its own repeat pattern, so no KV grouping is applied here.
    self.num_key_value_groups = 1
    self.top_k = config.num_experts_per_tok
    self.attention_dropout = config.attention_dropout

    self.num_heads = config.num_attention_heads
    self.num_key_value_heads = config.num_key_value_heads
    self.head_dim = config.kv_channels
    self.scaling = self.head_dim**-0.5

    kv_width = config.kv_channels * config.num_key_value_heads
    self.kv_projection_size = kv_width

    # Query projection is realized through the mixture-of-attention experts;
    # keys and values share a single fused linear projection.
    self.experts = JetMoeMoA(config)
    self.kv_proj = torch.nn.Linear(config.hidden_size, kv_width * 2, bias=False)
function_simple
0
{"cognitive_complexity": 1, "loc": 32, "code_loc": 20, "docstring_loc": 9, "function_name": "__init__", "class_name": "JetMoeAttention", "qualname": "JetMoeAttention.__init__", "file_path": "src/transformers/models/jetmoe/modular_jetmoe.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}