sample_id
string
instruction
string
completion
string
category
string
label
int64
metadata
string
vllm-project/vllm:tests/entrypoints/test_responses_utils.py:TestShouldContinueFinalMessage.test_dict_without_status_returns_false
# Context: from vllm.entrypoints.openai.responses.utils import ( _construct_single_message_from_response_item, _maybe_combine_reasoning_and_tool_call, construct_chat_messages_with_tool_call, convert_tool_responses_to_completions_format, should_continue_final_message, ) class TestResponsesUtils: ... class TestMaybeCombineReasoningAndToolCall: ... class TestShouldContinueFinalMessage: def test_string_input_returns_false(self): ... def test_empty_list_returns_false(self): ... def test_completed_message_returns_false(self): ... def test_in_progress_message_returns_true(self): ... def test_incomplete_message_returns_true(self): ... def test_in_progress_reasoning_returns_true(self): ... def test_incomplete_reasoning_returns_true(self): ... def test_completed_reasoning_returns_false(self): ... def test_reasoning_with_none_status_returns_false(self): ... def test_only_last_item_matters(self): ... def test_tool_call_returns_false(self): ... def test_dict_in_progress_message_returns_true(self): ... def test_dict_incomplete_message_returns_true(self): ... def test_dict_completed_message_returns_false(self): ... def test_dict_reasoning_in_progress_returns_true(self): ... def test_dict_with_none_status_returns_false(self): ... # Task: Write a Python test method `test_dict_without_status_returns_false` in test class `TestShouldContinueFinalMessage` to dict without status field should not be continued. Module under test: openai.types.chat, openai.types.responses.response_function_tool_call, openai.types.responses.response_function_tool_call_output_item
def test_dict_without_status_returns_false(self):
    """Dict without status field should not be continued."""
    item = {
        "id": "msg_123",
        "type": "message",
        "role": "assistant",
        "content": [{"type": "output_text", "text": "Some text"}],
    }
    result = should_continue_final_message([item])
    assert result is False
test
1
{"function_name": "test_dict_without_status_returns_false", "class_name": "TestShouldContinueFinalMessage", "qualname": "TestShouldContinueFinalMessage.test_dict_without_status_returns_false", "file_path": "tests/entrypoints/test_responses_utils.py", "repo_id": "vllm-project/vllm", "loc": 9, "tested_modules": ["openai.types.chat", "openai.types.responses.response_function_tool_call", "openai.types.responses.response_function_tool_call_output_item", "openai.types.responses.response_output_message", "openai.types.responses.response_output_text"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:release/train_tests/pytorch_lightning/test_lightning.py:test_lightning_train_run
# Context: import os from ray.train.torch import TorchTrainer class ImageClassifier(pl.LightningModule): ... def train_func(): ... # Task: Write a Python test function `test_lightning_train_run` to verify the behavior of `lightning_train_run`. Module under test: torch.utils.data, torchvision.models, torchvision.datasets
def test_lightning_train_run():
    # [2] Declare worker count and GPU usage for the distributed run.
    scaling = ray.train.ScalingConfig(num_workers=4, use_gpu=True)

    # [3] Launch the distributed training job.
    # [3a] In a multi-node cluster, the run's persistent storage must be
    # reachable from every worker node.
    run_cfg = ray.train.RunConfig(storage_path="/mnt/cluster_storage/lightning_run")
    trainer = TorchTrainer(
        train_func,
        scaling_config=scaling,
        run_config=run_cfg,
    )
    fit_result: ray.train.Result = trainer.fit()

    # [4] Restore the trained model from the reported checkpoint.
    with fit_result.checkpoint.as_directory() as ckpt_dir:
        ckpt_path = os.path.join(
            ckpt_dir,
            ray.train.lightning.RayTrainReportCallback.CHECKPOINT_NAME,
        )
        model = ImageClassifier.load_from_checkpoint(ckpt_path)  # noqa: F841
test
0
{"function_name": "test_lightning_train_run", "class_name": null, "qualname": "test_lightning_train_run", "file_path": "release/train_tests/pytorch_lightning/test_lightning.py", "repo_id": "ray-project/ray", "loc": 25, "tested_modules": ["torch.utils.data", "torchvision.models", "torchvision.datasets", "torchvision.transforms", "ray.train.torch"], "has_docstring": false, "runnable_level": "file_runnable"}
apache/airflow:providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py:decrypt_remote_file
# Context: import logging from paramiko import SSHClient class TPTConfig: ... def execute_remote_command(ssh_client: SSHClient, command: str) -> tuple[int, str, str]: ... def write_file(path: str, content: str) -> None: ... def secure_delete(file_path: str, logger: logging.Logger | None) -> None: ... def remote_secure_delete(ssh_client: SSHClient, remote_files: list[str], logger: logging.Logger | None) -> None: ... def terminate_subprocess(sp: subprocess.Popen | None, logger: logging.Logger | None) -> None: ... def get_remote_os(ssh_client: SSHClient, logger: logging.Logger | None) -> str: ... def set_local_file_permissions(local_file_path: str, logger: logging.Logger | None) -> None: ... def _set_windows_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger) -> None: ... def _set_unix_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger) -> None: ... def set_remote_file_permissions(ssh_client: SSHClient, remote_file_path: str, logger: logging.Logger | None) -> None: ... def get_remote_temp_directory(ssh_client: SSHClient, logger: logging.Logger | None) -> str: ... def is_valid_file(file_path: str) -> bool: ... def verify_tpt_utility_installed(utility: str) -> None: ... def verify_tpt_utility_on_remote_host(ssh_client: SSHClient, utility: str, logger: logging.Logger | None) -> None: ... def prepare_tpt_ddl_script(sql: list[str], error_list: list[int] | None, source_conn: dict[str, Any], job_name: str | None) -> str: ... def prepare_tdload_job_var_file(mode: str, source_table: str | None, select_stmt: str | None, insert_stmt: str | None, target_table: str | None, source_file_name: str | None, target_file_name: str | None, source_format: str, target_format: str, source_text_delimiter: str, target_text_delimiter: str, source_conn: dict[str, Any], target_conn: dict[str, Any] | None) -> str: ... 
def is_valid_remote_job_var_file(ssh_client: SSHClient, remote_job_var_file_path: str, logger: logging.Logger | None) -> bool: ... def read_file(file_path: str, encoding: str) -> str: ... def transfer_file_sftp(ssh_client: SSHClient, local_path: str, remote_path: str, logger: logging.Logger | None) -> None: ... # Task: Write a Python function `decrypt_remote_file` to decrypt a remote file using OpenSSL. Parameters: ssh_client: SSHClient, remote_enc_file: str, remote_dec_file: str, password: str, logger: logging.Logger | None Returns: int
def decrypt_remote_file(
    ssh_client: SSHClient,
    remote_enc_file: str,
    remote_dec_file: str,
    password: str,
    logger: logging.Logger | None = None,
) -> int:
    """
    Decrypt a remote file using OpenSSL.

    :param ssh_client: SSH client connection
    :param remote_enc_file: Path to the encrypted file
    :param remote_dec_file: Path for the decrypted file
    :param password: Decryption password
    :param logger: Optional logger instance
    :return: Exit status of the decryption command
    :raises RuntimeError: If decryption fails
    """
    import shlex

    logger = logger or logging.getLogger(__name__)

    # Detect the remote OS so quoting matches the remote shell.
    remote_os = get_remote_os(ssh_client, logger)

    if remote_os == "windows":
        # cmd.exe-style quoting: double up embedded double quotes.
        password_escaped = password.replace('"', '""')
        decrypt_cmd = (
            f'openssl enc -d -aes-256-cbc -salt -pbkdf2 -pass pass:"{password_escaped}" '
            f'-in "{remote_enc_file}" -out "{remote_dec_file}"'
        )
    else:
        # POSIX shells: shlex.quote handles spaces and metacharacters in the
        # password AND in the file paths. (Bug fix: the paths were previously
        # interpolated unquoted, which broke on paths containing spaces or
        # shell metacharacters.)
        decrypt_cmd = (
            f"openssl enc -d -aes-256-cbc -salt -pbkdf2 "
            f"-pass pass:{shlex.quote(password)} "
            f"-in {shlex.quote(remote_enc_file)} -out {shlex.quote(remote_dec_file)}"
        )

    exit_status, _stdout, stderr_data = execute_remote_command(ssh_client, decrypt_cmd)
    if exit_status != 0:
        raise RuntimeError(
            f"Decryption failed with exit status {exit_status}. Error: {stderr_data if stderr_data else 'N/A'}"
        )

    logger.info("Successfully decrypted remote file %s to %s", remote_enc_file, remote_dec_file)
    return exit_status
function_complex
1
{"cognitive_complexity": 6, "loc": 48, "code_loc": 22, "docstring_loc": 11, "function_name": "decrypt_remote_file", "class_name": null, "qualname": "decrypt_remote_file", "file_path": "providers/teradata/src/airflow/providers/teradata/utils/tpt_util.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:doc/source/serve/tutorials/video-analysis/deployments/decoder.py:MultiDecoder._load_embeddings
# Context: import io import aioboto3 import numpy as np from utils.s3 import get_s3_region class MultiDecoder: async def __init__(self, bucket: str, s3_prefix: str = S3_EMBEDDINGS_PREFIX): """Initialize decoder with text embeddings from S3.""" self.bucket = bucket self.ema_alpha = EMA_ALPHA self.scene_threshold = SCENE_CHANGE_THRESHOLD self.s3_prefix = s3_prefix logger.info(f"MultiDecoder initializing (bucket={self.bucket}, ema_alpha={self.ema_alpha}, threshold={self.scene_threshold})") await self._load_embeddings() logger.info(f"MultiDecoder ready (tags={len(self.tag_texts)}, descriptions={len(self.desc_texts)})") def _cosine_similarity(self, embedding: np.ndarray, bank: np.ndarray) -> np.ndarray: ... def _get_top_tags(self, embedding: np.ndarray, top_k: int) -> list[dict]: ... def _get_retrieval_caption(self, embedding: np.ndarray) -> dict: ... def _detect_scene_changes(self, frame_embeddings: np.ndarray, chunk_index: int, chunk_start_time: float, chunk_duration: float, ema_state: np.ndarray | None) -> tuple[list[dict], np.ndarray]: ... def __call__(self, encoder_output: dict, chunk_index: int, chunk_start_time: float, chunk_duration: float, top_k_tags: int, ema_state: np.ndarray | None) -> dict: ... # Task: Write a Python async method `_load_embeddings` for the class `MultiDecoder` to load precomputed text embeddings from S3.
async def _load_embeddings(self):
    """Fetch precomputed tag and description text embeddings from S3."""
    session = aioboto3.Session(region_name=get_s3_region(self.bucket))
    async with session.client("s3") as s3:

        async def fetch_npz(key):
            # Download the object and decode it as a .npz archive in memory.
            obj = await s3.get_object(Bucket=self.bucket, Key=key)
            raw = await obj["Body"].read()
            return np.load(io.BytesIO(raw), allow_pickle=True)

        # Tag embeddings.
        tag_npz = await fetch_npz(f"{self.s3_prefix}tag_embeddings.npz")
        self.tag_embeddings = tag_npz["embeddings"]
        self.tag_texts = tag_npz["texts"].tolist()

        # Description embeddings.
        desc_npz = await fetch_npz(f"{self.s3_prefix}description_embeddings.npz")
        self.desc_embeddings = desc_npz["embeddings"]
        self.desc_texts = desc_npz["texts"].tolist()
function_simple
0
{"cognitive_complexity": 0, "loc": 20, "code_loc": 14, "docstring_loc": 1, "function_name": "_load_embeddings", "class_name": "MultiDecoder", "qualname": "MultiDecoder._load_embeddings", "file_path": "doc/source/serve/tutorials/video-analysis/deployments/decoder.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_agent_utils.py:TestSplitMessagesIntoChunks.test_single_chunk_when_under_limit
# Context: from typing import Any, Literal, Optional from crewai.utilities.agent_utils import ( _asummarize_chunks, _estimate_token_count, _extract_summary_tags, _format_messages_for_summary, _split_messages_into_chunks, convert_tools_to_openai_schema, parse_tool_call_args, summarize_messages, ) class CalculatorInput(BaseModel): ... class CalculatorTool(BaseTool): ... class SearchInput(BaseModel): ... class SearchTool(BaseTool): ... class NoSchemaTool(BaseTool): ... class TestConvertToolsToOpenaiSchema: ... def _make_mock_i18n() -> MagicMock: ... class MCPStyleInput(BaseModel): ... class MCPStyleTool(BaseTool): ... class TestOptionalFieldsPreserveNull: ... class TestSummarizeMessages: ... class TestFormatMessagesForSummary: ... class TestExtractSummaryTags: ... class TestEstimateTokenCount: ... class TestParallelSummarization: ... def _build_long_conversation() -> list[dict[str, Any]]: ... class TestParallelSummarizationVCR: ... class TestParseToolCallArgs: ... class TestSplitMessagesIntoChunks: def test_splits_at_message_boundaries(self) -> None: ... def test_excludes_system_messages(self) -> None: ... def test_empty_messages(self) -> None: ... def test_only_system_messages(self) -> None: ... def test_handles_none_content(self) -> None: ... # Task: Write a Python test method `test_single_chunk_when_under_limit` in test class `TestSplitMessagesIntoChunks` to verify the behavior of `single_chunk_when_under_limit`. Module under test: __future__, typing, pydantic
def test_single_chunk_when_under_limit(self) -> None:
    conversation: list[dict[str, Any]] = [
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hi"},
    ]
    result = _split_messages_into_chunks(conversation, max_tokens=1000)
    assert len(result) == 1
    assert len(result[0]) == 2
test
0
{"function_name": "test_single_chunk_when_under_limit", "class_name": "TestSplitMessagesIntoChunks", "qualname": "TestSplitMessagesIntoChunks.test_single_chunk_when_under_limit", "file_path": "lib/crewai/tests/utilities/test_agent_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 8, "tested_modules": ["__future__", "typing", "pydantic", "crewai.tools.base_tool", "crewai.utilities.agent_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
sansan0/TrendRadar:trendradar/core/scheduler.py:Scheduler._in_range
Write a Python method `_in_range` for the class `Scheduler` to 检查时间是否在范围内(支持跨日). Parameters: now_hhmm: str, start: str, end: str Returns: bool
def _in_range(now_hhmm: str, start: str, end: str) -> bool: """ 检查时间是否在范围内(支持跨日) Args: now_hhmm: 当前时间 HH:MM start: 开始时间 HH:MM end: 结束时间 HH:MM Returns: 是否在范围内 """ if start <= end: # 正常范围,如 08:00-09:00 return start <= now_hhmm <= end else: # 跨日范围,如 22:00-07:00 return now_hhmm >= start or now_hhmm <= end
function_simple
1
{"cognitive_complexity": 3, "loc": 18, "code_loc": 4, "docstring_loc": 11, "function_name": "_in_range", "class_name": "Scheduler", "qualname": "Scheduler._in_range", "file_path": "trendradar/core/scheduler.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "self_contained"}
mem0ai/mem0:mem0/vector_stores/neptune_analytics.py:NeptuneAnalyticsVector.list
# Context: from typing import Dict, List, Optional class OutputData(BaseModel): ... class NeptuneAnalyticsVector(VectorStoreBase): _COLLECTION_PREFIX = "MEM0_VECTOR_" _FIELD_N = 'n' _FIELD_ID = '~id' _FIELD_PROP = '~properties' _FIELD_SCORE = 'score' _FIELD_LABEL = 'label' _TIMEZONE = "UTC" def __init__( self, endpoint: str, collection_name: str, ): """ Initialize the Neptune Analytics vector store. Args: endpoint (str): Neptune Analytics endpoint in format 'neptune-graph://<graphid>'. collection_name (str): Name of the collection to store vectors. Raises: ValueError: If endpoint format is invalid. ImportError: If langchain_aws is not installed. """ if not endpoint.startswith("neptune-graph://"): raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.") graph_id = endpoint.replace("neptune-graph://", "") self.graph = NeptuneAnalyticsGraph(graph_id) self.collection_name = self._COLLECTION_PREFIX + collection_name def create_col(self, name, vector_size, distance): ... def insert(self, vectors: List[list], payloads: Optional[List[Dict]], ids: Optional[List[str]]): ... def search(self, query: str, vectors: List[float], limit: int, filters: Optional[Dict]) -> List[OutputData]: ... def delete(self, vector_id: str): ... def update(self, vector_id: str, vector: Optional[List[float]], payload: Optional[Dict]): ... def get(self, vector_id: str): ... def list_cols(self): ... def delete_col(self): ... def col_info(self): ... def reset(self): ... def _parse_query_responses(self, response: dict, with_score: bool): ... def execute_query(self, query_string: str, params): ... def _get_where_clause(filters: dict): ... def _get_node_filter_clause(filters: dict): ... def _process_success_message(response, context): ... # Task: Write a Python method `list` for the class `NeptuneAnalyticsVector` to list all vectors in the collection with optional filtering. Parameters: filters: Optional[Dict], limit: int Returns: List[OutputData]
def list(self, filters: Optional[Dict] = None, limit: int = 100) -> List[OutputData]:
    """
    List all vectors in the collection with optional filtering.

    Retrieves vectors from the collection, optionally filtered by metadata
    properties.

    Args:
        filters (Optional[Dict]): Optional filters to apply based on metadata.
        limit (int, optional): Maximum number of vectors to return. Defaults to 100.

    Returns:
        List[OutputData]: List of vectors with their metadata.
    """
    where_clause = self._get_where_clause(filters) if filters else ""
    para = {"limit": limit}
    query_string = f"""
        MATCH (n :{self.collection_name})
        {where_clause}
        RETURN n
        LIMIT $limit
    """
    query_response = self.execute_query(query_string, para)
    if len(query_response) > 0:  # Handle if there is no match.
        # Bug fix: _parse_query_responses requires a ``with_score`` argument;
        # the previous call omitted it and raised TypeError. Listing involves
        # no similarity search, so parse without scores.
        return [self._parse_query_responses(query_response, with_score=False)]
    # Preserve the "list wrapping the result list" shape on an empty match.
    return [[]]
function_simple
1
{"cognitive_complexity": 2, "loc": 30, "code_loc": 14, "docstring_loc": 12, "function_name": "list", "class_name": "NeptuneAnalyticsVector", "qualname": "NeptuneAnalyticsVector.list", "file_path": "mem0/vector_stores/neptune_analytics.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "file_runnable"}
fastapi/fastapi:tests/test_sse.py:test_post_method_sse
# Context: from fastapi.testclient import TestClient class Item(BaseModel): ... async def sse_items() -> AsyncIterable[Item]: ... def sse_items_sync() -> Iterable[Item]: ... async def sse_items_no_annotation(): ... def sse_items_sync_no_annotation(): ... async def sse_items_dict(): ... async def sse_items_event(): ... async def sse_items_mixed() -> AsyncIterable[Item]: ... async def sse_items_string(): ... async def sse_items_post() -> AsyncIterable[Item]: ... async def sse_items_raw(): ... async def stream_events(): ... def client_fixture(): ... def test_async_generator_with_model(client: TestClient): ... def test_sync_generator_with_model(client: TestClient): ... def test_async_generator_no_annotation(client: TestClient): ... def test_sync_generator_no_annotation(client: TestClient): ... def test_dict_items(client: TestClient): ... def test_sse_events_with_fields(client: TestClient): ... def test_mixed_plain_and_sse_events(client: TestClient): ... def test_string_data_json_encoded(client: TestClient): ... def test_server_sent_event_null_id_rejected(): ... def test_server_sent_event_negative_retry_rejected(): ... def test_server_sent_event_float_retry_rejected(): ... def test_raw_data_sent_without_json_encoding(client: TestClient): ... def test_data_and_raw_data_mutually_exclusive(): ... def test_sse_on_router_included_in_app(client: TestClient): ... async def slow_async_stream(): ... def slow_sync_stream(): ... def test_keepalive_ping_async(monkeypatch: pytest.MonkeyPatch): ... def test_keepalive_ping_sync(monkeypatch: pytest.MonkeyPatch): ... def test_no_keepalive_when_fast(client: TestClient): ... # Task: Write a Python test function `test_post_method_sse` to sSE should work with POST (needed for MCP compatibility). Module under test: collections.abc, fastapi, fastapi.responses
def test_post_method_sse(client: TestClient):
    """SSE should work with POST (needed for MCP compatibility)."""
    resp = client.post("/items/stream-post")

    assert resp.status_code == 200
    assert resp.headers["content-type"] == "text/event-stream; charset=utf-8"

    payload_lines = [
        ln for ln in resp.text.strip().split("\n") if ln.startswith("data: ")
    ]
    assert len(payload_lines) == 3
test
1
{"function_name": "test_post_method_sse", "class_name": null, "qualname": "test_post_method_sse", "file_path": "tests/test_sse.py", "repo_id": "fastapi/fastapi", "loc": 9, "tested_modules": ["collections.abc", "fastapi", "fastapi.responses", "fastapi.sse", "fastapi.testclient"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/integration/test_openai_responses_extended.py:test_openai_responses_long_input
# Context: import pytest from httpx import AsyncClient def load_env_vars(): ... async def create_global_variable(client: AsyncClient, headers, name, value, variable_type): ... async def load_and_prepare_flow(client: AsyncClient, created_api_key): ... async def load_and_prepare_agent_flow(client: AsyncClient, created_api_key): ... async def test_openai_responses_invalid_flow_id(client: AsyncClient, created_api_key): ... async def test_openai_responses_with_tools(client: AsyncClient, created_api_key): ... async def test_openai_responses_empty_input(client: AsyncClient, created_api_key): ... async def test_openai_responses_streaming_error_handling(client: AsyncClient, created_api_key): ... async def test_openai_responses_concurrent_requests(client: AsyncClient, created_api_key): ... async def test_openai_responses_unauthorized(client: AsyncClient): ... async def test_openai_responses_invalid_api_key(client: AsyncClient): ... async def test_openai_responses_malformed_request(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_interruption(client: AsyncClient, created_api_key): ... async def test_openai_responses_background_processing(client: AsyncClient, created_api_key): ... async def test_openai_responses_previous_response_id(client: AsyncClient, created_api_key): ... async def test_openai_responses_response_format(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_chunk_format(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_has_non_empty_content(client: AsyncClient, created_api_key): ... async def test_openai_responses_rate_limiting_simulation(client: AsyncClient, created_api_key): ... # Task: Write a Python test function `test_openai_responses_long_input` to test the OpenAI responses endpoint with very long input. Module under test: dotenv, httpx, lfx.log.logger
async def test_openai_responses_long_input(client: AsyncClient, created_api_key):
    """Test the OpenAI responses endpoint with very long input."""
    flow, headers = await load_and_prepare_flow(client, created_api_key)

    # Build an oversized prompt (~6000 characters).
    oversized_prompt = "Hello " * 1000
    body = {"model": flow["id"], "input": oversized_prompt, "stream": False}

    response = await client.post("/api/v1/responses", json=body, headers=headers)
    assert response.status_code == 200

    payload = response.json()
    if "error" not in payload:
        assert "id" in payload
        assert "output" in payload
        assert isinstance(payload["output"], str)
test
1
{"function_name": "test_openai_responses_long_input", "class_name": null, "qualname": "test_openai_responses_long_input", "file_path": "src/backend/tests/integration/test_openai_responses_extended.py", "repo_id": "langflow-ai/langflow", "loc": 17, "tested_modules": ["dotenv", "httpx", "lfx.log.logger", "tests.api_keys", "tests.api_keys"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/services/mcp_composer/service.py:MCPComposerService._wait_for_process_exit
# Context: import asyncio class MCPComposerError(Exception): ... class MCPComposerPortError(MCPComposerError): ... class MCPComposerConfigError(MCPComposerError): ... class MCPComposerDisabledError(MCPComposerError): ... class MCPComposerStartupError(MCPComposerError): ... def require_composer_enabled(func: Callable) -> Callable: ... class MCPComposerService(Service): name = "mcp_composer_service" def __init__(self): super().__init__() self.project_composers: dict[ str, dict ] = {} # project_id -> {process, host, port, streamable_http_url, auth_config} self._start_locks: dict[ str, asyncio.Lock ] = {} # Lock to prevent concurrent start operations for the same project self._active_start_tasks: dict[ str, asyncio.Task ] = {} # Track active start tasks to cancel them when new request arrives self._port_to_project: dict[int, str] = {} # Track which project is using which port self._pid_to_project: dict[int, str] = {} # Track which PID belongs to which project self._last_errors: dict[str, str] = {} # Track last error message per project for UI display def get_last_error(self, project_id: str) -> str | None: ... def set_last_error(self, project_id: str, error_message: str) -> None: ... def clear_last_error(self, project_id: str) -> None: ... def _is_port_available(self, port: int, host: str) -> bool: ... async def _kill_process_on_port(self, port: int) -> bool: ... async def _kill_zombie_mcp_processes(self, port: int) -> bool: ... def _is_port_used_by_another_project(self, port: int, current_project_id: str) -> tuple[bool, str | None]: ... async def start(self): ... async def stop(self): ... async def stop_project_composer(self, project_id: str): ... async def _do_stop_project_composer(self, project_id: str): ... async def _read_process_output_and_extract_error(self, process: subprocess.Popen, oauth_server_url: str | None, timeout: float, stdout_file, stderr_file) -> tuple[str, str, str]: ... async def _read_stream_non_blocking(self, stream, stream_name: str) -> str: ... 
async def _ensure_port_available(self, port: int, current_project_id: str) -> None: ... async def _log_startup_error_details(self, project_id: str, cmd: list[str], host: str, port: int, stdout: str, stderr: str, error_msg: str, exit_code: int | None, pid: int | None) -> None: ... def _validate_oauth_settings(self, auth_config: dict[str, Any]) -> None: ... def _normalize_config_value(value: Any) -> Any: ... def _has_auth_config_changed(self, existing_auth: dict[str, Any] | None, new_auth: dict[str, Any] | None) -> bool: ... def _obfuscate_command_secrets(self, cmd: list[str]) -> list[str]: ... def _extract_error_message(self, stdout_content: str, stderr_content: str, oauth_server_url: str | None) -> str: ... async def start_project_composer(self, project_id: str, streamable_http_url: str, auth_config: dict[str, Any] | None, max_retries: int, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> None: ... async def _do_start_project_composer(self, project_id: str, streamable_http_url: str, auth_config: dict[str, Any] | None, max_retries: int, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> None: ... async def _start_project_composer_process(self, project_id: str, host: str, port: int, streamable_http_url: str, auth_config: dict[str, Any] | None, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> subprocess.Popen: ... def get_project_composer_port(self, project_id: str) -> int | None: ... async def teardown(self) -> None: ... # Task: Write a Python async method `_wait_for_process_exit` for the class `MCPComposerService` to wait for a process to exit. Parameters: process
async def _wait_for_process_exit(self, process): """Wait for a process to exit.""" await asyncio.to_thread(process.wait)
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "_wait_for_process_exit", "class_name": "MCPComposerService", "qualname": "MCPComposerService._wait_for_process_exit", "file_path": "src/lfx/src/lfx/services/mcp_composer/service.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "slib_runnable"}
browser-use/browser-use:browser_use/dom/serializer/paint_order.py:RectUnionPure._split_diff
# Context: class Rect: ... class PaintOrderRemover: ... class RectUnionPure: __slots__ = ('_rects',) def __init__(self): self._rects: list[Rect] = [] def contains(self, r: Rect) -> bool: ... def add(self, r: Rect) -> bool: ... # Task: Write a Python method `_split_diff` for the class `RectUnionPure` to return list of up to 4 rectangles = a \ b. Parameters: a: Rect, b: Rect Returns: list[Rect]
def _split_diff(self, a: Rect, b: Rect) -> list[Rect]:
    r"""
    Return list of up to 4 rectangles = a \ b.

    Assumes a intersects b. Pieces are emitted in the fixed order
    bottom, top, left, right.
    """
    pieces: list[Rect] = []

    # Horizontal band of ``a`` below ``b``.
    if a.y1 < b.y1:
        pieces.append(Rect(a.x1, a.y1, a.x2, b.y1))
    # Horizontal band of ``a`` above ``b``.
    if b.y2 < a.y2:
        pieces.append(Rect(a.x1, b.y2, a.x2, a.y2))

    # The remaining strips live inside the shared y-overlap
    # [max(a.y1, b.y1), min(a.y2, b.y2)].
    overlap_lo = max(a.y1, b.y1)
    overlap_hi = min(a.y2, b.y2)
    # Vertical strip of ``a`` left of ``b``.
    if a.x1 < b.x1:
        pieces.append(Rect(a.x1, overlap_lo, b.x1, overlap_hi))
    # Vertical strip of ``a`` right of ``b``.
    if b.x2 < a.x2:
        pieces.append(Rect(b.x2, overlap_lo, a.x2, overlap_hi))

    return pieces
function_simple
0
{"cognitive_complexity": 4, "loc": 26, "code_loc": 12, "docstring_loc": 4, "function_name": "_split_diff", "class_name": "RectUnionPure", "qualname": "RectUnionPure._split_diff", "file_path": "browser_use/dom/serializer/paint_order.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestStripUnsupportedFormats.test_keeps_date
# Context: from copy import deepcopy from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestRequiredOptional: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestBuildRichFieldDescription: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestStripUnsupportedFormats: def test_removes_email_format(self) -> None: ... def test_keeps_date_time(self) -> None: ... def test_removes_uri_format(self) -> None: ... def test_recursive(self) -> None: ... # Task: Write a Python test method `test_keeps_date` in test class `TestStripUnsupportedFormats` to verify the behavior of `keeps_date`. Module under test: __future__, copy, typing
def test_keeps_date(self) -> None:
    original = {"type": "string", "format": "date"}
    stripped = strip_unsupported_formats(deepcopy(original))
    assert stripped["format"] == "date"
test
0
{"function_name": "test_keeps_date", "class_name": "TestStripUnsupportedFormats", "qualname": "TestStripUnsupportedFormats.test_keeps_date", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py:convert_and_write_model
# Context: import os import torch from safetensors.torch import load_file from transformers import ( GenerationConfig, Ministral3Config, Ministral3ForCausalLM, Mistral3Config, Mistral3ForConditionalGeneration, PixtralImageProcessorFast, PixtralProcessor, PixtralVisionConfig, ) from transformers.integrations.finegrained_fp8 import replace_with_fp8_linear def get_sd_mapping(has_vision: bool) -> dict: ... def map_old_key_to_new(old_key, mapping): ... def read_json(path): ... def permute_for_rope(tensor, n_heads, dim1, dim2): ... def convert_state_dict(original_state_dict: dict, config: Mistral3Config): ... def convert_config(original_config: dict, max_position_embeddings: int, is_vision: bool): ... def convert_and_write_processor_and_tokenizer(input_dir: str, output_dir: str, model_config: Mistral3Config | Ministral3ForCausalLM): ... def main(): ... # Task: Write a Python function `convert_and_write_model` to convert the model and save it (this implicitly save the config as well). Parameters: input_dir: str, output_dir: str, max_position_embeddings: int
def convert_and_write_model(input_dir: str, output_dir: str, max_position_embeddings: int): """Convert the model and save it (this implicitly save the config as well).""" params = read_json(os.path.join(input_dir, "params.json")) is_vision = params.get("vision_encoder") is not None config = convert_config(params, max_position_embeddings, is_vision) full_state_dict = {} # The model may be split between different files, but a single nn.Module is always fully present in a single file shards = [file for file in os.listdir(input_dir) if file.endswith(".safetensors")] for shard_file in shards: original_state_dict = load_file(os.path.join(input_dir, shard_file)) new_dict = convert_state_dict(original_state_dict, config) full_state_dict.update(new_dict) text_config = config.text_config if is_vision else config if text_config.tie_word_embeddings: model_key = "model.language_model" if is_vision else "model" full_state_dict["lm_head.weight"] = full_state_dict[f"{model_key}.embed_tokens.weight"] # Load weights into model and resave them with torch.device("meta"): if isinstance(config, Mistral3Config): model = Mistral3ForConditionalGeneration(config) elif isinstance(config, Ministral3Config): model = Ministral3ForCausalLM(config) else: raise ValueError(f"Unknown config type {type(config)}.") # let's swap nn.Linear to FP8 Linear before loading if hasattr(model.config, "quantization_config"): model = replace_with_fp8_linear( model, model.config.quantization_config.modules_to_not_convert, model.config.quantization_config ) model.load_state_dict(full_state_dict, strict=True, assign=True) model.save_pretrained(output_dir) return config
function_complex
0
{"cognitive_complexity": 11, "loc": 38, "code_loc": 27, "docstring_loc": 1, "function_name": "convert_and_write_model", "class_name": null, "qualname": "convert_and_write_model", "file_path": "src/transformers/models/ministral3/convert_ministral3_weights_to_hf.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py:TestReservedPaths.test_reserved_path_returns_404
# Context: from starlette.testclient import TestClient def static_app(tmp_path: Path) -> Iterator[TestClient]: ... class TestStreamlitStaticFiles: ... class TestWithBaseUrl: ... class TestDoubleSlashProtection: ... class TestTrailingSlashRedirect: ... class TestCacheHeadersOnRedirects: ... class TestReservedPaths: def test_reserved_paths_constant(self) -> None: ... def test_reserved_path_host_config_returns_404(self, static_app: TestClient) -> None: ... def test_user_path_ending_with_reserved_suffix_returns_404(self, static_app: TestClient) -> None: ... def test_user_path_custom_stcore_returns_404(self, static_app: TestClient) -> None: ... def test_nested_reserved_path_returns_404(self, static_app: TestClient) -> None: ... # Task: Write a Python test method `test_reserved_path_returns_404` in test class `TestReservedPaths` to test that reserved paths return 404 instead of SPA fallback. Module under test: __future__, typing, starlette.applications
def test_reserved_path_returns_404(self, static_app: TestClient) -> None: """Test that reserved paths return 404 instead of SPA fallback.""" response = static_app.get("/_stcore/health") assert response.status_code == 404
test
1
{"function_name": "test_reserved_path_returns_404", "class_name": "TestReservedPaths", "qualname": "TestReservedPaths.test_reserved_path_returns_404", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_static_routes_test.py", "repo_id": "streamlit/streamlit", "loc": 5, "tested_modules": ["__future__", "typing", "starlette.applications", "starlette.routing", "starlette.testclient"], "has_docstring": true, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/tests/unit/test_average_calculator.py:test_calcuate_time_window_average
# Context: from ray.data._internal.average_calculator import TimeWindowAverageCalculator def current_time(): ... # Task: Write a Python test function `test_calcuate_time_window_average` to test TimeWindowAverageCalculator. Module under test: ray.data._internal.average_calculator
def test_calcuate_time_window_average(current_time): """Test TimeWindowAverageCalculator.""" window_s = 10 values_to_report = [i + 1 for i in range(20)] calculator = TimeWindowAverageCalculator(window_s) assert calculator.get_average() is None for value in values_to_report: # Report values, test `get_average`. # and proceed the time by 1 second each time. calculator.report(value) avg = calculator.get_average() values_in_window = values_to_report[ max(current_time.get_value() - 10, 0) : current_time.get_value() + 1 ] expected = sum(values_in_window) / len(values_in_window) assert avg == expected, current_time.get_value() current_time.increment() for _ in range(10): # Keep proceeding the time, and test `get_average`. avg = calculator.get_average() values_in_window = values_to_report[max(current_time.get_value() - 10, 0) : 20] expected = sum(values_in_window) / len(values_in_window) assert avg == expected, current_time.get_value() current_time.increment() # Now no values in the time window, `get_average` should return None. assert calculator.get_average() is None
test
0
{"function_name": "test_calcuate_time_window_average", "class_name": null, "qualname": "test_calcuate_time_window_average", "file_path": "python/ray/data/tests/unit/test_average_calculator.py", "repo_id": "ray-project/ray", "loc": 30, "tested_modules": ["ray.data._internal.average_calculator"], "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:tests/ci/test_structured_extraction.py:TestExtractStructured.test_structured_extraction_returns_json
# Context: import asyncio import json import tempfile from browser_use.agent.views import ActionResult from browser_use.filesystem.file_system import FileSystem from browser_use.tools.service import Tools class TestSchemaDictToPydanticModel: ... class TestExtractionResult: ... def _make_extraction_llm(structured_response: dict | None, freetext_response: str) -> BaseChatModel: ... async def browser_session(): ... def http_server(): ... def base_url(http_server): ... class TestExtractionSchemaInjection: ... class TestExtractStructured: async def test_freetext_extraction_unchanged(self, browser_session, base_url): ... async def test_invalid_schema_falls_back_to_freetext(self, browser_session, base_url): ... # Task: Write a Python test method `test_structured_extraction_returns_json` in test class `TestExtractStructured` to when output_schema is provided, extract returns structured JSON in <structured_result> tags. Module under test: pydantic, browser_use.agent.views, browser_use.browser
async def test_structured_extraction_returns_json(self, browser_session, base_url): """When output_schema is provided, extract returns structured JSON in <structured_result> tags.""" tools = Tools() await tools.navigate(url=f'{base_url}/products', new_tab=False, browser_session=browser_session) await asyncio.sleep(0.5) output_schema = { 'type': 'object', 'properties': { 'products': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'name': {'type': 'string'}, 'price': {'type': 'number'}, }, 'required': ['name', 'price'], }, }, }, 'required': ['products'], } mock_data = {'products': [{'name': 'Widget A', 'price': 9.99}, {'name': 'Widget B', 'price': 19.99}]} extraction_llm = _make_extraction_llm(structured_response=mock_data) with tempfile.TemporaryDirectory() as tmp: fs = FileSystem(tmp) result = await tools.extract( query='List all products with prices', output_schema=output_schema, browser_session=browser_session, page_extraction_llm=extraction_llm, file_system=fs, ) assert isinstance(result, ActionResult) assert result.extracted_content is not None assert '<structured_result>' in result.extracted_content assert '</structured_result>' in result.extracted_content # Parse the JSON out of the tags start = result.extracted_content.index('<structured_result>') + len('<structured_result>') end = result.extracted_content.index('</structured_result>') parsed = json.loads(result.extracted_content[start:end].strip()) assert parsed == mock_data # Metadata assert result.metadata is not None assert result.metadata['structured_extraction'] is True meta = result.metadata['extraction_result'] assert meta['data'] == mock_data assert meta['schema_used'] == output_schema
test
0
{"function_name": "test_structured_extraction_returns_json", "class_name": "TestExtractStructured", "qualname": "TestExtractStructured.test_structured_extraction_returns_json", "file_path": "tests/ci/test_structured_extraction.py", "repo_id": "browser-use/browser-use", "loc": 54, "tested_modules": ["pydantic", "browser_use.agent.views", "browser_use.browser", "browser_use.filesystem.file_system", "browser_use.llm.base"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:airflow-core/tests/unit/models/test_log.py:TestLogTaskInstanceReproduction.test_log_task_instance_join_correctness
# Context: from sqlalchemy import select from sqlalchemy.orm import joinedload from airflow.models.log import Log from airflow.operators.empty import EmptyOperator from airflow.utils.state import TaskInstanceState class TestLogTaskInstanceReproduction: # Task: Write a Python test method `test_log_task_instance_join_correctness` in test class `TestLogTaskInstanceReproduction` to verify the behavior of `log_task_instance_join_correctness`. Module under test: __future__, sqlalchemy, sqlalchemy.orm
def test_log_task_instance_join_correctness(self, dag_maker, session): # Create dag_1 with a task with dag_maker("dag_1", session=session): EmptyOperator(task_id="common_task_id") dr1 = dag_maker.create_dagrun() ti1 = dr1.get_task_instance("common_task_id") ti1.state = TaskInstanceState.SUCCESS session.merge(ti1) session.commit() # Create dag_2 with the SAME task_id with dag_maker("dag_2", session=session): EmptyOperator(task_id="common_task_id") dr2 = dag_maker.create_dagrun() ti2 = dr2.get_task_instance("common_task_id") ti2.state = TaskInstanceState.FAILED session.merge(ti2) session.commit() # Create a log entry specifically for dag_1's task instance log = Log( event="test_event", task_instance=ti1, ) session.add(log) session.commit() # Query with joinedload to trigger the relationship join stmt = select(Log).where(Log.id == log.id).options(joinedload(Log.task_instance)) loaded_log = session.scalar(stmt) assert loaded_log.task_instance is not None assert loaded_log.task_instance.dag_id == "dag_1" assert loaded_log.task_instance.run_id == ti1.run_id # Verify incorrect join for second dag log2 = Log( event="test_event_2", task_instance=ti2, ) session.add(log2) session.commit() stmt2 = select(Log).where(Log.id == log2.id).options(joinedload(Log.task_instance)) loaded_log2 = session.scalar(stmt2) # This should fail if the join is ambiguous and picks the first one (dag_1) assert loaded_log2.task_instance is not None assert loaded_log2.task_instance.dag_id == "dag_2" assert loaded_log2.task_instance.run_id == ti2.run_id
test
1
{"function_name": "test_log_task_instance_join_correctness", "class_name": "TestLogTaskInstanceReproduction", "qualname": "TestLogTaskInstanceReproduction.test_log_task_instance_join_correctness", "file_path": "airflow-core/tests/unit/models/test_log.py", "repo_id": "apache/airflow", "loc": 53, "tested_modules": ["__future__", "sqlalchemy", "sqlalchemy.orm", "airflow.models.log", "airflow.operators.empty"], "has_docstring": false, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/memory/encoding_flow.py:EncodingFlow.batch_embed
# Context: from crewai.flow.flow import Flow, listen, start from crewai.memory.types import MemoryConfig, MemoryRecord, embed_texts class ItemState(BaseModel): ... class EncodingState(BaseModel): ... class EncodingFlow(Flow[EncodingState]): initial_state = EncodingState def __init__( self, storage: Any, llm: Any, embedder: Any, config: MemoryConfig | None = None, ) -> None: super().__init__(suppress_flow_events=True) self._storage = storage self._llm = llm self._embedder = embedder self._config = config or MemoryConfig() def intra_batch_dedup(self) -> None: ... def _cosine_similarity(a: list[float], b: list[float]) -> float: ... def parallel_find_similar(self) -> None: ... def parallel_analyze(self) -> None: ... def _apply_defaults(self, item: ItemState) -> None: ... def execute_plans(self) -> None: ... # Task: Write a Python method `batch_embed` for the class `EncodingFlow` to embed all items in a single embedder call. Returns: None
def batch_embed(self) -> None: """Embed all items in a single embedder call.""" items = list(self.state.items) texts = [item.content for item in items] embeddings = embed_texts(self._embedder, texts) for item, emb in zip(items, embeddings, strict=False): item.embedding = emb
function_simple
0
{"cognitive_complexity": 1, "loc": 7, "code_loc": 5, "docstring_loc": 1, "function_name": "batch_embed", "class_name": "EncodingFlow", "qualname": "EncodingFlow.batch_embed", "file_path": "lib/crewai/src/crewai/memory/encoding_flow.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/ovis2/image_processing_ovis2.py:get_all_supported_aspect_ratios
# Context: from functools import lru_cache class Ovis2ImageProcessorKwargs(ImagesKwargs): ... def get_optimal_tiled_canvas(original_image_size: tuple[int, int], target_tile_size: tuple[int, int], min_image_tiles: int, max_image_tiles: int) -> tuple[int, int]: ... def compute_patch_covering_area(left: int, upper: int, right: int, lower: int, side: int) -> float: ... def split_image_into_grid(h: int, w: int, grid: tuple[int, int]) -> list[tuple[int, int, int, int]]: ... def get_min_tile_covering_grid(image_size: tuple[int, int], target_patch_size: int, max_image_tiles: int, covering_threshold: float) -> tuple[int, int]: ... class Ovis2ImageProcessor(BaseImageProcessor): ... # Task: Write a Python function `get_all_supported_aspect_ratios` to computes all allowed aspect ratios for a given minimum and maximum number of input tiles. Parameters: min_image_tiles: int, max_image_tiles: int Returns: list[tuple[int, int]]
def get_all_supported_aspect_ratios(min_image_tiles: int, max_image_tiles: int) -> list[tuple[int, int]]: """ Computes all allowed aspect ratios for a given minimum and maximum number of input tiles. This function calculates all possible arrangements of tiles that can be formed within the constraint of the minimum and maximum number of tiles. Each arrangement is represented by its aspect ratio (width/height) and the corresponding tile configuration. Args: min_image_tiles (`int`): The minimum number of tiles allowed. max_image_tiles (`int`): The maximum number of tiles allowed. Returns: `List[Tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height) configuration in terms of number of tiles. Example: >>> get_all_supported_aspect_ratios(1, 4) [(1, 1), (1, 2), (2, 1), (1, 3), (3, 1), (1, 4), (2, 2), (4, 1)] """ aspect_ratios = [] for width in range(1, max_image_tiles + 1): for height in range(1, max_image_tiles + 1): if width * height <= max_image_tiles and width * height >= min_image_tiles: aspect_ratios.append((width, height)) aspect_ratios = sorted(aspect_ratios, key=lambda x: x[0] * x[1]) return aspect_ratios
function_complex
0
{"cognitive_complexity": 7, "loc": 32, "code_loc": 7, "docstring_loc": 22, "function_name": "get_all_supported_aspect_ratios", "class_name": null, "qualname": "get_all_supported_aspect_ratios", "file_path": "src/transformers/models/ovis2/image_processing_ovis2.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_code_interpreter_tool.py:test_unsafe_mode_running_unsafe_code
# Context: from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import ( CodeInterpreterTool, SandboxPython, ) def printer_mock(): ... def docker_unavailable_mock(): ... def test_run_code_in_docker(docker_mock, printer_mock): ... def test_run_code_in_docker_with_error(docker_mock, printer_mock): ... def test_run_code_in_docker_with_script(docker_mock, printer_mock): ... def test_restricted_sandbox_basic_code_execution(printer_mock, docker_unavailable_mock): ... def test_restricted_sandbox_running_with_blocked_modules(printer_mock, docker_unavailable_mock): ... def test_restricted_sandbox_running_with_blocked_builtins(printer_mock, docker_unavailable_mock): ... def test_restricted_sandbox_running_with_no_result_variable(printer_mock, docker_unavailable_mock): ... def test_unsafe_mode_running_with_no_result_variable(printer_mock, docker_unavailable_mock): ... # Task: Write a Python test function `test_unsafe_mode_running_unsafe_code` to test behavior when no result variable is set. Module under test: crewai_tools.tools.code_interpreter_tool.code_interpreter_tool
def test_unsafe_mode_running_unsafe_code(printer_mock, docker_unavailable_mock): """Test behavior when no result variable is set.""" tool = CodeInterpreterTool(unsafe_mode=True) code = """ import os os.system("ls -la") result = eval("5/1") """ result = tool.run(code=code, libraries_used=[]) printer_mock.assert_called_with( "WARNING: Running code in unsafe mode", color="bold_magenta" ) assert 5.0 == result
test
0
{"function_name": "test_unsafe_mode_running_unsafe_code", "class_name": null, "qualname": "test_unsafe_mode_running_unsafe_code", "file_path": "lib/crewai-tools/tests/tools/test_code_interpreter_tool.py", "repo_id": "crewAIInc/crewAI", "loc": 13, "tested_modules": ["crewai_tools.tools.code_interpreter_tool.code_interpreter_tool"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/standard/tests/unit/standard/operators/test_hitl.py:TestHITLOperator.test_validate_params_input_with_invalid_input
# Context: import pytest from typing import TYPE_CHECKING, Any from airflow.providers.common.compat.sdk import AirflowException, DownstreamTasksSkipped, ParamValidationError from airflow.providers.standard.operators.hitl import ( ApprovalOperator, HITLBranchOperator, HITLEntryOperator, HITLOperator, ) from airflow.sdk import Param, timezone from airflow.sdk.definitions.param import ParamsDict def hitl_task_and_ti_for_generating_link(dag_maker: DagMaker) -> tuple[HITLOperator, TaskInstance]: ... def get_context_from_model_ti(mock_supervisor_comms: Any) -> Any: ... class TestApprovalOperator: ... class TestHITLEntryOperator: ... class TestHITLBranchOperator: ... class TestHITLSummaryForListeners: ... class TestHITLOperator: def test_validate_options(self) -> None: ... def test_validate_options_with_empty_options(self) -> None: ... def test_validate_params(self, params: ParamsDict, exc: type[ValueError | ParamValidationError], error_msg: str) -> None: ... def test_validate_defaults(self) -> None: ... def test_validate_defaults_with_invalid_defaults(self, extra_kwargs: dict[str, Any], expected_error_msg: str) -> None: ... def test_execute(self, dag_maker: DagMaker, session: Session) -> None: ... def test_serialzed_params(self, input_params: ParamsDict | dict[str, Any] | None, expected_params: dict[str, Any]) -> None: ... def test_serialzed_params_legacy(self) -> None: ... def test_execute_complete(self) -> None: ... def test_process_trigger_event_error(self, event: dict[str, Any], expected_exception: type[Exception]) -> None: ... def test_validate_chosen_options_with_invalid_content(self) -> None: ... def test_generate_link_to_ui(self, base_url: str, conf_base_url: str, options: list[str] | None, params_input: dict[str, Any] | None, expected_parsed_query: dict[str, list[str]], hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ... 
def test_generate_link_to_ui_with_invalid_input(self, options: list[Any] | None, params_input: dict[str, Any] | None, expected_err_msg: str, hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ... def test_generate_link_to_ui_without_base_url(self, hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ... # Task: Write a Python test method `test_validate_params_input_with_invalid_input` in test class `TestHITLOperator` to verify the behavior of `validate_params_input_with_invalid_input`. Module under test: __future__, uuid, tests_common.test_utils.version_compat
def test_validate_params_input_with_invalid_input( self, params: ParamsDict, params_input: dict[str, Any], exc: type[ValueError | ParamValidationError], error_msg: str, ) -> None: hitl_op = HITLOperator( task_id="hitl_test", subject="This is subject", body="This is body", options=["1", "2", "3", "4", "5"], params=params, ) with pytest.raises(exc, match=error_msg): hitl_op.execute_complete( context={}, event={ "chosen_options": ["1"], "params_input": params_input, "responded_by_user": {"id": "test", "name": "test"}, }, )
test
1
{"function_name": "test_validate_params_input_with_invalid_input", "class_name": "TestHITLOperator", "qualname": "TestHITLOperator.test_validate_params_input_with_invalid_input", "file_path": "providers/standard/tests/unit/standard/operators/test_hitl.py", "repo_id": "apache/airflow", "loc": 24, "tested_modules": ["__future__", "uuid", "tests_common.test_utils.version_compat", "typing", "urllib.parse"], "has_docstring": false, "runnable_level": "project_runnable"}
apache/airflow:airflow-core/tests/unit/api_fastapi/execution_api/versions/v2025_09_23/test_asset_events.py:test_asset_events
# Context: from datetime import datetime import pytest from airflow._shared.timezones import timezone from airflow.models.asset import AssetActive, AssetAliasModel, AssetEvent, AssetModel def ver_client(client): ... def test_asset(session): ... def test_asset_alias(session, test_asset_events, test_asset): ... class TestGetAssetEventByAsset: ... # Task: Write a Python test function `test_asset_events` to verify the behavior of `asset_events`. Module under test: __future__, datetime, airflow._shared.timezones
def test_asset_events(session): def make_timestamp(day): return datetime(2021, 1, day, tzinfo=timezone.utc) common = { "asset_id": 1, "extra": {"foo": "bar"}, "source_dag_id": "foo", "source_task_id": "bar", "source_run_id": "custom", "source_map_index": -1, "partition_key": None, } events = [AssetEvent(id=i, timestamp=make_timestamp(i), **common) for i in (1, 2, 3)] session.add_all(events) session.commit() yield events for event in events: session.delete(event) session.commit()
test
1
{"function_name": "test_asset_events", "class_name": null, "qualname": "test_asset_events", "file_path": "airflow-core/tests/unit/api_fastapi/execution_api/versions/v2025_09_23/test_asset_events.py", "repo_id": "apache/airflow", "loc": 22, "tested_modules": ["__future__", "datetime", "airflow._shared.timezones", "airflow.models.asset"], "has_docstring": false, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/streamlit/components/v2/manifest_scanner.py:ComponentConfig.from_dict
# Context: from typing import Any, Final def _normalize_package_name(dist_name: str) -> str: ... class ComponentManifest: ... def _is_likely_streamlit_component_package(dist: importlib.metadata.Distribution) -> bool: ... def _find_package_pyproject_toml(dist: importlib.metadata.Distribution) -> Path | None: ... def _pyproject_via_read_text(dist: importlib.metadata.Distribution) -> Path | None: ... def _pyproject_via_dist_files(dist: importlib.metadata.Distribution) -> Path | None: ... def _pyproject_via_import_spec(dist: importlib.metadata.Distribution, package_name: str) -> Path | None: ... def _validate_pyproject_for_package(pyproject_path: Path, dist_name: str, package_name: str) -> bool: ... def _load_pyproject(pyproject_path: Path) -> dict[str, Any] | None: ... def _extract_components(pyproject_data: dict[str, Any]) -> list[dict[str, Any]] | None: ... def _resolve_package_root(dist: importlib.metadata.Distribution, package_name: str, pyproject_path: Path) -> Path: ... def _derive_project_metadata(pyproject_data: dict[str, Any], dist: importlib.metadata.Distribution) -> tuple[str, str]: ... def _process_single_package(dist: importlib.metadata.Distribution) -> tuple[ComponentManifest, Path] | None: ... def scan_component_manifests(max_workers: int | None) -> list[tuple[ComponentManifest, Path]]: ... class ComponentConfig: def parse_or_none(config: dict[str, Any]) -> ComponentConfig | None: ... def resolve_asset_root(self, package_root: Path) -> Path | None: ... # Task: Write a Python method `from_dict` for the class `ComponentConfig` to create a ComponentConfig from a raw dict. Parameters: config: dict[str, Any] Returns: ComponentConfig
def from_dict(config: dict[str, Any]) -> ComponentConfig: """Create a ComponentConfig from a raw dict. Parameters ---------- config Raw component dictionary parsed from TOML. Returns ------- ComponentConfig Parsed and validated component configuration. """ name_value = config.get("name") if not isinstance(name_value, str) or not name_value: # Fail closed: invalid component entry raise ValueError("Component entry missing required 'name' field") asset_dir_value = config.get("asset_dir") if asset_dir_value is not None and not isinstance(asset_dir_value, str): # Fail closed: invalid asset_dir value raise ValueError("'asset_dir' must be a string") return ComponentConfig( name=name_value, asset_dir=asset_dir_value, )
function_simple
1
{"cognitive_complexity": 4, "loc": 27, "code_loc": 10, "docstring_loc": 12, "function_name": "from_dict", "class_name": "ComponentConfig", "qualname": "ComponentConfig.from_dict", "file_path": "lib/streamlit/components/v2/manifest_scanner.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_level": "file_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/interface/initialize/loading.py:instantiate_class
# Context: from typing import TYPE_CHECKING, Any from lfx.custom.eval import eval_custom_component_code from lfx.log.logger import logger from lfx.custom.custom_component.component import Component from lfx.custom.custom_component.custom_component import CustomComponent from lfx.graph.vertex.base import Vertex async def get_instance_results(custom_component, custom_params: dict, vertex: Vertex, fallback_to_env_vars: bool, base_type: str): ... def get_params(vertex_params): ... def convert_params_to_sets(params): ... def convert_kwargs(params): ... def load_from_env_vars(params, load_from_db_fields, context): ... async def update_table_params_with_load_from_db_fields(custom_component: CustomComponent, params: dict, table_field_name: str, fallback_to_env_vars: bool) -> dict: ... async def update_params_with_load_from_db_fields(custom_component: CustomComponent, params, load_from_db_fields, fallback_to_env_vars): ... async def build_component(params: dict, custom_component: Component): ... async def build_custom_component(params: dict, custom_component: CustomComponent): ... # Task: Write a Python function `instantiate_class` to instantiate class from module type and key, and params. Parameters: vertex: Vertex, user_id, event_manager: EventManager | None Returns: Any
def instantiate_class( vertex: Vertex, user_id=None, event_manager: EventManager | None = None, ) -> Any: """Instantiate class from module type and key, and params.""" vertex_type = vertex.vertex_type base_type = vertex.base_type logger.debug(f"Instantiating {vertex_type} of type {base_type}") if not base_type: msg = "No base type provided for vertex" raise ValueError(msg) custom_params = get_params(vertex.params) code = custom_params.pop("code") class_object: type[CustomComponent | Component] = eval_custom_component_code(code) custom_component: CustomComponent | Component = class_object( _user_id=user_id, _parameters=custom_params, _vertex=vertex, _tracing_service=None, _id=vertex.id, ) if hasattr(custom_component, "set_event_manager"): custom_component.set_event_manager(event_manager) return custom_component, custom_params
function_simple
1
{"cognitive_complexity": 2, "loc": 27, "code_loc": 19, "docstring_loc": 1, "function_name": "instantiate_class", "class_name": null, "qualname": "instantiate_class", "file_path": "src/lfx/src/lfx/interface/initialize/loading.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"}
paperless-ngx/paperless-ngx:src/paperless_remote/tests/test_parser.py:TestParser.test_get_text_with_azure_error_logged_and_returns_none
# Context: import uuid from unittest import mock from django.test import override_settings from paperless_remote.signals import get_parser class TestParser(DirectoriesMixin, FileSystemAssertsMixin, TestCase): SAMPLE_FILES = Path(__file__).resolve().parent / "samples" def assertContainsStrings(self, content: str, strings: list[str]) -> None: ... def test_get_text_with_azure(self, mock_client_cls, mock_subprocess) -> None: ... def test_supported_mime_types_valid_config(self) -> None: ... def test_supported_mime_types_invalid_config(self) -> None: ... def test_parse_with_invalid_config(self) -> None: ... # Task: Write a Python test method `test_get_text_with_azure_error_logged_and_returns_none` in test class `TestParser` to verify the behavior of `get_text_with_azure_error_logged_and_returns_none`. Module under test: pathlib, django.test, django.test
def test_get_text_with_azure_error_logged_and_returns_none( self, mock_client_cls, ) -> None: mock_client = mock.Mock() mock_client.begin_analyze_document.side_effect = RuntimeError("fail") mock_client_cls.return_value = mock_client with override_settings( REMOTE_OCR_ENGINE="azureai", REMOTE_OCR_API_KEY="somekey", REMOTE_OCR_ENDPOINT="https://endpoint.cognitiveservices.azure.com", ): parser = get_parser(uuid.uuid4()) with mock.patch.object(parser.log, "error") as mock_log_error: parser.parse( self.SAMPLE_FILES / "simple-digital.pdf", "application/pdf", ) self.assertIsNone(parser.text) mock_client.begin_analyze_document.assert_called_once() mock_client.close.assert_called_once() mock_log_error.assert_called_once() self.assertIn( "Azure AI Vision parsing failed", mock_log_error.call_args[0][0], )
test
1
{"function_name": "test_get_text_with_azure_error_logged_and_returns_none", "class_name": "TestParser", "qualname": "TestParser.test_get_text_with_azure_error_logged_and_returns_none", "file_path": "src/paperless_remote/tests/test_parser.py", "repo_id": "paperless-ngx/paperless-ngx", "loc": 28, "tested_modules": ["pathlib", "django.test", "django.test", "documents.tests.utils", "documents.tests.utils"], "has_docstring": false, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py:TestAgentCoreMemoryContext.test_context_creation
# Context: from llama_index.memory.bedrock_agentcore.base import ( AgentCoreMemory, AgentCoreMemoryContext, ) def mock_client(): ... def memory_context(): ... def memory(mock_client, memory_context): ... class TestBaseAgentCoreMemoryMethods: ... class TestAgentCoreMemory: ... class TestIntegration: ... class TestErrorHandling: ... async def test_aput(memory): ... async def test_aput_messages(memory): ... class TestAgentCoreMemoryContext: def test_context_with_optional_fields(self): ... def test_get_context(self): ... # Task: Write a Python test method `test_context_creation` in test class `TestAgentCoreMemoryContext` to test creating a memory context. Module under test: llama_index.core.base.llms.types, llama_index.core.memory.memory, llama_index.memory.bedrock_agentcore.base
def test_context_creation(self): """Test creating a memory context.""" context = AgentCoreMemoryContext( actor_id="test-actor", memory_id="test-memory", session_id="test-session", ) assert context.actor_id == "test-actor" assert context.memory_id == "test-memory" assert context.session_id == "test-session" assert context.namespace == "/" assert context.memory_strategy_id is None
test
1
{"function_name": "test_context_creation", "class_name": "TestAgentCoreMemoryContext", "qualname": "TestAgentCoreMemoryContext.test_context_creation", "file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py", "repo_id": "run-llama/llama_index", "loc": 12, "tested_modules": ["llama_index.core.base.llms.types", "llama_index.core.memory.memory", "llama_index.memory.bedrock_agentcore.base"], "has_docstring": true, "runnable_level": "project_runnable"}
deepfakes/faceswap:tests/plugins/train/trainer/test_distributed.py:test_WrappedModel
# Context: import numpy as np import pytest import torch from plugins.train.trainer import distributed as mod_distributed def _trainer_mocked(mocker: pytest_mock.MockFixture): ... def test_Trainer(gpu_count, batch_size, _trainer_mocked): ... def test_Trainer_forward(gpu_count, batch_size, outputs, _trainer_mocked, mocker): ... # Task: Write a Python test function `test_WrappedModel` to test that the wrapped model calls preds and loss. Module under test: plugins.train.trainer, plugins.train.trainer, plugins.train.trainer
def test_WrappedModel(batch_size, outputs, mocker): """ Test that the wrapped model calls preds and loss """ model = mocker.MagicMock() instance = mod_distributed.WrappedModel(model) assert instance._keras_model is model loss_return = [torch.from_numpy((np.random.random((1, )))) for _ in range(outputs * 2)] model.loss = [mocker.MagicMock(return_value=ret) for ret in loss_return] test_dims = (batch_size, 16, 16, 3) inp_a = torch.from_numpy(np.random.random(test_dims)) inp_b = torch.from_numpy(np.random.random(test_dims)) targets = [torch.from_numpy(np.random.random(test_dims)) for _ in range(outputs * 2)] preds = [*torch.from_numpy(np.random.random((outputs * 2, *test_dims)))] model.return_value = preds # Call forwards result = instance.forward(inp_a, inp_b, *targets) # Confirm model was called once forward with correct args model.assert_called_once() model_args, model_kwargs = model.call_args assert model_kwargs == {"training": True} assert len(model_args) == 1 assert len(model_args[0]) == 2 for real, expected in zip(model_args[0], [inp_a, inp_b]): assert np.allclose(real.numpy(), expected.numpy()) # Confirm ZeroGrad called model.zero_grad.assert_called_once() # Confirm loss functions correctly called expected_targets = targets[0::2] + targets[1::2] for target, pred, loss in zip(expected_targets, preds, model.loss): loss.assert_called_once() loss_args, loss_kwargs = loss.call_args assert not loss_kwargs assert len(loss_args) == 2 for actual, expected in zip(loss_args, [target, pred]): assert np.allclose(actual.numpy(), expected.numpy()) # Check that the result comes out as we put it in for expected, actual in zip(loss_return, result.squeeze()): assert np.isclose(expected.numpy(), actual.numpy())
test
1
{"function_name": "test_WrappedModel", "class_name": null, "qualname": "test_WrappedModel", "file_path": "tests/plugins/train/trainer/test_distributed.py", "repo_id": "deepfakes/faceswap", "loc": 48, "tested_modules": ["plugins.train.trainer", "plugins.train.trainer", "plugins.train.trainer"], "has_docstring": true, "runnable_level": "project_runnable"}
sansan0/TrendRadar:trendradar/storage/local.py:module_doc
Write a module-level docstring for the Python module `local` which contains class `LocalStorageBackend`.
本地存储后端 - SQLite + TXT/HTML 使用 SQLite 作为主存储,支持可选的 TXT 快照和 HTML 报告
documentation
1
{"doc_type": "module", "module_name": "local", "file_path": "trendradar/storage/local.py", "repo_id": "sansan0/TrendRadar", "char_length": 65}
hiyouga/LlamaFactory:src/llamafactory/v1/accelerator/interface.py:DistributedStrategy.data_mesh_shape
# Context: class Dim(StrEnum): ... class DistributedInterface: ... class DistributedStrategy: def __post_init__(self) -> None: ... def model_mesh_shape(self) -> tuple[int, int]: ... def model_mesh_dim_names(self) -> tuple[str, str]: ... def data_mesh_dim_names(self) -> tuple[str, str]: ... # Task: Write a Python method `data_mesh_shape` for the class `DistributedStrategy` to data parallel mesh shape. Returns: tuple[int, int]
def data_mesh_shape(self) -> tuple[int, int]: """Data parallel mesh shape.""" return (self.dp_size, self.cp_size)
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "data_mesh_shape", "class_name": "DistributedStrategy", "qualname": "DistributedStrategy.data_mesh_shape", "file_path": "src/llamafactory/v1/accelerator/interface.py", "repo_id": "hiyouga/LlamaFactory", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_flow_visualization.py:test_build_flow_structure_with_router
# Context: from crewai.flow.visualization import ( build_flow_structure, visualize_flow_structure, ) class SimpleFlow(Flow): ... class RouterFlow(Flow): ... class ComplexFlow(Flow): ... def test_build_flow_structure_simple(): ... def test_build_flow_structure_with_and_or_conditions(): ... def test_visualize_flow_structure_creates_html(): ... def test_visualize_flow_structure_creates_assets(): ... def test_visualize_flow_structure_json_data(): ... def test_node_metadata_includes_source_info(): ... def test_node_metadata_includes_method_signature(): ... def test_router_node_has_correct_metadata(): ... def test_listen_node_has_trigger_methods(): ... def test_and_condition_node_metadata(): ... def test_visualization_handles_special_characters(): ... def test_empty_flow_structure(): ... def test_topological_path_counting(): ... def test_class_signature_metadata(): ... def test_visualization_plot_method(): ... def test_router_paths_to_string_conditions(): ... def test_router_paths_not_in_and_conditions(): ... def test_chained_routers_no_self_loops(): ... def test_routers_with_shared_output_strings(): ... def test_warning_for_router_without_paths(caplog): ... def test_warning_for_orphaned_listeners(caplog): ... def test_no_warning_for_properly_typed_router(caplog): ... # Task: Write a Python test function `test_build_flow_structure_with_router` to test building structure for a flow with router. Module under test: pathlib, crewai.flow.flow, crewai.flow.visualization
def test_build_flow_structure_with_router(): """Test building structure for a flow with router.""" flow = RouterFlow() structure = build_flow_structure(flow) assert structure is not None assert len(structure["nodes"]) == 4 assert len(structure["router_methods"]) == 1 assert "decide" in structure["router_methods"] router_node = structure["nodes"]["decide"] assert router_node["type"] == "router" if "router_paths" in router_node: assert len(router_node["router_paths"]) >= 1 assert any("path" in path for path in router_node["router_paths"]) router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]] assert len(router_edges) >= 1
test
0
{"function_name": "test_build_flow_structure_with_router", "class_name": null, "qualname": "test_build_flow_structure_with_router", "file_path": "lib/crewai/tests/test_flow_visualization.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["pathlib", "crewai.flow.flow", "crewai.flow.visualization", "typing", "typing"], "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py:LanceDBRetriever.aretrieve
# Context: import os from PIL import Image from llama_index.core.llms import ImageBlock from llama_index.core.schema import ImageDocument from llama_index.core.schema import QueryBundle, NodeWithScore from typing import Union, Optional, List, Any class ExtendedQueryBundle(QueryBundle): ... class LanceDBRetriever(BaseRetriever): def __init__( self, table: Union[AsyncTable, Table], multimodal: bool, **kwargs: Any ): self.table = table self.multimodal = multimodal callback_manager = kwargs.get("callback_manager") verbose = kwargs.get("verbose", False) super().__init__(callback_manager, verbose) def _retrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]: ... async def _aretrieve(self, query_bundle: ExtendedQueryBundle) -> List[NodeWithScore]: ... def retrieve(self, query_str: Optional[str], query_image: Optional[Union[Image.Image, ImageBlock, ImageDocument, str]], query_image_path: Optional[os.PathLike[str]]) -> List[NodeWithScore]: ... # Task: Write a Python async method `aretrieve` for the class `LanceDBRetriever` to asynchronously retrieves nodes relevant to the given query. Parameters: query_str: Optional[str], query_image: Optional[Union[Image.Image, ImageBlock, ImageDocument, str]], query_image_path: Optional[os.PathLike[str]] Returns: List[NodeWithScore]
async def aretrieve( self, query_str: Optional[str] = None, query_image: Optional[ Union[Image.Image, ImageBlock, ImageDocument, str] ] = None, query_image_path: Optional[os.PathLike[str]] = None, ) -> List[NodeWithScore]: """ Asynchronously retrieves nodes relevant to the given query. Args: query_str (Optional[str]): The text query string. Required if the retriever is not multimodal. query_image (Optional[Union[Image.Image, ImageBlock, ImageDocument, str]]): The image query, which can be a PIL Image, ImageBlock, ImageDocument, or a string path/URL. Used if the retriever is multimodal. query_image_path (Optional[os.PathLike[str]]): The file path to the image query. Used if the retriever is multimodal. Returns: List[NodeWithScore]: A list of nodes with associated relevance scores. Raises: ValueError: If none of the query parameters are provided. ValueError: If a text query is not provided for a non-multimodal retriever. ValueError: If neither an image nor image path is provided for a multimodal retriever. """ if not query_str and not query_image and not query_image_path: raise ValueError( "At least one among query_str, query_image and query_image_path needs to be set" ) if not self.multimodal: if query_str: query_bundle = ExtendedQueryBundle(query_str=query_str) else: raise ValueError( "No query_str provided, but the retriever is not multimodal" ) else: if query_image: query_bundle = ExtendedQueryBundle(query_str="", image=query_image) elif query_image_path: query_bundle = ExtendedQueryBundle( query_str="", image_path=query_image_path ) else: raise ValueError( "No query_image or query_image_path provided, but the retriever is multimodal" ) return await self._aretrieve(query_bundle=query_bundle)
function_complex
1
{"cognitive_complexity": 11, "loc": 48, "code_loc": 23, "docstring_loc": 17, "function_name": "aretrieve", "class_name": "LanceDBRetriever", "qualname": "LanceDBRetriever.aretrieve", "file_path": "llama-index-integrations/indices/llama-index-indices-managed-lancedb/llama_index/indices/managed/lancedb/retriever.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:doc/source/ray-overview/examples/e2e-rag/notebooks/rag_utils.py:ChromaQuerier._reformat
Write a Python method `_reformat` for the class `ChromaQuerier` to reformat Chroma DB results into a flat list of dictionaries. Parameters: chroma_results: dict Returns: list
def _reformat(self, chroma_results: dict) -> list: """ Reformat Chroma DB results into a flat list of dictionaries. """ reformatted = [] metadatas = chroma_results.get("metadatas", []) documents = chroma_results.get("documents", []) distances = chroma_results.get("distances", []) chunk_index = 1 for meta_group, doc_group, distance_group in zip( metadatas, documents, distances ): for meta, text, distance in zip(meta_group, doc_group, distance_group): entry = { "chunk_index": chunk_index, "chunk_id": meta.get("chunk_id"), "doc_id": meta.get("doc_id"), "page_number": meta.get("page_number"), "source": meta.get("source"), "text": text, "distance": distance, "score": 1 - distance, } reformatted.append(entry) chunk_index += 1 return reformatted
function_simple
0
{"cognitive_complexity": 3, "loc": 28, "code_loc": 22, "docstring_loc": 3, "function_name": "_reformat", "class_name": "ChromaQuerier", "qualname": "ChromaQuerier._reformat", "file_path": "doc/source/ray-overview/examples/e2e-rag/notebooks/rag_utils.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"}
ray-project/ray:python/ray/train/v2/tests/test_validation_manager.py:test_checkpoint_validation_management_slow_validation_fn
# Context: import time from unittest.mock import create_autospec import pytest import ray from ray.train.v2._internal.execution.checkpoint import validation_manager from ray.train.v2._internal.execution.checkpoint.checkpoint_manager import ( CheckpointManager, ) from ray.train.v2._internal.execution.storage import StorageContext from ray.train.v2._internal.execution.training_report import ( _TrainingReport, ) from ray.train.v2.api.validation_config import ValidationConfig, ValidationTaskConfig from ray.train.v2.tests.util import create_dummy_training_reports def ray_start_4_cpus(): ... def test_before_controller_shutdown(mock_wait, monkeypatch): ... def test_before_init_train_context(): ... def test_checkpoint_validation_management_reordering(tmp_path): ... def test_checkpoint_validation_management_failure(tmp_path): ... def test_checkpoint_validation_management_success_after_retry(tmp_path): ... def test_checkpoint_validation_management_resume(tmp_path): ... # Task: Write a Python test function `test_checkpoint_validation_management_slow_validation_fn` to verify the behavior of `checkpoint_validation_management_slow_validation_fn`. Module under test: ray.train._checkpoint, ray.train._internal.session, ray.train.v2._internal.execution.checkpoint
def test_checkpoint_validation_management_slow_validation_fn(tmp_path): checkpoint_manager = create_autospec(CheckpointManager, instance=True) def infinite_waiting_validation_fn(checkpoint): while True: time.sleep(1) vm = validation_manager.ValidationManager( checkpoint_manager=checkpoint_manager, validation_config=ValidationConfig(fn=infinite_waiting_validation_fn), ) timing_out_training_result = create_dummy_training_reports( num_results=1, storage_context=StorageContext( storage_path=tmp_path, experiment_dir_name="checkpoint_validation_management_slow_validation_fn_experiment", ), )[0] vm.after_report( training_report=_TrainingReport( metrics=timing_out_training_result.metrics, checkpoint=timing_out_training_result.checkpoint, validation=True, ), metrics={}, ) assert vm._poll_validations() == 0 assert vm._kick_off_validations() == 1 # Finish the task by cancelling it timing_out_task = next(iter(vm._pending_validations)) ray.cancel(timing_out_task) with pytest.raises(ray.exceptions.TaskCancelledError): ray.get(timing_out_task) # Verify that poll processes finished task assert vm._poll_validations() == 0 assert vm._kick_off_validations() == 0 checkpoint_manager.update_checkpoints_with_metrics.assert_called_once_with( { timing_out_training_result.checkpoint: {}, } )
test
0
{"function_name": "test_checkpoint_validation_management_slow_validation_fn", "class_name": null, "qualname": "test_checkpoint_validation_management_slow_validation_fn", "file_path": "python/ray/train/v2/tests/test_validation_manager.py", "repo_id": "ray-project/ray", "loc": 44, "tested_modules": ["ray.train._checkpoint", "ray.train._internal.session", "ray.train.v2._internal.execution.checkpoint", "ray.train.v2._internal.execution.checkpoint.checkpoint_manager", "ray.train.v2._internal.execution.storage"], "has_docstring": false, "runnable_level": "plib_runnable"}
apache/airflow:providers/fab/tests/unit/fab/auth_manager/cli_commands/test_permissions_command.py:TestDagPermissions.test_cleanup_dag_permissions_removes_specific_dag_resources
# Context: from sqlalchemy import select from airflow.providers.fab.auth_manager.cli_commands.permissions_command import ( cleanup_dag_permissions, ) from airflow.providers.fab.auth_manager.models import Action, Permission, Resource from airflow.providers.fab.www.security.permissions import RESOURCE_DAG_PREFIX from airflow.utils.session import create_session from sqlalchemy import func, select from airflow.providers.fab.auth_manager.models import Resource class TestPermissionsCommand: ... class TestDagPermissions: def _setup_fab_test(self): ... def test_cleanup_dag_permissions_handles_no_matching_resources(self): ... def test_cleanup_dag_permissions_handles_resources_without_permissions(self): ... def test_cleanup_dag_permissions_with_default_session(self): ... # Task: Write a Python test method `test_cleanup_dag_permissions_removes_specific_dag_resources` in test class `TestDagPermissions` to test that cleanup_dag_permissions removes only the specified DAG resources. Module under test: __future__, contextlib, importlib
def test_cleanup_dag_permissions_removes_specific_dag_resources(self): """Test that cleanup_dag_permissions removes only the specified DAG resources.""" from sqlalchemy import select from airflow.providers.fab.auth_manager.cli_commands.permissions_command import ( cleanup_dag_permissions, ) from airflow.providers.fab.auth_manager.models import Action, Permission, Resource from airflow.providers.fab.www.security.permissions import RESOURCE_DAG_PREFIX from airflow.utils.session import create_session with create_session() as session: # Create resources for two different DAGs target_resource = Resource(name=f"{RESOURCE_DAG_PREFIX}target_dag") keep_resource = Resource(name=f"{RESOURCE_DAG_PREFIX}keep_dag") session.add_all([target_resource, keep_resource]) session.flush() # Get or create action read_action = session.scalars(select(Action).where(Action.name == "can_read")).first() if not read_action: read_action = Action(name="can_read") session.add(read_action) session.flush() # Create permissions target_perm = Permission(action=read_action, resource=target_resource) keep_perm = Permission(action=read_action, resource=keep_resource) session.add_all([target_perm, keep_perm]) session.commit() # Execute cleanup cleanup_dag_permissions("target_dag", session) # Verify: target resource deleted, keep resource remains assert not session.get(Resource, target_resource.id) assert session.get(Resource, keep_resource.id) assert not session.get(Permission, target_perm.id) assert session.get(Permission, keep_perm.id)
test
1
{"function_name": "test_cleanup_dag_permissions_removes_specific_dag_resources", "class_name": "TestDagPermissions", "qualname": "TestDagPermissions.test_cleanup_dag_permissions_removes_specific_dag_resources", "file_path": "providers/fab/tests/unit/fab/auth_manager/cli_commands/test_permissions_command.py", "repo_id": "apache/airflow", "loc": 39, "tested_modules": ["__future__", "contextlib", "importlib", "io", "airflow.cli"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/test_envs.py:TestEnvWithChoices.test_invalid_value_raises_error_case_sensitive
# Context: import os from unittest.mock import patch import pytest from vllm.envs import ( disable_envs_cache, enable_envs_cache, env_list_with_choices, env_set_with_choices, env_with_choices, environment_variables, ) def test_getattr_without_cache(monkeypatch: pytest.MonkeyPatch): ... def test_getattr_with_cache(monkeypatch: pytest.MonkeyPatch): ... def test_getattr_with_reset(monkeypatch: pytest.MonkeyPatch) -> None: ... def test_is_envs_cache_enabled() -> None: ... class TestEnvListWithChoices: ... class TestEnvSetWithChoices: ... class TestVllmConfigureLogging: ... class TestEnvWithChoices: def test_default_value_returned_when_env_not_set(self): ... def test_none_default_returned_when_env_not_set(self): ... def test_valid_value_returned_case_sensitive(self): ... def test_valid_lowercase_value_returned_case_insensitive(self): ... def test_valid_uppercase_value_returned_case_insensitive(self): ... def test_case_mismatch_raises_error_case_sensitive(self): ... def test_invalid_value_raises_error_case_insensitive(self): ... def test_callable_choices_resolved_correctly(self): ... def test_callable_choices_with_invalid_value(self): ... # Task: Write a Python test method `test_invalid_value_raises_error_case_sensitive` in test class `TestEnvWithChoices` to test that invalid value raises ValueError in case sensitive mode. Module under test: vllm.envs
def test_invalid_value_raises_error_case_sensitive(self): """Test that invalid value raises ValueError in case sensitive mode.""" with patch.dict(os.environ, {"TEST_ENV": "invalid"}): env_func = env_with_choices( "TEST_ENV", "default", ["option1", "option2"], case_sensitive=True ) with pytest.raises( ValueError, match="Invalid value 'invalid' for TEST_ENV" ): env_func()
test
1
{"function_name": "test_invalid_value_raises_error_case_sensitive", "class_name": "TestEnvWithChoices", "qualname": "TestEnvWithChoices.test_invalid_value_raises_error_case_sensitive", "file_path": "tests/test_envs.py", "repo_id": "vllm-project/vllm", "loc": 10, "tested_modules": ["vllm.envs"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py:Kandinsky5T2VPipeline.check_inputs
# Context: def basic_clean(text): ... def whitespace_clean(text): ... def prompt_clean(text): ... class Kandinsky5T2VPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin): model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" _callback_tensor_inputs = [ def __init__( self, transformer: Kandinsky5Transformer3DModel, vae: AutoencoderKLHunyuanVideo, text_encoder: Qwen2_5_VLForConditionalGeneration, tokenizer: Qwen2VLProcessor, text_encoder_2: CLIPTextModel, tokenizer_2: CLIPTokenizer, scheduler: FlowMatchEulerDiscreteScheduler, ): super().__init__() self.register_modules( transformer=transformer, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, text_encoder_2=text_encoder_2, tokenizer_2=tokenizer_2, scheduler=scheduler, ) self.prompt_template = "\n".join( [ "<|im_start|>system\nYou are a promt engineer. Describe the video in detail.", "Describe how the camera moves or shakes, describe the zoom and view angle, whether it follows the objects.", "Describe the location of the video, main characters or objects and their action.", "Describe the dynamism of the video and presented actions.", "Name the visual style of the video: whether it is a professional footage, user generated content, some kind of animation, video game or scren content.", "Describe the visual effects, postprocessing and transitions if they are presented in the video.", "Pay attention to the order of key actions shown in the scene.<|im_end|>", "<|im_start|>user\n{}<|im_end|>", ] ) self.prompt_template_encode_start_idx = 129 self.vae_scale_factor_temporal = ( self.vae.config.temporal_compression_ratio if getattr(self, "vae", None) else 4 ) self.vae_scale_factor_spatial = self.vae.config.spatial_compression_ratio if getattr(self, "vae", None) else 8 self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) def _get_scale_factor(self, height: int, width: int) -> tuple: ... 
def fast_sta_nabla(T: int, H: int, W: int, wT: int, wH: int, wW: int, device) -> torch.Tensor: ... def get_sparse_params(self, sample, device): ... def _encode_prompt_qwen(self, prompt: str | list[str], device: torch.device | None, max_sequence_length: int, dtype: torch.dtype | None): ... def _encode_prompt_clip(self, prompt: str | list[str], device: torch.device | None, dtype: torch.dtype | None): ... def encode_prompt(self, prompt: str | list[str], num_videos_per_prompt: int, max_sequence_length: int, device: torch.device | None, dtype: torch.dtype | None): ... def prepare_latents(self, batch_size: int, num_channels_latents: int, height: int, width: int, num_frames: int, dtype: torch.dtype | None, device: torch.device | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None) -> torch.Tensor: ... def guidance_scale(self): ... def num_timesteps(self): ... def interrupt(self): ... def __call__(self, prompt: str | list[str], negative_prompt: str | list[str] | None, height: int, width: int, num_frames: int, num_inference_steps: int, guidance_scale: float, num_videos_per_prompt: int | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None, prompt_embeds_qwen: torch.Tensor | None, prompt_embeds_clip: torch.Tensor | None, negative_prompt_embeds_qwen: torch.Tensor | None, negative_prompt_embeds_clip: torch.Tensor | None, prompt_cu_seqlens: torch.Tensor | None, negative_prompt_cu_seqlens: torch.Tensor | None, output_type: str | None, return_dict: bool, callback_on_step_end: Callable[[int, int], None] | PipelineCallback | MultiPipelineCallbacks | None, callback_on_step_end_tensor_inputs: list[str], max_sequence_length: int): ... # Task: Write a Python method `check_inputs` for the class `Kandinsky5T2VPipeline` to validate input parameters for the pipeline. 
Parameters: prompt, negative_prompt, height, width, prompt_embeds_qwen, prompt_embeds_clip, negative_prompt_embeds_qwen, negative_prompt_embeds_clip, prompt_cu_seqlens, negative_prompt_cu_seqlens, callback_on_step_end_tensor_inputs, max_sequence_length
def check_inputs( self, prompt, negative_prompt, height, width, prompt_embeds_qwen=None, prompt_embeds_clip=None, negative_prompt_embeds_qwen=None, negative_prompt_embeds_clip=None, prompt_cu_seqlens=None, negative_prompt_cu_seqlens=None, callback_on_step_end_tensor_inputs=None, max_sequence_length=None, ): """ Validate input parameters for the pipeline. Args: prompt: Input prompt negative_prompt: Negative prompt for guidance height: Video height width: Video width prompt_embeds_qwen: Pre-computed Qwen prompt embeddings prompt_embeds_clip: Pre-computed CLIP prompt embeddings negative_prompt_embeds_qwen: Pre-computed Qwen negative prompt embeddings negative_prompt_embeds_clip: Pre-computed CLIP negative prompt embeddings prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen positive prompt negative_prompt_cu_seqlens: Pre-computed cumulative sequence lengths for Qwen negative prompt callback_on_step_end_tensor_inputs: Callback tensor inputs Raises: ValueError: If inputs are invalid """ if max_sequence_length is not None and max_sequence_length > 1024: raise ValueError("max_sequence_length must be less than 1024") if height % 16 != 0 or width % 16 != 0: raise ValueError(f"`height` and `width` have to be divisible by 16 but are {height} and {width}.") if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) # Check for consistency within positive prompt embeddings and sequence lengths if prompt_embeds_qwen is not None or prompt_embeds_clip is not None or prompt_cu_seqlens is not None: if prompt_embeds_qwen is None or prompt_embeds_clip is None or prompt_cu_seqlens is None: raise ValueError( "If any of `prompt_embeds_qwen`, `prompt_embeds_clip`, or `prompt_cu_seqlens` 
is provided, " "all three must be provided." ) # Check for consistency within negative prompt embeddings and sequence lengths if ( negative_prompt_embeds_qwen is not None or negative_prompt_embeds_clip is not None or negative_prompt_cu_seqlens is not None ): if ( negative_prompt_embeds_qwen is None or negative_prompt_embeds_clip is None or negative_prompt_cu_seqlens is None ): raise ValueError( "If any of `negative_prompt_embeds_qwen`, `negative_prompt_embeds_clip`, or `negative_prompt_cu_seqlens` is provided, " "all three must be provided." ) # Check if prompt or embeddings are provided (either prompt or all required embedding components for positive) if prompt is None and prompt_embeds_qwen is None: raise ValueError( "Provide either `prompt` or `prompt_embeds_qwen` (and corresponding `prompt_embeds_clip` and `prompt_cu_seqlens`). Cannot leave all undefined." ) # Validate types for prompt and negative_prompt if provided if prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")
function_complex
1
{"cognitive_complexity": 22, "loc": 85, "code_loc": 40, "docstring_loc": 19, "function_name": "check_inputs", "class_name": "Kandinsky5T2VPipeline", "qualname": "Kandinsky5T2VPipeline.check_inputs", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "class_runnable"}
vllm-project/vllm:examples/online_serving/openai_responses_client_with_mcp_tools.py:module_doc
Write a module-level docstring for the Python module `openai_responses_client_with_mcp_tools` which contains function `example_no_filter`, function `example_wildcard`, function `example_specific_tools`, function `example_object_format`, function `main`.
Example demonstrating MCP (Model Context Protocol) tools with the Responses API. This example shows how to use MCP tools with different allowed_tools configurations: 1. No filter (allows all tools from the MCP server) 2. Wildcard "*" (explicitly allows all tools) 3. Specific tool names (filters to only those tools) Set up this example by starting a vLLM OpenAI-compatible server with MCP tools enabled. For example: vllm serve openai/gpt-oss-20b --enforce-eager --tool-server demo Environment variables: - VLLM_ENABLE_RESPONSES_API_STORE=1 - VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS=code_interpreter,container - VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS=1
documentation
1
{"doc_type": "module", "module_name": "openai_responses_client_with_mcp_tools", "file_path": "examples/online_serving/openai_responses_client_with_mcp_tools.py", "repo_id": "vllm-project/vllm", "char_length": 654}
browser-use/browser-use:browser_use/skill_cli/commands/profile.py:_handle_sync
# Context: import argparse import json import sys import tempfile from pathlib import Path from browser_use.skill_cli.commands.utils import get_sdk_client from browser_use.skill_cli.api_key import APIKeyRequired import asyncio from browser_use.skill_cli.sessions import create_browser_session class ProfileModeError(Exception): ... def get_profile_mode(args: argparse.Namespace) -> ProfileMode: ... def handle_profile_command(args: argparse.Namespace) -> int: ... def _print_usage() -> None: ... def _handle_list(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _list_local_profiles(args: argparse.Namespace) -> int: ... def _list_cloud_profiles(args: argparse.Namespace) -> int: ... def _handle_get(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _get_local_profile(args: argparse.Namespace) -> int: ... def _get_cloud_profile(args: argparse.Namespace) -> int: ... def _handle_create(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _create_cloud_profile(args: argparse.Namespace) -> int: ... def _handle_update(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _update_cloud_profile(args: argparse.Namespace) -> int: ... def _handle_delete(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _delete_cloud_profile(args: argparse.Namespace) -> int: ... def _handle_cookies(args: argparse.Namespace, mode: ProfileMode) -> int: ... def _list_profile_cookies(args: argparse.Namespace) -> int: ... def list_local_chrome_profiles() -> list[dict[str, Any]]: ... # Task: Write a Python function `_handle_sync` to handle 'profile sync' command - sync local profile to cloud. Parameters: args: argparse.Namespace Returns: int
def _handle_sync(args: argparse.Namespace) -> int: """Handle 'profile sync' command - sync local profile to cloud.""" import asyncio from browser_use.skill_cli.api_key import APIKeyRequired from browser_use.skill_cli.sessions import create_browser_session # Get SDK client (validates API key) try: client = get_sdk_client() except APIKeyRequired as e: print(f'Error: {e}', file=sys.stderr) return 1 except Exception as e: print(f'Error: {e}', file=sys.stderr) return 1 # Get local profiles local_profiles = list_local_chrome_profiles() if not local_profiles: print('Error: No local Chrome profiles found', file=sys.stderr) return 1 # Determine which profile to sync from_profile = args.from_profile if not from_profile: # Show available profiles and ask user to specify print('Available local profiles:') for p in local_profiles: print(f' {p["id"]}: {p["name"]} ({p["email"]})') print() print('Use --from to specify a profile:') print(' browser-use profile sync --from "Default"') print(' browser-use profile sync --from "Profile 1"') return 1 # Find the matching profile selected_profile = None for p in local_profiles: if p['id'] == from_profile or p['name'] == from_profile: selected_profile = p break if not selected_profile: print(f'Error: Profile "{from_profile}" not found', file=sys.stderr) print('Available profiles:') for p in local_profiles: print(f' {p["id"]}: {p["name"]}') return 1 profile_id = selected_profile['id'] profile_name = selected_profile['name'] domain_filter = getattr(args, 'domain', None) # Generate cloud profile name cloud_name = args.name if args.name else None if not cloud_name: if domain_filter: cloud_name = f'Chrome - {profile_name} ({domain_filter})' else: cloud_name = f'Chrome - {profile_name}' # Use stderr for progress when JSON output is requested json_output = getattr(args, 'json', False) out = sys.stderr if json_output else sys.stdout def log(msg: str) -> None: print(msg, file=out) if domain_filter: log(f'Syncing: {profile_name} → {domain_filter} 
cookies only') else: log(f'Syncing: {profile_name} ({selected_profile["email"]})') # Step 1: Create cloud profile log(' Creating cloud profile...') try: cloud_profile = client.profiles.create_profile(name=cloud_name) cloud_profile_id = cloud_profile.id except Exception as e: print(f'Error creating cloud profile: {e}', file=sys.stderr) return 1 log(f' ✓ Created: {cloud_profile_id}') def cleanup_cloud_profile() -> None: """Delete the cloud profile on failure.""" try: client.profiles.delete_browser_profile(cloud_profile_id) except Exception: pass # Step 2: Export cookies from local profile async def sync_cookies(): log(' Exporting cookies from local profile...') local_session = await create_browser_session('real', headed=False, profile=profile_id) await local_session.start() try: cookies = await local_session._cdp_get_cookies() if not cookies: return 0, 'No cookies found in local profile' # Filter by domain if specified if domain_filter: cookies = [c for c in cookies if domain_filter in c.get('domain', '')] if not cookies: return 0, f'No cookies found for domain: {domain_filter}' log(f' ✓ Found {len(cookies)} cookies') # Save to temp file - convert Cookie objects to dicts for JSON serialization cookies_file = Path(tempfile.gettempdir()) / f'browser-use-sync-{cloud_profile_id}.json' cookies_data = [dict(c) if hasattr(c, '__dict__') else c for c in cookies] cookies_file.write_text(json.dumps(cookies_data)) return len(cookies), str(cookies_file) finally: await local_session.kill() try: loop = asyncio.get_event_loop() if loop.is_running(): import concurrent.futures with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(asyncio.run, sync_cookies()) cookie_count, cookies_file = future.result() else: cookie_count, cookies_file = loop.run_until_complete(sync_cookies()) except RuntimeError: cookie_count, cookies_file = asyncio.run(sync_cookies()) if cookie_count == 0: log(f' ⚠ {cookies_file}') # cookies_file contains error message 
cleanup_cloud_profile() return 1 # Step 3: Import cookies to cloud profile async def import_to_cloud(): log(' Importing cookies to cloud profile...') remote_session = await create_browser_session('remote', headed=False, profile=cloud_profile_id) await remote_session.start() try: cookies = json.loads(Path(cookies_file).read_text()) await remote_session._cdp_set_cookies(cookies) return True finally: await remote_session.kill() try: loop = asyncio.get_event_loop() if loop.is_running(): import concurrent.futures with concurrent.futures.ThreadPoolExecutor() as executor: future = executor.submit(asyncio.run, import_to_cloud()) future.result() else: loop.run_until_complete(import_to_cloud()) except RuntimeError: asyncio.run(import_to_cloud()) except Exception as e: log(f' ⚠ Failed to import cookies: {e}') cleanup_cloud_profile() return 1 # Cleanup temp file try: Path(cookies_file).unlink() except Exception: pass log('✓ Profile synced successfully!') log(f' Cloud profile ID: {cloud_profile_id}') log('') log('To use this profile:') log(f' browser-use -b remote --profile {cloud_profile_id} open <url>') if json_output: print( json.dumps( { 'success': True, 'profile_id': cloud_profile_id, 'cookies_synced': cookie_count, } ) ) return 0
function_complex
0
{"cognitive_complexity": 44, "loc": 191, "code_loc": 146, "docstring_loc": 1, "function_name": "_handle_sync", "class_name": null, "qualname": "_handle_sync", "file_path": "browser_use/skill_cli/commands/profile.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/llms/azure/test_azure.py:test_azure_completion_is_used_when_azure_provider
# Context: import pytest from crewai.llm import LLM def mock_azure_credentials(): ... def test_azure_completion_is_used_when_azure_openai_provider(): ... def test_azure_tool_use_conversation_flow(): ... def test_azure_completion_module_is_imported(): ... def test_native_azure_raises_error_when_initialization_fails(): ... def test_azure_completion_initialization_parameters(): ... def test_azure_specific_parameters(): ... def test_azure_completion_call(): ... def test_azure_completion_called_during_crew_execution(): ... def test_azure_completion_call_arguments(): ... def test_multiple_azure_calls_in_crew(): ... def test_azure_completion_with_tools(): ... def test_azure_raises_error_when_endpoint_missing(): ... def test_azure_raises_error_when_api_key_missing(): ... def test_azure_endpoint_configuration(): ... def test_azure_api_key_configuration(): ... def test_azure_model_capabilities(): ... def test_azure_completion_params_preparation(): ... def test_azure_model_detection(): ... def test_azure_supports_stop_words(): ... def test_azure_gpt5_models_do_not_support_stop_words(): ... def test_azure_o_series_models_do_not_support_stop_words(): ... def test_azure_responses_api_models_do_not_support_stop_words(): ... def test_azure_stop_words_not_included_for_unsupported_models(): ... def test_azure_context_window_size(): ... def test_azure_message_formatting(): ... def test_azure_streaming_parameter(): ... def test_azure_tool_conversion(): ... def test_azure_environment_variable_endpoint(): ... def test_azure_token_usage_tracking(): ... def test_azure_http_error_handling(): ... def test_azure_streaming_completion(): ... def test_azure_api_version_default(): ... def test_azure_function_calling_support(): ... def test_azure_openai_endpoint_url_construction(): ... def test_azure_openai_endpoint_url_with_trailing_slash(): ... def test_azure_openai_endpoint_already_complete(): ... def test_non_azure_openai_endpoint_unchanged(): ... 
def test_azure_openai_model_parameter_excluded(): ... def test_non_azure_openai_model_parameter_included(): ... def test_azure_message_formatting_with_role(): ... def test_azure_message_formatting_default_role(): ... def test_azure_endpoint_detection_flags(): ... def test_azure_improved_error_messages(): ... def test_azure_api_version_properly_passed(): ... def test_azure_timeout_and_max_retries_stored(): ... def test_azure_complete_params_include_optional_params(): ... def test_azure_endpoint_validation_with_azure_prefix(): ... def test_azure_message_formatting_preserves_all_roles(): ... def test_azure_deepseek_model_support(): ... def test_azure_mistral_and_other_models(): ... def test_azure_completion_params_preparation_with_drop_params(): ... def test_azure_streaming_returns_usage_metrics(): ... def test_azure_agent_kickoff_structured_output_without_tools(): ... def test_azure_agent_kickoff_structured_output_with_tools(): ... def test_azure_stop_words_not_applied_to_structured_output(): ... def test_azure_stop_words_still_applied_to_regular_responses(): ... # Task: Write a Python test function `test_azure_completion_is_used_when_azure_provider` to test that AzureCompletion from completion.py is used when LLM uses provider 'azure'. Module under test: crewai.llm, crewai.crew, crewai.agent
def test_azure_completion_is_used_when_azure_provider(): """ Test that AzureCompletion from completion.py is used when LLM uses provider 'azure' """ llm = LLM(model="azure/gpt-4") assert llm.__class__.__name__ == "AzureCompletion" assert llm.provider == "azure" assert llm.model == "gpt-4"
test
0
{"function_name": "test_azure_completion_is_used_when_azure_provider", "class_name": null, "qualname": "test_azure_completion_is_used_when_azure_provider", "file_path": "lib/crewai/tests/llms/azure/test_azure.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.azure.completion"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py:TestKeycloakProvider.test_get_issuer
# Context: class TestKeycloakProvider: def setup_method(self): ... def test_initialization_with_valid_settings(self): ... def test_get_authorize_url(self): ... def test_get_authorize_url_with_different_domain(self): ... def test_get_token_url(self): ... def test_get_token_url_with_different_domain(self): ... def test_get_jwks_url(self): ... def test_get_jwks_url_with_different_domain(self): ... def test_get_issuer_with_different_domain(self): ... def test_get_audience(self): ... def test_get_client_id(self): ... def test_get_required_fields(self): ... def test_oauth2_base_url(self): ... def test_oauth2_base_url_strips_https_prefix(self): ... def test_oauth2_base_url_strips_http_prefix(self): ... # Task: Write a Python test method `test_get_issuer` in test class `TestKeycloakProvider` to verify the behavior of `get_issuer`. Module under test: crewai.cli.authentication.main, crewai.cli.authentication.providers.keycloak
def test_get_issuer(self): expected_issuer = "https://keycloak.example.com/realms/test-realm" assert self.provider.get_issuer() == expected_issuer
test
0
{"function_name": "test_get_issuer", "class_name": "TestKeycloakProvider", "qualname": "TestKeycloakProvider.test_get_issuer", "file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py", "repo_id": "crewAIInc/crewAI", "loc": 3, "tested_modules": ["crewai.cli.authentication.main", "crewai.cli.authentication.providers.keycloak"], "has_docstring": false, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/gemini/completion.py:GeminiCompletion._handle_structured_output_tool_call
# Context: import logging from typing import TYPE_CHECKING, Any, Literal, cast from pydantic import BaseModel from crewai.events.types.llm_events import LLMCallType from google.genai import types class GeminiCompletion(BaseLLM): def __init__( self, model: str = "gemini-2.0-flash-001", api_key: str | None = None, project: str | None = None, location: str | None = None, temperature: float | None = None, top_p: float | None = None, top_k: int | None = None, max_output_tokens: int | None = None, stop_sequences: list[str] | None = None, stream: bool = False, safety_settings: dict[str, Any] | None = None, client_params: dict[str, Any] | None = None, interceptor: BaseInterceptor[Any, Any] | None = None, use_vertexai: bool | None = None, response_format: type[BaseModel] | None = None, **kwargs: Any, ): """Initialize Google Gemini chat completion client. Args: model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro') api_key: Google API key for Gemini API authentication. Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var. NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead. project: Google Cloud project ID for Vertex AI with ADC authentication. Requires Application Default Credentials (gcloud auth application-default login). NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC. If both api_key and project are set, api_key takes precedence. location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1') temperature: Sampling temperature (0-2) top_p: Nucleus sampling parameter top_k: Top-k sampling parameter max_output_tokens: Maximum tokens in response stop_sequences: Stop sequences stream: Enable streaming responses safety_settings: Safety filter settings client_params: Additional parameters to pass to the Google Gen AI Client constructor. Supports parameters like http_options, credentials, debug_config, etc. interceptor: HTTP interceptor (not yet supported for Gemini). 
use_vertexai: Whether to use Vertex AI instead of Gemini API. - True: Use Vertex AI (with ADC or Express mode with API key) - False: Use Gemini API (explicitly override env var) - None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var When using Vertex AI with API key (Express mode), http_options with api_version="v1" is automatically configured. response_format: Pydantic model for structured output. Used as default when response_model is not passed to call()/acall() methods. **kwargs: Additional parameters """ if interceptor is not None: raise NotImplementedError( "HTTP interceptors are not yet supported for Google Gemini provider. " "Interceptors are currently supported for OpenAI and Anthropic providers only." ) super().__init__( model=model, temperature=temperature, stop=stop_sequences or [], **kwargs ) # Store client params for later use self.client_params = client_params or {} # Get API configuration with environment variable fallbacks self.api_key = ( api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY") ) self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT") self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1" if use_vertexai is None: use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true" self.client = self._initialize_client(use_vertexai) # Store completion parameters self.top_p = top_p self.top_k = top_k self.max_output_tokens = max_output_tokens self.stream = stream self.safety_settings = safety_settings or {} self.stop_sequences = stop_sequences or [] self.tools: list[dict[str, Any]] | None = None self.response_format = response_format # Model-specific settings version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower()) self.supports_tools = bool( version_match and float(version_match.group(1)) >= 1.5 ) self.is_gemini_2_0 = bool( version_match and float(version_match.group(1)) >= 2.0 ) def stop(self) -> list[str]: ... def stop(self, value: list[str] | str | None) -> None: ... 
def _initialize_client(self, use_vertexai: bool) -> genai.Client: ... def _get_client_params(self) -> dict[str, Any]: ... def call(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _prepare_generation_config(self, system_instruction: str | None, tools: list[dict[str, Any]] | None, response_model: type[BaseModel] | None) -> types.GenerateContentConfig: ... def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[types.Tool]: ... def _format_messages_for_gemini(self, messages: str | list[LLMMessage]) -> tuple[list[types.Content], str | None]: ... def _validate_and_emit_structured_output(self, content: str, response_model: type[BaseModel], messages_for_event: list[LLMMessage], from_task: Any | None, from_agent: Any | None) -> BaseModel: ... def _finalize_completion_response(self, content: str, contents: list[types.Content], response_model: type[BaseModel] | None, from_task: Any | None, from_agent: Any | None) -> str | BaseModel: ... def _process_response_with_tools(self, response: GenerateContentResponse, contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _process_stream_chunk(self, chunk: GenerateContentResponse, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], from_task: Any | None, from_agent: Any | None) -> tuple[str, dict[int, dict[str, Any]], dict[str, int]]: ... 
def _finalize_streaming_response(self, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]]: ... def _handle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _handle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]] | Any: ... async def _ahandle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def supports_function_calling(self) -> bool: ... def supports_stop_words(self) -> bool: ... def get_context_window_size(self) -> int: ... def _extract_token_usage(response: GenerateContentResponse) -> dict[str, Any]: ... def _extract_text_from_response(response: GenerateContentResponse) -> str: ... def _add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]: ... def _convert_contents_to_dict(contents: list[types.Content]) -> list[LLMMessage]: ... def supports_multimodal(self) -> bool: ... def format_text_content(self, text: str) -> dict[str, Any]: ... def get_file_uploader(self) -> Any: ... 
# Task: Write a Python method `_handle_structured_output_tool_call` for the class `GeminiCompletion` to validate and emit event for structured_output tool call. Parameters: structured_data: dict[str, Any], response_model: type[BaseModel], contents: list[types.Content], from_task: Any | None, from_agent: Any | None Returns: BaseModel
def _handle_structured_output_tool_call( self, structured_data: dict[str, Any], response_model: type[BaseModel], contents: list[types.Content], from_task: Any | None = None, from_agent: Any | None = None, ) -> BaseModel: """Validate and emit event for structured_output tool call. Args: structured_data: The arguments passed to the structured_output tool response_model: Pydantic model to validate against contents: Original contents for event conversion from_task: Task that initiated the call from_agent: Agent that initiated the call Returns: Validated Pydantic model instance Raises: ValueError: If validation fails """ try: validated_data = response_model.model_validate(structured_data) self._emit_call_completed_event( response=validated_data.model_dump_json(), call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=self._convert_contents_to_dict(contents), ) return validated_data except Exception as e: error_msg = ( f"Failed to validate {STRUCTURED_OUTPUT_TOOL_NAME} tool response " f"with model {response_model.__name__}: {e}" ) logging.error(error_msg) raise ValueError(error_msg) from e
function_simple
0
{"cognitive_complexity": 1, "loc": 40, "code_loc": 17, "docstring_loc": 15, "function_name": "_handle_structured_output_tool_call", "class_name": "GeminiCompletion", "qualname": "GeminiCompletion._handle_structured_output_tool_call", "file_path": "lib/crewai/src/crewai/llms/providers/gemini/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_streaming.py:TestStreamingEdgeCases.test_streaming_with_empty_content_chunks
# Context: from collections.abc import AsyncIterator, Generator from unittest.mock import MagicMock, patch from crewai.types.streaming import ( CrewStreamingOutput, FlowStreamingOutput, StreamChunk, StreamChunkType, ToolCallChunk, ) from crewai.types.streaming import ( CrewStreamingOutput, FlowStreamingOutput, StreamChunk, StreamChunkType, ToolCallChunk, ) def researcher() -> Agent: ... def simple_task(researcher: Agent) -> Task: ... def simple_crew(researcher: Agent, simple_task: Task) -> Crew: ... def streaming_crew(researcher: Agent, simple_task: Task) -> Crew: ... class TestStreamChunk: ... class TestCrewStreamingOutput: ... class TestFlowStreamingOutput: ... class TestCrewKickoffStreaming: ... class TestCrewKickoffStreamingAsync: ... class TestFlowKickoffStreaming: ... class TestFlowKickoffStreamingAsync: ... class TestStreamingImports: ... class TestStreamingEdgeCases: def test_streaming_handles_exceptions(self, researcher: Agent, simple_task: Task) -> None: ... def test_streaming_with_multiple_tasks(self, researcher: Agent) -> None: ... # Task: Write a Python test method `test_streaming_with_empty_content_chunks` in test class `TestStreamingEdgeCases` to test streaming when LLM chunks have empty content. Module under test: collections.abc, typing, crewai
def test_streaming_with_empty_content_chunks(self) -> None: """Test streaming when LLM chunks have empty content.""" mock_output = MagicMock() mock_output.raw = "No streaming" def gen() -> Generator[StreamChunk, None, None]: yield StreamChunk(content="") streaming = CrewStreamingOutput(sync_iterator=gen()) chunks = list(streaming) assert streaming.is_completed assert len(chunks) == 1 assert chunks[0].content == "" # Simulate what _finalize_streaming does streaming._set_result(mock_output) result = streaming.result assert result.raw == "No streaming"
test
0
{"function_name": "test_streaming_with_empty_content_chunks", "class_name": "TestStreamingEdgeCases", "qualname": "TestStreamingEdgeCases.test_streaming_with_empty_content_chunks", "file_path": "lib/crewai/tests/test_streaming.py", "repo_id": "crewAIInc/crewAI", "loc": 20, "tested_modules": ["collections.abc", "typing", "crewai", "crewai.events.event_bus", "crewai.events.types.llm_events"], "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_response_format.py:TestResponseFormatAsToolStrategy.test_typed_dict
# Context: from langchain_core.messages import HumanMessage from langchain.agents import create_agent from langchain.agents.structured_output import ( MultipleStructuredOutputsError, ProviderStrategy, StructuredOutputValidationError, ToolStrategy, ) from tests.unit_tests.agents.model import FakeToolCallingModel class WeatherBaseModel(BaseModel): ... class WeatherDataclass: ... class WeatherTypedDict(TypedDict): ... class LocationResponse(BaseModel): ... class LocationTypedDict(TypedDict): ... def get_weather() -> str: ... def get_location() -> str: ... class TestResponseFormatAsModel: ... class TestResponseFormatAsProviderStrategy: ... class TestDynamicModelWithResponseFormat: ... def test_union_of_types() -> None: ... class TestSupportsProviderStrategy: ... class TestResponseFormatAsToolStrategy: def test_pydantic_model(self) -> None: ... def test_dataclass(self) -> None: ... def test_json_schema(self) -> None: ... def test_union_of_json_schemas(self) -> None: ... def test_union_of_types(self) -> None: ... def test_multiple_structured_outputs_error_without_retry(self) -> None: ... def test_multiple_structured_outputs_with_retry(self) -> None: ... def test_structured_output_parsing_error_without_retry(self) -> None: ... def test_structured_output_parsing_error_with_retry(self) -> None: ... def test_retry_with_custom_function(self) -> None: ... def test_retry_with_custom_string_message(self) -> None: ... def test_validation_error_with_invalid_response(self) -> None: ... # Task: Write a Python test method `test_typed_dict` in test class `TestResponseFormatAsToolStrategy` to test response_format as ToolStrategy with TypedDict. Module under test: collections.abc, dataclasses, typing
def test_typed_dict(self) -> None: """Test response_format as ToolStrategy with TypedDict.""" tool_calls = [ [{"args": {}, "id": "1", "name": "get_weather"}], [ { "name": "WeatherTypedDict", "id": "2", "args": WEATHER_DATA, } ], ] model = FakeToolCallingModel(tool_calls=tool_calls) agent = create_agent(model, [get_weather], response_format=ToolStrategy(WeatherTypedDict)) response = agent.invoke({"messages": [HumanMessage("What's the weather?")]}) assert response["structured_response"] == EXPECTED_WEATHER_DICT assert len(response["messages"]) == 5
test
1
{"function_name": "test_typed_dict", "class_name": "TestResponseFormatAsToolStrategy", "qualname": "TestResponseFormatAsToolStrategy.test_typed_dict", "file_path": "libs/langchain_v1/tests/unit_tests/agents/test_response_format.py", "repo_id": "langchain-ai/langchain", "loc": 20, "tested_modules": ["collections.abc", "dataclasses", "typing", "langchain_core.language_models", "langchain_core.language_models.chat_models"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_rw_lock.py:test_manual_acquire_release
# Context: from crewai.utilities.rw_lock import RWLock def test_multiple_readers_concurrent(): ... def test_writer_blocks_readers(): ... def test_writer_blocks_other_writers(): ... def test_readers_block_writers(): ... def test_alternating_readers_and_writers(): ... def test_context_manager_releases_on_exception(): ... def test_write_lock_releases_on_exception(): ... def test_stress_many_readers_few_writers(): ... def test_nested_read_locks_same_thread(): ... # Task: Write a Python test function `test_manual_acquire_release` to verify the behavior of `manual_acquire_release`. Module under test: crewai.utilities.rw_lock
def test_manual_acquire_release(): lock = RWLock() lock.r_acquire() lock.r_release() lock.w_acquire() lock.w_release() with lock.r_locked(): pass
test
0
{"function_name": "test_manual_acquire_release", "class_name": null, "qualname": "test_manual_acquire_release", "file_path": "lib/crewai/tests/utilities/events/test_rw_lock.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["crewai.utilities.rw_lock"], "has_docstring": false, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/tests/unit/services/test_service_manager.py:TestServiceRegistration.test_register_storage_service
# Context: from lfx.services.schema import ServiceType from lfx.services.storage.local import LocalStorageService def service_manager(): ... def temp_config_dir(tmp_path): ... class TestPluginDiscovery: ... class TestServiceCreation: ... class TestConflictResolution: ... class TestTeardown: ... class TestConfigDirectorySource: ... class TestRealWorldScenarios: ... class TestServiceRegistration: def test_register_multiple_real_services(self, service_manager): ... def test_register_service_class_no_override(self, service_manager): ... def test_register_service_class_with_override(self, service_manager): ... def test_cannot_register_settings_service(self, service_manager): ... # Task: Write a Python test method `test_register_storage_service` in test class `TestServiceRegistration` to test registering the real LocalStorageService. Module under test: pathlib, lfx.services.base, lfx.services.manager
def test_register_storage_service(self, service_manager): """Test registering the real LocalStorageService.""" service_manager.register_service_class(ServiceType.STORAGE_SERVICE, LocalStorageService, override=True) assert ServiceType.STORAGE_SERVICE in service_manager.service_classes assert service_manager.service_classes[ServiceType.STORAGE_SERVICE] == LocalStorageService
test
1
{"function_name": "test_register_storage_service", "class_name": "TestServiceRegistration", "qualname": "TestServiceRegistration.test_register_storage_service", "file_path": "src/lfx/tests/unit/services/test_service_manager.py", "repo_id": "langflow-ai/langflow", "loc": 6, "tested_modules": ["pathlib", "lfx.services.base", "lfx.services.manager", "lfx.services.schema", "lfx.services.storage.local"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/microsoft/azure/tests/unit/microsoft/azure/fs/test_msgraph.py:TestMSGraphFS.test_get_fs_no_connection
# Context: from unittest.mock import MagicMock, patch from airflow.providers.microsoft.azure.fs.msgraph import get_fs def mock_connection(): ... def mock_connection_minimal(): ... class TestMSGraphFS: def test_get_fs_with_drive_id(self, mock_msgdrivefs, mock_get_connection, mock_connection): ... def test_get_fs_with_extra_oauth_params(self, mock_msgdrivefs, mock_get_connection): ... def test_get_fs_with_storage_options(self, mock_msgdrivefs, mock_get_connection, mock_connection_minimal): ... def test_get_fs_incomplete_credentials(self, mock_msgdrivefs, mock_get_connection): ... # Task: Write a Python test method `test_get_fs_no_connection` in test class `TestMSGraphFS` to verify the behavior of `get_fs_no_connection`. Module under test: __future__, airflow.models.connection, airflow.providers.microsoft.azure.fs.msgraph
def test_get_fs_no_connection(self, mock_msgdrivefs): mock_fs_instance = MagicMock() mock_msgdrivefs.return_value = mock_fs_instance result = get_fs(None) mock_msgdrivefs.assert_called_once_with({}) assert result == mock_fs_instance
test
1
{"function_name": "test_get_fs_no_connection", "class_name": "TestMSGraphFS", "qualname": "TestMSGraphFS.test_get_fs_no_connection", "file_path": "providers/microsoft/azure/tests/unit/microsoft/azure/fs/test_msgraph.py", "repo_id": "apache/airflow", "loc": 8, "tested_modules": ["__future__", "airflow.models.connection", "airflow.providers.microsoft.azure.fs.msgraph"], "has_docstring": false, "runnable_level": "project_runnable"}
browser-use/browser-use:tests/ci/browser/test_tabs.py:TestMultiTabOperations.test_create_and_switch_three_tabs
# Context: import asyncio import time import pytest from browser_use.agent.service import Agent from tests.ci.conftest import create_mock_llm def http_server(): ... def base_url(http_server): ... async def browser_session(): ... class TestMultiTabOperations: async def test_close_tab_with_vision(self, browser_session, base_url): ... async def test_background_tab_open_no_timeout(self, browser_session, base_url): ... async def test_rapid_tab_operations_no_timeout(self, browser_session, base_url): ... async def test_multiple_tab_switches_and_close(self, browser_session, base_url): ... # Task: Write a Python test method `test_create_and_switch_three_tabs` in test class `TestMultiTabOperations` to test that agent can create 3 tabs, switch between them, and call done(). Module under test: browser_use.agent.service, browser_use.browser, browser_use.browser.profile
async def test_create_and_switch_three_tabs(self, browser_session, base_url): """Test that agent can create 3 tabs, switch between them, and call done(). This test verifies that browser state is retrieved between each step. """ start_time = time.time() actions = [ # Action 1: Navigate to home page f""" {{ "thinking": "I'll start by navigating to the home page", "evaluation_previous_goal": "Starting task", "memory": "Navigating to home page", "next_goal": "Navigate to home page", "action": [ {{ "navigate": {{ "url": "{base_url}/home", "new_tab": false }} }} ] }} """, # Action 2: Open page1 in new tab f""" {{ "thinking": "Now I'll open page 1 in a new tab", "evaluation_previous_goal": "Home page loaded", "memory": "Opening page 1 in new tab", "next_goal": "Open page 1 in new tab", "action": [ {{ "navigate": {{ "url": "{base_url}/page1", "new_tab": true }} }} ] }} """, # Action 3: Open page2 in new tab f""" {{ "thinking": "Now I'll open page 2 in a new tab", "evaluation_previous_goal": "Page 1 opened in new tab", "memory": "Opening page 2 in new tab", "next_goal": "Open page 2 in new tab", "action": [ {{ "navigate": {{ "url": "{base_url}/page2", "new_tab": true }} }} ] }} """, # Action 4: Switch to first tab """ { "thinking": "Now I'll switch back to the first tab", "evaluation_previous_goal": "Page 2 opened in new tab", "memory": "Switching to first tab", "next_goal": "Switch to first tab", "action": [ { "switch": { "tab_id": "0000" } } ] } """, # Action 5: Done """ { "thinking": "I've successfully created 3 tabs and switched between them", "evaluation_previous_goal": "Switched to first tab", "memory": "All tabs created and switched", "next_goal": "Complete task", "action": [ { "done": { "text": "Successfully created 3 tabs and switched between them", "success": true } } ] } """, ] mock_llm = create_mock_llm(actions=actions) agent = Agent( task=f'Navigate to {base_url}/home, then open {base_url}/page1 and {base_url}/page2 in new tabs, then switch back to the first 
tab', llm=mock_llm, browser_session=browser_session, ) # Run with timeout - should complete within 2 minutes try: history = await asyncio.wait_for(agent.run(max_steps=5), timeout=120) elapsed = time.time() - start_time print(f'\n⏱️ Test completed in {elapsed:.2f} seconds') print(f'📊 Completed {len(history)} steps') # Verify each step has browser state for i, step in enumerate(history.history): assert step.state is not None, f'Step {i} should have browser state' assert step.state.url is not None, f'Step {i} should have URL in browser state' print(f' Step {i + 1}: URL={step.state.url}, tabs={len(step.state.tabs) if step.state.tabs else 0}') assert len(history) >= 4, 'Agent should have completed at least 4 steps' # Verify we have 3 tabs open tabs = await browser_session.get_tabs() assert len(tabs) >= 3, f'Should have at least 3 tabs open, got {len(tabs)}' # Verify agent completed successfully final_result = history.final_result() assert final_result is not None, 'Agent should return a final result' assert 'Successfully' in final_result, 'Agent should report success' # Note: Test is fast (< 1s) because mock LLM returns instantly and pages are simple, # but browser state IS being retrieved correctly between steps as verified above except TimeoutError: pytest.fail('Test timed out after 2 minutes - agent hung during tab operations')
test
0
{"function_name": "test_create_and_switch_three_tabs", "class_name": "TestMultiTabOperations", "qualname": "TestMultiTabOperations.test_create_and_switch_three_tabs", "file_path": "tests/ci/browser/test_tabs.py", "repo_id": "browser-use/browser-use", "loc": 131, "tested_modules": ["browser_use.agent.service", "browser_use.browser", "browser_use.browser.profile", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:tests/quantization/metal/test_metal.py:ReplaceWithMetalLinearTest.test_all_linears_replaced
# Context: from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, MetalConfig, OPTForCausalLM import torch.nn as nn from transformers.integrations.metal_quantization import MetalLinear from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear def _patch_mps_available(available: bool): ... def _patch_no_mps(): ... def _patch_has_mps(): ... class MetalConfigTest(unittest.TestCase): ... class MetalQuantizerEnvironmentTest(unittest.TestCase): ... class AffineQuantizeDequantizeTest(unittest.TestCase): ... class MetalLinearTest(unittest.TestCase): ... class MetalConversionOpsTest(unittest.TestCase): ... class MetalWeightConversionsTest(unittest.TestCase): ... class MetalModelConversionTest(unittest.TestCase): ... class MetalSlowIntegrationTest(unittest.TestCase): ... class ReplaceWithMetalLinearTest(unittest.TestCase): def _make_small_model(self): ... def test_modules_to_not_convert(self): ... def test_dequantize_skips_replacement(self): ... def test_prequantized_dtype_is_uint32(self): ... def test_quantize_on_the_fly_dtype_is_not_uint32(self): ... # Task: Write a Python test method `test_all_linears_replaced` in test class `ReplaceWithMetalLinearTest` to verify the behavior of `all_linears_replaced`. Module under test: contextlib, transformers, transformers.quantizers.quantizer_metal
def test_all_linears_replaced(self): from transformers.integrations.metal_quantization import MetalLinear, replace_with_metal_linear model = self._make_small_model() nb_linears = sum(1 for m in model.modules() if isinstance(m, nn.Linear)) self.assertGreater(nb_linears, 0) config = MetalConfig(bits=4, group_size=64) replace_with_metal_linear(model, quantization_config=config, pre_quantized=True) nb_metal = sum(1 for m in model.modules() if isinstance(m, MetalLinear)) self.assertEqual(nb_linears, nb_metal)
test
0
{"function_name": "test_all_linears_replaced", "class_name": "ReplaceWithMetalLinearTest", "qualname": "ReplaceWithMetalLinearTest.test_all_linears_replaced", "file_path": "tests/quantization/metal/test_metal.py", "repo_id": "huggingface/transformers", "loc": 12, "tested_modules": ["contextlib", "transformers", "transformers.quantizers.quantizer_metal", "transformers.testing_utils", "transformers.utils"], "has_docstring": false, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/uploaders/anthropic.py:AnthropicFileUploader.__init__
# Context: import os from typing import Any class AnthropicFileUploader(FileUploader): def provider_name(self) -> str: ... def _get_client(self) -> Any: ... def _get_async_client(self) -> Any: ... def upload(self, file: FileInput, purpose: str | None) -> UploadResult: ... def delete(self, file_id: str) -> bool: ... def get_file_info(self, file_id: str) -> dict[str, Any] | None: ... def list_files(self) -> list[dict[str, Any]]: ... async def aupload(self, file: FileInput, purpose: str | None) -> UploadResult: ... async def adelete(self, file_id: str) -> bool: ... # Task: Write a Python method `__init__` for the class `AnthropicFileUploader` to initialize the Anthropic uploader. Parameters: api_key: str | None, client: Any, async_client: Any Returns: None
def __init__( self, api_key: str | None = None, client: Any = None, async_client: Any = None, ) -> None: """Initialize the Anthropic uploader. Args: api_key: Optional Anthropic API key. If not provided, uses ANTHROPIC_API_KEY environment variable. client: Optional pre-instantiated Anthropic client. async_client: Optional pre-instantiated async Anthropic client. """ self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY") self._client: Any = client self._async_client: Any = async_client
function_simple
0
{"cognitive_complexity": 1, "loc": 17, "code_loc": 3, "docstring_loc": 8, "function_name": "__init__", "class_name": "AnthropicFileUploader", "qualname": "AnthropicFileUploader.__init__", "file_path": "lib/crewai-files/src/crewai_files/uploaders/anthropic.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "class_runnable"}
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py:sync_example
# Context: from agents import Agent, Runner async def async_example(): ... async def streaming_example(): ... # Task: Write a Python function `sync_example` to synchronous execution example.
def sync_example(): """Synchronous execution example""" result = Runner.run_sync(root_agent, "Hello, how does sync execution work?") return result.final_output
function_simple
0
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "sync_example", "class_name": null, "qualname": "sync_example", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/1_starter_agent/1_personal_assistant_agent/agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/databricks/src/airflow/providers/databricks/utils/mixins.py:DatabricksSQLStatementsMixin._handle_deferrable_execution
# Context: import time from airflow.providers.common.compat.sdk import AirflowException from airflow.providers.databricks.hooks.databricks import DatabricksHook, SQLStatementState from airflow.providers.databricks.triggers.databricks import DatabricksSQLStatementExecutionTrigger class GetHookHasFields(Protocol): ... class HandleExecutionHasFields(Protocol): ... class HandleDeferrableExecutionHasFields(Protocol): ... class ExecuteCompleteHasFields(Protocol): ... class OnKillHasFields(Protocol): ... class DatabricksSQLStatementsMixin: def _handle_execution(self: HandleExecutionHasFields) -> None: ... def execute_complete(self: ExecuteCompleteHasFields, context: Context, event: dict): ... def on_kill(self: OnKillHasFields) -> None: ... # Task: Write a Python method `_handle_deferrable_execution` for the class `DatabricksSQLStatementsMixin` to execute a SQL statement in deferrable mode. Parameters: defer_method_name: str Returns: None
def _handle_deferrable_execution( self: HandleDeferrableExecutionHasFields, defer_method_name: str = "execute_complete" ) -> None: """Execute a SQL statement in deferrable mode.""" statement_state: SQLStatementState = self._hook.get_sql_statement_state(self.statement_id) end_time: float = time.time() + self.timeout if not statement_state.is_terminal: # If the query is still running and there is no statement_id, this is somewhat of a "zombie" # query, and should throw an exception if not self.statement_id: raise AirflowException("Failed to retrieve statement_id after submitting SQL statement.") self.defer( trigger=DatabricksSQLStatementExecutionTrigger( statement_id=self.statement_id, databricks_conn_id=self.databricks_conn_id, end_time=end_time, polling_period_seconds=self.polling_period_seconds, retry_limit=self.databricks_retry_limit, retry_delay=self.databricks_retry_delay, retry_args=self.databricks_retry_args, ), method_name=defer_method_name, ) else: if statement_state.is_successful: self.log.info("%s completed successfully.", self.task_id) else: error_message = ( f"{self.task_id} failed with terminal state: {statement_state.state} " f"and with the error code {statement_state.error_code} " f"and error message {statement_state.error_message}" ) raise AirflowException(error_message)
function_complex
1
{"cognitive_complexity": 7, "loc": 36, "code_loc": 27, "docstring_loc": 1, "function_name": "_handle_deferrable_execution", "class_name": "DatabricksSQLStatementsMixin", "qualname": "DatabricksSQLStatementsMixin._handle_deferrable_execution", "file_path": "providers/databricks/src/airflow/providers/databricks/utils/mixins.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/locust/diagnose_remote.py:module_doc
Write a module-level docstring for the Python module `diagnose_remote` which contains function `test_connectivity`, function `test_flow_endpoint`, function `run_load_simulation`, function `main`.
Diagnostic tool for remote Langflow instances. Helps debug connection issues and performance problems.
documentation
1
{"doc_type": "module", "module_name": "diagnose_remote", "file_path": "src/backend/tests/locust/diagnose_remote.py", "repo_id": "langflow-ai/langflow", "char_length": 103}
zhayujie/chatgpt-on-wechat:models/doubao/doubao_bot.py:DoubaoBot._convert_messages_to_openai_format
# Context: import json class DoubaoBot(Bot): def __init__(self): super().__init__() self.sessions = SessionManager(DoubaoSession, model=conf().get("model") or "doubao-seed-2-0-pro-260215") model = conf().get("model") or "doubao-seed-2-0-pro-260215" self.args = { "model": model, "temperature": conf().get("temperature", 0.8), "top_p": conf().get("top_p", 1.0), } self.api_key = conf().get("ark_api_key") self.base_url = conf().get("ark_base_url", "https://ark.cn-beijing.volces.com/api/v3") # Ensure base_url does not end with /chat/completions if self.base_url.endswith("/chat/completions"): self.base_url = self.base_url.rsplit("/chat/completions", 1)[0] if self.base_url.endswith("/"): self.base_url = self.base_url.rstrip("/") def reply(self, query, context): ... def reply_text(self, session: DoubaoSession, args, retry_count: int) -> dict: ... def call_with_tools(self, messages, tools, stream: bool, **kwargs): ... def _handle_stream_response(self, request_body: dict): ... def _handle_sync_response(self, request_body: dict): ... def _convert_tools_to_openai_format(self, tools): ... # Task: Write a Python method `_convert_messages_to_openai_format` for the class `DoubaoBot` to convert messages from Claude format to OpenAI format. Parameters: messages
def _convert_messages_to_openai_format(self, messages): """ Convert messages from Claude format to OpenAI format. Claude format uses content blocks: tool_use / tool_result / text OpenAI format uses tool_calls in assistant, role=tool for results """ if not messages: return [] converted = [] for msg in messages: role = msg.get("role") content = msg.get("content") # Already a simple string - pass through if isinstance(content, str): converted.append(msg) continue if not isinstance(content, list): converted.append(msg) continue if role == "user": text_parts = [] tool_results = [] for block in content: if not isinstance(block, dict): continue if block.get("type") == "text": text_parts.append(block.get("text", "")) elif block.get("type") == "tool_result": tool_call_id = block.get("tool_use_id") or "" result_content = block.get("content", "") if not isinstance(result_content, str): result_content = json.dumps(result_content, ensure_ascii=False) tool_results.append({ "role": "tool", "tool_call_id": tool_call_id, "content": result_content }) # Tool results first (must come right after assistant with tool_calls) for tr in tool_results: converted.append(tr) if text_parts: converted.append({"role": "user", "content": "\n".join(text_parts)}) elif role == "assistant": openai_msg = {"role": "assistant"} text_parts = [] tool_calls = [] for block in content: if not isinstance(block, dict): continue if block.get("type") == "text": text_parts.append(block.get("text", "")) elif block.get("type") == "tool_use": tool_calls.append({ "id": block.get("id"), "type": "function", "function": { "name": block.get("name"), "arguments": json.dumps(block.get("input", {})) } }) if text_parts: openai_msg["content"] = "\n".join(text_parts) elif not tool_calls: openai_msg["content"] = "" if tool_calls: openai_msg["tool_calls"] = tool_calls if not text_parts: openai_msg["content"] = None converted.append(openai_msg) else: converted.append(msg) return converted
function_complex
1
{"cognitive_complexity": 57, "loc": 87, "code_loc": 64, "docstring_loc": 6, "function_name": "_convert_messages_to_openai_format", "class_name": "DoubaoBot", "qualname": "DoubaoBot._convert_messages_to_openai_format", "file_path": "models/doubao/doubao_bot.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "slib_runnable"}
ray-project/ray:python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py:CudaIpcTransportMetadata:class_doc
Write a class-level docstring for `CudaIpcTransportMetadata` (inherits from TensorTransportMetadata) which has methods: various methods.
Metadata for tensors stored in the GPU object store for CUDA IPC transport.
documentation
0
{"doc_type": "class", "class_name": "CudaIpcTransportMetadata", "file_path": "python/ray/experimental/gpu_object_manager/cuda_ipc_transport.py", "repo_id": "ray-project/ray", "char_length": 75, "methods": []}
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_docx_loader.py:TestDOCXLoader.test_load_docx_parsing_error
# Context: import tempfile from unittest.mock import Mock, patch from crewai_tools.rag.loaders.docx_loader import DOCXLoader from crewai_tools.rag.source_content import SourceContent import pytest class TestDOCXLoader: def test_load_docx_from_file(self, mock_docx_class): ... def test_load_docx_with_tables(self, mock_docx_class): ... def test_load_docx_from_url(self, mock_unlink, mock_tempfile, mock_docx_class, mock_get): ... def test_load_docx_from_url_with_custom_headers(self, mock_docx_class, mock_get): ... def test_load_docx_url_download_error(self, mock_get): ... def test_load_docx_url_http_error(self, mock_get): ... def test_load_docx_invalid_source(self): ... def test_load_docx_empty_document(self, mock_docx_class): ... def test_docx_doc_id_generation(self, mock_docx_class): ... # Task: Write a Python test method `test_load_docx_parsing_error` in test class `TestDOCXLoader` to verify the behavior of `load_docx_parsing_error`. Module under test: crewai_tools.rag.base_loader, crewai_tools.rag.loaders.docx_loader, crewai_tools.rag.source_content
def test_load_docx_parsing_error(self, mock_docx_class): mock_docx_class.side_effect = Exception("Invalid DOCX file") with tempfile.NamedTemporaryFile(suffix=".docx") as f: loader = DOCXLoader() with pytest.raises(ValueError, match="Error loading DOCX file"): loader.load(SourceContent(f.name))
test
0
{"function_name": "test_load_docx_parsing_error", "class_name": "TestDOCXLoader", "qualname": "TestDOCXLoader.test_load_docx_parsing_error", "file_path": "lib/crewai-tools/tests/rag/test_docx_loader.py", "repo_id": "crewAIInc/crewAI", "loc": 7, "tested_modules": ["crewai_tools.rag.base_loader", "crewai_tools.rag.loaders.docx_loader", "crewai_tools.rag.source_content"], "has_docstring": false, "runnable_level": "project_runnable"}
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py:module_doc
Write a module-level docstring for the Python module `relevance_agent` which contains class `RelevanceAgent`.
Relevance Agent — Scores signals by developer relevance (0–100). This agent uses LLM reasoning to evaluate each signal's importance to AI/ML developers. It's a legitimate agent because relevance scoring requires judgment, context understanding, and nuanced assessment that pure heuristics cannot capture. Model Selection: Uses a fast, cost-efficient model (gpt-4.1-mini by default) because relevance scoring is high-volume and doesn't require deep reasoning — it's a classification task, not a synthesis task.
documentation
0
{"doc_type": "module", "module_name": "relevance_agent", "file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/agents/relevance_agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "char_length": 523}
vllm-project/vllm:vllm/distributed/eplb/rebalance_execute.py:RecvMetadata:class_doc
Write a class-level docstring for `RecvMetadata` which has methods: various methods.
Metadata describing remote receives during EPLB rebalancing.
documentation
1
{"doc_type": "class", "class_name": "RecvMetadata", "file_path": "vllm/distributed/eplb/rebalance_execute.py", "repo_id": "vllm-project/vllm", "char_length": 60, "methods": []}
Zie619/n8n-workflows:src/user_management.py:UserManager.get_all_users
# Context: from typing import List, Optional import sqlite3 class User(BaseModel): ... class UserCreate(BaseModel): ... class UserLogin(BaseModel): ... class UserUpdate(BaseModel): ... class Token(BaseModel): ... def get_current_user(credentials: HTTPAuthorizationCredentials) -> User: ... def require_admin(current_user: User) -> User: ... async def register_user(user_data: UserCreate): ... async def login_user(login_data: UserLogin): ... async def get_current_user_info(current_user: User): ... async def get_all_users(admin: User): ... async def get_user(user_id: int, current_user: User): ... async def update_user(user_id: int, update_data: UserUpdate, current_user: User): ... async def delete_user(user_id: int, admin: User): ... async def get_auth_dashboard(): ... class UserManager: def __init__(self, db_path: str = "users.db"): self.db_path = db_path self.init_database() def init_database(self): ... def create_default_admin(self): ... def hash_password(self, password: str) -> str: ... def verify_password(self, password: str, hashed: str) -> bool: ... def create_user(self, user_data: UserCreate) -> User: ... def authenticate_user(self, username: str, password: str) -> Optional[User]: ... def create_access_token(self, user: User) -> str: ... def verify_token(self, token: str) -> Optional[User]: ... def get_user_by_id(self, user_id: int) -> Optional[User]: ... def update_user(self, user_id: int, update_data: UserUpdate) -> Optional[User]: ... def delete_user(self, user_id: int) -> bool: ... # Task: Write a Python method `get_all_users` for the class `UserManager` to get all users. Returns: List[User]
def get_all_users(self) -> List[User]: """Get all users.""" conn = sqlite3.connect(self.db_path) cursor = conn.cursor() cursor.execute(""" SELECT id, username, email, full_name, role, active, created_at FROM users ORDER BY created_at DESC """) users = [] for row in cursor.fetchall(): users.append( User( id=row[0], username=row[1], email=row[2], full_name=row[3], role=row[4], active=bool(row[5]), created_at=row[6], ) ) conn.close() return users
function_simple
0
{"cognitive_complexity": 1, "loc": 26, "code_loc": 21, "docstring_loc": 1, "function_name": "get_all_users", "class_name": "UserManager", "qualname": "UserManager.get_all_users", "file_path": "src/user_management.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:src/transformers/models/sam2_video/modular_sam2_video.py:Sam2VideoInferenceSession.remove_mask_inputs
# Context: class Sam2VideoPromptEncoderConfig(Sam2PromptEncoderConfig): ... class Sam2VideoMaskDecoderConfig(Sam2MaskDecoderConfig): ... class Sam2VideoConfig(PreTrainedConfig): ... class Sam2VideoInferenceCache: ... class Sam2VideoProcessor(Sam2Processor): ... class Sam2VideoLayerNorm(Sam2LayerNorm): ... class Sam2VideoPositionEmbeddingSine(Sam2SinePositionEmbedding): ... class Sam2VideoTwoWayAttentionBlock(Sam2TwoWayAttentionBlock): ... class Sam2VideoFeedForward(Sam2FeedForward): ... class Sam2VideoImageSegmentationOutput(Sam2ImageSegmentationOutput): ... class Sam2VideoSegmentationOutput(ModelOutput): ... class Sam2VideoPreTrainedModel(PreTrainedModel): ... class Sam2VideoVisionRotaryEmbedding(nn.Module): ... def rotate_pairwise(x): ... def apply_rotary_pos_emb_2d(q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, num_k_exclude_rope: int, repeat_freqs_k: bool) -> tuple[torch.Tensor, torch.Tensor]: ... class Sam2VideoRoPEAttention(nn.Module): ... class Sam2VideoMemoryAttentionLayer(nn.Module): ... class Sam2VideoMemoryAttention(nn.Module): ... class Sam2VideoMemoryFuserCXBlock(GradientCheckpointingLayer): ... class Sam2VideoMemoryFuser(nn.Module): ... class Sam2VideoMaskDownSamplerLayer(nn.Module): ... class Sam2VideoMaskDownSampler(nn.Module): ... class Sam2VideoMemoryEncoder(nn.Module): ... class Sam2VideoPositionalEmbedding(Sam2PositionalEmbedding): ... def get_1d_sine_pe(pos_inds, dim, temperature): ... class Sam2VideoModel(Sam2Model): ... 
class Sam2VideoInferenceSession: def __init__( self, video: torch.FloatTensor | None = None, video_height: int | None = None, video_width: int | None = None, inference_device: torch.device | str = "cpu", inference_state_device: torch.device | str = "cpu", video_storage_device: torch.device | str = "cpu", dtype: torch.dtype | str = "float32", max_vision_features_cache_size: int = 1, ): # store as a dictionary to avoid double memory allocation with torch.cat when adding new frames self.processed_frames = ( dict(enumerate(video.to(video_storage_device, dtype=dtype))) if video is not None else None ) self.video_height = video_height self.video_width = video_width self.inference_device = inference_device self.inference_state_device = inference_state_device self.video_storage_device = video_storage_device self.dtype = dtype self.max_vision_features_cache_size = max_vision_features_cache_size # Cache for computed features self.cache = Sam2VideoInferenceCache( inference_device=self.inference_device, inference_state_device=self.inference_state_device, max_vision_features_cache_size=self.max_vision_features_cache_size, ) # Persistent object tracking state self._obj_id_to_idx = OrderedDict() self._obj_idx_to_id = OrderedDict() self.obj_ids = [] # Persistent user inputs self.point_inputs_per_obj = {} self.mask_inputs_per_obj = {} # Persistent model outputs/history self.output_dict_per_obj = {} self.frames_tracked_per_obj = {} # Session state flags self.obj_with_new_inputs = [] def num_frames(self) -> int | None: ... def obj_id_to_idx(self, obj_id: int) -> int: ... def obj_idx_to_id(self, obj_idx: int) -> int: ... def get_obj_num(self) -> int: ... def add_point_inputs(self, obj_idx: int, frame_idx: int, inputs: dict): ... def remove_point_inputs(self, obj_idx: int, frame_idx: int): ... def add_mask_inputs(self, obj_idx: int, frame_idx: int, inputs: torch.Tensor): ... 
def store_output(self, obj_idx: int, frame_idx: int, output_key: str | None, output_value: torch.Tensor | dict | None, is_conditioning_frame: bool): ... def get_output(self, obj_idx: int, frame_idx: int, output_key: str, is_conditioning_frame: bool): ... def add_new_frame(self, pixel_values: torch.Tensor, frame_idx: int | None) -> int: ... def get_frame(self, frame_idx: int) -> torch.Tensor: ... def reset_tracking_data(self): ... def reset_inference_session(self): ... # Task: Write a Python method `remove_mask_inputs` for the class `Sam2VideoInferenceSession` to remove mask inputs. Parameters: obj_idx: int, frame_idx: int
def remove_mask_inputs(self, obj_idx: int, frame_idx: int): """Remove mask inputs.""" self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None)
function_simple
0
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "remove_mask_inputs", "class_name": "Sam2VideoInferenceSession", "qualname": "Sam2VideoInferenceSession.remove_mask_inputs", "file_path": "src/transformers/models/sam2_video/modular_sam2_video.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai/tests/llms/google/test_google.py:test_gemini_completion_call_arguments
# Context: from unittest.mock import patch, MagicMock from crewai.llm import LLM from crewai.crew import Crew from crewai.agent import Agent from crewai.task import Task def mock_google_api_key(): ... def test_gemini_completion_is_used_when_google_provider(): ... def test_gemini_completion_is_used_when_gemini_provider(): ... def test_gemini_completion_module_is_imported(): ... def test_native_gemini_raises_error_when_initialization_fails(): ... def test_gemini_completion_initialization_parameters(): ... def test_gemini_specific_parameters(): ... def test_gemini_completion_call(): ... def test_gemini_completion_called_during_crew_execution(): ... def test_multiple_gemini_calls_in_crew(): ... def test_gemini_completion_with_tools(): ... def test_gemini_raises_error_when_model_not_supported(): ... def test_gemini_vertex_ai_setup(): ... def test_gemini_api_key_configuration(): ... def test_gemini_model_capabilities(): ... def test_gemini_generation_config(): ... def test_gemini_model_detection(): ... def test_gemini_supports_stop_words(): ... def test_gemini_context_window_size(): ... def test_gemini_message_formatting(): ... def test_gemini_streaming_parameter(): ... def test_gemini_tool_conversion(): ... def test_gemini_environment_variable_api_key(): ... def test_gemini_token_usage_tracking(): ... def test_gemini_tool_returning_float(): ... def test_gemini_stop_sequences_sync(): ... def test_gemini_stop_sequences_sent_to_api(): ... def test_google_streaming_returns_usage_metrics(): ... def test_google_express_mode_works() -> None: ... def test_gemini_2_0_model_detection(): ... def test_add_property_ordering_to_schema(): ... def test_gemini_2_0_response_model_with_property_ordering(): ... def test_gemini_1_5_response_model_uses_response_schema(): ... def test_gemini_agent_kickoff_structured_output_without_tools(): ... def test_gemini_agent_kickoff_structured_output_with_tools(): ... def test_gemini_crew_structured_output_with_tools(): ... 
def test_gemini_stop_words_not_applied_to_structured_output(): ... def test_gemini_stop_words_still_applied_to_regular_responses(): ... def test_gemini_structured_output_preserves_json_with_stop_word_patterns(): ... def test_gemini_cached_prompt_tokens(): ... def test_gemini_cached_prompt_tokens_with_tools(): ... # Task: Write a Python test function `test_gemini_completion_call_arguments` to test that GeminiCompletion.call is invoked with correct arguments. Module under test: crewai.llm, crewai.crew, crewai.agent
def test_gemini_completion_call_arguments(): """ Test that GeminiCompletion.call is invoked with correct arguments """ # Create LLM instance first gemini_llm = LLM(model="google/gemini-2.0-flash-001") # Mock the instance method with patch.object(gemini_llm, 'call') as mock_call: mock_call.return_value = "Task completed successfully." agent = Agent( role="Test Agent", goal="Complete a simple task", backstory="You are a test agent.", llm=gemini_llm # Use same instance ) task = Task( description="Say hello world", expected_output="Hello world", agent=agent, ) crew = Crew(agents=[agent], tasks=[task]) crew.kickoff() # Verify call was made assert mock_call.called # Check the arguments passed to the call method call_args = mock_call.call_args assert call_args is not None # The first argument should be the messages messages = call_args[0][0] # First positional argument assert isinstance(messages, (str, list)) # Verify that the task description appears in the messages if isinstance(messages, str): assert "hello world" in messages.lower() elif isinstance(messages, list): message_content = str(messages).lower() assert "hello world" in message_content
test
0
{"function_name": "test_gemini_completion_call_arguments", "class_name": null, "qualname": "test_gemini_completion_call_arguments", "file_path": "lib/crewai/tests/llms/google/test_google.py", "repo_id": "crewAIInc/crewAI", "loc": 44, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.gemini.completion"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/layers/quantization/qutlass_utils.py:triton_scale_swizzle
# Context: import torch from vllm.triton_utils import tl, triton def triton_mx_block_rearrange(scale_tensor: torch.Tensor) -> torch.Tensor: ... def to_blocked(input_matrix: torch.Tensor, backend: Literal['torch', 'triton']) -> torch.Tensor: ... # Task: Write a Python function `triton_scale_swizzle` to rearranges tensor data from row-major to block-scaled swizzle format. Parameters: scale_ptr: torch.Tensor, scale_rows: int, scale_cols: int, output_ptr: torch.Tensor, input_row_stride: int, output_block_stride: int, BLOCK_ROWS: tl.constexpr, BLOCK_COLS: tl.constexpr
def triton_scale_swizzle( scale_ptr: torch.Tensor, scale_rows: int, scale_cols: int, output_ptr: torch.Tensor, input_row_stride: int, output_block_stride: int, BLOCK_ROWS: tl.constexpr, BLOCK_COLS: tl.constexpr, ): """ Rearranges tensor data from row-major to block-scaled swizzle format. Args: scale_ptr: Pointer to the input scale tensor scale_rows: Number of rows in the scale tensor scale_cols: Number of columns in the scale tensor output_ptr: Pointer to the output tensor input_row_stride: Stride between rows in the input tensor output_block_stride: Stride between blocks in the output tensor BLOCK_ROWS: Number of rows in a tile (compile-time constant) BLOCK_COLS: Number of columns in a tile (compile-time constant) """ pid_row = tl.program_id(0) pid_col = tl.program_id(1) rows = tl.arange(0, BLOCK_ROWS)[:, None] cols = tl.arange(0, BLOCK_COLS)[None, :] # Calculate starting row and column for this tile start_row = pid_row * BLOCK_ROWS start_col = pid_col * BLOCK_COLS global_rows = start_row + rows global_cols = start_col + cols mask = (global_rows < scale_rows) & (global_cols < scale_cols) input_scales = tl.load( scale_ptr + global_rows * input_row_stride + global_cols, mask=mask, other=0.0, ) r_div_32 = rows // 32 r_mod_32 = rows % 32 # 2) Rearrange to (32, 4, 4) then to final (32, 16) coordinates dest_indices = r_mod_32 * 16 + r_div_32 * 4 + cols # Flatten dest_indices_flat = tl.reshape(dest_indices, (BLOCK_ROWS * BLOCK_COLS)) scales_flat = tl.reshape(input_scales, (BLOCK_ROWS * BLOCK_COLS)) # Calculate block offset using provided output block stride LOCAL_NUMEL = BLOCK_ROWS * BLOCK_COLS block_offset = pid_col * LOCAL_NUMEL + (pid_row * output_block_stride) tl.store( output_ptr + block_offset + dest_indices_flat, scales_flat, )
function_simple
1
{"cognitive_complexity": 0, "loc": 61, "code_loc": 25, "docstring_loc": 13, "function_name": "triton_scale_swizzle", "class_name": null, "qualname": "triton_scale_swizzle", "file_path": "vllm/model_executor/layers/quantization/qutlass_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-core/tests/vector_stores/test_utils.py:test_multimedia_node_serdes
# Context: from typing import Any from llama_index.core.schema import ( BaseNode, Document, MediaResource, Node, NodeRelationship, TextNode, ImageNode, IndexNode, ) from llama_index.core.vector_stores.utils import ( metadata_dict_to_node, node_to_metadata_dict, ) def source_node(): ... def text_node(source_node: Document): ... def image_node(): ... def index_node(): ... def multimedia_node(): ... def test_text_node_serdes(text_node: TextNode, source_node: Document): ... def test_image_node_serdes(image_node: ImageNode): ... def test_index_node_serdes(index_node: IndexNode): ... def test_flat_metadata_serdes(text_node: TextNode): ... # Task: Write a Python test function `test_multimedia_node_serdes` to verify the behavior of `multimedia_node_serdes`. Module under test: typing, llama_index.core.schema, llama_index.core.vector_stores.utils
def test_multimedia_node_serdes(multimedia_node: Node): serialized_node: dict[str, Any] = node_to_metadata_dict(multimedia_node) assert "multimedia_node" in serialized_node["_node_content"] assert serialized_node["_node_type"] == multimedia_node.class_name() deserialized_node: BaseNode = metadata_dict_to_node(serialized_node) assert isinstance(deserialized_node, Node) assert deserialized_node.text_resource is not None assert isinstance(deserialized_node.text_resource, MediaResource) assert deserialized_node.text_resource.text is not None assert deserialized_node.text_resource.text == multimedia_node.text_resource.text
test
1
{"function_name": "test_multimedia_node_serdes", "class_name": null, "qualname": "test_multimedia_node_serdes", "file_path": "llama-index-core/tests/vector_stores/test_utils.py", "repo_id": "run-llama/llama_index", "loc": 11, "tested_modules": ["typing", "llama_index.core.schema", "llama_index.core.vector_stores.utils"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/tests/gpu_objects/test_gpu_objects_custom.py:test_register_and_use_custom_transport
# Context: import sys import numpy import ray from ray.experimental import ( CommunicatorMetadata, TensorTransportManager, TensorTransportMetadata, register_tensor_transport, ) from ray import cloudpickle class ShmTransportMetadata(TensorTransportMetadata): ... class ShmCommunicatorMetadata(CommunicatorMetadata): ... class SharedMemoryTransport(TensorTransportManager): ... # Task: Write a Python test function `test_register_and_use_custom_transport` to verify the behavior of `register_and_use_custom_transport`. Module under test: dataclasses, typing, ray.experimental
def test_register_and_use_custom_transport(ray_start_regular): register_tensor_transport( "shared_memory", ["cpu"], SharedMemoryTransport, numpy.ndarray ) @ray.remote class Actor: @ray.method(tensor_transport="shared_memory") def echo(self, data): return data def non_rdt_echo(self, data): return data def sum(self, data): return data.sum().item() # Classes defined in test files get pickled by ref. So we need to # explicitly pickle the transport class in this module by value. # Note that this doesn't happen if you define the transport class on the # driver, something with pytest convinces cloudpickle to pickle by ref. from ray import cloudpickle cloudpickle.register_pickle_by_value(sys.modules[SharedMemoryTransport.__module__]) actors = [Actor.remote() for _ in range(2)] ref = actors[0].echo.remote(numpy.array([1, 2, 3])) result = actors[1].sum.remote(ref) assert ray.get(result) == 6 # Test that non-rdt methods that return the data type still work. ref = actors[0].non_rdt_echo.remote(numpy.array([1, 2, 3])) result = actors[1].sum.remote(ref) assert ray.get(result) == 6
test
0
{"function_name": "test_register_and_use_custom_transport", "class_name": null, "qualname": "test_register_and_use_custom_transport", "file_path": "python/ray/tests/gpu_objects/test_gpu_objects_custom.py", "repo_id": "ray-project/ray", "loc": 34, "tested_modules": ["dataclasses", "typing", "ray.experimental", "ray"], "has_docstring": false, "runnable_level": "project_runnable"}
browser-use/browser-use:tests/ci/test_markdown_chunking.py:TestChunkMarkdownTable.test_table_header_in_overlap_for_continuation
# Context: from browser_use.dom.markdown_extractor import chunk_markdown_by_structure class TestChunkMarkdownBasic: ... class TestChunkMarkdownHeaders: ... class TestChunkMarkdownHeaderPreferred: ... class TestChunkMarkdownCodeFence: ... class TestChunkMarkdownListItems: ... class TestChunkMarkdownStartFromChar: ... class TestChunkMarkdownOverlap: ... class TestChunkMarkdownMixed: ... class TestHTMLToMarkdownChunking: ... class TestTableNormalizationIntegration: ... class TestChunkMarkdownTable: def test_table_not_split_mid_row(self): ... def test_table_header_carried_across_three_plus_chunks(self): ... # Task: Write a Python test method `test_table_header_in_overlap_for_continuation` in test class `TestChunkMarkdownTable` to when a table spans multiple chunks, the header should be in the overlap prefix. Module under test: markdownify, browser_use.dom.markdown_extractor, browser_use.dom.markdown_extractor
def test_table_header_in_overlap_for_continuation(self): """When a table spans multiple chunks, the header should be in the overlap prefix.""" header = '| Col1 | Col2 |' separator = '| --- | --- |' rows = [f'| r{i} | d{i} |' for i in range(100)] table = '\n'.join([header, separator] + rows) content = table # Force split within the table chunks = chunk_markdown_by_structure(content, max_chunk_chars=300) if len(chunks) > 1: # Second chunk should have table header in overlap assert '| Col1 | Col2 |' in chunks[1].overlap_prefix assert '| --- | --- |' in chunks[1].overlap_prefix
test
0
{"function_name": "test_table_header_in_overlap_for_continuation", "class_name": "TestChunkMarkdownTable", "qualname": "TestChunkMarkdownTable.test_table_header_in_overlap_for_continuation", "file_path": "tests/ci/test_markdown_chunking.py", "repo_id": "browser-use/browser-use", "loc": 15, "tested_modules": ["markdownify", "browser_use.dom.markdown_extractor", "browser_use.dom.markdown_extractor", "browser_use.dom.markdown_extractor"], "has_docstring": true, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/core/tests/unit_tests/test_ssrf_protection.py:TestIPValidation.test_is_localhost_hostnames
# Context: from langchain_core._security._ssrf_protection import ( SSRFProtectedUrl, SSRFProtectedUrlRelaxed, is_cloud_metadata, is_localhost, is_private_ip, is_safe_url, validate_safe_url, ) class TestValidateSafeUrl: ... class TestIsSafeUrl: ... class TestSSRFProtectedUrlType: ... class TestSSRFProtectedUrlRelaxedType: ... class TestRealWorldURLs: ... class TestIPValidation: def test_is_private_ip_ipv4(self) -> None: ... def test_is_private_ip_ipv6(self) -> None: ... def test_is_private_ip_public(self) -> None: ... def test_is_private_ip_invalid(self) -> None: ... def test_is_cloud_metadata_ips(self) -> None: ... def test_is_cloud_metadata_hostnames(self) -> None: ... def test_is_cloud_metadata_safe(self) -> None: ... def test_is_localhost_ips(self) -> None: ... def test_is_localhost_safe(self) -> None: ... # Task: Write a Python test method `test_is_localhost_hostnames` in test class `TestIPValidation` to test localhost hostname detection. Module under test: typing, pydantic, langchain_core._security._ssrf_protection
def test_is_localhost_hostnames(self) -> None: """Test localhost hostname detection.""" assert is_localhost("localhost") is True assert is_localhost("LOCALHOST") is True assert is_localhost("localhost.localdomain") is True
test
1
{"function_name": "test_is_localhost_hostnames", "class_name": "TestIPValidation", "qualname": "TestIPValidation.test_is_localhost_hostnames", "file_path": "libs/core/tests/unit_tests/test_ssrf_protection.py", "repo_id": "langchain-ai/langchain", "loc": 5, "tested_modules": ["typing", "pydantic", "langchain_core._security._ssrf_protection"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/models/gemma3n_mm.py:Gemma3nMultimodalEmbedder:class_doc
Write a class-level docstring for `Gemma3nMultimodalEmbedder` (inherits from nn.Module) which has methods: `__init__`, `forward`.
Embeds token ids or soft tokens for multimodal content into language model space.
documentation
1
{"doc_type": "class", "class_name": "Gemma3nMultimodalEmbedder", "file_path": "vllm/model_executor/models/gemma3n_mm.py", "repo_id": "vllm-project/vllm", "char_length": 81, "methods": ["__init__", "forward"]}
huggingface/transformers:src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py:Ernie4_5_VLMoeProcessor.save_pretrained
# Context: from pathlib import Path from shutil import SameFileError, copyfile class Ernie4_5_VLMoeProcessorKwargs(ProcessingKwargs): ... class Ernie4_5_VLMoeProcessor(ProcessorMixin): def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs): self.image_token = tokenizer.image_token self.image_end_token = tokenizer.image_end_token self.image_start_token = tokenizer.image_start_token self.video_token = tokenizer.video_token self.video_end_token = tokenizer.video_end_token self.video_start_token = tokenizer.video_start_token self.image_token_id = tokenizer.image_token_id self.image_end_token_id = tokenizer.image_end_token_id self.image_start_token_id = tokenizer.image_start_token_id self.video_token_id = tokenizer.video_token_id self.video_end_token_id = tokenizer.video_end_token_id self.video_start_token_id = tokenizer.video_start_token_id super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template) def __call__(self, images: ImageInput | None, text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput], videos: VideoInput | None, **kwargs) -> BatchFeature: ... def model_input_names(self): ... def _get_num_multimodal_tokens(self, image_sizes, video_sizes, **kwargs): ... # Task: Write a Python method `save_pretrained` for the class `Ernie4_5_VLMoeProcessor` to we additionally save a copy of the font to the `save_directory` (if we found a file there). Parameters: save_directory, push_to_hub: bool
def save_pretrained(self, save_directory, push_to_hub: bool = False, **kwargs): """We additionally save a copy of the font to the `save_directory` (if we found a file there)""" os.makedirs(save_directory, exist_ok=True) if os.path.isfile(self.video_processor.font): try: copyfile(self.video_processor.font, Path(save_directory, Path(self.video_processor.font).name)) except SameFileError: # already exists which we allow (copy if needed) pass return super().save_pretrained(save_directory, push_to_hub, **kwargs)
function_simple
0
{"cognitive_complexity": 2, "loc": 11, "code_loc": 7, "docstring_loc": 1, "function_name": "save_pretrained", "class_name": "Ernie4_5_VLMoeProcessor", "qualname": "Ernie4_5_VLMoeProcessor.save_pretrained", "file_path": "src/transformers/models/ernie4_5_vl_moe/processing_ernie4_5_vl_moe.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
mem0ai/mem0:mem0/graphs/neptune/base.py:NeptuneBase._establish_nodes_relations_from_data
# Context: from mem0.graphs.tools import ( DELETE_MEMORY_STRUCT_TOOL_GRAPH, DELETE_MEMORY_TOOL_GRAPH, EXTRACT_ENTITIES_STRUCT_TOOL, EXTRACT_ENTITIES_TOOL, RELATIONS_STRUCT_TOOL, RELATIONS_TOOL, ) from mem0.graphs.utils import EXTRACT_RELATIONS_PROMPT, get_delete_messages class NeptuneBase(ABC): def _create_embedding_model(config): ... def _create_llm(config, llm_provider): ... def _create_vector_store(vector_store_provider, config): ... def add(self, data, filters): ... def _retrieve_nodes_from_data(self, data, filters): ... def _remove_spaces_from_entities(self, entity_list): ... def _get_delete_entities_from_search_output(self, search_output, data, filters): ... def _delete_entities(self, to_be_deleted, user_id): ... def _delete_entities_cypher(self, source, destination, relationship, user_id): ... def _add_entities(self, to_be_added, user_id, entity_type_map): ... def _add_entities_cypher(self, source_node_list, source, source_embedding, source_type, destination_node_list, destination, dest_embedding, destination_type, relationship, user_id): ... def _add_entities_by_source_cypher(self, source_node_list, destination, dest_embedding, destination_type, relationship, user_id): ... def _add_entities_by_destination_cypher(self, source, source_embedding, source_type, destination_node_list, relationship, user_id): ... def _add_relationship_entities_cypher(self, source_node_list, destination_node_list, relationship, user_id): ... def _add_new_entities_cypher(self, source, source_embedding, source_type, destination, dest_embedding, destination_type, relationship, user_id): ... def search(self, query, filters, limit): ... def _search_source_node(self, source_embedding, user_id, threshold): ... def _search_source_node_cypher(self, source_embedding, user_id, threshold): ... def _search_destination_node(self, destination_embedding, user_id, threshold): ... def _search_destination_node_cypher(self, destination_embedding, user_id, threshold): ... 
def delete_all(self, filters): ... def _delete_all_cypher(self, filters): ... def get_all(self, filters, limit): ... def _get_all_cypher(self, filters, limit): ... def _search_graph_db(self, node_list, filters, limit): ... def _search_graph_db_cypher(self, n_embedding, filters, limit): ... def reset(self): ... # Task: Write a Python method `_establish_nodes_relations_from_data` for the class `NeptuneBase` to establish relations among the extracted nodes. Parameters: data, filters, entity_type_map
def _establish_nodes_relations_from_data(self, data, filters, entity_type_map): """ Establish relations among the extracted nodes. """ if self.config.graph_store.custom_prompt: messages = [ { "role": "system", "content": EXTRACT_RELATIONS_PROMPT.replace("USER_ID", filters["user_id"]).replace( "CUSTOM_PROMPT", f"4. {self.config.graph_store.custom_prompt}" ), }, {"role": "user", "content": data}, ] else: messages = [ { "role": "system", "content": EXTRACT_RELATIONS_PROMPT.replace("USER_ID", filters["user_id"]), }, { "role": "user", "content": f"List of entities: {list(entity_type_map.keys())}. \n\nText: {data}", }, ] _tools = [RELATIONS_TOOL] if self.llm_provider in ["azure_openai_structured", "openai_structured"]: _tools = [RELATIONS_STRUCT_TOOL] extracted_entities = self.llm.generate_response( messages=messages, tools=_tools, ) entities = [] if extracted_entities["tool_calls"]: entities = extracted_entities["tool_calls"][0]["arguments"]["entities"] entities = self._remove_spaces_from_entities(entities) logger.debug(f"Extracted entities: {entities}") return entities
function_simple
1
{"cognitive_complexity": 4, "loc": 42, "code_loc": 34, "docstring_loc": 3, "function_name": "_establish_nodes_relations_from_data", "class_name": "NeptuneBase", "qualname": "NeptuneBase._establish_nodes_relations_from_data", "file_path": "mem0/graphs/neptune/base.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/quantization/test_mixed_precision.py:test_mixed_precision_model_accuracies
# Context: import lm_eval import pytest class ModelCase: ... class EvaluationConfig: ... # Task: Write a Python test function `test_mixed_precision_model_accuracies` to verify the behavior of `mixed_precision_model_accuracies`. Module under test: dataclasses, packaging
def test_mixed_precision_model_accuracies(model_name: str, accuracy_numbers: dict): results = lm_eval.simple_evaluate( model="vllm", model_args=EvaluationConfig(model_name).get_model_args(), tasks=list(accuracy_numbers.keys()), batch_size=8, ) rtol = 0.05 for task, expect_accuracy in accuracy_numbers.items(): measured_accuracy = results["results"][task]["acc,none"] assert ( measured_accuracy - rtol < expect_accuracy and measured_accuracy + rtol > expect_accuracy ), f"Expected: {expect_accuracy} | Measured: {measured_accuracy}"
test
1
{"function_name": "test_mixed_precision_model_accuracies", "class_name": null, "qualname": "test_mixed_precision_model_accuracies", "file_path": "tests/quantization/test_mixed_precision.py", "repo_id": "vllm-project/vllm", "loc": 16, "tested_modules": ["dataclasses", "packaging"], "has_docstring": false, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/components/files_and_knowledge/test_file_component_image_processing.py:module_doc
Write a module-level docstring for the Python module `test_file_component_image_processing` which contains class `TestDoclingEmptyTextExtraction`, class `TestDoclingSubprocessErrors`, class `TestStoragePathResolution`, class `TestFileNotFoundHandling`, class `TestDataFrameEmptyHandling`.
Tests for FileComponent image processing with Docling. These tests cover scenarios where: - Images are processed but contain no extractable text (e.g., profile pictures) - Docling returns empty doc_rows - Storage path resolution for uploaded files - Edge cases in error handling
documentation
1
{"doc_type": "module", "module_name": "test_file_component_image_processing", "file_path": "src/backend/tests/unit/components/files_and_knowledge/test_file_component_image_processing.py", "repo_id": "langflow-ai/langflow", "char_length": 279}
langflow-ai/langflow:src/backend/tests/unit/services/telemetry/test_component_inputs_splitting.py:test_split_truncates_oversized_single_field
# Context: from langflow.services.telemetry.schema import MAX_TELEMETRY_URL_SIZE, ComponentInputsPayload def test_chunk_fields_exist(): ... def test_chunk_fields_serialize_with_aliases(): ... def test_chunk_fields_optional_default_none(): ... def test_calculate_url_size_returns_integer(): ... def test_calculate_url_size_accounts_for_encoding(): ... def test_calculate_url_size_includes_all_fields(): ... def test_split_if_needed_returns_list(): ... def test_split_if_needed_no_split_returns_single_payload(): ... def test_split_if_needed_no_split_has_no_chunk_metadata(): ... def test_split_if_needed_splits_large_payload(): ... def test_split_preserves_fixed_fields(): ... def test_split_chunk_metadata_correct(): ... def test_split_preserves_all_data(): ... def test_split_chunks_respect_max_size(): ... def test_split_handles_empty_inputs(): ... def test_split_truncates_oversized_non_string_field(): ... def test_split_truncates_oversized_field_in_multi_field_payload(): ... def test_property_split_never_exceeds_max_size(inputs_dict): ... def test_property_split_preserves_all_data(inputs_dict): ... def test_property_fixed_fields_identical_across_chunks(inputs_dict, run_id, comp_id, comp_name): ... def test_property_chunk_indices_sequential(inputs_dict): ... def test_property_handles_special_characters(inputs_dict): ... # Task: Write a Python test function `test_split_truncates_oversized_single_field` to test that single field exceeding max size gets truncated. Module under test: hypothesis, hypothesis, langflow.services.telemetry.schema
def test_split_truncates_oversized_single_field(): """Test that single field exceeding max size gets truncated.""" # Create input with single field that's too large oversized_value = "x" * 3000 inputs = {"large_field": oversized_value} payload = ComponentInputsPayload( component_run_id="test-run-id", component_id="test-comp-id", component_name="TestComponent", component_inputs=inputs, ) result = payload.split_if_needed(max_url_size=MAX_TELEMETRY_URL_SIZE) # Should return single payload with truncated value assert len(result) == 1 chunk_inputs = result[0].component_inputs assert "large_field" in chunk_inputs assert len(chunk_inputs["large_field"]) < len(oversized_value) assert "...[truncated]" in chunk_inputs["large_field"] # Verify the chunk respects max size chunk_size = result[0]._calculate_url_size() assert chunk_size <= MAX_TELEMETRY_URL_SIZE
test
1
{"function_name": "test_split_truncates_oversized_single_field", "class_name": null, "qualname": "test_split_truncates_oversized_single_field", "file_path": "src/backend/tests/unit/services/telemetry/test_component_inputs_splitting.py", "repo_id": "langflow-ai/langflow", "loc": 25, "tested_modules": ["hypothesis", "hypothesis", "langflow.services.telemetry.schema"], "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:api/db/services/evaluation_service.py:EvaluationService.get_recommendations
# Context: import logging from typing import List, Dict, Any, Optional, Tuple from api.db.db_models import EvaluationDataset, EvaluationCase, EvaluationRun, EvaluationResult class EvaluationService(CommonService): model = EvaluationDataset def create_dataset(cls, name: str, description: str, kb_ids: List[str], tenant_id: str, user_id: str) -> Tuple[bool, str]: ... def get_dataset(cls, dataset_id: str) -> Optional[Dict[str, Any]]: ... def list_datasets(cls, tenant_id: str, user_id: str, page: int, page_size: int) -> Dict[str, Any]: ... def update_dataset(cls, dataset_id: str, **kwargs) -> bool: ... def delete_dataset(cls, dataset_id: str) -> bool: ... def add_test_case(cls, dataset_id: str, question: str, reference_answer: Optional[str], relevant_doc_ids: Optional[List[str]], relevant_chunk_ids: Optional[List[str]], metadata: Optional[Dict[str, Any]]) -> Tuple[bool, str]: ... def get_test_cases(cls, dataset_id: str) -> List[Dict[str, Any]]: ... def delete_test_case(cls, case_id: str) -> bool: ... def import_test_cases(cls, dataset_id: str, cases: List[Dict[str, Any]]) -> Tuple[int, int]: ... def start_evaluation(cls, dataset_id: str, dialog_id: str, user_id: str, name: Optional[str]) -> Tuple[bool, str]: ... def _execute_evaluation(cls, run_id: str, dataset_id: str, dialog: Any): ... def _evaluate_single_case(cls, run_id: str, case: Dict[str, Any], dialog: Any) -> Optional[Dict[str, Any]]: ... def _compute_metrics(cls, question: str, generated_answer: str, reference_answer: Optional[str], retrieved_chunks: List[Dict[str, Any]], relevant_chunk_ids: Optional[List[str]], dialog: Any) -> Dict[str, float]: ... def _compute_retrieval_metrics(cls, retrieved_ids: List[str], relevant_ids: List[str]) -> Dict[str, float]: ... def _compute_summary_metrics(cls, results: List[Dict[str, Any]]) -> Dict[str, Any]: ... def get_run_results(cls, run_id: str) -> Dict[str, Any]: ... 
# Task: Write a Python method `get_recommendations` for the class `EvaluationService` to analyze evaluation results and provide configuration recommendations. Parameters: run_id: str Returns: List[Dict[str, Any]]
def get_recommendations(cls, run_id: str) -> List[Dict[str, Any]]: """ Analyze evaluation results and provide configuration recommendations. Args: run_id: Evaluation run ID Returns: List of recommendation dictionaries """ try: run = EvaluationRun.get_by_id(run_id) if not run or not run.metrics_summary: return [] metrics = run.metrics_summary recommendations = [] # Low precision: retrieving irrelevant chunks if metrics.get("avg_precision", 1.0) < 0.7: recommendations.append({ "issue": "Low Precision", "severity": "high", "description": "System is retrieving many irrelevant chunks", "suggestions": [ "Increase similarity_threshold to filter out less relevant chunks", "Enable reranking to improve chunk ordering", "Reduce top_k to return fewer chunks" ] }) # Low recall: missing relevant chunks if metrics.get("avg_recall", 1.0) < 0.7: recommendations.append({ "issue": "Low Recall", "severity": "high", "description": "System is missing relevant chunks", "suggestions": [ "Increase top_k to retrieve more chunks", "Lower similarity_threshold to be more inclusive", "Enable hybrid search (keyword + semantic)", "Check chunk size - may be too large or too small" ] }) # Slow response time if metrics.get("avg_execution_time", 0) > 5.0: recommendations.append({ "issue": "Slow Response Time", "severity": "medium", "description": f"Average response time is {metrics['avg_execution_time']:.2f}s", "suggestions": [ "Reduce top_k to retrieve fewer chunks", "Optimize embedding model selection", "Consider caching frequently asked questions" ] }) return recommendations except Exception as e: logging.error(f"Error generating recommendations for run {run_id}: {e}") return []
function_complex
1
{"cognitive_complexity": 10, "loc": 62, "code_loc": 44, "docstring_loc": 9, "function_name": "get_recommendations", "class_name": "EvaluationService", "qualname": "EvaluationService.get_recommendations", "file_path": "api/db/services/evaluation_service.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/serve/tests/test_https_proxy.py:TestSSLConfiguration.test_ssl_config_with_ca_certs
# Context: from ray.serve.config import HTTPOptions def ssl_cert_and_key(): ... def https_serve_instance(ssl_cert_and_key): ... class TestHTTPSProxy: ... class TestHTTPSErrorHandling: ... class TestHTTPSIntegration: ... class TestSSLConfiguration: def test_ssl_config_validation_success(self, ssl_cert_and_key): ... def test_ssl_config_validation_missing_key(self): ... def test_ssl_config_validation_missing_cert(self): ... def test_ssl_config_with_password(self, ssl_cert_and_key): ... # Task: Write a Python test method `test_ssl_config_with_ca_certs` in test class `TestSSLConfiguration` to test SSL configuration with CA certificates. Module under test: ray, ray._private.tls_utils, ray.serve.config
def test_ssl_config_with_ca_certs(self, ssl_cert_and_key): """Test SSL configuration with CA certificates.""" key_path = ssl_cert_and_key["key_path"] cert_path = ssl_cert_and_key["cert_path"] # Use cert as CA for testing purposes ca_path = cert_path options = HTTPOptions( ssl_keyfile=key_path, ssl_certfile=cert_path, ssl_ca_certs=ca_path ) assert options.ssl_ca_certs == ca_path
test
0
{"function_name": "test_ssl_config_with_ca_certs", "class_name": "TestSSLConfiguration", "qualname": "TestSSLConfiguration.test_ssl_config_with_ca_certs", "file_path": "python/ray/serve/tests/test_https_proxy.py", "repo_id": "ray-project/ray", "loc": 11, "tested_modules": ["ray", "ray._private.tls_utils", "ray.serve.config", "fastapi", "fastapi"], "has_docstring": true, "runnable_level": "file_runnable"}
apache/airflow:airflow-core/src/airflow/serialization/definitions/notset.py:ArgNotSet:class_doc
Write a class-level docstring for `ArgNotSet` which has methods: various methods.
Sentinel type for annotations, useful when None is not viable.
documentation
1
{"doc_type": "class", "class_name": "ArgNotSet", "file_path": "airflow-core/src/airflow/serialization/definitions/notset.py", "repo_id": "apache/airflow", "char_length": 62, "methods": []}
huggingface/transformers:tests/models/falcon_h1/test_modeling_falcon_h1.py:FalconH1ModelTest.test_batching_equivalence
# Context: class FalconH1ModelTester: ... class FalconH1ModelIntegrationTest(unittest.TestCase): ... class FalconH1ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FalconH1Model, FalconH1ForCausalLM) if is_torch_available() else () model_split_percents = [0.5, 0.7, 0.8] pipeline_model_mapping = ( def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): ... def _check_caches_are_equal(self, cache1, cache2): ... def setUp(self): ... def test_config(self): ... def test_model(self): ... def test_for_causal_lm(self): ... def test_decoder_model_past_with_large_inputs(self): ... def test_attention_outputs(self): ... def test_left_padding_compatibility(self): ... # Task: Write a Python test method `test_batching_equivalence` in test class `FalconH1ModelTest` to verify the behavior of `batching_equivalence`. Module under test: transformers, transformers.testing_utils, generation.test_utils
def test_batching_equivalence(self): # need to disable the tril input mask orig = self.model_tester.use_input_mask self.model_tester.use_input_mask = False super().test_batching_equivalence() self.model_tester.use_input_mask = orig
test
0
{"function_name": "test_batching_equivalence", "class_name": "FalconH1ModelTest", "qualname": "FalconH1ModelTest.test_batching_equivalence", "file_path": "tests/models/falcon_h1/test_modeling_falcon_h1.py", "repo_id": "huggingface/transformers", "loc": 6, "tested_modules": ["transformers", "transformers.testing_utils", "generation.test_utils", "test_configuration_common", "test_modeling_common"], "has_docstring": false, "runnable_level": "class_runnable"}
browser-use/browser-use:tests/ci/test_multi_act_guards.py:TestRuntimeGuard.test_click_link_aborts_remaining
# Context: import asyncio from browser_use.agent.service import Agent from tests.ci.conftest import create_mock_llm def http_server(): ... def base_url(http_server): ... async def browser_session(): ... def tools(): ... class TestTerminatesSequenceMetadata: ... class TestStaticGuard: ... class TestSafeChain: ... class TestRuntimeGuard: # Task: Write a Python test method `test_click_link_aborts_remaining` in test class `TestRuntimeGuard` to click a link that navigates to another page — remaining actions skipped. Module under test: browser_use.agent.service, browser_use.browser, browser_use.browser.profile
async def test_click_link_aborts_remaining(self, browser_session, base_url, tools): """Click a link that navigates to another page — remaining actions skipped.""" await tools.navigate(url=f'{base_url}/page_a', new_tab=False, browser_session=browser_session) await asyncio.sleep(0.5) # Get the selector map to find the link index state = await browser_session.get_browser_state_summary() assert state.dom_state is not None selector_map = state.dom_state.selector_map # Find the link element (a#link_b) link_index = None for idx, element in selector_map.items(): if hasattr(element, 'tag_name') and element.tag_name == 'a': link_index = idx break assert link_index is not None, 'Could not find link element in selector map' ActionModel = tools.registry.create_action_model() actions = [ ActionModel.model_validate({'click': {'index': link_index}}), ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}), ActionModel.model_validate({'scroll': {'down': True, 'pages': 1}}), ] mock_llm = create_mock_llm() agent = Agent(task='test', llm=mock_llm, browser_session=browser_session, tools=tools) results = await agent.multi_act(actions) # Click navigated to page_b — runtime guard should stop at 1 assert len(results) == 1, f'Expected 1 result but got {len(results)}: {results}' # Verify we're on page_b url = await browser_session.get_current_page_url() assert '/page_b' in url
test
0
{"function_name": "test_click_link_aborts_remaining", "class_name": "TestRuntimeGuard", "qualname": "TestRuntimeGuard.test_click_link_aborts_remaining", "file_path": "tests/ci/test_multi_act_guards.py", "repo_id": "browser-use/browser-use", "loc": 37, "tested_modules": ["browser_use.agent.service", "browser_use.browser", "browser_use.browser.profile", "browser_use.tools.service", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/models/voyage.py:VoyageQwen3BidirectionalEmbedModel._fuse_gate_up_proj
# Context: from collections import defaultdict from collections.abc import Iterable import torch class VoyageQwen3BidirectionalEmbedModel(Qwen3Model): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Embedding head (hidden_size -> num_labels, bias=False) self.linear = nn.Linear( self.config.hidden_size, self.config.num_labels, bias=False, ) def forward(self, *args, **kwargs): ... def _fuse_qkv_proj(self, weights: Iterable[WeightItem]) -> Iterable[WeightItem]: ... def load_weights(self, weights: Iterable[WeightItem]) -> set[str]: ... # Task: Write a Python method `_fuse_gate_up_proj` for the class `VoyageQwen3BidirectionalEmbedModel` to fuse gate_proj and up_proj into gate_up_proj. Parameters: weights: Iterable[WeightItem] Returns: Iterable[WeightItem]
def _fuse_gate_up_proj(self, weights: Iterable[WeightItem]) -> Iterable[WeightItem]: """Fuse gate_proj and up_proj into gate_up_proj.""" mlp_buf: dict[int, dict[str, torch.Tensor]] = defaultdict(dict) mlp_suffixes = { "mlp.gate_proj.weight": "gate", "mlp.up_proj.weight": "up", } for name, tensor in weights: m = _LAYER_RE.match(name) if m and m.group(2) in mlp_suffixes: layer_idx = int(m.group(1)) mlp_buf[layer_idx][mlp_suffixes[m.group(2)]] = tensor else: yield name, tensor # Yield fused gate_up weights for layer_idx in sorted(mlp_buf.keys()): parts = mlp_buf[layer_idx] if all(p in parts for p in ("gate", "up")): fused = torch.cat([parts["gate"], parts["up"]], dim=0) yield f"layers.{layer_idx}.mlp.gate_up_proj.weight", fused elif parts: missing = [p for p in ("gate", "up") if p not in parts] raise ValueError(f"Layer {layer_idx} missing MLP parts: {missing}")
function_complex
1
{"cognitive_complexity": 9, "loc": 25, "code_loc": 20, "docstring_loc": 1, "function_name": "_fuse_gate_up_proj", "class_name": "VoyageQwen3BidirectionalEmbedModel", "qualname": "VoyageQwen3BidirectionalEmbedModel._fuse_gate_up_proj", "file_path": "vllm/model_executor/models/voyage.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestBugFixTextStripTabs.test_strip_removes_tabs
# Context: from lfx.components.processing.text_operations import TextOperations class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ... class TestTextOperationsWordCount: ... class TestTextOperationsCaseConversion: ... class TestTextOperationsReplace: ... class TestTextOperationsExtract: ... class TestTextOperationsHead: ... class TestTextOperationsTail: ... class TestTextOperationsStrip: ... class TestTextOperationsJoin: ... class TestTextOperationsClean: ... class TestTextOperationsToDataFrame: ... class TestTextOperationsUpdateBuildConfig: ... class TestTextOperationsUpdateOutputs: ... class TestTextOperationsOutputMethods: ... class TestBugFixWordCountEmptyText: ... class TestBugFixTextJoinEmptyFirst: ... class TestBugFixDataFrameHeaderValidation: ... class TestBugFixInputValidation: ... class TestBugFixTextStripTabs: def test_strip_removes_mixed_whitespace(self): ... # Task: Write a Python test method `test_strip_removes_tabs` in test class `TestBugFixTextStripTabs` to strip should remove tabs when using default whitespace stripping. Module under test: lfx.components.processing.text_operations, lfx.schema.data, lfx.schema.dataframe
def test_strip_removes_tabs(self): """Strip should remove tabs when using default whitespace stripping.""" component = TextOperations() component.strip_mode = "both" component.strip_characters = "" result = component._text_strip("\t\thello world\t\t") assert result == "hello world"
test
1
{"function_name": "test_strip_removes_tabs", "class_name": "TestBugFixTextStripTabs", "qualname": "TestBugFixTextStripTabs.test_strip_removes_tabs", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 9, "tested_modules": ["lfx.components.processing.text_operations", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message", "tests.base"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:release/nightly_tests/dataset/training_ingest_benchmark.py:run_benchmark
# Context: import itertools from typing import Dict, List, Optional import ray class BenchmarkConfig: ... class BaseDataLoader(ABC): ... class S3ParquetDataLoader(BaseDataLoader): ... class S3UrlImageDataLoader(BaseDataLoader): ... class S3ReadImagesDataLoader(BaseDataLoader): ... def create_data_loader(data_loader: str, split: str) -> BaseDataLoader: ... def benchmark_iteration(dataset: ray.data.Dataset, batch_size: int, prefetch_batches: int, num_batches: int, simulated_training_time: float, device: str, pin_memory: bool) -> Dict[str, float]: ... def print_summary(results: List[Dict]): ... def main(): ... # Task: Write a Python function `run_benchmark` to run benchmarks with all hyperparameter combinations. Parameters: config: BenchmarkConfig Returns: List[Dict]
def run_benchmark(config: BenchmarkConfig) -> List[Dict]: """Run benchmarks with all hyperparameter combinations. Args: config: Benchmark configuration Returns: List of benchmark results """ config.validate() results = [] # Create data loader for the specified format data_loader = create_data_loader(config.data_loader, config.split) logger.info( f"Using {data_loader.__class__.__name__} with " f"{len(data_loader.label_to_id_map)} classes" ) logger.info(f"Data directory: {data_loader.data_dir}") # Generate all combinations combinations = list( itertools.product( config.transform_types, config.batch_sizes, config.prefetch_batches_list, config.num_image_columns_list, ) ) logger.info(f"Running {len(combinations)} benchmark combinations...") for transform_type, batch_size, prefetch_batches, num_image_columns in combinations: logger.info( f"Benchmarking: transform={transform_type}, " f"batch_size={batch_size}, prefetch_batches={prefetch_batches}, " f"num_image_columns={num_image_columns}" ) # Create dataset using the data loader ds = data_loader.create_dataset( transform_type=transform_type, batch_size=batch_size, num_batches=config.num_batches, num_image_columns=num_image_columns, ) # Run benchmark (request GPU if device is cuda) num_gpus = 1 if config.device == "cuda" else 0 metrics = ray.get( benchmark_iteration.options(num_gpus=num_gpus).remote( dataset=ds, batch_size=batch_size, prefetch_batches=prefetch_batches, num_batches=config.num_batches, simulated_training_time=config.simulated_training_time, device=config.device, pin_memory=config.pin_memory, ) ) # Store results result = { "transform_type": transform_type, "batch_size": batch_size, "prefetch_batches": prefetch_batches, "num_image_columns": num_image_columns, **metrics, } results.append(result) logger.info( f" Results: {metrics['rows_per_second']:.2f} rows/sec, " f"{metrics['batches_per_second']:.2f} batches/sec" ) return results
function_simple
0
{"cognitive_complexity": 3, "loc": 77, "code_loc": 54, "docstring_loc": 8, "function_name": "run_benchmark", "class_name": null, "qualname": "run_benchmark", "file_path": "release/nightly_tests/dataset/training_ingest_benchmark.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py:test_list_indexes
# Context: import pytest from llama_index.tools.moss.base import MossToolSpec, QueryOptions class MockBaseToolSpec: ... def _make_mock_index(name: str, doc_count: int, status: str) -> MagicMock: ... def mock_client(): ... async def test_index_docs(mock_client): ... async def test_query(mock_client): ... async def test_query_passes_options_to_client(mock_client): ... async def test_query_skips_load_when_already_loaded(mock_client): ... async def test_list_indexes_empty(mock_client): ... async def test_list_indexes_formatting(mock_client): ... async def test_delete_index(mock_client): ... async def test_delete_current_index_resets_loaded_state(mock_client): ... def test_query_options_application(): ... def test_initialization_validation(): ... async def test_delete_index_return_message(mock_client): ... # Task: Write a Python test function `test_list_indexes` to verify the behavior of `list_indexes`. Module under test: llama_index.tools.moss.base
async def test_list_indexes(mock_client): spec = MossToolSpec(client=mock_client, index_name="test") output = await spec.list_indexes() mock_client.list_indexes.assert_awaited_once() # Verify all indexes are in output assert "index_a" in output assert "index_b" in output assert "5" in output assert "12" in output assert "ready" in output # Verify formatting assert "Available indexes:" in output
test
1
{"function_name": "test_list_indexes", "class_name": null, "qualname": "test_list_indexes", "file_path": "llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py", "repo_id": "run-llama/llama_index", "loc": 14, "tested_modules": ["llama_index.tools.moss.base"], "has_docstring": false, "runnable_level": "project_runnable"}
infiniflow/ragflow:test/unit_test/utils/test_raptor_utils.py:TestIntegrationScenarios.test_financial_excel_report
# Context: from rag.utils.raptor_utils import ( is_structured_file_type, is_tabular_pdf, should_skip_raptor, get_skip_reason, EXCEL_EXTENSIONS, CSV_EXTENSIONS, STRUCTURED_EXTENSIONS ) class TestIsStructuredFileType: ... class TestIsTabularPDF: ... class TestShouldSkipRaptor: ... class TestGetSkipReason: ... class TestEdgeCases: ... class TestIntegrationScenarios: def test_scientific_csv_data(self): ... def test_legal_contract_with_tables(self): ... def test_text_heavy_pdf_document(self): ... def test_mixed_dataset_processing(self): ... def test_override_for_special_excel(self): ... # Task: Write a Python test method `test_financial_excel_report` in test class `TestIntegrationScenarios` to test scenario: Financial quarterly Excel report. Module under test: rag.utils.raptor_utils
def test_financial_excel_report(self): """Test scenario: Financial quarterly Excel report""" file_type = ".xlsx" parser_id = "naive" parser_config = {} raptor_config = {"use_raptor": True} # Should skip Raptor assert should_skip_raptor(file_type, parser_id, parser_config, raptor_config) is True reason = get_skip_reason(file_type, parser_id, parser_config) assert "Structured data file" in reason
test
1
{"function_name": "test_financial_excel_report", "class_name": "TestIntegrationScenarios", "qualname": "TestIntegrationScenarios.test_financial_excel_report", "file_path": "test/unit_test/utils/test_raptor_utils.py", "repo_id": "infiniflow/ragflow", "loc": 11, "tested_modules": ["rag.utils.raptor_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tools/pre_commit/generate_attention_backend_docs.py:is_relevant_file
# Context: import fnmatch from pathlib import Path def find_class_in_ast(tree: ast.AST, class_name: str) -> ast.ClassDef | None: ... def find_method(node: ast.ClassDef, method_name: str) -> ast.FunctionDef | None: ... def method_returns_true(method: ast.FunctionDef | None) -> bool: ... def check_method_overrides(node: ast.ClassDef, method_name: str) -> bool: ... def _find_bool_class_var(class_node: ast.ClassDef, var_name: str) -> bool | None: ... def _parse_list_class_var(node: ast.ClassDef, var_name: str) -> list[str] | None: ... def _parse_return_list(method: ast.FunctionDef | None, handle_multiple_of: bool) -> list[str]: ... def _get_parent_class_name(class_node: ast.ClassDef) -> str | None: ... def _resolve_import_to_file(tree: ast.AST, class_name: str, source_file: Path | None) -> Path | None: ... def _find_cc_in_function(tree: ast.AST, func_name: str) -> str | None: ... def parse_registry() -> dict[str, str]: ... def _extract_enum_values(node: ast.ClassDef) -> dict[str, str]: ... def get_file_from_class_path(class_path: str) -> Path | None: ... def parse_supported_dtypes(node: ast.ClassDef) -> str: ... def parse_kv_cache_dtypes(node: ast.ClassDef) -> str: ... def parse_block_sizes(node: ast.ClassDef) -> str: ... def parse_head_sizes(node: ast.ClassDef) -> str: ... def parse_compute_capability(node: ast.ClassDef) -> str: ... def parse_attention_types(node: ast.ClassDef) -> str: ... def parse_impl_bool_attr(tree: ast.AST, class_name: str, attr_name: str, default: bool, source_file: Path | None, _visited: set[str] | None) -> bool: ... def analyze_backend(backend_name: str, class_path: str) -> dict[str, Any] | None: ... def _parse_fa4_supported_caps() -> str | None: ... def parse_flash_attn_features() -> dict[str, dict[str, Any]]: ... def parse_flashinfer_trtllm_features() -> dict[str, dict[str, Any]]: ... def parse_mla_prefill_backends() -> list[dict[str, Any]]: ... 
def _expand_flash_attn_variants(all_backends: list[dict[str, Any]], fa_features: dict[str, dict[str, Any]]) -> list[dict[str, Any]]: ... def _expand_flashinfer_variants(all_backends: list[dict[str, Any]], fi_features: dict[str, dict[str, Any]]) -> list[dict[str, Any]]: ... def parse_cuda_priority_lists() -> dict[str, list[str]]: ... def _get_backends_from_return(stmts: list) -> list[str]: ... def _is_sm100_check(test: ast.expr) -> bool: ... def _extract_priorities(body: list, priorities: dict[str, list[str]], prefix: str): ... def add_literal_quotes(value: str) -> str: ... def bool_to_emoji(value: bool) -> str: ... def _build_columns(is_mla: bool, has_versions: bool) -> list[TableColumn]: ... def _sort_key(x: dict[str, Any]) -> tuple[str, int]: ... def _render_table(columns: list[TableColumn], backends: list[dict[str, Any]]) -> list[str]: ... def generate_markdown_table(backends: list[dict[str, Any]], title: str, is_mla_table: bool) -> str: ... def generate_usage_section() -> str: ... def _priority_table(title: str, backends: list[str]) -> list[str]: ... def generate_priority_section(priorities: dict[str, list[str]]) -> str: ... def generate_legend() -> str: ... def generate_mla_section(prefill_backends: list[dict[str, Any]], decode_backends: list[dict[str, Any]]) -> str: ... def generate_docs() -> str: ... def main(): ... # Task: Write a Python function `is_relevant_file` to check if a file matches any of the relevant patterns. Parameters: filepath: str Returns: bool
def is_relevant_file(filepath: str) -> bool: """Check if a file matches any of the relevant patterns.""" path = Path(filepath) if path.is_absolute(): try: path = path.relative_to(REPO_ROOT) except ValueError: return False path_str = str(path) return any(fnmatch.fnmatch(path_str, pattern) for pattern in RELEVANT_PATTERNS)
function_simple
1
{"cognitive_complexity": 2, "loc": 11, "code_loc": 8, "docstring_loc": 1, "function_name": "is_relevant_file", "class_name": null, "qualname": "is_relevant_file", "file_path": "tools/pre_commit/generate_attention_backend_docs.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:tests/ci/browser/test_cdp_headers.py:module_doc
Write a module-level docstring for the Python module `test_cdp_headers` which contains function `test_browser_profile_headers_attribute`, function `test_browser_profile_headers_inherited`.
Test that headers are properly passed to CDPClient for authenticated remote browser connections. This tests the fix for: When using browser-use with remote browser services that require authentication headers, these headers need to be included in the WebSocket handshake.
documentation
0
{"doc_type": "module", "module_name": "test_cdp_headers", "file_path": "tests/ci/browser/test_cdp_headers.py", "repo_id": "browser-use/browser-use", "char_length": 272}
ray-project/ray:python/ray/data/tests/datasource/test_turbopuffer_datasink.py:TestMultiNamespaceWrites.test_drops_namespace_column_before_writing
# Context: from unittest.mock import MagicMock, patch import pyarrow as pa def mock_turbopuffer_module(monkeypatch): ... def sink(): ... def mock_client(): ... def sample_table(): ... def make_sink(**kwargs) -> TurbopufferDatasink: ... class TestConstructorValidation: ... class TestClientInitialization: ... class TestArrowTablePreparation: ... class TestSingleNamespaceBatching: ... class TestTransformToTurbopufferFormat: ... class TestRetryLogic: ... class TestWriteOrchestration: ... class TestStreamingBehavior: ... class TestSerialization: ... class TestMultiNamespaceWrites: def test_routes_rows_to_correct_namespaces(self): ... def test_missing_namespace_column_raises(self): ... def test_null_namespace_values_raise(self): ... def test_skips_empty_blocks_in_multi_namespace(self): ... # Task: Write a Python test method `test_drops_namespace_column_before_writing` in test class `TestMultiNamespaceWrites` to the namespace column is not included in the written data. Module under test: typing, packaging.version, ray.data._internal.datasource.turbopuffer_datasink
def test_drops_namespace_column_before_writing(self): """The namespace column is not included in the written data.""" sink = make_sink(namespace=None, namespace_column="tenant") table = pa.table( { "tenant": ["ns_a"], "id": [1], "vector": [[0.1]], } ) written_batches = [] def capture_batch(ns, batch, namespace_name=None): written_batches.append(batch) mock_client = MagicMock() mock_client.namespace.return_value = MagicMock() with patch.object(sink, "_get_client", return_value=mock_client): with patch.object( sink, "_write_batch_with_retry", side_effect=capture_batch ): sink.write([table], ctx=None) assert len(written_batches) == 1 assert "tenant" not in written_batches[0].column_names assert "id" in written_batches[0].column_names
test
0
{"function_name": "test_drops_namespace_column_before_writing", "class_name": "TestMultiNamespaceWrites", "qualname": "TestMultiNamespaceWrites.test_drops_namespace_column_before_writing", "file_path": "python/ray/data/tests/datasource/test_turbopuffer_datasink.py", "repo_id": "ray-project/ray", "loc": 27, "tested_modules": ["typing", "packaging.version", "ray.data._internal.datasource.turbopuffer_datasink", "ray.data._internal.utils.arrow_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py:TestWorkerSets.test_overwrite_hpa_disable
# Context: from chart_utils.helm_template_generator import render_chart class TestWorkerSets: def test_enable_default_worker_set_default(self): ... def test_enable_default_worker_set(self, enable_default, objects_number): ... def test_create_multiple_worker_sets(self, enable_default, expected): ... def test_overwrite_replicas(self, values): ... def test_overwrite_revision_history_limit(self, values): ... def test_overwrite_command(self, values): ... def test_overwrite_args(self, values): ... def test_disable_livenessprobe(self): ... def test_overwrite_livenessprobe_enabled(self, values): ... def test_overwrite_livenessprobe_values(self, values): ... def test_overwrite_update_strategy(self, values): ... def test_overwrite_strategy(self, values): ... def test_overwrite_pod_management_policy(self, values): ... def test_disable_persistence(self): ... def test_overwrite_persistence_enabled(self, values): ... def test_overwrite_persistence_persistent_volume_claim_retention_policy(self, values): ... def test_overwrite_persistence_size(self, values): ... def test_overwrite_persistence_storage_class_name(self, values): ... def test_enable_persistence_fix_permissions(self): ... def test_overwrite_persistence_fix_permissions(self, values): ... def test_overwrite_persistence_annotations(self, values): ... def test_overwrite_kerberos_init_container_enabled(self): ... def test_overwrite_kerberos_init_container_disable(self, values): ... def test_overwrite_kerberos_init_container_resources(self, values): ... def test_overwrite_kerberos_init_container_security_context(self, workers_values): ... def test_overwrite_kerberos_init_container_lifecycle_hooks(self, workers_values): ... def test_overwrite_container_lifecycle_hooks(self, workers_values): ... def test_enable_default_pod_disruption_budget(self, enable_default, objects_number): ... def test_create_pod_disruption_budget_sets(self, enable_default, expected): ... def test_overwrite_pod_disruption_budget_enabled(self): ... 
def test_overwrite_pod_disruption_budget_disable(self, workers_values): ... def test_overwrite_pod_disruption_budget_config(self, workers_values): ... def test_enable_default_service_account(self, enable_default, objects_number): ... def test_create_service_account_sets(self, enable_default, expected): ... def test_overwrite_service_account_automount_service_account_token_disable(self): ... def test_overwrite_service_account_create_disable(self): ... def test_overwrite_service_account_name(self): ... def test_overwrite_service_account_annotations(self, workers_values): ... def test_enable_default_keda(self, enable_default, objects_number): ... def test_create_keda_sets(self, enable_default, expected): ... def test_overwrite_keda_enabled(self): ... def test_overwrite_keda_disable(self): ... def test_overwrite_keda_pooling_interval(self): ... def test_overwrite_keda_cooldown_period(self): ... def test_overwrite_keda_min_replica_count(self): ... def test_overwrite_keda_max_replica_count(self): ... def test_overwrite_keda_advanced(self, workers_values): ... def test_overwrite_keda_query(self): ... def test_overwrite_keda_use_pgbouncer_enable(self): ... def test_overwrite_keda_use_pgbouncer_disable(self): ... def test_overwrite_queue(self): ... def test_enable_default_hpa(self, enable_default, objects_number): ... def test_create_hpa_sets(self, enable_default, expected): ... def test_overwrite_hpa_enabled(self): ... def test_overwrite_hpa_min_replica_count(self): ... def test_overwrite_hpa_max_replica_count(self): ... def test_overwrite_hpa_metrics(self, workers_values): ... def test_overwrite_hpa_behavior(self, workers_values): ... def test_overwrite_kerberos_sidecar_enabled(self): ... def test_overwrite_kerberos_sidecar_disable(self, values): ... def test_overwrite_kerberos_sidecar_resources(self, values): ... def test_overwrite_kerberos_sidecar_security_context_container(self, values): ... 
def test_overwrite_kerberos_sidecar_container_lifecycle_hooks(self, values): ... def test_overwrite_resources(self, values): ... def test_overwrite_termination_grace_period_seconds(self): ... def test_overwrite_safe_to_evict_enable(self): ... def test_overwrite_safe_to_evict_disable(self): ... def test_overwrite_extra_containers(self, workers_values): ... def test_overwrite_extra_init_containers(self, workers_values): ... def test_overwrite_extra_volumes(self, workers_values): ... def test_overwrite_extra_volume_mounts(self, workers_values): ... def test_overwrite_extra_ports(self, workers_values): ... def test_overwrite_node_selector(self, workers_values): ... def test_overwrite_runtime_class_name(self, workers_values): ... def test_overwrite_priority_class_name(self, workers_values): ... def test_overwrite_affinity(self, workers_values): ... def test_overwrite_tolerations(self, workers_values): ... def test_overwrite_topology_spread_constraints(self, workers_values): ... def test_overwrite_host_aliases(self, workers_values): ... def test_overwrite_annotations(self, workers_values): ... def test_overwrite_pod_annotations(self, workers_values): ... def test_overwrite_labels(self, workers_values): ... def test_overwrite_wait_for_migration_disable(self): ... def test_overwrite_wait_for_migration_enable(self): ... def test_overwrite_wait_for_migration_env(self, workers_values): ... def test_overwrite_wait_for_migration_security_context_container(self, workers_values): ... def test_overwrite_env(self, workers_values): ... def test_overwrite_volume_claim_templates(self, workers_values): ... # Task: Write a Python test method `test_overwrite_hpa_disable` in test class `TestWorkerSets` to verify the behavior of `overwrite_hpa_disable`. Module under test: __future__, chart_utils.helm_template_generator
def test_overwrite_hpa_disable(self): docs = render_chart( values={ "workers": { "hpa": {"enabled": True}, "celery": {"enableDefault": False, "sets": [{"name": "test", "hpa": {"enabled": False}}]}, } }, show_only=["templates/workers/worker-hpa.yaml"], ) assert len(docs) == 0
test
1
{"function_name": "test_overwrite_hpa_disable", "class_name": "TestWorkerSets", "qualname": "TestWorkerSets.test_overwrite_hpa_disable", "file_path": "helm-tests/tests/helm_tests/airflow_core/test_worker_sets.py", "repo_id": "apache/airflow", "loc": 12, "tested_modules": ["__future__", "chart_utils.helm_template_generator"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:benchmark_v2/framework/hardware_metrics.py:get_intel_xpu_stats
# Context: import subprocess def get_device_name_and_memory_total() -> tuple[str, float]: ... class HardwareInfo: ... def get_amd_gpu_stats(device_handle) -> tuple[int, float]: ... def get_nvidia_gpu_stats(device_handle) -> tuple[int, float]: ... class GPUMonitoringStatus(Enum): ... class GPURawMetrics: ... class GPUMonitor: ... # Task: Write a Python function `get_intel_xpu_stats` to returns the utilization and memory used of an Intel XPU. Returns: tuple[int, float]
def get_intel_xpu_stats() -> tuple[int, float]: """Returns the utilization and memory used of an Intel XPU""" # xpu-smi outputs CSV format: Timestamp, DeviceId, GPU Memory Utilization (%), GPU Memory Used (MiB) xpu_smi_output = subprocess.check_output(["xpu-smi", "dump", "-m", "5,18", "-n", "1"]) lines = xpu_smi_output.decode("utf-8").strip().split("\n") # Parse all data lines (skip header) and collect stats from all cards xpu_stats = [] for line in lines[1:]: data_line = line.split(",") if len(data_line) < 4: continue device_id = data_line[1].strip() utilization_str = data_line[2].strip() memory_used_str = data_line[3].strip() if utilization_str != "N/A" and memory_used_str != "N/A": utilization = int(float(utilization_str)) memory_used_mib = float(memory_used_str) xpu_stats.append((device_id, utilization, memory_used_mib)) if not xpu_stats: return 0, 0.0 # Sort by utilization (descending) and pick the highest xpu_stats.sort(key=lambda x: x[1], reverse=True) device_id, utilization, memory_used_mib = xpu_stats[0] memory_used_gb = memory_used_mib / 1024 return utilization, memory_used_gb
function_complex
0
{"cognitive_complexity": 7, "loc": 28, "code_loc": 20, "docstring_loc": 1, "function_name": "get_intel_xpu_stats", "class_name": null, "qualname": "get_intel_xpu_stats", "file_path": "benchmark_v2/framework/hardware_metrics.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"}
jax-ml/jax:jax/experimental/mosaic/gpu/mma.py:mma
# Context: from jax.experimental.mosaic.gpu import fragmented_array as fa from jaxlib.mlir import ir class MMALayouts: ... def _ptx_dtype_str(dtype: ir.Type, is_signed: bool | None) -> str: ... def _mma_single_tile(acc: fa.FragmentedArray, a: fa.FragmentedArray, b: fa.FragmentedArray) -> fa.FragmentedArray: ... # Task: Write a Python function `mma` to computes `acc + a @ b.T` using synchronouse MMA instructions. Parameters: acc: fa.FragmentedArray, a: fa.FragmentedArray, b: fa.FragmentedArray Returns: fa.FragmentedArray
def mma( acc: fa.FragmentedArray, a: fa.FragmentedArray, b: fa.FragmentedArray, ) -> fa.FragmentedArray: """Computes `acc + a @ b.T` using synchronouse MMA instructions. All operands must have `TiledLayout`s. The layouts must be generated by the `MMALayouts` class, which ensures that the tiles are mapped to the warps correctly. Args: acc: A `FragmentedArray` with a `TiledLayout` generated from `MMALayouts.acc`. a: A `FragmentedArray` with a `TiledLayout` generated from `MMALayouts.lhs`. b: A `FragmentedArray` with a `TiledLayout` generated from `MMALayouts.rhs`. Returns: A new `FragmentedArray` with the result of the computation with the same type as `acc`. """ (m, k) = a.shape (n, k2) = b.shape (m2, n2) = acc.shape if m != m2: raise ValueError(f"M mismatch: {m} != {m2}") if n != n2: raise ValueError(f"N mismatch: {n} != {n2}") if k != k2: raise ValueError(f"K mismatch: {k} != {k2}") # todo(cperivol): A tile shape can have dimensions that are higher # multiples of the mma op size as long as those dimensions are not # sharded across warps. bf16 = ir.BF16Type.get() f16 = ir.F16Type.get() i8 = ir.IntegerType.get_signless(8) i32 = ir.IntegerType.get_signless(32) f8e4m3fn = ir.Float8E4M3FNType.get() f8e5m2 = ir.Float8E5M2Type.get() if (element_type := a.mlir_dtype) != b.mlir_dtype: raise ValueError(f"Dtype mismatch: {a.mlir_dtype} != {b.mlir_dtype}") if element_type not in (bf16, f16, f8e4m3fn, f8e5m2, i8): raise NotImplementedError( "Only bf16, f16, float8_e4m3fn, float8_e5m2 and i8 supported for the" " operands." 
) if element_type == i8: if acc.mlir_dtype != i32: raise NotImplementedError("Only s32 accumulator supported for i8 operands.") if not acc.is_signed: raise ValueError("Only signed accumulator supported for i8 operands.") elif acc.mlir_dtype != ir.F32Type.get(): raise NotImplementedError("Only f32 accumulator supported for floating operands.") layouts = MMALayouts(element_type) if layouts.lhs != a.layout: raise ValueError("Expected MMALayouts.lhs layout for A") if layouts.rhs != b.layout: raise ValueError("Expected MMALayouts.rhs layout for B") if layouts.acc != acc.layout: raise ValueError("Expected MMALayouts.acc layout for acc") assert isinstance(a.layout, fa.TiledLayout) assert isinstance(b.layout, fa.TiledLayout) assert isinstance(acc.layout, fa.TiledLayout) m_tile, k_tile = a.layout.base_tile_shape n_tile, k_tile2 = b.layout.base_tile_shape m_tile2, n_tile2 = acc.layout.base_tile_shape assert k_tile == k_tile2 assert m_tile2 == m_tile assert n_tile2 == n_tile num_m_tiles, num_n_tiles, num_k_tiles = m // m_tile, n // n_tile, k // k_tile # Do not modify the accumualtor itself. acc = acc.copy() s = lambda idx, length: slice(idx * length, (idx + 1) * length) for k_idx in range(num_k_tiles): for m_idx in range(num_m_tiles): for n_idx in range(num_n_tiles): ms = s(m_idx, m_tile) ns = s(n_idx, n_tile) ks = s(k_idx, k_tile) acc[ms, ns] = _mma_single_tile(acc[ms, ns], a[ms, ks], b[ns, ks]) return acc
function_complex
1
{"cognitive_complexity": 20, "loc": 91, "code_loc": 56, "docstring_loc": 17, "function_name": "mma", "class_name": null, "qualname": "mma", "file_path": "jax/experimental/mosaic/gpu/mma.py", "repo_id": "jax-ml/jax", "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py:test_recursively_unpack_filters_valid_inputs
# Context: import pytest from llama_index.core.vector_stores.types import ( FilterCondition, FilterOperator, MetadataFilter, MetadataFilters, ) from llama_index.vector_stores.solr.query_utils import ( recursively_unpack_filters, ) def test_recursively_unpack_filters_invalid_operators(input_operator: FilterOperator, input_value: str, error_match: str) -> None: ... def test_recursively_unpack_filters_invalid_list_value_with_non_list_operator(input_operator: FilterOperator, input_value: Union[list[str], list[int], list[float]]) -> None: ... def test_recursively_unpack_filters_warnings(operator: FilterOperator, value: str, expected_warning: str, caplog) -> None: ... def test_recursively_unpack_filters_no_condition_warning(caplog) -> None: ... def test_any_and_in_list_equivalence() -> None: ... def test_all_list_and_semantics() -> None: ... def test_all_any_in_fallbacks_warnings(caplog) -> None: ... # Task: Write a Python test function `test_recursively_unpack_filters_valid_inputs` to verify the behavior of `recursively_unpack_filters_valid_inputs`. Module under test: typing, llama_index.core.vector_stores.types, llama_index.vector_stores.solr.query_utils
def test_recursively_unpack_filters_valid_inputs( input_filters: MetadataFilters, expected_output: list[str], ) -> None: actual_output = recursively_unpack_filters(input_filters) assert actual_output == expected_output
test
1
{"function_name": "test_recursively_unpack_filters_valid_inputs", "class_name": null, "qualname": "test_recursively_unpack_filters_valid_inputs", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/tests/test_solr_vector_store_query_utils.py", "repo_id": "run-llama/llama_index", "loc": 7, "tested_modules": ["typing", "llama_index.core.vector_stores.types", "llama_index.vector_stores.solr.query_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/streamlit/components/v2/component_registry.py:BidiComponentRegistry.__init__
# Context: import threading from collections.abc import MutableMapping class BidiComponentDefinition: ... class BidiComponentRegistry: def register_components_from_definitions(self, component_definitions: dict[str, dict[str, Any]]) -> None: ... def register(self, definition: BidiComponentDefinition) -> None: ... def get(self, name: str) -> BidiComponentDefinition | None: ... def unregister(self, name: str) -> None: ... def clear(self) -> None: ... def update_component(self, definition: BidiComponentDefinition) -> None: ... # Task: Write a Python method `__init__` for the class `BidiComponentRegistry` to initialize the component registry with an empty, thread-safe store. Returns: None
def __init__(self) -> None: """Initialize the component registry with an empty, thread-safe store.""" self._components: MutableMapping[str, BidiComponentDefinition] = {} self._lock = threading.Lock()
function_simple
1
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "__init__", "class_name": "BidiComponentRegistry", "qualname": "BidiComponentRegistry.__init__", "file_path": "lib/streamlit/components/v2/component_registry.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_level": "file_runnable"}
infiniflow/ragflow:test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py:test_test_db_connect_dialect_matrix_unit
# Context: import inspect import sys from types import ModuleType, SimpleNamespace import pytest class _DummyManager: ... class _AwaitableValue: ... class _Args(dict): ... class _StubHeaders: ... class _StubResponse: ... class _DummyRequest: ... class _DummyRetCode: ... class _DummyCanvasCategory: ... class _TaskField: ... class _DummyTask: ... class _FileMap(dict): ... def _run(coro): ... async def _collect_stream(body): ... def _set_request_json(monkeypatch, module, payload): ... def auth(): ... def set_tenant_info(): ... def _load_canvas_module(monkeypatch): ... def test_templates_rm_save_get_matrix_unit(monkeypatch): ... def test_getsse_auth_token_and_ownership_matrix_unit(monkeypatch): ... def test_run_dataflow_and_canvas_sse_matrix_unit(monkeypatch): ... def test_exp_agent_completion_trace_and_filtering_unit(monkeypatch): ... def test_rerun_and_cancel_matrix_unit(monkeypatch): ... def test_reset_upload_input_form_debug_matrix_unit(monkeypatch): ... def test_debug_sync_iter_and_exception_matrix_unit(monkeypatch): ... def test_canvas_history_list_and_setting_matrix_unit(monkeypatch): ... def test_trace_and_sessions_matrix_unit(monkeypatch): ... def test_session_crud_prompts_and_download_matrix_unit(monkeypatch): ... # Task: Write a Python test function `test_test_db_connect_dialect_matrix_unit` to verify the behavior of `test_db_connect_dialect_matrix_unit`. Module under test: copy, functools, pathlib
def test_test_db_connect_dialect_matrix_unit(monkeypatch): module = _load_canvas_module(monkeypatch) class _FakeDB: def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs self.connected = 0 self.closed = 0 def connect(self): self.connected += 1 def close(self): self.closed += 1 mysql_objs = [] postgres_objs = [] def _mysql_ctor(*args, **kwargs): obj = _FakeDB(*args, **kwargs) mysql_objs.append(obj) return obj def _postgres_ctor(*args, **kwargs): obj = _FakeDB(*args, **kwargs) postgres_objs.append(obj) return obj monkeypatch.setattr(module, "MySQLDatabase", _mysql_ctor) monkeypatch.setattr(module, "PostgresqlDatabase", _postgres_ctor) def _run_case(payload): _set_request_json(monkeypatch, module, payload) return _run(inspect.unwrap(module.test_db_connect)()) req_base = { "database": "db", "username": "user", "host": "host", "port": 3306, "password": "pwd", } res = _run_case({**req_base, "db_type": "mysql"}) assert res["code"] == module.RetCode.SUCCESS assert mysql_objs[-1].connected == 1 assert mysql_objs[-1].closed == 1 res = _run_case({**req_base, "db_type": "mariadb"}) assert res["code"] == module.RetCode.SUCCESS assert mysql_objs[-1].connected == 1 res = _run_case({**req_base, "db_type": "oceanbase"}) assert res["code"] == module.RetCode.SUCCESS assert mysql_objs[-1].kwargs["charset"] == "utf8mb4" res = _run_case({**req_base, "db_type": "postgres"}) assert res["code"] == module.RetCode.SUCCESS assert postgres_objs[-1].closed == 1 mssql_calls = {} class _MssqlCursor: def execute(self, sql): mssql_calls["sql"] = sql def close(self): mssql_calls["cursor_closed"] = True class _MssqlConn: def cursor(self): mssql_calls["cursor_opened"] = True return _MssqlCursor() def close(self): mssql_calls["conn_closed"] = True pyodbc_mod = ModuleType("pyodbc") def _pyodbc_connect(conn_str): mssql_calls["conn_str"] = conn_str return _MssqlConn() pyodbc_mod.connect = _pyodbc_connect monkeypatch.setitem(sys.modules, "pyodbc", pyodbc_mod) res = 
_run_case({**req_base, "db_type": "mssql"}) assert res["code"] == module.RetCode.SUCCESS assert "DRIVER={ODBC Driver 17 for SQL Server}" in mssql_calls["conn_str"] assert mssql_calls["sql"] == "SELECT 1" ibm_calls = {} ibm_db_mod = ModuleType("ibm_db") def _ibm_connect(conn_str, *_args): ibm_calls["conn_str"] = conn_str return "ibm-conn" def _ibm_exec_immediate(conn, sql): ibm_calls["exec"] = (conn, sql) return "ibm-stmt" ibm_db_mod.connect = _ibm_connect ibm_db_mod.exec_immediate = _ibm_exec_immediate ibm_db_mod.fetch_assoc = lambda stmt: ibm_calls.update({"fetch": stmt}) or {"one": 1} ibm_db_mod.close = lambda conn: ibm_calls.update({"close": conn}) monkeypatch.setitem(sys.modules, "ibm_db", ibm_db_mod) res = _run_case({**req_base, "db_type": "IBM DB2"}) assert res["code"] == module.RetCode.SUCCESS assert ibm_calls["exec"] == ("ibm-conn", "SELECT 1 FROM sysibm.sysdummy1") monkeypatch.setitem(sys.modules, "trino", None) res = _run_case({**req_base, "db_type": "trino", "database": "catalog.schema"}) assert res["code"] == module.RetCode.EXCEPTION_ERROR assert "Missing dependency 'trino'" in res["message"] trino_calls = {"connect": [], "auth": []} class _TrinoCursor: def execute(self, sql): trino_calls["sql"] = sql def fetchall(self): trino_calls["fetched"] = True return [(1,)] def close(self): trino_calls["cursor_closed"] = True class _TrinoConn: def cursor(self): return _TrinoCursor() def close(self): trino_calls["conn_closed"] = True trino_mod = ModuleType("trino") trino_mod.BasicAuthentication = lambda user, password: trino_calls["auth"].append((user, password)) or ("auth", user) trino_mod.dbapi = SimpleNamespace(connect=lambda **kwargs: trino_calls["connect"].append(kwargs) or _TrinoConn()) monkeypatch.setitem(sys.modules, "trino", trino_mod) res = _run_case({**req_base, "db_type": "trino", "database": ""}) assert res["code"] == module.RetCode.EXCEPTION_ERROR assert "catalog.schema" in res["message"] monkeypatch.setenv("TRINO_USE_TLS", "1") res = 
_run_case({**req_base, "db_type": "trino", "database": "cat.schema"}) assert res["code"] == module.RetCode.SUCCESS assert trino_calls["connect"][-1]["catalog"] == "cat" assert trino_calls["connect"][-1]["schema"] == "schema" assert trino_calls["auth"][-1] == ("user", "pwd") res = _run_case({**req_base, "db_type": "trino", "database": "cat/schema"}) assert res["code"] == module.RetCode.SUCCESS assert trino_calls["connect"][-1]["catalog"] == "cat" assert trino_calls["connect"][-1]["schema"] == "schema" res = _run_case({**req_base, "db_type": "trino", "database": "catalog"}) assert res["code"] == module.RetCode.SUCCESS assert trino_calls["connect"][-1]["catalog"] == "catalog" assert trino_calls["connect"][-1]["schema"] == "default" res = _run_case({**req_base, "db_type": "unknown"}) assert res["code"] == module.RetCode.EXCEPTION_ERROR assert "Unsupported database type." in res["message"] class _BoomDB(_FakeDB): def connect(self): raise RuntimeError("connect boom") monkeypatch.setattr(module, "MySQLDatabase", lambda *_args, **_kwargs: _BoomDB()) res = _run_case({**req_base, "db_type": "mysql"}) assert res["code"] == module.RetCode.EXCEPTION_ERROR assert "connect boom" in res["message"]
test
1
{"function_name": "test_test_db_connect_dialect_matrix_unit", "class_name": null, "qualname": "test_test_db_connect_dialect_matrix_unit", "file_path": "test/testcases/test_web_api/test_canvas_app/test_canvas_routes_unit.py", "repo_id": "infiniflow/ragflow", "loc": 174, "tested_modules": ["copy", "functools", "pathlib", "types"], "has_docstring": false, "runnable_level": "file_runnable"}
ray-project/ray:python/ray/data/tests/datasource/test_uc_datasource.py:TestReadUnityCatalogAPI.test_raises_with_incomplete_credentials
# Context: import pytest def static_credential_provider(): ... def refreshable_credential_provider(): ... def requests_mocker(): ... class TestBuildHeaders: ... class TestRequestWith401Retry: ... class TestUnityCatalogConnectorInit: ... class TestUnityCatalogConnector401Retry: ... class TestReadUnityCatalogAPI: def test_successful_read_with_valid_credentials(self, requests_mocker, credential_provider, url, token): ... # Task: Write a Python test method `test_raises_with_incomplete_credentials` in test class `TestReadUnityCatalogAPI` to test that read_unity_catalog raises when credentials are incomplete. Module under test: ray.data._internal.datasource.databricks_credentials, ray.data._internal.datasource.uc_datasource, ray.data.tests.datasource.databricks_test_utils
def test_raises_with_incomplete_credentials(self, url, token): """Test that read_unity_catalog raises when credentials are incomplete.""" import ray.data with pytest.raises(ValueError, match="Either 'credential_provider' or both"): ray.data.read_unity_catalog( table="catalog.schema.table", url=url, token=token, )
test
0
{"function_name": "test_raises_with_incomplete_credentials", "class_name": "TestReadUnityCatalogAPI", "qualname": "TestReadUnityCatalogAPI.test_raises_with_incomplete_credentials", "file_path": "python/ray/data/tests/datasource/test_uc_datasource.py", "repo_id": "ray-project/ray", "loc": 10, "tested_modules": ["ray.data._internal.datasource.databricks_credentials", "ray.data._internal.datasource.uc_datasource", "ray.data.tests.datasource.databricks_test_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:test/unit_test/common/test_string_utils.py:TestRemoveRedundantSpaces.test_multiple_punctuation
# Context: import pytest from common.string_utils import remove_redundant_spaces, clean_markdown_block class TestCleanMarkdownBlock: ... class TestRemoveRedundantSpaces: def test_remove_spaces_before_commas(self): ... def test_remove_spaces_before_periods(self): ... def test_remove_spaces_before_exclamation(self): ... def test_remove_spaces_after_opening_parenthesis(self): ... def test_remove_spaces_before_closing_parenthesis(self): ... def test_keep_spaces_between_words(self): ... def test_mixed_punctuation(self): ... def test_with_numbers(self): ... def test_decimal_numbers(self): ... def test_time_format(self): ... def test_currency_symbols(self): ... def test_empty_string(self): ... def test_only_spaces(self): ... def test_no_redundant_spaces(self): ... def test_multiple_spaces(self): ... def test_angle_brackets(self): ... def test_case_insensitive(self): ... def test_semicolon_and_colon(self): ... def test_quotation_marks(self): ... def test_abbreviations(self): ... def test_email_addresses(self): ... def test_urls(self): ... def test_hashtags_and_mentions(self): ... def test_nested_parentheses(self): ... def test_math_expressions(self): ... def test_html_tags(self): ... def test_programming_code(self): ... def test_unicode_and_special_symbols(self): ... def test_mixed_chinese_english(self): ... def test_special_characters_in_pattern(self): ... def test_tabs_and_newlines(self): ... # Task: Write a Python test method `test_multiple_punctuation` in test class `TestRemoveRedundantSpaces` to test multiple consecutive punctuation marks. Module under test: common.string_utils
def test_multiple_punctuation(self): """Test multiple consecutive punctuation marks""" input_text = "Wow !! ... Really ??" expected = "Wow!! ... Really??" assert remove_redundant_spaces(input_text) == expected
test
1
{"function_name": "test_multiple_punctuation", "class_name": "TestRemoveRedundantSpaces", "qualname": "TestRemoveRedundantSpaces.test_multiple_punctuation", "file_path": "test/unit_test/common/test_string_utils.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["common.string_utils"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/inputs/inputs.py:MultilineInput:class_doc
Write a class-level docstring for `MultilineInput` (inherits from MessageTextInput, AIMixin, MultilineMixin, InputTraceMixin, ToolModeMixin) which has methods: various methods.
Represents a multiline input field. Attributes: field_type (SerializableFieldTypes): The type of the field. Defaults to FieldTypes.TEXT. multiline (CoalesceBool): Indicates whether the input field should support multiple lines. Defaults to True. password (CoalesceBool): Whether to mask the input as a password field. Defaults to False.
documentation
1
{"doc_type": "class", "class_name": "MultilineInput", "file_path": "src/lfx/src/lfx/inputs/inputs.py", "repo_id": "langflow-ai/langflow", "char_length": 349, "methods": []}
exo-explore/exo:bench/eval_tool_calls.py:run_scenario
# Context: import json import sys import httpx class Scenario: ... def load_scenarios(path: Path) -> list[Scenario]: ... class ParsedResponse: ... class ScenarioResult: ... def validate_args(args_str: str, required_keys: list[str]) -> tuple[bool, str | None]: ... def validate_nested_args(args_str: str, array_key: str, required_item_keys: list[str]) -> tuple[bool, str | None]: ... def call_api(client: httpx.Client, host: str, port: int, path: str, body: dict[str, Any], timeout: float) -> tuple[dict[str, Any], float]: ... def _openai_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ... def _openai_parse_response(data: dict[str, Any]) -> ParsedResponse: ... def _openai_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ... def _claude_translate_tools(tools: list[dict[str, Any]]) -> list[dict[str, Any]]: ... def _claude_translate_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]: ... def _claude_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ... def _claude_parse_response(data: dict[str, Any]) -> ParsedResponse: ... def _claude_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ... def _responses_translate_input(messages: list[dict[str, Any]]) -> list[dict[str, Any]]: ... def _responses_build_request(model: str, messages: list[dict[str, Any]], tools: list[dict[str, Any]]) -> tuple[str, dict[str, Any]]: ... def _responses_parse_response(data: dict[str, Any]) -> ParsedResponse: ... def _responses_build_followup(messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str, parsed: ParsedResponse, tool_result: str) -> tuple[str, dict[str, Any]]: ... 
def result_to_dict(result: ScenarioResult) -> dict[str, Any]: ... def _placement_sort_key(p: dict[str, Any]) -> tuple[int, int]: ... def main() -> None: ... # Task: Write a Python function `run_scenario` to run a single scenario against one API adapter. Returns 1-2 results. Parameters: client: httpx.Client, host: str, port: int, model: str, scenario: Scenario, api_name: ApiName, timeout: float, verbose: bool Returns: list[ScenarioResult]
def run_scenario( client: httpx.Client, host: str, port: int, model: str, scenario: Scenario, api_name: ApiName, timeout: float, verbose: bool, ) -> list[ScenarioResult]: """Run a single scenario against one API adapter. Returns 1-2 results.""" adapter = ADAPTERS[api_name] build_request = adapter["build_request"] parse_response = adapter["parse_response"] build_followup = adapter["build_followup"] results: list[ScenarioResult] = [] # --- Phase 1: initial request --- path, body = build_request(model, scenario.messages, scenario.tools) if verbose: print( f" [{api_name}] request: {path} {json.dumps(body, indent=2)}", file=sys.stderr, ) try: data, latency = call_api(client, host, port, path, body, timeout) except Exception as exc: results.append( ScenarioResult( name=scenario.name, api=api_name, phase="tool_call", passed=False, error=f"API error: {exc}", ) ) return results if verbose: print( f" [{api_name}] response: {json.dumps(data, indent=2)}", file=sys.stderr ) parsed = parse_response(data) checks: dict[str, bool] = {} if scenario.expect_tool_call: checks["finish_reason_tool_calls"] = parsed.finish_reason == "tool_calls" checks["has_tool_call"] = parsed.has_tool_call args_err: str | None = None if parsed.has_tool_call and parsed.tool_call is not None: checks["correct_function"] = ( scenario.expected_function is None or parsed.tool_call["name"] == scenario.expected_function ) if scenario.required_arg_keys: ok, args_err = validate_args( parsed.tool_call["arguments"], scenario.required_arg_keys ) checks["valid_arguments"] = ok else: checks["valid_arguments"] = True if scenario.nested_array_key and scenario.required_item_keys: ok, nested_err = validate_nested_args( parsed.tool_call["arguments"], scenario.nested_array_key, scenario.required_item_keys, ) checks["valid_nested_structure"] = ok if not ok: args_err = nested_err else: checks["correct_function"] = False checks["valid_arguments"] = False args_err = "No tool call returned" passed = all(checks.values()) error = 
args_err if not passed else None else: checks["finish_reason_stop"] = parsed.finish_reason == "stop" checks["no_tool_call"] = not parsed.has_tool_call checks["has_content"] = ( parsed.content is not None and len(parsed.content.strip()) > 0 ) passed = all(checks.values()) error = ( None if passed else ( f"finish_reason={parsed.finish_reason}, " f"tool_call={'yes' if parsed.has_tool_call else 'no'}, " f"content={'yes' if parsed.content else 'no'}" ) ) results.append( ScenarioResult( name=scenario.name, api=api_name, phase="tool_call", passed=passed, checks=checks, error=error, latency_ms=latency, ) ) # --- Phase 2: multi-turn follow-up --- if ( scenario.tool_result is not None and parsed.has_tool_call and parsed.tool_call is not None ): followup_path, followup_body = build_followup( scenario.messages, scenario.tools, model, parsed, scenario.tool_result, ) if verbose: print( f" [{api_name}] follow_up request: {followup_path} {json.dumps(followup_body, indent=2)}", file=sys.stderr, ) try: data2, latency2 = call_api( client, host, port, followup_path, followup_body, timeout ) except Exception as exc: results.append( ScenarioResult( name=scenario.name, api=api_name, phase="follow_up", passed=False, error=f"API error: {exc}", ) ) return results if verbose: print( f" [{api_name}] follow_up response: {json.dumps(data2, indent=2)}", file=sys.stderr, ) parsed2 = parse_response(data2) checks2: dict[str, bool] = {} checks2["finish_reason_stop"] = parsed2.finish_reason == "stop" checks2["no_tool_call"] = not parsed2.has_tool_call checks2["has_content"] = ( parsed2.content is not None and len(parsed2.content.strip()) > 0 ) passed2 = all(checks2.values()) error2: str | None = None if not passed2: error2 = ( f"finish_reason={parsed2.finish_reason}, " f"tool_call={'yes' if parsed2.has_tool_call else 'no'}, " f"content={'yes' if parsed2.content else 'no'}" ) results.append( ScenarioResult( name=scenario.name, api=api_name, phase="follow_up", passed=passed2, checks=checks2, 
error=error2, latency_ms=latency2, ) ) return results
function_complex
0
{"cognitive_complexity": 47, "loc": 181, "code_loc": 152, "docstring_loc": 1, "function_name": "run_scenario", "class_name": null, "qualname": "run_scenario", "file_path": "bench/eval_tool_calls.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "file_runnable"}
apache/airflow:airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py:BulkTaskInstanceService.handle_bulk_delete
# Context: from fastapi import HTTPException, Query, status from sqlalchemy import select, tuple_ from airflow.api_fastapi.core_api.datamodels.common import ( BulkActionNotOnExistence, BulkActionResponse, BulkBody, BulkCreateAction, BulkDeleteAction, BulkUpdateAction, ) from airflow.api_fastapi.core_api.datamodels.task_instances import BulkTaskInstanceBody, PatchTaskInstanceBody from airflow.models.taskinstance import TaskInstance as TI def _patch_ti_validate_request(dag_id: str, dag_run_id: str, task_id: str, dag_bag: DagBagDep, body: PatchTaskInstanceBody, session: SessionDep, map_index: int | None, update_mask: list[str] | None) -> tuple[SerializedDAG, list[TI], dict]: ... def _patch_task_instance_state(task_id: str, dag_run_id: str, dag: SerializedDAG, task_instance_body: BulkTaskInstanceBody | PatchTaskInstanceBody, data: dict, session: Session) -> None: ... def _patch_task_instance_note(task_instance_body: BulkTaskInstanceBody | PatchTaskInstanceBody, tis: list[TI], user: GetUserDep, update_mask: list[str] | None) -> None: ... class BulkTaskInstanceService(BulkService[BulkTaskInstanceBody]): def __init__( self, session: Session, request: BulkBody[BulkTaskInstanceBody], dag_id: str, dag_run_id: str, dag_bag: DagBagDep, user: GetUserDep, ): super().__init__(session, request) self.dag_id = dag_id self.dag_run_id = dag_run_id self.dag_bag = dag_bag self.user = user def _extract_task_identifiers(self, entity: str | BulkTaskInstanceBody) -> tuple[str, str, str, int | None]: ... def _categorize_entities(self, entities: Sequence[str | BulkTaskInstanceBody], results: BulkActionResponse) -> tuple[set[tuple[str, str, str, int]], set[tuple[str, str, str]]]: ... def _categorize_task_instances(self, task_keys: set[tuple[str, str, str, int]]) -> tuple[dict[tuple[str, str, str, int], TI], set[tuple[str, str, str, int]], set[tuple[str, str, str, int]]]: ... 
def _perform_update(self, entity: BulkTaskInstanceBody, dag_id: str, dag_run_id: str, task_id: str, map_index: int, results: BulkActionResponse, update_mask: list[str] | None) -> None: ... def handle_bulk_create(self, action: BulkCreateAction[BulkTaskInstanceBody], results: BulkActionResponse) -> None: ... def handle_bulk_update(self, action: BulkUpdateAction[BulkTaskInstanceBody], results: BulkActionResponse) -> None: ... # Task: Write a Python method `handle_bulk_delete` for the class `BulkTaskInstanceService` to bulk delete task instances. Parameters: action: BulkDeleteAction[BulkTaskInstanceBody], results: BulkActionResponse Returns: None
def handle_bulk_delete( self, action: BulkDeleteAction[BulkTaskInstanceBody], results: BulkActionResponse ) -> None: """Bulk delete task instances.""" # Validate and categorize entities into specific and all map index delete sets delete_specific_map_index_task_keys, delete_all_map_index_task_keys = self._categorize_entities( action.entities, results ) try: # Handle deletion of specific (dag_id, dag_run_id, task_id, map_index) tuples if delete_specific_map_index_task_keys: _, matched_task_keys, not_found_task_keys = self._categorize_task_instances( delete_specific_map_index_task_keys ) not_found_task_ids = [ {"dag_id": dag_id, "dag_run_id": run_id, "task_id": task_id, "map_index": map_index} for dag_id, run_id, task_id, map_index in not_found_task_keys ] if action.action_on_non_existence == BulkActionNotOnExistence.FAIL and not_found_task_keys: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=f"The task instances with these identifiers: {not_found_task_ids} were not found", ) for dag_id, run_id, task_id, map_index in matched_task_keys: ti = ( self.session.execute( select(TI).where( TI.dag_id == dag_id, TI.run_id == run_id, TI.task_id == task_id, TI.map_index == map_index, ) ) .scalars() .one_or_none() ) if ti: self.session.delete(ti) results.success.append(f"{dag_id}.{run_id}.{task_id}[{map_index}]") # Handle deletion of all map indexes for certain (dag_id, dag_run_id, task_id) tuples if delete_all_map_index_task_keys: all_dag_ids = {dag_id for dag_id, _, _ in delete_all_map_index_task_keys} all_run_ids = {run_id for _, run_id, _ in delete_all_map_index_task_keys} all_task_ids = {task_id for _, _, task_id in delete_all_map_index_task_keys} batch_task_instances = self.session.scalars( select(TI).where( TI.dag_id.in_(all_dag_ids), TI.run_id.in_(all_run_ids), TI.task_id.in_(all_task_ids), ) ).all() # Group task instances by (dag_id, run_id, task_id) for efficient lookup task_instances_by_key: dict[tuple[str, str, str], list[TI]] = {} for ti in 
batch_task_instances: key = (ti.dag_id, ti.run_id, ti.task_id) task_instances_by_key.setdefault(key, []).append(ti) for dag_id, run_id, task_id in delete_all_map_index_task_keys: all_task_instances = task_instances_by_key.get((dag_id, run_id, task_id), []) if ( not all_task_instances and action.action_on_non_existence == BulkActionNotOnExistence.FAIL ): raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=f"No task instances found for dag_id: {dag_id}, run_id: {run_id}, task_id: {task_id}", ) for ti in all_task_instances: self.session.delete(ti) results.success.append(f"{dag_id}.{run_id}.{task_id}[{ti.map_index}]") except HTTPException as e: results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})
function_complex
1
{"cognitive_complexity": 31, "loc": 82, "code_loc": 63, "docstring_loc": 1, "function_name": "handle_bulk_delete", "class_name": "BulkTaskInstanceService", "qualname": "BulkTaskInstanceService.handle_bulk_delete", "file_path": "airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/edge3/tests/unit/edge3/cli/test_definition.py:TestEdgeCliDefinition.test_maintenance_command_args_on
# Context: class TestEdgeCliDefinition: def setup_parser(self): ... def test_edge_cli_commands_count(self): ... def test_edge_commands_count(self): ... def test_edge_subcommands_defined(self, command): ... def test_worker_command_args(self): ... def test_status_command_args(self): ... def test_maintenance_command_args_off(self): ... def test_stop_command_args(self): ... def test_list_workers_command_args(self): ... def test_remote_edge_worker_request_maintenance_args(self): ... def test_remote_edge_worker_exit_maintenance_args(self): ... def test_remote_edge_worker_update_maintenance_comment_args(self): ... def test_remove_remote_edge_worker_args(self): ... def test_shutdown_remote_edge_worker_args(self): ... def test_add_worker_queues_args(self): ... def test_remove_worker_queues_args(self): ... def test_shutdown_all_workers_args(self): ... # Task: Write a Python test method `test_maintenance_command_args_on` in test class `TestEdgeCliDefinition` to test maintenance command to enable maintenance mode. Module under test: __future__, airflow.cli, airflow.providers.edge3.cli.definition
def test_maintenance_command_args_on(self):
    """Test maintenance command to enable maintenance mode."""
    # Parse the CLI invocation that turns maintenance mode on with a
    # comment and the --wait flag, then verify each parsed attribute.
    cli_args = self.arg_parser.parse_args(
        ["edge", "maintenance", "on", "--comments", "Scheduled maintenance", "--wait"]
    )
    assert cli_args.wait is True
    assert cli_args.comments == "Scheduled maintenance"
    assert cli_args.maintenance == "on"
test
1
{"function_name": "test_maintenance_command_args_on", "class_name": "TestEdgeCliDefinition", "qualname": "TestEdgeCliDefinition.test_maintenance_command_args_on", "file_path": "providers/edge3/tests/unit/edge3/cli/test_definition.py", "repo_id": "apache/airflow", "loc": 14, "tested_modules": ["__future__", "airflow.cli", "airflow.providers.edge3.cli.definition", "tests_common.test_utils.version_compat"], "has_docstring": true, "runnable_level": "class_runnable"}
unclecode/crawl4ai:crawl4ai/table_extraction.py:LLMTableExtraction._merge_chunk_results
# Context: from typing import Dict, List, Optional, Any, Union, Tuple class TableExtractionStrategy(ABC): ... class DefaultTableExtraction(TableExtractionStrategy): ... class NoTableExtraction(TableExtractionStrategy): ... class LLMTableExtraction(TableExtractionStrategy): TABLE_EXTRACTION_PROMPT = """You are a specialized table extraction system that converts complex HTML tables into structured JSON data. Your primary goal is to handle difficult, irregular HTML tables that cannot be easily parsed by standard tools, transforming them into clean, tabulated data. def __init__(self, llm_config: Optional[LLMConfig] = None, css_selector: Optional[str] = None, max_tries: int = 3, enable_chunking: bool = True, chunk_token_threshold: int = 3000, min_rows_per_chunk: int = 10, max_parallel_chunks: int = 5, verbose: bool = False, **kwargs): """ Initialize the LLM-based table extraction strategy. Args: llm_config: LLM configuration for the extraction css_selector: Optional CSS selector to focus on specific page areas max_tries: Maximum number of retries if LLM fails to extract tables (default: 3) enable_chunking: Enable smart chunking for large tables (default: True) chunk_token_threshold: Token threshold for triggering chunking (default: 3000) min_rows_per_chunk: Minimum rows per chunk (default: 10) max_parallel_chunks: Maximum parallel chunk processing (default: 5) verbose: Enable verbose logging **kwargs: Additional parameters passed to parent class """ super().__init__(verbose=verbose, **kwargs) # Set up LLM configuration self.llm_config = llm_config if not self.llm_config: # Use default configuration if not provided self.llm_config = create_llm_config( provider=os.getenv("DEFAULT_PROVIDER", "openai/gpt-4o-mini"), api_token=os.getenv("OPENAI_API_KEY"), ) self.css_selector = css_selector self.max_tries = max(1, max_tries) # Ensure at least 1 try self.enable_chunking = enable_chunking self.chunk_token_threshold = chunk_token_threshold self.min_rows_per_chunk = max(5, 
min_rows_per_chunk) # At least 5 rows per chunk self.max_parallel_chunks = max(1, max_parallel_chunks) self.extra_args = kwargs.get("extra_args", {}) def extract_tables(self, element: etree.Element, **kwargs) -> List[Dict[str, Any]]: ... def _estimate_tokens(self, text: str) -> int: ... def _needs_chunking(self, html_content: str) -> bool: ... def _extract_table_structure(self, html_content: str) -> Tuple[List[etree.Element], List[etree.Element], List[etree.Element], bool]: ... def _create_smart_chunks(self, html_content: str) -> Tuple[List[str], bool]: ... def _create_chunk_html(self, header_html: str, body_rows: List[str], footer_html: Optional[str]) -> str: ... def _rebalance_chunks(self, chunks: List[str], min_rows: int) -> List[str]: ... def _process_chunk(self, chunk_html: str, chunk_index: int, total_chunks: int, has_headers: bool) -> Dict[str, Any]: ... def _extract_with_chunking(self, html_content: str) -> List[Dict[str, Any]]: ... def _css_to_xpath_select(self, element: etree.Element, css_selector: str) -> List[etree.Element]: ... def _validate_table_structure(self, table: Dict) -> bool: ... def _ensure_table_format(self, table: Dict) -> Dict[str, Any]: ... # Task: Write a Python method `_merge_chunk_results` for the class `LLMTableExtraction` to merge results from multiple chunks into a single table. Parameters: chunk_results: List[Dict[str, Any]] Returns: List[Dict[str, Any]]
def _merge_chunk_results(self, chunk_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ Merge results from multiple chunks into a single table. """ # Sort by chunk index to maintain order chunk_results.sort(key=lambda x: x.get('chunk_index', 0)) # Filter out failed chunks valid_chunks = [r for r in chunk_results if r.get('table')] if not valid_chunks: return [] # Start with the first chunk's structure merged_table = valid_chunks[0]['table'].copy() # Concatenate rows from all chunks all_rows = [] for chunk_result in valid_chunks: table = chunk_result['table'] # Skip headers from non-first chunks (they're duplicates) rows = table.get('rows', []) all_rows.extend(rows) merged_table['rows'] = all_rows # Update metadata merged_table['metadata']['row_count'] = len(all_rows) merged_table['metadata']['chunked'] = True merged_table['metadata']['chunk_count'] = len(valid_chunks) if self.verbose: self._log("info", f"Merged {len(valid_chunks)} chunks into table with {len(all_rows)} rows") return [merged_table]
function_simple
1
{"cognitive_complexity": 3, "loc": 35, "code_loc": 17, "docstring_loc": 3, "function_name": "_merge_chunk_results", "class_name": "LLMTableExtraction", "qualname": "LLMTableExtraction._merge_chunk_results", "file_path": "crawl4ai/table_extraction.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:tests/models/lighton_ocr/test_modeling_lighton_ocr.py:LightOnOcrForConditionalGenerationModelTest.test_forward_pass_with_image_sizes
# Context: from transformers.testing_utils import ( cleanup, require_torch, slow, torch_device, ) from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor import torch class LightOnOcrVisionText2TextModelTester: ... class LightOnOcrForConditionalGenerationIntegrationTest(unittest.TestCase): ... class LightOnOcrForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( pipeline_model_mapping = {"image-text-to-text": LightOnOcrForConditionalGeneration} if is_torch_available() else {} skip_test_image_features_output_shape = True _is_composite = True test_torch_exportable = False def setUp(self): ... def _prepare_for_class(self, inputs_dict, model_class, return_labels): ... def prepare_config_and_inputs_for_generate(self, batch_size): ... def test_config(self): ... def test_mismatching_num_image_tokens(self): ... def test_spatial_merge_size(self): ... def test_model_outputs_equivalence(self): ... def test_vision_projection(self): ... def test_get_image_features(self): ... # Task: Write a Python test method `test_forward_pass_with_image_sizes` in test class `LightOnOcrForConditionalGenerationModelTest` to test that the model correctly handles variable image sizes. Module under test: difflib, transformers, transformers.testing_utils
def test_forward_pass_with_image_sizes(self):
    """
    Test that the model correctly handles variable image sizes.
    """
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()

    image_size = self.model_tester.image_size
    patch_size = self.model_tester.patch_size
    batch_size = 2

    # Number of image placeholder tokens after spatial merging.
    patches_per_image = (image_size // patch_size) ** 2
    num_image_tokens = patches_per_image // (config.spatial_merge_size**2)

    for model_class in self.all_model_classes:
        model = model_class(config).to(torch_device)
        model.eval()

        pixel_values = floats_tensor([batch_size, 3, image_size, image_size]).to(torch_device)

        # Per-sample image sizes (must remain divisible by patch_size).
        image_sizes = torch.tensor(
            [[image_size, image_size]] * batch_size,
            dtype=torch.long,
            device=torch_device,
        )

        token_ids = ids_tensor([batch_size, 10 + num_image_tokens], config.text_config.vocab_size - 1) + 1
        # Avoid accidental collisions with the image token id, then place
        # the real image tokens at the start of every sequence.
        token_ids[token_ids == config.image_token_id] = config.image_token_id + 1
        token_ids[:, :num_image_tokens] = config.image_token_id
        token_ids = token_ids.to(torch_device)

        outputs = model(
            pixel_values=pixel_values,
            input_ids=token_ids,
            image_sizes=image_sizes,
        )
        self.assertIsNotNone(outputs)
test
0
{"function_name": "test_forward_pass_with_image_sizes", "class_name": "LightOnOcrForConditionalGenerationModelTest", "qualname": "LightOnOcrForConditionalGenerationModelTest.test_forward_pass_with_image_sizes", "file_path": "tests/models/lighton_ocr/test_modeling_lighton_ocr.py", "repo_id": "huggingface/transformers", "loc": 40, "tested_modules": ["difflib", "transformers", "transformers.testing_utils", "generation.test_utils", "test_configuration_common"], "has_docstring": true, "runnable_level": "project_runnable"}
commaai/openpilot:selfdrive/ui/widgets/prime.py:PrimeWidget._render_for_non_prime_users
# Context: import pyray as rl from openpilot.system.ui.lib.application import gui_app, FontWeight from openpilot.system.ui.lib.multilang import tr from openpilot.system.ui.lib.text_measure import measure_text_cached from openpilot.system.ui.lib.wrap_text import wrap_text from openpilot.system.ui.widgets.label import gui_label class PrimeWidget(Widget): PRIME_BG_COLOR = rl.Color(51, 51, 51, 255) def _render(self, rect): ... def _render_for_prime_user(self, rect: rl.Rectangle): ... # Task: Write a Python method `_render_for_non_prime_users` for the class `PrimeWidget` to renders the advertisement for non-Prime users. Parameters: rect: rl.Rectangle
def _render_for_non_prime_users(self, rect: rl.Rectangle):
    """Renders the advertisement for non-Prime users."""
    # Card background
    rl.draw_rectangle_rounded(rect, 0.025, 10, self.PRIME_BG_COLOR)

    # Content area inset from the card edges
    left = rect.x + 80
    top = rect.y + 90
    width = rect.width - 160

    # Title
    gui_label(rl.Rectangle(left, top, width, 90), tr("Upgrade Now"), 75, font_weight=FontWeight.BOLD)

    # Wrapped description text
    desc_top = top + 140
    font = gui_app.font(FontWeight.NORMAL)
    wrapped = "\n".join(wrap_text(font, tr("Become a comma prime member at connect.comma.ai"), 56, int(width)))
    desc_size = measure_text_cached(font, wrapped, 56)
    rl.draw_text_ex(font, wrapped, rl.Vector2(left, desc_top), 56, 0, rl.WHITE)

    # Features heading
    features_top = desc_top + desc_size.y + 50
    gui_label(rl.Rectangle(left, features_top, width, 50), tr("PRIME FEATURES:"), 41, font_weight=FontWeight.BOLD)

    # Feature rows: blue checkmark followed by the feature text
    feature_names = (
        tr("Remote access"),
        tr("24/7 LTE connectivity"),
        tr("1 year of drive storage"),
        tr("Remote snapshots"),
    )
    row_top = features_top + 80
    for feature in feature_names:
        gui_label(rl.Rectangle(left, row_top, 100, 60), "✓", 50, color=rl.Color(70, 91, 234, 255))
        gui_label(rl.Rectangle(left + 60, row_top, width - 60, 60), feature, 50)
        row_top += 65
function_simple
0
{"cognitive_complexity": 1, "loc": 29, "code_loc": 16, "docstring_loc": 1, "function_name": "_render_for_non_prime_users", "class_name": "PrimeWidget", "qualname": "PrimeWidget._render_for_non_prime_users", "file_path": "selfdrive/ui/widgets/prime.py", "repo_id": "commaai/openpilot", "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/integration/test_image_providers.py:test_anthropic_vision_api_with_jpeg
# Context: import os import pytest from langflow.utils.image import create_image_content_dict from tests.api_keys import has_api_key import anthropic def sample_image(tmp_path): ... def sample_jpeg_image(tmp_path): ... def test_openai_vision_api_real_call(sample_image): ... def test_openai_vision_api_with_jpeg(sample_jpeg_image): ... def test_anthropic_vision_api_real_call(sample_image): ... def test_google_gemini_vision_api_real_call(sample_image): ... def test_google_gemini_vision_api_with_jpeg(sample_jpeg_image): ... def test_langchain_integration_format_compatibility(sample_image): ... def test_cross_provider_consistency(sample_image): ... def test_error_handling_without_api_keys(sample_image): ... # Task: Write a Python test function `test_anthropic_vision_api_with_jpeg` to test Anthropic Claude API with JPEG image format. Module under test: langflow.utils.image, tests.api_keys, tests.api_keys
def test_anthropic_vision_api_with_jpeg(sample_jpeg_image):
    """Test Anthropic Claude API with JPEG image format."""
    try:
        import anthropic
    except ImportError:
        pytest.skip("Anthropic package not installed")

    client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))

    content_dict = create_image_content_dict(sample_jpeg_image)
    data_url = content_dict["image_url"]["url"]

    # JPEG format must be detected from the file extension
    assert "data:image/jpeg;base64," in data_url

    # Translate the standardized data URL into Anthropic's image block
    prefix, base64_data = data_url.split(";base64,")
    media_type = prefix.replace("data:", "")
    image_block = {
        "type": "image",
        "source": {"type": "base64", "media_type": media_type, "data": base64_data},
    }

    messages = [
        {"role": "user", "content": [{"type": "text", "text": "What do you see? One word answer."}, image_block]}
    ]

    try:
        response = client.messages.create(model="claude-3-haiku-20240307", max_tokens=10, messages=messages)
        assert response.content[0].text is not None  # API call successful
    except Exception as e:
        pytest.fail(f"Anthropic API call failed with JPEG image: {e}")
test
1
{"function_name": "test_anthropic_vision_api_with_jpeg", "class_name": null, "qualname": "test_anthropic_vision_api_with_jpeg", "file_path": "src/backend/tests/integration/test_image_providers.py", "repo_id": "langflow-ai/langflow", "loc": 33, "tested_modules": ["langflow.utils.image", "tests.api_keys", "tests.api_keys", "tests.api_keys", "tests.api_keys"], "has_docstring": true, "runnable_level": "project_runnable"}