| sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/mcp_adapter.py:MCPServerAdapter:class_doc | Write a class-level docstring for `MCPServerAdapter` which has methods: `__init__`, `start`, `stop`, `tools`, `__enter__`. | Manages the lifecycle of an MCP server and make its tools available to CrewAI.
Note: tools can only be accessed after the server has been started with the
`start()` method.
Usage:
# context manager + stdio
with MCPServerAdapter(...) as tools:
# tools is now available
# context manager + sse
with MCPServerAdapter({"url": "http://localhost:8000/sse"}) as tools:
# tools is now available
# context manager with filtered tools
with MCPServerAdapter(..., "tool1", "tool2") as filtered_tools:
# only tool1 and tool2 are available
# context manager with custom connect timeout (60 seconds)
with MCPServerAdapter(..., connect_timeout=60) as tools:
# tools is now available with longer timeout
# manually stop mcp server
try:
mcp_server = MCPServerAdapter(...)
tools = mcp_server.tools # all tools
# or with filtered tools and custom timeout
mcp_server = MCPServerAdapter(..., "tool1", "tool2", connect_timeout=45)
filtered_tools = mcp_server.tools # only tool1 and tool2
...
finally:
mcp_server.stop()
# Best practice is ensure cleanup is done after use.
mcp_server.stop() # run after crew().kickoff() | documentation | 0 | {"doc_type": "class", "class_name": "MCPServerAdapter", "file_path": "lib/crewai-tools/src/crewai_tools/adapters/mcp_adapter.py", "repo_id": "crewAIInc/crewAI", "char_length": 1249, "methods": ["__init__", "start", "stop", "tools", "__enter__", "__exit__"]} |
huggingface/transformers:tests/trainer/test_training_args.py:TestTrainingArguments.test_custom_output_dir | # Context:
import tempfile
from transformers import TrainingArguments
class TestTrainingArguments(unittest.TestCase):
def test_default_output_dir(self): ...
def test_output_dir_creation(self): ...
def test_torch_empty_cache_steps_requirements(self): ...
def test_output_dir_expands_user(self): ...
def test_enum_coercions(self): ...
def test_do_eval_auto_enabled(self): ...
def test_eval_steps_fallback_to_logging_steps(self): ...
def test_eval_steps_required_when_strategy_steps(self): ...
def test_logging_steps_required_nonzero(self): ...
def test_steps_must_be_integer_when_greater_than_one(self): ...
def test_load_best_model_requires_matching_strategies(self): ...
def test_metric_for_best_model_defaults(self): ...
def test_fp16_bf16_mutual_exclusivity(self): ...
def test_reduce_on_plateau_requires_eval(self): ...
def test_torch_compile_auto_enable(self): ...
def test_report_to_none_handling(self): ...
def test_warmup_steps_validation(self): ...
def test_debug_option_parsing(self): ...
def test_dataloader_prefetch_requires_workers(self): ...
def test_use_cpu_disables_pin_memory(self): ...
def test_include_num_input_tokens_seen_coercion(self): ...
def test_dict_field_parsing(self): ...
def test_dtype_to_json(self): ...
# Task:
Write a Python test method `test_custom_output_dir` in test class `TestTrainingArguments` to test that output_dir is respected when specified.
Module under test: transformers, transformers.debug_utils, transformers.trainer_utils | def test_custom_output_dir(self):
"""Test that output_dir is respected when specified."""
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(output_dir=tmp_dir)
self.assertEqual(args.output_dir, tmp_dir) | test | 0 | {"function_name": "test_custom_output_dir", "class_name": "TestTrainingArguments", "qualname": "TestTrainingArguments.test_custom_output_dir", "file_path": "tests/trainer/test_training_args.py", "repo_id": "huggingface/transformers", "loc": 5, "tested_modules": ["transformers", "transformers.debug_utils", "transformers.trainer_utils", "transformers.training_args"], "has_docstring": true, "runnable_level": "class_runnable"} |
fastapi/fastapi:tests/test_request_params/test_body/test_list.py:test_required_list_alias_by_name | # Context:
import pytest
from dirty_equals import IsOneOf, IsPartialDict
from fastapi.testclient import TestClient
async def read_required_list_str(p: Annotated[list[str], Body(embed=True)]): ...
class BodyModelRequiredListStr(BaseModel): ...
def read_model_required_list_str(p: BodyModelRequiredListStr): ...
def test_required_list_str_schema(path: str): ...
def test_required_list_str_missing(path: str, json: dict | None): ...
def test_required_list_str(path: str): ...
async def read_required_list_alias(p: Annotated[list[str], Body(embed=True, alias='p_alias')]): ...
class BodyModelRequiredListAlias(BaseModel): ...
async def read_model_required_list_alias(p: BodyModelRequiredListAlias): ...
def test_required_list_str_alias_schema(path: str): ...
def test_required_list_alias_missing(path: str, json: dict | None): ...
def test_required_list_alias_by_alias(path: str): ...
def read_required_list_validation_alias(p: Annotated[list[str], Body(embed=True, validation_alias='p_val_alias')]): ...
class BodyModelRequiredListValidationAlias(BaseModel): ...
async def read_model_required_list_validation_alias(p: BodyModelRequiredListValidationAlias): ...
def test_required_list_validation_alias_schema(path: str): ...
def test_required_list_validation_alias_missing(path: str, json: dict | None): ...
def test_required_list_validation_alias_by_name(path: str): ...
def test_required_list_validation_alias_by_validation_alias(path: str): ...
def read_required_list_alias_and_validation_alias(p: Annotated[list[str], Body(embed=True, alias='p_alias', validation_alias='p_val_alias')]): ...
class BodyModelRequiredListAliasAndValidationAlias(BaseModel): ...
def read_model_required_list_alias_and_validation_alias(p: BodyModelRequiredListAliasAndValidationAlias): ...
def test_required_list_alias_and_validation_alias_schema(path: str): ...
def test_required_list_alias_and_validation_alias_missing(path: str, json): ...
def test_required_list_alias_and_validation_alias_by_name(path: str): ...
def test_required_list_alias_and_validation_alias_by_alias(path: str): ...
def test_required_list_alias_and_validation_alias_by_validation_alias(path: str): ...
# Task:
Write a Python test function `test_required_list_alias_by_name` to verify the behavior of `required_list_alias_by_name`.
Module under test: typing, dirty_equals, fastapi | def test_required_list_alias_by_name(path: str):
client = TestClient(app)
response = client.post(path, json={"p": ["hello", "world"]})
assert response.status_code == 422
assert response.json() == {
"detail": [
{
"type": "missing",
"loc": ["body", "p_alias"],
"msg": "Field required",
"input": IsOneOf(None, {"p": ["hello", "world"]}),
}
]
} | test | 1 | {"function_name": "test_required_list_alias_by_name", "class_name": null, "qualname": "test_required_list_alias_by_name", "file_path": "tests/test_request_params/test_body/test_list.py", "repo_id": "fastapi/fastapi", "loc": 14, "tested_modules": ["typing", "dirty_equals", "fastapi", "fastapi.testclient", "pydantic"], "has_docstring": false, "runnable_level": "project_runnable"} |
apache/airflow:airflow-core/tests/unit/serialization/test_stringify.py:TestStringify.test_stringify_nested_serialized | # Context:
from airflow.serialization.stringify import CLASSNAME, VERSION, stringify
class W: ...
class V: ...
class TestStringify:
def test_stringify(self): ...
def test_serde_stringify_primitives(self, value, expected): ...
def test_stringify_none(self): ...
def test_stringify_primitives(self, value): ...
def test_stringify_raw_list(self): ...
def test_stringify_raw_tuple(self): ...
def test_stringify_raw_set(self): ...
def test_stringify_raw_frozenset(self): ...
def test_stringify_plain_dict(self): ...
def test_stringify_serialized_tuple(self): ...
def test_stringify_serialized_set(self): ...
def test_stringify_serialized_frozenset(self): ...
def test_stringify_old_style_tuple(self): ...
def test_stringify_old_style_dict(self): ...
def test_stringify_list_with_serialized_tuple(self): ...
def test_stringify_dict_with_serialized_tuple(self): ...
def test_stringify_empty_list(self): ...
def test_stringify_empty_tuple(self): ...
def test_stringify_empty_set(self): ...
def test_stringify_empty_dict(self): ...
def test_stringify_dict_with_none_value(self): ...
def test_stringify_list_with_none(self): ...
def test_stringify_custom_object(self): ...
def test_stringify_empty_classname_error(self): ...
def test_stringify_already_deserialized_object(self): ...
def test_stringify_nested_plain_dict(self): ...
def test_stringify_recursive_collection(self): ...
def test_stringify_dict_with_nested_serialized(self): ...
def test_error_thrown_for_airflow_classes(self): ...
# Task:
Write a Python test method `test_stringify_nested_serialized` in test class `TestStringify` to verify the behavior of `stringify_nested_serialized`.
Module under test: __future__, dataclasses, typing | def test_stringify_nested_serialized(self):
e = {
CLASSNAME: "test.Outer",
VERSION: 1,
"__data__": {
"inner": {CLASSNAME: "builtins.tuple", VERSION: 1, "__data__": [1, 2, 3]},
},
}
result = stringify(e)
assert "test.Outer@version=1" in result
assert "inner=(1,2,3)" in result | test | 1 | {"function_name": "test_stringify_nested_serialized", "class_name": "TestStringify", "qualname": "TestStringify.test_stringify_nested_serialized", "file_path": "airflow-core/tests/unit/serialization/test_stringify.py", "repo_id": "apache/airflow", "loc": 11, "tested_modules": ["__future__", "dataclasses", "typing", "airflow._shared.module_loading", "airflow.sdk.serde"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/base/tools/test_run_flow.py:TestRunFlowBaseComponentFlowRetrieval.test_get_flow_with_id | # Context:
from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
from uuid import uuid4
import pytest
from lfx.base.tools.run_flow import RunFlowBaseComponent
from lfx.schema.data import Data
def mock_shared_cache(): ...
class TestRunFlowBaseComponentInitialization: ...
class TestRunFlowBaseComponentFlowCaching: ...
class TestRunFlowBaseComponentInputOutputHandling: ...
class TestRunFlowBaseComponentOutputMethods: ...
class TestRunFlowBaseComponentToolGeneration: ...
class TestRunFlowBaseComponentTweakData: ...
class TestRunFlowBaseComponentUpdateOutputs: ...
class TestRunFlowBaseComponentTweaks: ...
class TestRunFlowBaseComponentFlowRetrieval:
async def test_get_flow_with_name(self): ...
async def test_get_flow_returns_empty_data_when_none(self): ...
async def test_get_graph_raises_error_without_id_or_name(self): ...
async def test_get_graph_uses_cache_when_available_and_up_to_date(self): ...
async def test_get_graph_fetches_and_caches_when_not_cached(self): ...
async def test_get_graph_deletes_stale_cache_and_refetches(self): ...
# Task:
Write a Python test method `test_get_flow_with_id` in test class `TestRunFlowBaseComponentFlowRetrieval` to test getting a flow by ID.
Module under test: uuid, lfx.base.tools.run_flow, lfx.graph.graph.base | async def test_get_flow_with_id(self):
"""Test getting a flow by ID."""
component = RunFlowBaseComponent()
component._user_id = str(uuid4())
flow_id = str(uuid4())
expected_flow = Data(data={"name": "test_flow"})
with patch("lfx.base.tools.run_flow.get_flow_by_id_or_name", new_callable=AsyncMock) as mock_get:
mock_get.return_value = expected_flow
result = await component.get_flow(flow_id_selected=flow_id)
assert result == expected_flow
mock_get.assert_called_once_with(
user_id=component._user_id,
flow_id=flow_id,
flow_name=None,
) | test | 1 | {"function_name": "test_get_flow_with_id", "class_name": "TestRunFlowBaseComponentFlowRetrieval", "qualname": "TestRunFlowBaseComponentFlowRetrieval.test_get_flow_with_id", "file_path": "src/backend/tests/unit/base/tools/test_run_flow.py", "repo_id": "langflow-ai/langflow", "loc": 18, "tested_modules": ["uuid", "lfx.base.tools.run_flow", "lfx.graph.graph.base", "lfx.graph.vertex.base", "lfx.schema.data"], "has_docstring": true, "runnable_level": "project_runnable"} |
karpathy/nanochat:nanochat/report.py:Report:class_doc | Write a class-level docstring for `Report` which has methods: `__init__`, `log`, `generate`, `reset`. | Maintains a bunch of logs, generates a final markdown report. | documentation | 0 | {"doc_type": "class", "class_name": "Report", "file_path": "nanochat/report.py", "repo_id": "karpathy/nanochat", "char_length": 61, "methods": ["__init__", "log", "generate", "reset"]} |
vllm-project/vllm:vllm/utils/async_utils.py:AsyncMicrobatchTokenizer._batch_encode_loop | # Context:
import asyncio
from functools import partial
from transformers.tokenization_utils_base import BatchEncoding
def cancel_task_threadsafe(task: Task): ...
def make_async(func: Callable[P, T], executor: Executor | None) -> Callable[P, Awaitable[T]]: ...
def run_in_loop(loop: AbstractEventLoop, function: Callable, *args): ...
def in_loop(event_loop: AbstractEventLoop) -> bool: ...
async def merge_async_iterators(*iterators) -> AsyncGenerator[tuple[int, T], None]: ...
async def collect_from_async_generator(iterator: AsyncGenerator[T, None]) -> list[T]: ...
class AsyncMicrobatchTokenizer:
def __init__(
self,
tokenizer,
max_batch_size: int = 32,
batch_wait_timeout_s: float = 0.002,
) -> None:
self.tokenizer = tokenizer
self.max_batch_size = max_batch_size
self.batch_wait_timeout_s = batch_wait_timeout_s
self._loop = asyncio.get_running_loop()
self._queues: dict[
tuple,
asyncio.Queue[tuple[str, dict, Future] | tuple[list[int], Future]],
] = {}
self._batcher_tasks: list[Task] = []
# Single-thread executor for blocking tokenizer calls.
self._executor = ThreadPoolExecutor(max_workers=1)
async def __call__(self, prompt, **kwargs) -> BatchEncoding: ...
async def encode(self, prompt, **kwargs) -> list[int]: ...
async def decode(self, token_ids, **kwargs) -> str: ...
def _get_queue(self, loop: asyncio.AbstractEventLoop, key: tuple) -> asyncio.Queue[tuple[str, dict, Future] | tuple[list[int], Future]]: ...
async def _batch_decode_loop(self, queue: asyncio.Queue): ...
def _queue_key(self, op: str, kwargs: dict) -> tuple: ...
def __del__(self): ...
# Task:
Write a Python async method `_batch_encode_loop` for the class `AsyncMicrobatchTokenizer` to batch incoming encode requests for efficiency.
Parameters: queue: asyncio.Queue, can_batch: bool | async def _batch_encode_loop(self, queue: asyncio.Queue, can_batch: bool):
"""Batch incoming encode requests for efficiency."""
while True:
prompt, kwargs, result_future = await queue.get()
prompts = [prompt]
kwargs_list = [kwargs]
result_futures = [result_future]
deadline = self._loop.time() + self.batch_wait_timeout_s
while len(prompts) < self.max_batch_size:
timeout = deadline - self._loop.time()
if timeout <= 0:
break
try:
prompt, kwargs, result_future = await asyncio.wait_for(
queue.get(), timeout
)
prompts.append(prompt)
result_futures.append(result_future)
if not can_batch:
kwargs_list.append(kwargs)
except asyncio.TimeoutError:
break
try:
# If every request uses identical kwargs we can run a single
# batched tokenizer call for a big speed-up.
if can_batch and len(prompts) > 1:
batch_encode_fn = partial(self.tokenizer, prompts, **kwargs)
results = await self._loop.run_in_executor(
self._executor, batch_encode_fn
)
for i, fut in enumerate(result_futures):
if not fut.done():
data = {k: v[i] for k, v in results.items()}
fut.set_result(BatchEncoding(data))
else:
encode_fn = lambda prompts=prompts, kwargs=kwargs_list: [
self.tokenizer(p, **kw) for p, kw in zip(prompts, kwargs)
]
results = await self._loop.run_in_executor(
self._executor, encode_fn
)
for fut, res in zip(result_futures, results):
if not fut.done():
fut.set_result(res)
except Exception as e:
for fut in result_futures:
if not fut.done():
fut.set_exception(e) | function_complex | 1 | {"cognitive_complexity": 42, "loc": 52, "code_loc": 44, "docstring_loc": 1, "function_name": "_batch_encode_loop", "class_name": "AsyncMicrobatchTokenizer", "qualname": "AsyncMicrobatchTokenizer._batch_encode_loop", "file_path": "vllm/utils/async_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/api/test_s3_endpoints.py:module_doc | Write a module-level docstring for the Python module `test_s3_endpoints` which contains class `TestS3FileEndpoints`. | API endpoint tests for S3 storage.
This module tests the file API endpoints (download, upload, delete) work correctly
with S3 storage. These are unit tests that mock the storage layer to focus on
testing API logic:
- Path parsing from database file records
- HTTP response construction (StreamingResponse vs content)
- Error handling and HTTP status codes
- Request parameter validation
For actual S3 storage service testing, see:
- tests/unit/services/storage/ - Unit tests with mocked boto3
- tests/integration/storage/ - Integration tests with real AWS S3 | documentation | 1 | {"doc_type": "module", "module_name": "test_s3_endpoints", "file_path": "src/backend/tests/unit/api/test_s3_endpoints.py", "repo_id": "langflow-ai/langflow", "char_length": 560} |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py:TestBaseAgentCoreMemoryMethods:class_doc | Write a class-level docstring for `TestBaseAgentCoreMemoryMethods` which has methods: `test_create_event_success`, `test_create_event_no_client`, `test_create_event_empty_messages`, `test_create_event_no_event_id`, `test_list_events_simple`. | Test BaseAgentCoreMemory methods using AgentCoreMemory instance. | documentation | 1 | {"doc_type": "class", "class_name": "TestBaseAgentCoreMemoryMethods", "file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py", "repo_id": "run-llama/llama_index", "char_length": 64, "methods": ["test_create_event_success", "test_create_event_no_client", "test_create_event_empty_messages", "test_create_event_no_event_id", "test_list_events_simple", "test_list_events_with_pagination", "test_retrieve_memories", "test_list_raw_events_pagination", "test_list_memory_records_pagination", "test_list_sessions_pagination"]} |
streamlit/streamlit:lib/tests/streamlit/components/v2/test_component_path_utils.py:test_looks_like_inline_content_heuristic | # Context:
import pytest
from streamlit.components.v2.component_path_utils import ComponentPathUtils
def _touch(path: Path) -> None: ...
def test_resolve_glob_pattern_accepts_file_within_root(tmp_path: Path) -> None: ...
def test_resolve_glob_pattern_handles_subdirectory_wildcards_single_match(tmp_path: Path) -> None: ...
def test_resolve_glob_pattern_raises_on_multiple_matches(tmp_path: Path) -> None: ...
def test_ensure_within_root_blocks_outside_with_prefix_collision(tmp_path: Path) -> None: ...
def test_resolve_glob_pattern_rejects_symlink_pointing_outside_root(tmp_path: Path) -> None: ...
def test_validate_path_security_rejects_invalid_paths(invalid_path: str) -> None: ...
def test_validate_path_security_allows_non_traversal_double_dots() -> None: ...
def test_resolve_glob_pattern_rejects_invalid_patterns(tmp_path: Path, invalid_pattern: str) -> None: ...
def test_validate_path_security_allows_current_dir_segment() -> None: ...
def test_resolve_glob_pattern_accepts_dot_prefixed_relative(tmp_path: Path) -> None: ...
# Task:
Write a Python test function `test_looks_like_inline_content_heuristic` to inline content heuristic should classify strings correctly across cases.
Module under test: __future__, typing, streamlit.components.v2.component_path_utils | def test_looks_like_inline_content_heuristic(value: str, expected_inline: bool) -> None:
"""Inline content heuristic should classify strings correctly across cases."""
assert ComponentPathUtils.looks_like_inline_content(value) == expected_inline | test | 1 | {"function_name": "test_looks_like_inline_content_heuristic", "class_name": null, "qualname": "test_looks_like_inline_content_heuristic", "file_path": "lib/tests/streamlit/components/v2/test_component_path_utils.py", "repo_id": "streamlit/streamlit", "loc": 3, "tested_modules": ["__future__", "typing", "streamlit.components.v2.component_path_utils", "streamlit.errors", "pathlib"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/datasource/test_turbopuffer_datasink.py:TestConstructorValidation.test_accepts_region_only | # Context:
def mock_turbopuffer_module(monkeypatch): ...
def sink(): ...
def mock_client(): ...
def sample_table(): ...
def make_sink(**kwargs) -> TurbopufferDatasink: ...
class TestClientInitialization: ...
class TestArrowTablePreparation: ...
class TestSingleNamespaceBatching: ...
class TestTransformToTurbopufferFormat: ...
class TestRetryLogic: ...
class TestWriteOrchestration: ...
class TestStreamingBehavior: ...
class TestMultiNamespaceWrites: ...
class TestSerialization: ...
class TestConstructorValidation:
def test_requires_namespace_or_namespace_column(self): ...
def test_rejects_both_namespace_and_namespace_column(self): ...
def test_namespace_column_cannot_be_id_or_vector(self): ...
def test_api_key_from_env(self, monkeypatch): ...
def test_rejects_same_id_and_vector_column(self): ...
def test_accepts_base_url_only(self): ...
def test_rejects_both_region_and_base_url(self): ...
def test_rejects_neither_region_nor_base_url(self): ...
# Task:
Write a Python test method `test_accepts_region_only` in test class `TestConstructorValidation` to constructor succeeds with region and no base_url.
Module under test: typing, packaging.version, ray.data._internal.datasource.turbopuffer_datasink | def test_accepts_region_only(self):
"""Constructor succeeds with region and no base_url."""
sink = make_sink(region="gcp-us-central1")
assert sink.region == "gcp-us-central1"
assert sink.base_url is None | test | 0 | {"function_name": "test_accepts_region_only", "class_name": "TestConstructorValidation", "qualname": "TestConstructorValidation.test_accepts_region_only", "file_path": "python/ray/data/tests/datasource/test_turbopuffer_datasink.py", "repo_id": "ray-project/ray", "loc": 5, "tested_modules": ["typing", "packaging.version", "ray.data._internal.datasource.turbopuffer_datasink", "ray.data._internal.utils.arrow_utils"], "has_docstring": true, "runnable_level": "file_runnable"} |
deepfakes/faceswap:lib/config/config.py:FaceswapConfig.__init__ | # Context:
from .ini import ConfigFile
from .objects import ConfigItem, ConfigSection, GlobalSection
def get_configs() -> dict[str, FaceswapConfig]: ...
def generate_configs(force: bool) -> None: ...
class FaceswapConfig:
def _get_plugin_group(self) -> str: ...
def add_section(self, title: str, info: str) -> None: ...
def add_item(self, section: str, title: str, config_item: ConfigItem) -> None: ...
def _import_defaults_from_module(self, filename: str, module_path: str, plugin_type: str) -> None: ...
def _defaults_from_plugin(self, plugin_folder: str) -> None: ...
def set_defaults(self, helptext: str) -> None: ...
def _set_defaults(self) -> None: ...
def save_config(self) -> None: ...
# Task:
Write a Python method `__init__` for the class `FaceswapConfig` to init Configuration.
Parameters: configfile: str | None
Returns: None | def __init__(self, configfile: str | None = None) -> None:
""" Init Configuration
Parameters
----------
configfile : str, optional
Optional path to a config file. ``None`` for default location. Default: ``None``
"""
logger.debug("Initializing: %s", self.__class__.__name__)
self._plugin_group = self._get_plugin_group()
self._ini = ConfigFile(self._plugin_group, ini_path=configfile)
self.sections: dict[str, ConfigSection] = {}
""" dict[str, :class:`ConfigSection`] : The Faceswap config sections and options """
self._set_defaults()
self._ini.on_load(self.sections)
_CONFIGS[self._plugin_group] = self
logger.debug("Initialized: %s", self.__class__.__name__) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 21, "code_loc": 9, "docstring_loc": 7, "function_name": "__init__", "class_name": "FaceswapConfig", "qualname": "FaceswapConfig.__init__", "file_path": "lib/config/config.py", "repo_id": "deepfakes/faceswap", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_parameterized_queries.py:test_build_where_clause_and_params_with_single_filter | # Context:
import pytest
from google.cloud import bigquery
from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
from llama_index.vector_stores.bigquery.utils import build_where_clause_and_params
def test_build_where_clause_and_params(): ...
def test_build_where_clause_and_params_with_nested_filters(): ...
def test_build_where_clause_and_params_without_args(): ...
# Task:
Write a Python test function `test_build_where_clause_and_params_with_single_filter` to it should construct a parameterized SQL WHERE clause and corresponding query parameters.
Module under test: google.cloud, google.cloud.bigquery, llama_index.core.vector_stores | def test_build_where_clause_and_params_with_single_filter(
key, value, operator, expected_where_clause, expected_query_parameter
):
"""It should construct a parameterized SQL WHERE clause and corresponding query parameters."""
# Given a MetadataFilters instance
filters = MetadataFilters(
filters=[MetadataFilter(key=key, value=value, operator=operator)]
)
# When the WHERE clause and query parameters are built
where_clause, query_params = build_where_clause_and_params(filters=filters)
# Then the WHERE clause should reflect the MetadataFilters
assert where_clause == expected_where_clause
# And the parameters should match the expected values
assert query_params == expected_query_parameter | test | 1 | {"function_name": "test_build_where_clause_and_params_with_single_filter", "class_name": null, "qualname": "test_build_where_clause_and_params_with_single_filter", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_parameterized_queries.py", "repo_id": "run-llama/llama_index", "loc": 17, "tested_modules": ["google.cloud", "google.cloud.bigquery", "llama_index.core.vector_stores", "llama_index.vector_stores.bigquery.utils", "sql_assertions"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/events/depends.py:EventHandler:class_doc | Write a class-level docstring for `EventHandler` (inherits from Protocol[EventT_co]) which has methods: `__call__`. | Protocol for event handler functions.
Generic protocol that accepts any subclass of BaseEvent.
Handlers can be either synchronous (returning None) or asynchronous
(returning a coroutine). | documentation | 0 | {"doc_type": "class", "class_name": "EventHandler", "file_path": "lib/crewai/src/crewai/events/depends.py", "repo_id": "crewAIInc/crewAI", "char_length": 188, "methods": ["__call__"]} |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-signnow/tests/test_tools_signnow.py:test_class | # Context:
from llama_index.tools.signnow.base import SignNowMCPToolSpec
def test_from_env_returns_spec(mock_which: MagicMock) -> None: ...
# Task:
Write a Python test function `test_class` to verify the behavior of `class`.
Module under test: llama_index.tools.signnow.base | def test_class() -> None:
names_of_base_classes = [b.__name__ for b in SignNowMCPToolSpec.__mro__]
assert "BaseToolSpec" in names_of_base_classes | test | 1 | {"function_name": "test_class", "class_name": null, "qualname": "test_class", "file_path": "llama-index-integrations/tools/llama-index-tools-signnow/tests/test_tools_signnow.py", "repo_id": "run-llama/llama_index", "loc": 3, "tested_modules": ["llama_index.tools.signnow.base"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/resolution/resolver.py:FileResolver._resolve_as_url | # Context:
from crewai_files.core.resolved import (
FileReference,
InlineBase64,
InlineBytes,
ResolvedFile,
UrlReference,
)
from crewai_files.core.sources import FileUrl
from crewai_files.core.types import FileInput
class FileContext: ...
class FileResolverConfig: ...
def create_resolver(provider: str | None, prefer_upload: bool, upload_threshold_bytes: int | None, enable_cache: bool) -> FileResolver: ...
class FileResolver:
def _build_file_context(file: FileInput) -> FileContext: ...
def _is_url_source(file: FileInput) -> bool: ...
def _supports_url(constraints: ProviderConstraints | None) -> bool: ...
def resolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile: ...
def resolve_files(self, files: dict[str, FileInput], provider: ProviderType) -> dict[str, ResolvedFile]: ...
def _get_type_constraint(content_type: str, constraints: ProviderConstraints) -> ImageConstraints | PDFConstraints | AudioConstraints | VideoConstraints | None: ...
def _should_upload(self, file: FileInput, provider: str, constraints: ProviderConstraints | None, file_size: int) -> bool: ...
def _resolve_via_upload(self, file: FileInput, provider: ProviderType, context: FileContext) -> ResolvedFile | None: ...
def _upload_with_retry(uploader: FileUploader, file: FileInput, provider: str, file_size: int) -> UploadResult | None: ...
def _resolve_inline(self, file: FileInput, provider: str, context: FileContext) -> ResolvedFile: ...
async def aresolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile: ...
async def aresolve_files(self, files: dict[str, FileInput], provider: ProviderType, max_concurrency: int) -> dict[str, ResolvedFile]: ...
async def _aresolve_via_upload(self, file: FileInput, provider: ProviderType, context: FileContext) -> ResolvedFile | None: ...
async def _aupload_with_retry(uploader: FileUploader, file: FileInput, provider: str, file_size: int) -> UploadResult | None: ...
def _get_uploader(self, provider: ProviderType) -> FileUploader | None: ...
def get_cached_uploads(self, provider: ProviderType) -> list[CachedUpload]: ...
def clear_cache(self) -> None: ...
# Task:
Write a Python method `_resolve_as_url` for the class `FileResolver` to resolve a URL source as UrlReference.
Parameters: file: FileInput
Returns: UrlReference | def _resolve_as_url(file: FileInput) -> UrlReference:
"""Resolve a URL source as UrlReference.
Args:
file: The file with URL source.
Returns:
UrlReference with the URL and content type.
"""
source = file._file_source
if not isinstance(source, FileUrl):
raise TypeError(f"Expected FileUrl source, got {type(source).__name__}")
return UrlReference(
content_type=file.content_type,
url=source.url,
) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 16, "code_loc": 7, "docstring_loc": 8, "function_name": "_resolve_as_url", "class_name": "FileResolver", "qualname": "FileResolver._resolve_as_url", "file_path": "lib/crewai-files/src/crewai_files/resolution/resolver.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/test_exceptions.py:TestAuthenticationError.test_is_ray_error_subclass | # Context:
from ray.exceptions import AuthenticationError, RayError
class FakeAuthMode(Enum): ...
class TestAuthenticationError:
auth_doc_url = "https://docs.ray.io/en/latest/ray-security/token-auth.html"
def test_basic_creation(self): ...
def test_auth_mode_note_in_message(self, auth_mode, expected_note): ...
# Task:
Write a Python test method `test_is_ray_error_subclass` in test class `TestAuthenticationError` to test that AuthenticationError is a RayError subclass.
Module under test: enum, ray.exceptions | def test_is_ray_error_subclass(self):
"""Test that AuthenticationError is a RayError subclass."""
error = AuthenticationError("Test")
assert isinstance(error, RayError) | test | 0 | {"function_name": "test_is_ray_error_subclass", "class_name": "TestAuthenticationError", "qualname": "TestAuthenticationError.test_is_ray_error_subclass", "file_path": "python/ray/tests/test_exceptions.py", "repo_id": "ray-project/ray", "loc": 4, "tested_modules": ["enum", "ray.exceptions"], "has_docstring": true, "runnable_level": "plib_runnable"} |
google/langextract:tests/inference_test.py:TestOllamaLanguageModel.test_ollama_default_timeout | # Context:
from unittest import mock
from langextract.providers import ollama
class TestBaseLanguageModel(absltest.TestCase): ...
class TestGeminiLanguageModel(absltest.TestCase): ...
class TestOpenAILanguageModelInference(parameterized.TestCase): ...
class TestOpenAILanguageModel(absltest.TestCase): ...
class TestOllamaLanguageModel(absltest.TestCase):
def test_ollama_infer(self, mock_ollama_query): ...
def test_ollama_extra_kwargs_passed_to_api(self, mock_post): ...
def test_ollama_stop_and_top_p_passthrough(self, mock_post): ...
def test_ollama_defaults_when_unspecified(self, mock_post): ...
def test_ollama_runtime_kwargs_override_stored(self, mock_post): ...
def test_ollama_temperature_zero(self, mock_post): ...
def test_ollama_timeout_through_infer(self): ...
# Task:
Write a Python test method `test_ollama_default_timeout` in test class `TestOllamaLanguageModel` to test that default timeout is used when not specified.
Module under test: absl.testing, absl.testing, langextract | def test_ollama_default_timeout(self):
"""Test that default timeout is used when not specified."""
model = ollama.OllamaLanguageModel(
model_id="test-model",
model_url="http://localhost:11434",
)
mock_response = mock.Mock(spec=["status_code", "json"])
mock_response.status_code = 200
mock_response.json.return_value = {"response": "test output"}
with mock.patch.object(
model._requests, "post", return_value=mock_response
) as mock_post:
model._ollama_query(prompt="test prompt")
mock_post.assert_called_once()
call_kwargs = mock_post.call_args[1]
self.assertEqual(
120,
call_kwargs["timeout"],
"Should use default timeout of 120 seconds",
) | test | 1 | {"function_name": "test_ollama_default_timeout", "class_name": "TestOllamaLanguageModel", "qualname": "TestOllamaLanguageModel.test_ollama_default_timeout", "file_path": "tests/inference_test.py", "repo_id": "google/langextract", "loc": 23, "tested_modules": ["absl.testing", "absl.testing", "langextract", "langextract.core", "langextract.core"], "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/screenshots/service.py:ScreenshotService.store_screenshot | # Context:
import base64
import anyio
from browser_use.observability import observe_debug
class ScreenshotService:
def __init__(self, agent_directory: str | Path):
"""Initialize with agent directory path"""
self.agent_directory = Path(agent_directory) if isinstance(agent_directory, str) else agent_directory
# Create screenshots subdirectory
self.screenshots_dir = self.agent_directory / 'screenshots'
self.screenshots_dir.mkdir(parents=True, exist_ok=True)
async def get_screenshot(self, screenshot_path: str) -> str | None: ...
# Task:
Write a Python async method `store_screenshot` for the class `ScreenshotService` to store screenshot to disk and return the full path as string.
Parameters: screenshot_b64: str, step_number: int
Returns: str | async def store_screenshot(self, screenshot_b64: str, step_number: int) -> str:
"""Store screenshot to disk and return the full path as string"""
screenshot_filename = f'step_{step_number}.png'
screenshot_path = self.screenshots_dir / screenshot_filename
# Decode base64 and save to disk
screenshot_data = base64.b64decode(screenshot_b64)
async with await anyio.open_file(screenshot_path, 'wb') as f:
await f.write(screenshot_data)
return str(screenshot_path) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 6, "docstring_loc": 1, "function_name": "store_screenshot", "class_name": "ScreenshotService", "qualname": "ScreenshotService.store_screenshot", "file_path": "browser_use/screenshots/service.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/examples/serve/example_reset_kv_cache/reset_kv_cache_example.py:module_doc | Write a module-level docstring for the Python module `reset_kv_cache_example` which contains function `create_llm_config`, function `start_server`, function `reset_cache_via_handle`. | Example: Resetting KV Cache in Ray Serve LLM via Control Plane Messages.
This example demonstrates two approaches to reset the KV cache on all replicas
of a Ray Serve LLM deployment using DevIngress:
1. **HTTP Endpoint Path** (`--use-http`):
Calls the built-in `/reset_prefix_cache` HTTP endpoint provided by
DevIngress via CacheManagerIngressMixin. Useful for external clients.
2. **In-Cluster Serve Handle Path** (default):
Uses Ray Serve's deployment handles and the broadcast API to send control
plane messages directly to all replicas. This keeps cache reset logic
within the cluster, avoiding HTTP overhead.
Both approaches use the same DevIngress server which provides control plane
endpoints (/sleep, /wakeup, /is_sleeping, /reset_prefix_cache).
The example:
1. Starts a Serve application with DevIngress and 2 replicas
2. Populates the KV cache on both replicas by sending multiple requests
3. Measures request time for a cached request (control)
4. Resets the KV cache using the selected method
5. Measures request time after cache reset (test)
6. Verifies that the cache was cleared by comparing request times
Usage:
# In-cluster path (using serve handles directly)
python reset_kv_cache_example.py
# HTTP endpoint path
python reset_kv_cache_example.py --use-http | documentation | 0 | {"doc_type": "module", "module_name": "reset_kv_cache_example", "file_path": "python/ray/llm/examples/serve/example_reset_kv_cache/reset_kv_cache_example.py", "repo_id": "ray-project/ray", "char_length": 1312} |
roboflow/supervision:.github/scripts/augment_links.py:get_repo_root | # Context:
import os
def augment_links_in_file(file_path: str, branch: str) -> None: ...
def main() -> None: ...
# Task:
Write a Python function `get_repo_root` to get the repository root path.
Returns: str | def get_repo_root() -> str:
"""Get the repository root path."""
script_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.dirname(os.path.dirname(script_dir)) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "get_repo_root", "class_name": null, "qualname": "get_repo_root", "file_path": ".github/scripts/augment_links.py", "repo_id": "roboflow/supervision", "has_docstring": true, "runnable_level": "slib_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py:EnterpriseActionKitToolAdapter:class_doc | Write a class-level docstring for `EnterpriseActionKitToolAdapter` which has methods: `__init__`, `tools`, `_fetch_actions`, `_generate_detailed_description`, `_create_tools`. | Adapter that creates BaseTool instances for enterprise actions. | documentation | 0 | {"doc_type": "class", "class_name": "EnterpriseActionKitToolAdapter", "file_path": "lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py", "repo_id": "crewAIInc/crewAI", "char_length": 63, "methods": ["__init__", "tools", "_fetch_actions", "_generate_detailed_description", "_create_tools", "_set_enterprise_action_token", "__enter__", "__exit__"]} |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/transports/base.py:BaseTransport:class_doc | Write a class-level docstring for `BaseTransport` (inherits from ABC) which has methods: `__init__`, `transport_type`, `connected`, `read_stream`, `write_stream`. | Base class for MCP transport implementations.
This abstract base class defines the interface that all transport
implementations must follow. Transports handle the low-level communication
with MCP servers. | documentation | 0 | {"doc_type": "class", "class_name": "BaseTransport", "file_path": "lib/crewai/src/crewai/mcp/transports/base.py", "repo_id": "crewAIInc/crewAI", "char_length": 205, "methods": ["__init__", "transport_type", "connected", "read_stream", "write_stream", "connect", "disconnect", "__aenter__", "__aexit__", "_set_streams"]} |
fastapi/fastapi:tests/test_tutorial/test_security/test_tutorial004.py:test_openapi_schema | # Context:
from types import ModuleType
from fastapi.testclient import TestClient
from inline_snapshot import snapshot
def get_mod(request: pytest.FixtureRequest): ...
def get_access_token(username, password, client: TestClient): ...
def test_login(mod: ModuleType): ...
def test_login_incorrect_password(mod: ModuleType): ...
def test_login_incorrect_username(mod: ModuleType): ...
def test_no_token(mod: ModuleType): ...
def test_token(mod: ModuleType): ...
def test_incorrect_token(mod: ModuleType): ...
def test_incorrect_token_type(mod: ModuleType): ...
def test_verify_password(mod: ModuleType): ...
def test_get_password_hash(mod: ModuleType): ...
def test_create_access_token(mod: ModuleType): ...
def test_token_no_sub(mod: ModuleType): ...
def test_token_no_username(mod: ModuleType): ...
def test_token_nonexistent_user(mod: ModuleType): ...
def test_token_inactive_user(mod: ModuleType): ...
def test_read_items(mod: ModuleType): ...
# Task:
Write a Python test function `test_openapi_schema` to verify the behavior of `openapi_schema`.
Module under test: types, fastapi.testclient, inline_snapshot | def test_openapi_schema(mod: ModuleType):
client = TestClient(mod.app)
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/token": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Token"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Login For Access Token",
"operationId": "login_for_access_token_token_post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_login_for_access_token_token_post"
}
}
},
"required": True,
},
}
},
"/users/me/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/User"}
}
},
}
},
"summary": "Read Users Me",
"operationId": "read_users_me_users_me__get",
"security": [{"OAuth2PasswordBearer": []}],
}
},
"/users/me/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Own Items",
"operationId": "read_own_items_users_me_items__get",
"security": [{"OAuth2PasswordBearer": []}],
}
},
},
"components": {
"schemas": {
"User": {
"title": "User",
"required": ["username"],
"type": "object",
"properties": {
"username": {"title": "Username", "type": "string"},
"email": {
"title": "Email",
"anyOf": [{"type": "string"}, {"type": "null"}],
},
"full_name": {
"title": "Full Name",
"anyOf": [{"type": "string"}, {"type": "null"}],
},
"disabled": {
"title": "Disabled",
"anyOf": [{"type": "boolean"}, {"type": "null"}],
},
},
},
"Token": {
"title": "Token",
"required": ["access_token", "token_type"],
"type": "object",
"properties": {
"access_token": {"title": "Access Token", "type": "string"},
"token_type": {"title": "Token Type", "type": "string"},
},
},
"Body_login_for_access_token_token_post": {
"title": "Body_login_for_access_token_token_post",
"required": ["username", "password"],
"type": "object",
"properties": {
"grant_type": {
"title": "Grant Type",
"anyOf": [
{"pattern": "^password$", "type": "string"},
{"type": "null"},
],
},
"username": {"title": "Username", "type": "string"},
"password": {
"title": "Password",
"type": "string",
"format": "password",
},
"scope": {
"title": "Scope",
"type": "string",
"default": "",
},
"client_id": {
"title": "Client Id",
"anyOf": [{"type": "string"}, {"type": "null"}],
},
"client_secret": {
"title": "Client Secret",
"anyOf": [{"type": "string"}, {"type": "null"}],
"format": "password",
},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
"input": {"title": "Input"},
"ctx": {"title": "Context", "type": "object"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {
"$ref": "#/components/schemas/ValidationError"
},
}
},
},
},
"securitySchemes": {
"OAuth2PasswordBearer": {
"type": "oauth2",
"flows": {
"password": {
"scopes": {},
"tokenUrl": "token",
}
},
}
},
},
}
) | test | 1 | {"function_name": "test_openapi_schema", "class_name": null, "qualname": "test_openapi_schema", "file_path": "tests/test_tutorial/test_security/test_tutorial004.py", "repo_id": "fastapi/fastapi", "loc": 187, "tested_modules": ["types", "fastapi.testclient", "inline_snapshot", "utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
deepfakes/faceswap:lib/config/ini.py:ConfigFile.format_help | # Context:
import textwrap
class ConfigFile:
def __init__(self, plugin_group: str, ini_path: str | None = None) -> None:
parse_class_init(locals())
self._plugin_group = plugin_group
self._file_path = self._get_config_path(ini_path)
self._parser = self._get_new_configparser()
if self._exists: # Load or create new
self.load()
def _exists(self) -> bool: ...
def _get_config_path(self, ini_path: str | None) -> str: ...
def _get_new_configparser(self) -> ConfigParser: ...
def load(self) -> None: ...
def save(self) -> None: ...
def _sections_synced(self, app_config: dict[str, ConfigSection]) -> bool: ...
def _options_synced(self, app_config: dict[str, ConfigSection]) -> bool: ...
def _values_synced(self, app_section: ConfigSection, section: str) -> bool: ...
def _is_synced_structure(self, app_config: dict[str, ConfigSection]) -> bool: ...
def _insert_section(self, section: str, helptext: str, config: ConfigParser) -> None: ...
def _insert_option(self, section: str, name: str, helptext: str, value: str, config: ConfigParser) -> None: ...
def _sync_from_app(self, app_config: dict[str, ConfigSection]) -> None: ...
def _get_converted_value(self, section: str, option: str, datatype: type) -> ConfigValueType: ...
def _sync_to_app(self, app_config: dict[str, ConfigSection]) -> None: ...
def on_load(self, app_config: dict[str, ConfigSection]) -> None: ...
def update_from_app(self, app_config: dict[str, ConfigSection]) -> None: ...
# Task:
Write a Python method `format_help` for the class `ConfigFile` to format comments for insertion into a config ini file.
Parameters: helptext: str, is_section: bool
Returns: str | def format_help(self, helptext: str, is_section: bool = False) -> str:
""" Format comments for insertion into a config ini file
Parameters
----------
helptext : str
The help text to be formatted
is_section : bool, optional
``True`` if the help text pertains to a section. ``False`` if it pertains to an option.
Default: ``True``
Returns
-------
str
The formatted help text
"""
logger.debug("[%s] Formatting help: (helptext: '%s', is_section: '%s')",
self._plugin_group, helptext, is_section)
formatted = ""
for hlp in helptext.split("\n"):
subsequent_indent = "\t\t" if hlp.startswith("\t") else ""
hlp = f"\t- {hlp[1:].strip()}" if hlp.startswith("\t") else hlp
formatted += textwrap.fill(hlp,
100,
tabsize=4,
subsequent_indent=subsequent_indent) + "\n"
helptext = '# {}'.format(formatted[:-1].replace("\n", "\n# ")) # Strip last newline
helptext = helptext.upper() if is_section else f"\n{helptext}"
return helptext | function_complex | 1 | {"cognitive_complexity": 6, "loc": 29, "code_loc": 13, "docstring_loc": 15, "function_name": "format_help", "class_name": "ConfigFile", "qualname": "ConfigFile.format_help", "file_path": "lib/config/ini.py", "repo_id": "deepfakes/faceswap", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/serve/tests/test_list_outbound_deployments.py:TestListOutboundDeployments.test_no_handles | # Context:
from typing import List
import ray
from ray import serve
from ray.serve._private.common import DeploymentID
class DownstreamA: ...
class DownstreamB: ...
class UpstreamWithStoredHandles: ...
class UpstreamWithNestedHandles: ...
class DynamicDeployment: ...
def get_replica_actor_handle(deployment_name: str, app_name: str): ...
class TestListOutboundDeployments:
async def test_stored_handles_in_init(self, serve_instance): ...
async def test_nested_handles_in_dict_and_list(self, serve_instance): ...
async def test_dynamic_handles(self, serve_instance): ...
# Task:
Write a Python test method `test_no_handles` in test class `TestListOutboundDeployments` to test deployment with no outbound handles.
Module under test: typing, ray, ray.serve._private.common | async def test_no_handles(self, serve_instance):
"""Test deployment with no outbound handles."""
app_name = "test_no_handles"
# Deploy a simple deployment with no handles
app = DownstreamA.bind()
serve.run(app, name=app_name)
# Get the replica actor
replica_actor = get_replica_actor_handle("DownstreamA", app_name)
# Call list_outbound_deployments
outbound_deployments: List[DeploymentID] = ray.get(
replica_actor.list_outbound_deployments.remote()
)
# Should be empty
assert len(outbound_deployments) == 0 | test | 0 | {"function_name": "test_no_handles", "class_name": "TestListOutboundDeployments", "qualname": "TestListOutboundDeployments.test_no_handles", "file_path": "python/ray/serve/tests/test_list_outbound_deployments.py", "repo_id": "ray-project/ray", "loc": 18, "tested_modules": ["typing", "ray", "ray.serve._private.common", "ray.serve._private.constants", "ray.serve.handle"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/base/mcp/test_mcp_util.py:TestMCPUtilityFunctions.test_get_unique_name | # Context:
from lfx.base.mcp import util
class TestMCPSessionManager: ...
class TestHeaderValidation: ...
class TestGlobalVariableResolution: ...
class TestStreamableHTTPHeaderIntegration: ...
class TestUpdateToolsStdioHeaders: ...
class TestFieldNameConversion: ...
class TestToolExecutionWithFieldConversion: ...
class TestMCPStdioClientWithEverythingServer: ...
class TestMCPStreamableHttpClientWithDeepWikiServer: ...
class TestMCPSseClientUnit: ...
class TestMCPStructuredTool: ...
class TestSnakeToCamelConversion: ...
class TestMCPUtilityFunctions:
def test_sanitize_mcp_name(self): ...
def test_is_valid_key_value_item(self): ...
def test_validate_node_installation(self): ...
def test_create_input_schema_from_json_schema(self): ...
async def test_validate_connection_params(self): ...
async def test_get_flow_snake_case_mocked(self): ...
# Task:
Write a Python test method `test_get_unique_name` in test class `TestMCPUtilityFunctions` to test unique name generation.
Module under test: lfx.base.mcp, lfx.base.mcp.util, lfx.base.mcp.util | def test_get_unique_name(self):
"""Test unique name generation."""
names = {"foo", "foo_1"}
assert util.get_unique_name("foo", 10, names) == "foo_2"
assert util.get_unique_name("bar", 10, names) == "bar"
assert util.get_unique_name("longname", 4, {"long"}) == "lo_1" | test | 1 | {"function_name": "test_get_unique_name", "class_name": "TestMCPUtilityFunctions", "qualname": "TestMCPUtilityFunctions.test_get_unique_name", "file_path": "src/backend/tests/unit/base/mcp/test_mcp_util.py", "repo_id": "langflow-ai/langflow", "loc": 6, "tested_modules": ["lfx.base.mcp", "lfx.base.mcp.util", "lfx.base.mcp.util", "lfx.base.mcp.util", "lfx.base.mcp.util"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:common/metadata_utils.py:apply_meta_data_filter | # Context:
from typing import Any, Callable, Dict
from rag.prompts.generator import gen_meta_filter # move from the top of the file to avoid circular import
def convert_conditions(metadata_condition): ...
def meta_filter(metas: dict, filters: list[dict], logic: str): ...
def dedupe_list(values: list) -> list: ...
def update_metadata_to(metadata, meta): ...
def metadata_schema(metadata: dict | list | None) -> Dict[str, Any]: ...
def _is_json_schema(obj: dict) -> bool: ...
def _is_metadata_list(obj: list) -> bool: ...
def turn2jsonschema(obj: dict | list) -> Dict[str, Any]: ...
# Task:
Write a Python async function `apply_meta_data_filter` to apply metadata filtering rules and return the filtered doc_ids.
Parameters: meta_data_filter: dict | None, metas: dict, question: str, chat_mdl: Any, base_doc_ids: list[str] | None, manual_value_resolver: Callable[[dict], dict] | None
Returns: list[str] | None | async def apply_meta_data_filter(
meta_data_filter: dict | None,
metas: dict,
question: str,
chat_mdl: Any = None,
base_doc_ids: list[str] | None = None,
manual_value_resolver: Callable[[dict], dict] | None = None,
) -> list[str] | None:
"""
Apply metadata filtering rules and return the filtered doc_ids.
meta_data_filter supports three modes:
- auto: generate filter conditions via LLM (gen_meta_filter)
- semi_auto: generate conditions using selected metadata keys only
- manual: directly filter based on provided conditions
Returns:
list of doc_ids, ["-999"] when manual filters yield no result, or None
when auto/semi_auto filters return empty.
"""
from rag.prompts.generator import gen_meta_filter # move from the top of the file to avoid circular import
doc_ids = list(base_doc_ids) if base_doc_ids else []
if not meta_data_filter:
return doc_ids
method = meta_data_filter.get("method")
if method == "auto":
filters: dict = await gen_meta_filter(chat_mdl, metas, question)
doc_ids.extend(meta_filter(metas, filters["conditions"], filters.get("logic", "and")))
if not doc_ids:
return None
elif method == "semi_auto":
selected_keys = []
constraints = {}
for item in meta_data_filter.get("semi_auto", []):
if isinstance(item, str):
selected_keys.append(item)
elif isinstance(item, dict):
key = item.get("key")
op = item.get("op")
selected_keys.append(key)
if op:
constraints[key] = op
if selected_keys:
filtered_metas = {key: metas[key] for key in selected_keys if key in metas}
if filtered_metas:
filters: dict = await gen_meta_filter(chat_mdl, filtered_metas, question, constraints=constraints)
doc_ids.extend(meta_filter(metas, filters["conditions"], filters.get("logic", "and")))
if not doc_ids:
return None
elif method == "manual":
filters = meta_data_filter.get("manual", [])
if manual_value_resolver:
filters = [manual_value_resolver(flt) for flt in filters]
doc_ids.extend(meta_filter(metas, filters, meta_data_filter.get("logic", "and")))
if filters and not doc_ids:
doc_ids = ["-999"]
return doc_ids | function_complex | 1 | {"cognitive_complexity": 31, "loc": 63, "code_loc": 37, "docstring_loc": 12, "function_name": "apply_meta_data_filter", "class_name": null, "qualname": "apply_meta_data_filter", "file_path": "common/metadata_utils.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/parakeet/test_feature_extraction_parakeet.py:module_doc | Write a module-level docstring for the Python module `test_feature_extraction_parakeet` which contains function `floats_list`, class `ParakeetFeatureExtractionTester`, class `ParakeetFeatureExtractionTest`. | Testing suite for the Parakeet feature extraction. | documentation | 0 | {"doc_type": "module", "module_name": "test_feature_extraction_parakeet", "file_path": "tests/models/parakeet/test_feature_extraction_parakeet.py", "repo_id": "huggingface/transformers", "char_length": 50} |
browser-use/browser-use:browser_use/llm/browser_use/chat.py:module_doc | Write a module-level docstring for the Python module `chat` which contains class `ChatBrowserUse`. | ChatBrowserUse - Client for browser-use cloud API
This wraps the BaseChatModel protocol and sends requests to the browser-use cloud API
for optimized browser automation LLM inference. | documentation | 0 | {"doc_type": "module", "module_name": "chat", "file_path": "browser_use/llm/browser_use/chat.py", "repo_id": "browser-use/browser-use", "char_length": 184} |
docling-project/docling:docling/experimental/pipeline/threaded_layout_vlm_pipeline.py:ThreadedLayoutVlmPipeline._build_document | # Context:
from typing import TYPE_CHECKING, List, Optional, Union, cast
from docling.backend.pdf_backend import PdfDocumentBackend
from docling.datamodel.base_models import ConversionStatus, Page
from docling.datamodel.document import ConversionResult
from docling.pipeline.standard_pdf_pipeline import (
ProcessingResult,
RunContext,
ThreadedItem,
ThreadedPipelineStage,
ThreadedQueue,
)
class ThreadedLayoutVlmPipeline(BasePipeline):
def __init__(self, pipeline_options: ThreadedLayoutVlmPipelineOptions) -> None:
super().__init__(pipeline_options)
self.pipeline_options: ThreadedLayoutVlmPipelineOptions = pipeline_options
self._run_seq = itertools.count(1) # deterministic, monotonic run ids
# VLM model type (initialized in _init_models)
self.vlm_model: BaseVlmPageModel
# Initialize models
self._init_models()
def _init_models(self) -> None: ...
def _resolve_artifacts_path(self) -> Optional[Path]: ...
def _create_run_ctx(self) -> RunContext: ...
def _integrate_results(self, conv_res: ConversionResult, proc: ProcessingResult) -> None: ...
def _assemble_document(self, conv_res: ConversionResult) -> ConversionResult: ...
def _turn_dt_into_doc(self, conv_res: ConversionResult) -> DoclingDocument: ...
def get_default_options(cls) -> ThreadedLayoutVlmPipelineOptions: ...
def is_backend_supported(cls, backend: AbstractDocumentBackend) -> bool: ...
def _determine_status(self, conv_res: ConversionResult) -> ConversionStatus: ...
def _unload(self, conv_res: ConversionResult) -> None: ...
# Task:
Write a Python method `_build_document` for the class `ThreadedLayoutVlmPipeline` to build document using threaded layout+VLM pipeline.
Parameters: conv_res: ConversionResult
Returns: ConversionResult | def _build_document(self, conv_res: ConversionResult) -> ConversionResult:
"""Build document using threaded layout+VLM pipeline."""
run_id = next(self._run_seq)
assert isinstance(conv_res.input._backend, PdfDocumentBackend)
backend = conv_res.input._backend
# Initialize pages
start_page, end_page = conv_res.input.limits.page_range
pages: List[Page] = []
images_scale = self.pipeline_options.images_scale
for i in range(conv_res.input.page_count):
if start_page - 1 <= i <= end_page - 1:
page = Page(page_no=i + 1)
if images_scale is not None:
page._default_image_scale = images_scale
page._backend = backend.load_page(i)
if page._backend and page._backend.is_valid():
page.size = page._backend.get_size()
conv_res.pages.append(page)
pages.append(page)
if not pages:
conv_res.status = ConversionStatus.FAILURE
return conv_res
total_pages = len(pages)
ctx = self._create_run_ctx()
for st in ctx.stages:
st.start()
proc = ProcessingResult(total_expected=total_pages)
fed_idx = 0
batch_size = 32
try:
while proc.success_count + proc.failure_count < total_pages:
# Feed pages to first stage
while fed_idx < total_pages:
ok = ctx.first_stage.input_queue.put(
ThreadedItem(
payload=pages[fed_idx],
run_id=run_id,
page_no=pages[fed_idx].page_no,
conv_res=conv_res,
),
timeout=0.0,
)
if ok:
fed_idx += 1
if fed_idx == total_pages:
ctx.first_stage.input_queue.close()
else:
break
# Drain results from output
out_batch = ctx.output_queue.get_batch(batch_size, timeout=0.05)
for itm in out_batch:
if itm.run_id != run_id:
continue
if itm.is_failed or itm.error:
proc.failed_pages.append(
(itm.page_no, itm.error or RuntimeError("unknown error"))
)
else:
assert itm.payload is not None
proc.pages.append(itm.payload)
# Handle early termination
if not out_batch and ctx.output_queue.closed:
missing = total_pages - (proc.success_count + proc.failure_count)
if missing > 0:
proc.failed_pages.extend(
[(-1, RuntimeError("pipeline terminated early"))] * missing
)
break
finally:
for st in ctx.stages:
st.stop()
ctx.output_queue.close()
self._integrate_results(conv_res, proc)
return conv_res | function_complex | 1 | {"cognitive_complexity": 50, "loc": 82, "code_loc": 68, "docstring_loc": 1, "function_name": "_build_document", "class_name": "ThreadedLayoutVlmPipeline", "qualname": "ThreadedLayoutVlmPipeline._build_document", "file_path": "docling/experimental/pipeline/threaded_layout_vlm_pipeline.py", "repo_id": "docling-project/docling", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/_internal/common/utils/lora_utils.py:get_lora_id | Write a Python function `get_lora_id` to get lora id for a given lora model id.
Parameters: lora_model_id: str
Returns: str | def get_lora_id(lora_model_id: str) -> str:
"""Get lora id for a given lora model id."""
return ":".join(lora_model_id.split(":")[1:]) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "get_lora_id", "class_name": null, "qualname": "get_lora_id", "file_path": "python/ray/llm/_internal/common/utils/lora_utils.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"} |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_decorators.py:TestMultipleDecorators.test_decorator_and_manual_registration_work_together | # Context:
from crewai.hooks import (
after_llm_call,
after_tool_call,
before_llm_call,
before_tool_call,
get_after_llm_call_hooks,
get_after_tool_call_hooks,
get_before_llm_call_hooks,
get_before_tool_call_hooks,
)
from crewai.hooks import register_before_tool_call_hook
def clear_hooks(): ...
class TestLLMHookDecorators: ...
class TestToolHookDecorators: ...
class TestDecoratorAttributes: ...
class TestMultipleDecorators:
def test_multiple_decorators_all_register(self): ...
# Task:
Write a Python test method `test_decorator_and_manual_registration_work_together` in test class `TestMultipleDecorators` to test that decorators and manual registration can be mixed.
Module under test: __future__, crewai.hooks, crewai.hooks.llm_hooks | def test_decorator_and_manual_registration_work_together(self):
"""Test that decorators and manual registration can be mixed."""
from crewai.hooks import register_before_tool_call_hook
@before_tool_call
def decorated_hook(context):
return None
def manual_hook(context):
return None
register_before_tool_call_hook(manual_hook)
hooks = get_before_tool_call_hooks()
assert len(hooks) == 2 | test | 0 | {"function_name": "test_decorator_and_manual_registration_work_together", "class_name": "TestMultipleDecorators", "qualname": "TestMultipleDecorators.test_decorator_and_manual_registration_work_together", "file_path": "lib/crewai/tests/hooks/test_decorators.py", "repo_id": "crewAIInc/crewAI", "loc": 16, "tested_modules": ["__future__", "crewai.hooks", "crewai.hooks.llm_hooks", "crewai.hooks.tool_hooks", "crewai.hooks"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/tests/test_task_processor.py:TestTaskConsumerWithRayServe.test_task_processor_with_cancel_tasks_and_app_custom_config | # Context:
import os
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve.schema import CeleryAdapterConfig, TaskProcessorConfig
from ray.serve.task_consumer import (
instantiate_adapter_from_config,
task_consumer,
task_handler,
)
class ProcessedTasksTracker: ...
def send_request_to_queue(processor_config: TaskProcessorConfig, data, task_name): ...
def temp_queue_directory(): ...
def transport_options(temp_queue_directory): ...
def create_processor_config(temp_queue_directory, transport_options): ...
def _get_task_counts_by_routing_key(queue_path): ...
class TestTaskConsumerWithDLQsConfiguration: ...
class TestTaskConsumerWithRayServe:
def test_task_consumer_as_serve_deployment(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_as_serve_deployment_with_failed_task(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_persistence_across_restarts(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_as_serve_deployment_with_async_task_handler(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_metrics(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_health_check(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_with_task_custom_config(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_failed_task_queue_consumption(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_multiple_task_consumers_in_single_app(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_with_one_queue_and_multiple_different_tasks(self, temp_queue_directory, serve_instance, create_processor_config): ...
# Task:
Write a Python test method `test_task_processor_with_cancel_tasks_and_app_custom_config` in test class `TestTaskConsumerWithRayServe` to test the cancel task functionality with celery broker.
Module under test: collections, pathlib, ray | def test_task_processor_with_cancel_tasks_and_app_custom_config(
self, external_redis, serve_instance # noqa: F811
):
"""Test the cancel task functionality with celery broker."""
redis_address = os.environ.get("RAY_REDIS_ADDRESS")
processor_config = TaskProcessorConfig(
queue_name="my_app_queue",
adapter_config=CeleryAdapterConfig(
broker_url=f"redis://{redis_address}/0",
backend_url=f"redis://{redis_address}/1",
app_custom_config={"worker_prefetch_multiplier": 1},
),
)
signal = SignalActor.remote()
@serve.deployment(max_ongoing_requests=1)
@task_consumer(task_processor_config=processor_config)
class MyTaskConsumer:
def __init__(self, signal_actor):
self._signal = signal_actor
self.message_received = []
@task_handler(name="process")
def process(self, data):
ray.get(self._signal.wait.remote())
self.message_received.append(data)
def get_message_received(self):
return self.message_received
handle = serve.run(MyTaskConsumer.bind(signal), name="app_v1")
task_ids = []
for i in range(2):
task_id_ref = send_request_to_queue.remote(
processor_config, f"test_data_{i}", task_name="process"
)
task_ids.append(ray.get(task_id_ref))
wait_for_condition(
lambda: ray.get(signal.cur_num_waiters.remote()) == 1, timeout=10
)
adapter_instance = instantiate_adapter_from_config(
task_processor_config=processor_config
)
adapter_instance.cancel_task_sync(task_ids[1])
ray.get(signal.send.remote())
def check_revoked():
status = adapter_instance.get_task_status_sync(task_ids[1])
return status.status == "REVOKED"
wait_for_condition(check_revoked, timeout=20)
assert "test_data_0" in handle.get_message_received.remote().result()
assert "test_data_1" not in handle.get_message_received.remote().result()
serve.delete("app_v1") | test | 0 | {"function_name": "test_task_processor_with_cancel_tasks_and_app_custom_config", "class_name": "TestTaskConsumerWithRayServe", "qualname": "TestTaskConsumerWithRayServe.test_task_processor_with_cancel_tasks_and_app_custom_config", "file_path": "python/ray/serve/tests/test_task_processor.py", "repo_id": "ray-project/ray", "loc": 62, "tested_modules": ["collections", "pathlib", "ray", "ray._common.test_utils", "ray.serve.schema"], "has_docstring": true, "runnable_level": "project_runnable"} |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/services/source_service.py:SourceService.update_feed | # Context:
from typing import List, Optional, Dict, Any
from fastapi import HTTPException
from services.db_service import sources_db, tracking_db
class SourceService:
async def get_sources(self, page: int, per_page: int, category: Optional[str], search: Optional[str], include_inactive: bool) -> PaginatedSources: ...
async def get_source(self, source_id: int) -> Dict[str, Any]: ...
async def get_source_categories(self, source_id: int) -> List[str]: ...
async def get_source_last_crawled(self, source_id: int) -> Optional[str]: ...
async def get_source_by_name(self, name: str) -> Dict[str, Any]: ...
async def get_source_feeds(self, source_id: int) -> List[Dict[str, Any]]: ...
async def get_categories(self) -> List[Dict[str, Any]]: ...
async def get_source_by_category(self, category_name: str) -> List[Dict[str, Any]]: ...
async def create_source(self, source_data: SourceCreate) -> Dict[str, Any]: ...
async def add_source_category(self, source_id: int, category_name: str) -> None: ...
async def update_source(self, source_id: int, source_data: SourceUpdate) -> Dict[str, Any]: ...
async def delete_source(self, source_id: int) -> Dict[str, Any]: ...
async def hard_delete_source(self, source_id: int) -> Dict[str, str]: ...
async def add_feed_to_source(self, source_id: int, feed_data: SourceFeedCreate) -> Dict[str, Any]: ...
async def delete_feed(self, feed_id: int) -> Dict[str, str]: ...
# Task:
Write a Python async method `update_feed` for the class `SourceService` to update an existing feed.
Parameters: feed_id: int, feed_data: Dict[str, Any]
Returns: Dict[str, Any] | async def update_feed(self, feed_id: int, feed_data: Dict[str, Any]) -> Dict[str, Any]:
"""Update an existing feed."""
try:
feed_query = "SELECT id, source_id FROM source_feeds WHERE id = ?"
feed = await sources_db.execute_query(feed_query, (feed_id,), fetch=True, fetch_one=True)
if not feed:
raise HTTPException(status_code=404, detail="Feed not found")
update_fields = []
update_params = []
if "feed_url" in feed_data:
update_fields.append("feed_url = ?")
update_params.append(feed_data["feed_url"])
if "feed_type" in feed_data:
update_fields.append("feed_type = ?")
update_params.append(feed_data["feed_type"])
if "is_active" in feed_data:
update_fields.append("is_active = ?")
update_params.append(feed_data["is_active"])
if not update_fields:
return await self.get_source_feeds(feed["source_id"])
update_params.append(feed_id)
update_query = f"""
UPDATE source_feeds
SET {", ".join(update_fields)}
WHERE id = ?
"""
await sources_db.execute_query(update_query, tuple(update_params))
return await self.get_source_feeds(feed["source_id"])
except Exception as e:
if isinstance(e, HTTPException):
raise e
if "UNIQUE constraint failed" in str(e) and "feed_url" in str(e):
raise HTTPException(status_code=409, detail="Feed URL already exists")
raise HTTPException(status_code=500, detail=f"Error updating feed: {str(e)}") | function_complex | 0 | {"cognitive_complexity": 16, "loc": 34, "code_loc": 32, "docstring_loc": 1, "function_name": "update_feed", "class_name": "SourceService", "qualname": "SourceService.update_feed", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/services/source_service.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/integrations/gmail/service.py:GmailService._parse_email | # Context:
from typing import Any
class GmailService:
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
def __init__(
self,
credentials_file: str | None = None,
token_file: str | None = None,
config_dir: str | None = None,
access_token: str | None = None,
):
"""
Initialize Gmail Service
Args:
credentials_file: Path to OAuth credentials JSON from Google Cloud Console
token_file: Path to store/load access tokens
config_dir: Directory to store config files (defaults to browser-use config directory)
access_token: Direct access token (skips file-based auth if provided)
"""
# Set up configuration directory using browser-use's config system
if config_dir is None:
self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR
else:
self.config_dir = Path(config_dir).expanduser().resolve()
# Ensure config directory exists (only if not using direct token)
if access_token is None:
self.config_dir.mkdir(parents=True, exist_ok=True)
# Set up credential paths
self.credentials_file = credentials_file or self.config_dir / 'gmail_credentials.json'
self.token_file = token_file or self.config_dir / 'gmail_token.json'
# Direct access token support
self.access_token = access_token
self.service = None
self.creds = None
self._authenticated = False
def is_authenticated(self) -> bool: ...
async def authenticate(self) -> bool: ...
async def get_recent_emails(self, max_results: int, query: str, time_filter: str) -> list[dict[str, Any]]: ...
def _extract_body(self, payload: dict[str, Any]) -> str: ...
# Task:
Write a Python method `_parse_email` for the class `GmailService` to parse Gmail message into readable format.
Parameters: message: dict[str, Any]
Returns: dict[str, Any] | def _parse_email(self, message: dict[str, Any]) -> dict[str, Any]:
"""Parse Gmail message into readable format"""
headers = {h['name']: h['value'] for h in message['payload']['headers']}
return {
'id': message['id'],
'thread_id': message['threadId'],
'subject': headers.get('Subject', ''),
'from': headers.get('From', ''),
'to': headers.get('To', ''),
'date': headers.get('Date', ''),
'timestamp': int(message['internalDate']),
'body': self._extract_body(message['payload']),
'raw_message': message,
} | function_simple | 0 | {"cognitive_complexity": 0, "loc": 15, "code_loc": 12, "docstring_loc": 1, "function_name": "_parse_email", "class_name": "GmailService", "qualname": "GmailService._parse_email", "file_path": "browser_use/integrations/gmail/service.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"} |
vllm-project/vllm:vllm/model_executor/models/bailing_moe_linear.py:BailingMoeV25:class_doc | Write a class-level docstring for `BailingMoeV25` (inherits from nn.Module) which has methods: `__init__`, `forward`. | Bailing MoE v2.5 - standalone implementation for linear attention model. | documentation | 1 | {"doc_type": "class", "class_name": "BailingMoeV25", "file_path": "vllm/model_executor/models/bailing_moe_linear.py", "repo_id": "vllm-project/vllm", "char_length": 72, "methods": ["__init__", "forward"]} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_agent_utils.py:TestConvertToolsToOpenaiSchema.test_converts_multiple_tools | # Context:
from crewai.utilities.agent_utils import (
_asummarize_chunks,
_estimate_token_count,
_extract_summary_tags,
_format_messages_for_summary,
_split_messages_into_chunks,
convert_tools_to_openai_schema,
parse_tool_call_args,
summarize_messages,
)
class CalculatorInput(BaseModel): ...
class CalculatorTool(BaseTool): ...
class SearchInput(BaseModel): ...
class SearchTool(BaseTool): ...
class NoSchemaTool(BaseTool): ...
def _make_mock_i18n() -> MagicMock: ...
class MCPStyleInput(BaseModel): ...
class MCPStyleTool(BaseTool): ...
class TestOptionalFieldsPreserveNull: ...
class TestSummarizeMessages: ...
class TestFormatMessagesForSummary: ...
class TestExtractSummaryTags: ...
class TestSplitMessagesIntoChunks: ...
class TestEstimateTokenCount: ...
class TestParallelSummarization: ...
def _build_long_conversation() -> list[dict[str, Any]]: ...
class TestParallelSummarizationVCR: ...
class TestParseToolCallArgs: ...
class TestConvertToolsToOpenaiSchema:
def test_converts_single_tool(self) -> None: ...
def test_functions_dict_contains_callables(self) -> None: ...
def test_function_can_be_called(self) -> None: ...
def test_empty_tools_list(self) -> None: ...
def test_schema_has_required_fields(self) -> None: ...
def test_tool_without_args_schema(self) -> None: ...
def test_schema_structure_matches_openai_format(self) -> None: ...
def test_removes_redundant_schema_fields(self) -> None: ...
def test_preserves_field_descriptions(self) -> None: ...
def test_preserves_default_values(self) -> None: ...
# Task:
Write a Python test method `test_converts_multiple_tools` in test class `TestConvertToolsToOpenaiSchema` to test converting multiple tools to OpenAI schema.
Module under test: __future__, typing, pydantic | def test_converts_multiple_tools(self) -> None:
"""Test converting multiple tools to OpenAI schema."""
tools = [CalculatorTool(), SearchTool()]
schemas, functions, _ = convert_tools_to_openai_schema(tools)
assert len(schemas) == 2
assert len(functions) == 2
# Check calculator
calc_schema = next(s for s in schemas if s["function"]["name"] == "calculator")
assert calc_schema["function"]["description"] == "Perform mathematical calculations"
# Check search
search_schema = next(s for s in schemas if s["function"]["name"] == "web_search")
assert search_schema["function"]["description"] == "Search the web for information"
assert "query" in search_schema["function"]["parameters"]["properties"]
assert "max_results" in search_schema["function"]["parameters"]["properties"] | test | 0 | {"function_name": "test_converts_multiple_tools", "class_name": "TestConvertToolsToOpenaiSchema", "qualname": "TestConvertToolsToOpenaiSchema.test_converts_multiple_tools", "file_path": "lib/crewai/tests/utilities/test_agent_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 17, "tested_modules": ["__future__", "typing", "pydantic", "crewai.tools.base_tool", "crewai.utilities.agent_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_arr.py:module_doc | Write a module-level docstring for the Python module `test_namespace_arr` which contains function `_make_fixed_size_list_table`, function `test_arr_to_list_fixed_size`, function `test_arr_to_list_invalid_dtype_raises`. | Integration tests for array namespace expressions.
These tests require Ray and test end-to-end array namespace expression evaluation. | documentation | 0 | {"doc_type": "module", "module_name": "test_namespace_arr", "file_path": "python/ray/data/tests/expressions/test_namespace_arr.py", "repo_id": "ray-project/ray", "char_length": 134} |
google/langextract:tests/progress_test.py:ProgressTest.test_extraction_progress_bar | # Context:
import tqdm
from langextract import progress
class ProgressTest(unittest.TestCase):
def test_download_progress_bar(self): ...
def test_save_load_progress_bars(self): ...
def test_model_info_extraction(self): ...
def test_formatting_functions(self): ...
# Task:
Write a Python test method `test_extraction_progress_bar` in test class `ProgressTest` to test extraction progress bar creation.
Module under test: langextract | def test_extraction_progress_bar(self):
"""Test extraction progress bar creation."""
pbar = progress.create_extraction_progress_bar(
range(10), "gemini-2.0-flash"
)
self.assertIsInstance(pbar, tqdm.tqdm)
self.assertIn("LangExtract", pbar.desc)
self.assertIn("gemini-2.0-flash", pbar.desc) | test | 1 | {"function_name": "test_extraction_progress_bar", "class_name": "ProgressTest", "qualname": "ProgressTest.test_extraction_progress_bar", "file_path": "tests/progress_test.py", "repo_id": "google/langextract", "loc": 9, "tested_modules": ["langextract"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/_internal/planner/plan_download_op.py:PartitionActor:class_doc | Write a class-level docstring for `PartitionActor` which has methods: `__init__`, `__call__`, `_estimate_nrows_per_partition`, `_sample_sizes`. | Actor that partitions download operations based on estimated file sizes.
For multiple URI columns, estimates the combined size across all columns. | documentation | 0 | {"doc_type": "class", "class_name": "PartitionActor", "file_path": "python/ray/data/_internal/planner/plan_download_op.py", "repo_id": "ray-project/ray", "char_length": 147, "methods": ["__init__", "__call__", "_estimate_nrows_per_partition", "_sample_sizes"]} |
ccxt/ccxt:python/ccxt/static_dependencies/bip/bip32/bip32_path.py:Bip32PathParser.Parse | # Context:
class Bip32PathConst: ...
class Bip32Path: ...
class Bip32PathParser:
def __ParseElements(path_elems: List[str]) -> Bip32Path: ...
def __ParseElem(path_elem: str) -> int: ...
# Task:
Write a Python method `Parse` for the class `Bip32PathParser` to parse a path and return a Bip32Path object.
Parameters: path: str
Returns: Bip32Path | def Parse(path: str) -> Bip32Path:
"""
Parse a path and return a Bip32Path object.
Args:
path (str): Path
Returns:
Bip32Path object: Bip32Path object
Raises:
Bip32PathError: If the path is not valid
"""
# Remove trailing "/" if any
if path.endswith("/"):
path = path[:-1]
# Parse elements
return Bip32PathParser.__ParseElements(
list(filter(None, path.split("/")))
) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 22, "code_loc": 5, "docstring_loc": 12, "function_name": "Parse", "class_name": "Bip32PathParser", "qualname": "Bip32PathParser.Parse", "file_path": "python/ccxt/static_dependencies/bip/bip32/bip32_path.py", "repo_id": "ccxt/ccxt", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:browser_use/llm/openai/serializer.py:OpenAIMessageSerializer._serialize_user_content | # Context:
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartRefusalParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageFunctionToolCallParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class OpenAIMessageSerializer:
def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam: ...
def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam: ...
def _serialize_content_part_refusal(part: ContentPartRefusalParam) -> ChatCompletionContentPartRefusalParam: ...
def _serialize_system_content(content: str | list[ContentPartTextParam]) -> str | list[ChatCompletionContentPartTextParam]: ...
def _serialize_assistant_content(content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None: ...
def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageFunctionToolCallParam: ...
def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...
def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...
def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...
def serialize(message: BaseMessage) -> ChatCompletionMessageParam: ...
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]: ...
# Task:
Write a Python method `_serialize_user_content` for the class `OpenAIMessageSerializer` to serialize content for user messages (text and images allowed).
Parameters: content: str | list[ContentPartTextParam | ContentPartImageParam]
Returns: str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] | def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
"""Serialize content for user messages (text and images allowed)."""
if isinstance(content, str):
return content
serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] = []
for part in content:
if part.type == 'text':
serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
elif part.type == 'image_url':
serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_image(part))
return serialized_parts | function_simple | 0 | {"cognitive_complexity": 5, "loc": 14, "code_loc": 9, "docstring_loc": 1, "function_name": "_serialize_user_content", "class_name": "OpenAIMessageSerializer", "qualname": "OpenAIMessageSerializer._serialize_user_content", "file_path": "browser_use/llm/openai/serializer.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/sam2/test_modeling_sam2.py:Sam2ModelIntegrationTest.test_inference_batched_images_batched_boxes | # Context:
from transformers.testing_utils import (
backend_empty_cache,
require_torch,
slow,
torch_device,
)
import torch
class Sam2VisionModelTester: ...
class Sam2VisionModelTest(ModelTesterMixin, unittest.TestCase): ...
class Sam2PromptEncoderTester: ...
class Sam2MaskDecoderTester: ...
class Sam2ModelTester: ...
class Sam2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ...
def prepare_image(): ...
def prepare_groceries_image(): ...
def prepare_dog_img(): ...
def prepare_video(): ...
class Sam2ModelIntegrationTest(unittest.TestCase):
def setUp(self): ...
def tearDown(self): ...
def test_inference_mask_generation_one_point_multimask(self): ...
def test_inference_mask_generation_one_point_no_multimask(self): ...
def test_inference_mask_generation_batched_images_multi_points(self): ...
def test_inference_mask_generation_batched_images_batched_points_multi_points(self): ...
def test_inference_mask_generation_from_existing_points_and_mask(self): ...
def test_dummy_pipeline_generation(self): ...
# Task:
Write a Python test method `test_inference_batched_images_batched_boxes` in test class `Sam2ModelIntegrationTest` to verify the behavior of `inference_batched_images_batched_boxes`.
Module under test: transformers, transformers.testing_utils, transformers.utils | def test_inference_batched_images_batched_boxes(self):
raw_image1 = prepare_image()
raw_image2 = prepare_groceries_image()
input_boxes = [
[[75, 275, 1725, 850], [425, 600, 700, 875], [1375, 550, 1650, 800], [1240, 675, 1400, 750]],
[[450, 170, 520, 350], [350, 190, 450, 350], [500, 170, 580, 350], [580, 170, 640, 350]],
]
inputs = self.processor(images=[raw_image1, raw_image2], input_boxes=input_boxes, return_tensors="pt").to(
torch_device
)
with torch.no_grad():
outputs = self.model(**inputs, multimask_output=False)
self.assertEqual(outputs.iou_scores.shape, (2, 4, 1))
self.assertEqual(outputs.pred_masks.shape, (2, 4, 1, 256, 256))
torch.testing.assert_close(
outputs.iou_scores,
torch.tensor([[[0.9904], [0.9689], [0.9770], [0.9079]], [[0.9739], [0.9816], [0.9838], [0.9781]]]).to(
torch_device
),
atol=1e-4,
rtol=1e-4,
)
torch.testing.assert_close(
outputs.pred_masks[:, :, :, :2, :2],
torch.tensor(
[
[
[[[-11.1540, -18.3994], [-12.4230, -17.4403]]],
[[[-19.3144, -29.3947], [-24.6341, -24.1144]]],
[[[-24.2983, -37.6470], [-31.6659, -31.0893]]],
[[[-25.4313, -44.0231], [-34.0903, -34.7447]]],
],
[
[[[-22.5539, -30.4633], [-32.8940, -21.6813]]],
[[[-23.6637, -31.3489], [-32.5095, -22.4442]]],
[[[-25.2987, -30.9999], [-34.6243, -24.1717]]],
[[[-26.3150, -30.5313], [-35.0152, -24.0271]]],
],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
) | test | 0 | {"function_name": "test_inference_batched_images_batched_boxes", "class_name": "Sam2ModelIntegrationTest", "qualname": "Sam2ModelIntegrationTest.test_inference_batched_images_batched_boxes", "file_path": "tests/models/sam2/test_modeling_sam2.py", "repo_id": "huggingface/transformers", "loc": 43, "tested_modules": ["transformers", "transformers.testing_utils", "transformers.utils", "transformers.video_utils", "test_configuration_common"], "has_docstring": false, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py:TestFileExistenceValidation.test_docx_file_not_found_raises_error | # Context:
import pytest
from crewai_tools.tools.rag.rag_tool import RagTool
def mock_rag_client() -> MagicMock: ...
def rag_tool(mock_rag_client: MagicMock) -> RagTool: ...
class TestDataTypeFileAlias: ...
class TestDataTypeStringValues: ...
class TestDataTypeEnumValues: ...
class TestInvalidDataType: ...
class TestKeywordArgumentVariants: ...
class TestAutoDetection: ...
class TestMetadataHandling: ...
class TestFileExistenceValidation:
def test_pdf_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_text_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_csv_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_json_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_xml_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_mdx_file_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
def test_directory_not_found_raises_error(self, rag_tool: RagTool) -> None: ...
# Task:
Write a Python test method `test_docx_file_not_found_raises_error` in test class `TestFileExistenceValidation` to test that non-existent DOCX file raises FileNotFoundError.
Module under test: pathlib, tempfile, crewai_tools.rag.data_types | def test_docx_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
"""Test that non-existent DOCX file raises FileNotFoundError."""
with pytest.raises(FileNotFoundError, match="File does not exist"):
rag_tool.add(path="nonexistent.docx", data_type="docx") | test | 0 | {"function_name": "test_docx_file_not_found_raises_error", "class_name": "TestFileExistenceValidation", "qualname": "TestFileExistenceValidation.test_docx_file_not_found_raises_error", "file_path": "lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["pathlib", "tempfile", "crewai_tools.rag.data_types", "crewai_tools.tools.rag.rag_tool"], "has_docstring": true, "runnable_level": "project_runnable"} |
TheAlgorithms/Python:machine_learning/t_stochastic_neighbour_embedding.py:main | # Context:
import numpy as np
def collect_dataset() -> tuple[ndarray, ndarray]: ...
def compute_pairwise_affinities(data_matrix: ndarray, sigma: float) -> ndarray: ...
def compute_low_dim_affinities(embedding_matrix: ndarray) -> tuple[ndarray, ndarray]: ...
def apply_tsne(data_matrix: ndarray, n_components: int, learning_rate: float, n_iter: int) -> ndarray: ...
# Task:
Write a Python function `main` to run t-SNE on the Iris dataset and display the first 5 embeddings.
Returns: None | def main() -> None:
"""
Run t-SNE on the Iris dataset and display the first 5 embeddings.
>>> main() # doctest: +ELLIPSIS
t-SNE embedding (first 5 points):
[[...
"""
features, _labels = collect_dataset()
embedding = apply_tsne(features, n_components=2, n_iter=300)
if not isinstance(embedding, np.ndarray):
raise TypeError("t-SNE embedding must be an ndarray")
print("t-SNE embedding (first 5 points):")
print(embedding[:5]) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 16, "code_loc": 6, "docstring_loc": 7, "function_name": "main", "class_name": null, "qualname": "main", "file_path": "machine_learning/t_stochastic_neighbour_embedding.py", "repo_id": "TheAlgorithms/Python", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/tests/test_label_scheduling.py:test_fallback_strategy | # Context:
import ray
class MyActor: ...
def get_node_id(): ...
def cluster_with_labeled_nodes(ray_start_cluster): ...
def test_label_selector_equals(cluster_with_labeled_nodes): ...
def test_label_selector_not_equals(cluster_with_labeled_nodes): ...
def test_label_selector_in(cluster_with_labeled_nodes): ...
def test_label_selector_not_in(cluster_with_labeled_nodes): ...
def test_label_selector_multiple(cluster_with_labeled_nodes): ...
def test_empty_selector_fallback_strategy(cluster_with_labeled_nodes): ...
def test_infeasible_fallback_strategy(cluster_with_labeled_nodes): ...
def test_fallback_with_feasible_primary_selector(cluster_with_labeled_nodes): ...
# Task:
Write a Python test function `test_fallback_strategy` to verify the behavior of `fallback_strategy`. | def test_fallback_strategy(cluster_with_labeled_nodes):
# Create a RayCluster with labelled nodes.
gpu_node, _, _ = cluster_with_labeled_nodes
# Define an unsatisfiable label selector.
infeasible_label_selector = {"ray.io/accelerator-type": "does-not-exist"}
# Create a fallback strategy with multiple accelerator options.
accelerator_fallbacks = [
{"label_selector": {"ray.io/accelerator-type": "A100"}},
{"label_selector": {"ray.io/accelerator-type": "TPU"}},
]
# Attempt to schedule the actor. The scheduler should fail to find a node with the
# primary `label_selector` and fall back to the first available option, 'A100'.
label_selector_actor = MyActor.options(
label_selector=infeasible_label_selector,
fallback_strategy=accelerator_fallbacks,
).remote()
# Assert that the actor was scheduled on the expected node.
assert ray.get(label_selector_actor.get_node_id.remote(), timeout=5) == gpu_node | test | 0 | {"function_name": "test_fallback_strategy", "class_name": null, "qualname": "test_fallback_strategy", "file_path": "python/ray/tests/test_label_scheduling.py", "repo_id": "ray-project/ray", "loc": 22, "tested_modules": [], "has_docstring": false, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py:test_start_monitoring_daemon | # Context:
from ray.dashboard.modules.reporter.gpu_profile_manager import GpuProfilingManager
def mock_node_has_gpus(monkeypatch): ...
def mock_dynolog_binaries(monkeypatch): ...
def mock_subprocess_popen(monkeypatch): ...
def mock_asyncio_create_subprocess_exec(monkeypatch): ...
def test_enabled(tmp_path, mock_node_has_gpus, mock_dynolog_binaries): ...
def test_disabled_no_gpus(tmp_path, monkeypatch): ...
def test_disabled_no_dynolog_bin(tmp_path, mock_node_has_gpus): ...
async def test_gpu_profile_disabled(tmp_path): ...
async def test_gpu_profile_without_starting_daemon(tmp_path, mock_node_has_gpus, mock_dynolog_binaries): ...
async def test_gpu_profile_with_dead_daemon(tmp_path, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen): ...
async def test_gpu_profile_on_dead_process(tmp_path, monkeypatch, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen): ...
async def test_gpu_profile_no_matched_processes(tmp_path, monkeypatch, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen, mock_asyncio_create_subprocess_exec): ...
async def test_gpu_profile_timeout(tmp_path, monkeypatch, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen, mock_asyncio_create_subprocess_exec): ...
async def test_gpu_profile_process_dies_during_profiling(tmp_path, monkeypatch, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen, mock_asyncio_create_subprocess_exec): ...
async def test_gpu_profile_success(tmp_path, monkeypatch, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen, mock_asyncio_create_subprocess_exec): ...
# Task:
Write a Python test function `test_start_monitoring_daemon` to verify the behavior of `start_monitoring_daemon`.
Module under test: pathlib, ray.dashboard.modules.reporter.gpu_profile_manager | def test_start_monitoring_daemon(
tmp_path, mock_node_has_gpus, mock_dynolog_binaries, mock_subprocess_popen
):
gpu_profiler = GpuProfilingManager(tmp_path, ip_address=LOCALHOST)
mocked_popen, mocked_proc = mock_subprocess_popen
mocked_proc.pid = 123
mocked_proc.poll.return_value = None
gpu_profiler.start_monitoring_daemon()
assert gpu_profiler.is_monitoring_daemon_running
assert mocked_popen.call_count == 1
assert mocked_popen.call_args[0][0] == [
"/usr/bin/fake_dynolog",
"--enable_ipc_monitor",
"--port",
str(gpu_profiler._DYNOLOG_PORT),
]
# "Terminate" the daemon
mocked_proc.poll.return_value = 0
assert not gpu_profiler.is_monitoring_daemon_running | test | 0 | {"function_name": "test_start_monitoring_daemon", "class_name": null, "qualname": "test_start_monitoring_daemon", "file_path": "python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py", "repo_id": "ray-project/ray", "loc": 23, "tested_modules": ["pathlib", "ray.dashboard.modules.reporter.gpu_profile_manager"], "has_docstring": false, "runnable_level": "file_runnable"} |
apache/airflow:providers/common/sql/tests/unit/common/sql/datafusion/test_engine.py:TestDataFusionEngine.test_get_credentials_unknown_type | # Context:
from unittest.mock import MagicMock, patch
import pytest
from airflow.providers.common.sql.datafusion.engine import DataFusionEngine
class TestDataFusionEngine:
def setup_connections(self, create_connection_without_db): ...
def test_init(self): ...
def test_session_context_property(self): ...
def test_register_datasource_invalid_config(self): ...
def test_register_datasource_success(self, mock_get_conn, mock_factory, storage_type, format, scheme): ...
def test_register_datasource_object_store_exception(self, mock_get_conn, mock_factory): ...
def test_register_datasource_duplicate_table(self, mock_get_conn): ...
def test_execute_query_success(self): ...
def test_execute_query_failure(self): ...
def test_execute_query_with_local_csv(self, mock_get_conn): ...
def test_register_datasource_with_options(self, mock_get_conn, mock_factory): ...
def test_remove_none_values(self): ...
def test_get_connection_config(self): ...
def test_get_schema_success(self): ...
def test_get_schema_with_local_csv(self, mock_get_conn): ...
# Task:
Write a Python test method `test_get_credentials_unknown_type` in test class `TestDataFusionEngine` to verify the behavior of `get_credentials_unknown_type`.
Module under test: __future__, datafusion, airflow.models | def test_get_credentials_unknown_type(self):
mock_conn = MagicMock()
mock_conn.conn_type = "dummy"
engine = DataFusionEngine()
with pytest.raises(ValueError, match="Unknown connection type dummy"):
engine._get_credentials(mock_conn) | test | 1 | {"function_name": "test_get_credentials_unknown_type", "class_name": "TestDataFusionEngine", "qualname": "TestDataFusionEngine.test_get_credentials_unknown_type", "file_path": "providers/common/sql/tests/unit/common/sql/datafusion/test_engine.py", "repo_id": "apache/airflow", "loc": 7, "tested_modules": ["__future__", "datafusion", "airflow.models", "airflow.providers.common.sql.config", "airflow.providers.common.sql.datafusion.base"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/models/transformers/transformer_glm_image.py:license_header | Add a Apache-2.0 license header comment for the project 'diffusers', authored by The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team, year 2025. | # Copyright 2025 The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | license | 1 | {"license_type": "Apache-2.0", "author": "The CogView team, Tsinghua University & ZhipuAI and The HuggingFace Team", "year": "2025", "source": "header", "repo_id": "huggingface/diffusers"} |
jax-ml/jax:tests/mosaic/gpu_torch_test_distributed.py:TorchTest.test_get_device_id | # Context:
import jax
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import arith
from jax._src.lib.mlir.dialects import memref
import jax.numpy as jnp
import jax.experimental.mosaic.gpu as mgpu
import torch
import torch.distributed as dist
class TorchTest(parameterized.TestCase):
def setUpClass(): ...
def setUp(self): ...
def test_remote_semaphore(self): ...
# Task:
Write a Python test method `test_get_device_id` in test class `TorchTest` to verify the behavior of `get_device_id`.
Module under test: absl.testing, jax._src, jax._src | def test_get_device_id(self):
index = ir.IndexType.get()
def kernel_body(ctx, dst, _):
device_id = ctx.device_id()
memref.store(device_id, dst, [arith.constant(index, 0)])
out_shape = jax.ShapeDtypeStruct((1,), jnp.int32)
kernel = mgpu.as_torch_gpu_kernel(
kernel_body, (1, 1, 1), (128, 1, 1), (), out_shape, ()
)
gathered = torch.empty((2,), dtype=torch.int32)
dist.all_gather_into_tensor(gathered, kernel())
self.assertEqual(gathered.tolist(), list(range(jax.process_count()))) | test | 1 | {"function_name": "test_get_device_id", "class_name": "TorchTest", "qualname": "TorchTest.test_get_device_id", "file_path": "tests/mosaic/gpu_torch_test_distributed.py", "repo_id": "jax-ml/jax", "loc": 13, "tested_modules": ["absl.testing", "jax._src", "jax._src", "jax._src", "jax._src.interpreters"], "has_docstring": false, "runnable_level": "file_runnable"} |
streamlit/streamlit:lib/tests/streamlit/elements/feedback_test.py:test_apptest_feedback_value_retained_on_rerun | # Context:
import streamlit as st
from streamlit.testing.v1 import AppTest
class TestFeedbackSerde: ...
class TestFeedbackCommand(DeltaGeneratorTestCase): ...
class TestFeedbackWidthConfig(DeltaGeneratorTestCase): ...
class TestFeedbackStableId(DeltaGeneratorTestCase): ...
class TestFeedbackDuplicateId(DeltaGeneratorTestCase): ...
def test_apptest_feedback_clearing_with_default(): ...
def test_apptest_feedback_no_default_clearing(): ...
# Task:
Write a Python test function `test_apptest_feedback_value_retained_on_rerun` to test that feedback value is retained across reruns.
Module under test: __future__, typing, parameterized | def test_apptest_feedback_value_retained_on_rerun():
"""Test that feedback value is retained across reruns."""
from streamlit.testing.v1 import AppTest
def script():
import streamlit as st
st.feedback("faces", key="test_feedback")
st.button("Rerun")
at = AppTest.from_function(script).run()
# Set a value
at = at.feedback[0].set_value(2).run()
assert at.feedback[0].value == 2
# Trigger a rerun via button click
at = at.button[0].click().run()
# Value should be retained
assert at.feedback[0].value == 2 | test | 1 | {"function_name": "test_apptest_feedback_value_retained_on_rerun", "class_name": null, "qualname": "test_apptest_feedback_value_retained_on_rerun", "file_path": "lib/tests/streamlit/elements/feedback_test.py", "repo_id": "streamlit/streamlit", "loc": 21, "tested_modules": ["__future__", "typing", "parameterized", "streamlit.elements.widgets.feedback", "streamlit.errors"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/kernels/helion/test_register.py:TestValidateHelionSettings.test_accepts_valid_settings | # Context:
import helion
from vllm.kernels.helion.register import (
_HOP_AVAILABLE,
ConfiguredHelionKernel,
HelionKernelWrapper,
get_kernel_by_name,
get_registered_kernels,
register_kernel,
validate_helion_settings,
)
def sample_configs(): ...
def sample_kernel(): ...
def config_manager_with_test_configs(sample_configs): ...
def configured_kernel(sample_kernel, sample_configs, config_manager_with_test_configs): ...
def create_configured_kernel_with_configs(op_name, config_picker, kernel_func, configs, platform, helion_settings): ...
class TestConfiguredHelionKernel: ...
class TestHelionKernelWrapper: ...
class TestKernelRegistry: ...
class TestValidateHelionSettings:
def test_accepts_none_settings(self): ...
def test_rejects_autotuner_fn(self): ...
def test_warns_on_static_shapes_true(self): ...
# Task:
Write a Python test method `test_accepts_valid_settings` in test class `TestValidateHelionSettings` to test that valid settings without conflicts are accepted.
Module under test: vllm.utils.import_utils, vllm.kernels.helion.config_manager, vllm.kernels.helion.register | def test_accepts_valid_settings(self):
"""Test that valid settings without conflicts are accepted."""
settings = helion.Settings()
settings.static_shapes = False
settings.print_output_code = True
validate_helion_settings(settings, "test_kernel") # Should not raise | test | 1 | {"function_name": "test_accepts_valid_settings", "class_name": "TestValidateHelionSettings", "qualname": "TestValidateHelionSettings.test_accepts_valid_settings", "file_path": "tests/kernels/helion/test_register.py", "repo_id": "vllm-project/vllm", "loc": 6, "tested_modules": ["vllm.utils.import_utils", "vllm.kernels.helion.config_manager", "vllm.kernels.helion.register", "vllm.kernels.helion.register", "vllm.kernels.helion.register"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/v1/kv_connector/unit/test_nixl_connector.py:TestNixlHandshake.test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental | # Context:
from unittest.mock import MagicMock, patch
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
KVConnectorRole,
NixlAgentMetadata,
NixlConnector,
NixlConnectorMetadata,
NixlConnectorScheduler,
NixlConnectorWorker,
NixlHandshakePayload,
NixlKVConnectorStats,
compute_nixl_compatibility_hash,
)
from .utils import create_request, create_scheduler, create_vllm_config
def clear_kv_transfer(): ...
def get_default_xfer_telemetry(xferDurationS: float, postDurationS: float, totalBytes: int, descCount: int) -> dict: ...
class FakeNixlWrapper: ...
def _make_fake_nixl_pkg(): ...
def test_basic_interface(): ...
def test_prompt_less_than_block_size(): ...
def test_kv_transfer_handshake(dist_init): ...
class FakeNixlConnectorWorker(NixlConnectorWorker): ...
def test_kv_connector_stats(default_vllm_config, dist_init): ...
def test_kv_connector_stats_aggregation(): ...
def test_multi_kv_connector_stats_aggregation(): ...
def test_scheduler_kv_connector_stats_aggregation(): ...
def test_abort_timeout_on_prefiller(monkeypatch, distributed_executor_backend): ...
class RequestIdMapper: ...
def _run_abort_timeout_test(llm: LLM, timeout: int): ...
def test_register_kv_caches(default_vllm_config, dist_init, attn_backend, enable_cross_layers): ...
class FakePlatform(Platform): ...
def test_kv_buffer_to_nixl_memory_types(default_vllm_config, dist_init, kv_buffer_device, nixl_memory_type): ...
def test_shutdown_cleans_up_resources(default_vllm_config, dist_init): ...
def test_aborted_request_removed_from_worker_in_batch(default_vllm_config, dist_init): ...
class FailingNixlWrapper(FakeNixlWrapper): ...
def test_transfer_failure_logging(default_vllm_config, dist_init, failure_type, wrapper_config, needs_get_finished): ...
def test_handshake_failure_returns_finished(default_vllm_config, dist_init): ...
def test_transfer_setup_failure_returns_finished(default_vllm_config, dist_init): ...
def test_compatibility_hash_validation(default_vllm_config, dist_init, mismatch_type, config_overrides, version_override, should_fail, enforce_handshake_compat): ...
def test_handshake_decode_errors(default_vllm_config, dist_init, error_scenario): ...
class TestNixlHandshake:
def test_multi_xfer_one_engine(self, default_vllm_config, dist_init): ...
def test_async_load_kv(self, default_vllm_config, dist_init, decode_tp_size, prefill_tp_size): ...
def test_prefill_tp_size_greater_than_decode_tp_size(self, local_tp_size: int, default_vllm_config, dist_init): ...
def test_prefill_tp_size_greater_than_decode_tp_size_mla(self, local_tp_size: int, default_vllm_config, dist_init): ...
def test_concurrent_load_kv(self, default_vllm_config, dist_init): ...
def test_handshake_fails_on_kv_cache_layout_mismatch(self, default_vllm_config, dist_init): ...
# Task:
Write a Python test method `test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental` in test class `TestNixlHandshake` to verify that adding a remote agent fails if kv_cache_layout differs.
Module under test: collections, typing, vllm | def test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental(
self, default_vllm_config, dist_init
):
"""
Verify that adding a remote agent fails if kv_cache_layout differs.
This test is only relevant for heterogeneous TP.
"""
vllm_config = create_vllm_config(enable_permute_local_kv=True)
# Mock TP world size to 2 to force heterogeneous TP when
# remote_tp_size=1
with patch(
"vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.get_tensor_model_parallel_world_size", # noqa: E501
return_value=2,
):
# Initialize connector and worker (with fake NIXL wrapper)
connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
connector.connector_worker = FakeNixlConnectorWorker(
vllm_config,
connector.engine_id,
hand_shake_latency=0,
kv_cache_layout="NHD",
)
worker = connector.connector_worker
# Minimal local registration params used by add_remote_agent
worker.slot_size_per_layer = [2048]
worker.block_len_per_layer = [2048 * worker.block_size]
worker.num_blocks = 1
worker.dst_num_blocks[worker.engine_id] = worker.num_blocks
# Metadata with different kv_cache_layout than local worker
meta = NixlAgentMetadata(
engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
agent_metadata=FakeNixlWrapper.AGENT_METADATA,
kv_caches_base_addr=[0],
device_id=0,
num_blocks=1,
# prefill TP=1, decode TP=2, remote block_lens is double to local
block_lens=[i * 2 for i in worker.block_len_per_layer],
kv_cache_layout="HND",
block_size=worker.block_size,
)
# We don't check layout for homogeneous TP and MLA for now, as the
# whole block is moved.
worker.add_remote_agent(meta, remote_tp_size=1) | test | 1 | {"function_name": "test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental", "class_name": "TestNixlHandshake", "qualname": "TestNixlHandshake.test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental", "file_path": "tests/v1/kv_connector/unit/test_nixl_connector.py", "repo_id": "vllm-project/vllm", "loc": 47, "tested_modules": ["collections", "typing", "vllm", "vllm.config", "vllm.distributed.kv_transfer.kv_connector.utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py:module_doc | Write a module-level docstring for the Python module `test_gpu_profiler_manager` which contains function `mock_node_has_gpus`, function `mock_dynolog_binaries`, function `mock_subprocess_popen`, function `mock_asyncio_create_subprocess_exec`, function `test_enabled`. | Unit tests for the GPU profiler manager.
All GPU and dynolog dependencies are mocked out.
This test just verifies that commands are launched correctly and that
validations are correctly performed. | documentation | 0 | {"doc_type": "module", "module_name": "test_gpu_profiler_manager", "file_path": "python/ray/dashboard/modules/reporter/tests/test_gpu_profiler_manager.py", "repo_id": "ray-project/ray", "char_length": 197} |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py:TestStarletteSessionClient.test_write_forward_msg_queues_message | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from streamlit.web.server.starlette.starlette_websocket import (
StarletteClientContext,
StarletteSessionClient,
_gather_user_info,
_get_signed_cookie_with_chunks,
_is_origin_allowed,
_parse_decoded_user_cookie,
_parse_subprotocols,
_parse_user_cookie_signed,
create_websocket_handler,
create_websocket_routes,
)
class TestParseSubprotocols: ...
class TestGatherUserInfo: ...
class TestParseDecodedUserCookie: ...
class TestParseUserCookieSigned: ...
class TestIsOriginAllowed: ...
class TestWebsocketHandlerUserInfoPrecedence: ...
class TestGetSignedCookieWithChunks: ...
class TestCreateWebsocketRoutes: ...
class TestStarletteClientContext: ...
class TestStarletteSessionClientClientContext: ...
class TestStarletteSessionClient:
async def test_write_forward_msg_raises_when_closed(self) -> None: ...
async def test_aclose_sets_closed_and_cancels_task(self) -> None: ...
# Task:
Write a Python test method `test_write_forward_msg_queues_message` in test class `TestStarletteSessionClient` to test that write_forward_msg adds message to queue.
Module under test: __future__, streamlit.web.server.starlette, streamlit.web.server.starlette.starlette_websocket | async def test_write_forward_msg_queues_message(self) -> None:
"""Test that write_forward_msg adds message to queue."""
mock_websocket = MagicMock()
client = StarletteSessionClient(mock_websocket)
mock_msg = MagicMock()
with patch(
"streamlit.web.server.starlette.starlette_websocket.serialize_forward_msg"
) as mock_serialize:
mock_serialize.return_value = b"serialized"
client.write_forward_msg(mock_msg)
assert client._send_queue.qsize() == 1
# Cleanup
await client.aclose() | test | 1 | {"function_name": "test_write_forward_msg_queues_message", "class_name": "TestStarletteSessionClient", "qualname": "TestStarletteSessionClient.test_write_forward_msg_queues_message", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_websocket_test.py", "repo_id": "streamlit/streamlit", "loc": 17, "tested_modules": ["__future__", "streamlit.web.server.starlette", "streamlit.web.server.starlette.starlette_websocket", "tests.testutil", "starlette.websockets"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/bedrock/test_bedrock_async.py:test_bedrock_async_with_temperature | # Context:
import pytest
from crewai.llm import LLM
async def test_bedrock_async_basic_call(): ...
async def test_bedrock_async_with_max_tokens(): ...
async def test_bedrock_async_with_system_message(): ...
async def test_bedrock_async_conversation(): ...
async def test_bedrock_async_multiple_calls(): ...
async def test_bedrock_async_with_parameters(): ...
# Task:
Write a Python test function `test_bedrock_async_with_temperature` to test async call with temperature parameter.
Module under test: crewai.llm | async def test_bedrock_async_with_temperature():
"""Test async call with temperature parameter."""
llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0", temperature=0.1)
result = await llm.acall("Say the word 'test' once")
assert result is not None
assert isinstance(result, str) | test | 0 | {"function_name": "test_bedrock_async_with_temperature", "class_name": null, "qualname": "test_bedrock_async_with_temperature", "file_path": "lib/crewai/tests/llms/bedrock/test_bedrock_async.py", "repo_id": "crewAIInc/crewAI", "loc": 8, "tested_modules": ["crewai.llm"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/model_executor/models/gpt_oss.py:_get_moe_weight_dtype | Write a Python function `_get_moe_weight_dtype` to helper function to get MoE quantization weight dtype.
Parameters: layer_id: int
Returns: str | None | def _get_moe_weight_dtype(layer_id: int = 0) -> str | None:
"""Helper function to get MoE quantization weight dtype.
Args:
layer_id: Layer index to check (default 0, as all layers should
have the same quantization method)
Returns:
Weight dtype string (e.g., "mxfp4", "fp8") or None if not available
"""
if hasattr(self.layers[layer_id].mlp.experts.quant_method, "weight_dtype"):
return self.layers[layer_id].mlp.experts.quant_method.weight_dtype
return None | function_simple | 1 | {"cognitive_complexity": 1, "loc": 13, "code_loc": 3, "docstring_loc": 9, "function_name": "_get_moe_weight_dtype", "class_name": null, "qualname": "_get_moe_weight_dtype", "file_path": "vllm/model_executor/models/gpt_oss.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "self_contained"} |
apache/airflow:providers/teradata/src/airflow/providers/teradata/utils/bteq_util.py:is_valid_encoding | Write a Python function `is_valid_encoding` to check if the file can be read with the specified encoding.
Parameters: file_path: str, encoding: str
Returns: bool | def is_valid_encoding(file_path: str, encoding: str = "UTF-8") -> bool:
"""
Check if the file can be read with the specified encoding.
:param file_path: Path to the file to be checked.
:param encoding: Encoding to use for reading the file.
:return: True if the file can be read with the specified encoding, False otherwise.
"""
with open(file_path, encoding=encoding) as f:
f.read()
return True | function_simple | 1 | {"cognitive_complexity": 0, "loc": 11, "code_loc": 3, "docstring_loc": 7, "function_name": "is_valid_encoding", "class_name": null, "qualname": "is_valid_encoding", "file_path": "providers/teradata/src/airflow/providers/teradata/utils/bteq_util.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "self_contained"} |
binary-husky/gpt_academic:crazy_functions/doc_fns/read_fns/unstructured_all/paper_metadata_extractor.py:PaperMetadataExtractor._validate_file | # Context:
from pathlib import Path
from typing import Optional, Set, Dict, Union, List
import os
class PaperMetadata: ...
class ExtractorConfig: ...
def main(): ...
class PaperMetadataExtractor:
SECTION_PATTERNS = {
def __init__(self, config: Optional[ExtractorConfig] = None):
"""初始化提取器
Args:
config: 提取器配置对象,如果为None则使用默认配置
"""
self.config = config or ExtractorConfig()
self._setup_logging()
def _setup_logging(self) -> None: ...
def _cleanup_text(self, text: str) -> str: ...
def get_supported_formats() -> List[str]: ...
def extract_metadata(self, file_path: Union[str, Path], strategy: str) -> PaperMetadata: ...
def _extract_title_and_authors(self, elements, metadata: PaperMetadata) -> None: ...
def _evaluate_title_candidate(self, text, position, element): ...
def _extract_abstract_and_keywords(self, elements, metadata: PaperMetadata) -> None: ...
def _extract_additional_metadata(self, elements, metadata: PaperMetadata) -> None: ...
# Task:
Write a Python method `_validate_file` for the class `PaperMetadataExtractor` to 验证文件.
Parameters: file_path: Union[str, Path], max_size_mb: int
Returns: Path | def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
"""验证文件
Args:
file_path: 文件路径
max_size_mb: 允许的最大文件大小(MB)
Returns:
Path: 验证后的Path对象
Raises:
ValueError: 文件不存在、格式不支持或大小超限
PermissionError: 没有读取权限
"""
path = Path(file_path).resolve()
if not path.exists():
raise ValueError(f"文件不存在: {path}")
if not path.is_file():
raise ValueError(f"不是文件: {path}")
if not os.access(path, os.R_OK):
raise PermissionError(f"没有读取权限: {path}")
file_size_mb = path.stat().st_size / (1024 * 1024)
if file_size_mb > max_size_mb:
raise ValueError(
f"文件大小 ({file_size_mb:.1f}MB) 超过限制 {max_size_mb}MB"
)
if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
raise ValueError(
f"不支持的文件格式: {path.suffix}. "
f"支持的格式: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
)
return path | function_simple | 1 | {"cognitive_complexity": 5, "loc": 38, "code_loc": 18, "docstring_loc": 13, "function_name": "_validate_file", "class_name": "PaperMetadataExtractor", "qualname": "PaperMetadataExtractor._validate_file", "file_path": "crazy_functions/doc_fns/read_fns/unstructured_all/paper_metadata_extractor.py", "repo_id": "binary-husky/gpt_academic", "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:rllib/algorithms/tqc/default_tqc_rl_module.py:DefaultTQCRLModule:class_doc | Write a class-level docstring for `DefaultTQCRLModule` (inherits from RLModule, InferenceOnlyAPI, TargetNetworkAPI, QNetAPI) which has methods: `setup`, `make_target_networks`, `get_non_inference_attributes`, `get_target_network_pairs`, `get_initial_state`. | RLModule for the TQC (Truncated Quantile Critics) algorithm.
TQC extends SAC by using distributional critics with quantile regression.
Each critic outputs n_quantiles values instead of a single Q-value.
Architecture:
- Policy (Actor): Same as SAC
[obs] -> [pi_encoder] -> [pi_head] -> [action_dist_inputs]
- Quantile Critics: Multiple critics, each outputting n_quantiles
[obs, action] -> [qf_encoder_i] -> [qf_head_i] -> [n_quantiles values]
- Target Quantile Critics: Target networks for each critic
[obs, action] -> [target_qf_encoder_i] -> [target_qf_head_i] -> [n_quantiles] | documentation | 0 | {"doc_type": "class", "class_name": "DefaultTQCRLModule", "file_path": "rllib/algorithms/tqc/default_tqc_rl_module.py", "repo_id": "ray-project/ray", "char_length": 589, "methods": ["setup", "make_target_networks", "get_non_inference_attributes", "get_target_network_pairs", "get_initial_state"]} |
ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/agent_builder.py:module_doc | Write a module-level docstring for the Python module `agent_builder` which contains function `build_llm`. | Shared agent-building helpers.
This repo had multiple agents with very similar boilerplate:
- load config
- build LLM
- (optionally) discover MCP tools
- create a LangChain agent with MemorySaver
This module centralizes that logic so individual agents only specify:
- system prompt
- tools source (MCP endpoints vs explicitly provided tools) | documentation | 0 | {"doc_type": "module", "module_name": "agent_builder", "file_path": "doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/agent_builder.py", "repo_id": "ray-project/ray", "char_length": 343} |
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/resolution/resolver.py:FileResolver._resolve_inline | # Context:
import base64
from crewai_files.core.resolved import (
FileReference,
InlineBase64,
InlineBytes,
ResolvedFile,
UrlReference,
)
from crewai_files.core.types import FileInput
class FileContext: ...
class FileResolverConfig: ...
def create_resolver(provider: str | None, prefer_upload: bool, upload_threshold_bytes: int | None, enable_cache: bool) -> FileResolver: ...
class FileResolver:
def _build_file_context(file: FileInput) -> FileContext: ...
def _is_url_source(file: FileInput) -> bool: ...
def _supports_url(constraints: ProviderConstraints | None) -> bool: ...
def _resolve_as_url(file: FileInput) -> UrlReference: ...
def resolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile: ...
def resolve_files(self, files: dict[str, FileInput], provider: ProviderType) -> dict[str, ResolvedFile]: ...
def _get_type_constraint(content_type: str, constraints: ProviderConstraints) -> ImageConstraints | PDFConstraints | AudioConstraints | VideoConstraints | None: ...
def _should_upload(self, file: FileInput, provider: str, constraints: ProviderConstraints | None, file_size: int) -> bool: ...
def _resolve_via_upload(self, file: FileInput, provider: ProviderType, context: FileContext) -> ResolvedFile | None: ...
def _upload_with_retry(uploader: FileUploader, file: FileInput, provider: str, file_size: int) -> UploadResult | None: ...
async def aresolve(self, file: FileInput, provider: ProviderType) -> ResolvedFile: ...
async def aresolve_files(self, files: dict[str, FileInput], provider: ProviderType, max_concurrency: int) -> dict[str, ResolvedFile]: ...
async def _aresolve_via_upload(self, file: FileInput, provider: ProviderType, context: FileContext) -> ResolvedFile | None: ...
async def _aupload_with_retry(uploader: FileUploader, file: FileInput, provider: str, file_size: int) -> UploadResult | None: ...
def _get_uploader(self, provider: ProviderType) -> FileUploader | None: ...
def get_cached_uploads(self, provider: ProviderType) -> list[CachedUpload]: ...
def clear_cache(self) -> None: ...
# Task:
Write a Python method `_resolve_inline` for the class `FileResolver` to resolve a file as inline content.
Parameters: file: FileInput, provider: str, context: FileContext
Returns: ResolvedFile | def _resolve_inline(
self,
file: FileInput,
provider: str,
context: FileContext,
) -> ResolvedFile:
"""Resolve a file as inline content.
Args:
file: The file to resolve (used for logging).
provider: Provider name.
context: Pre-computed file context.
Returns:
InlineBase64 or InlineBytes depending on provider.
"""
logger.debug(f"Resolving {file.filename} as inline for {provider}")
if self.config.use_bytes_for_bedrock and "bedrock" in provider:
return InlineBytes(
content_type=context.content_type,
data=context.content,
)
encoded = base64.b64encode(context.content).decode("ascii")
return InlineBase64(
content_type=context.content_type,
data=encoded,
) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 28, "code_loc": 11, "docstring_loc": 10, "function_name": "_resolve_inline", "class_name": "FileResolver", "qualname": "FileResolver._resolve_inline", "file_path": "lib/crewai-files/src/crewai_files/resolution/resolver.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/_internal/execution/bundle_queue/base.py:QueueWithRemoval.remove | # Context:
from ray.data._internal.execution.interfaces import RefBundle
class BaseBundleQueue: ...
class QueueWithRemoval(BaseBundleQueue):
def __contains__(self, bundle: RefBundle) -> bool: ...
def _remove_inner(self, bundle: RefBundle) -> RefBundle: ...
# Task:
Write a Python method `remove` for the class `QueueWithRemoval` to remove the specified bundle from the queue. If multiple instances exist, remove the first one.
Parameters: bundle: RefBundle
Returns: RefBundle | def remove(self, bundle: RefBundle) -> RefBundle:
"""Remove the specified bundle from the queue. If multiple instances exist, remove the first one."""
bundle = self._remove_inner(bundle)
self._on_dequeue_bundle(bundle)
return bundle | function_simple | 0 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "remove", "class_name": "QueueWithRemoval", "qualname": "QueueWithRemoval.remove", "file_path": "python/ray/data/_internal/execution/bundle_queue/base.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py:AudioPlayer.stop | # Context:
def record_audio(duration: float, sample_rate: int, channels: int, dtype) -> np.ndarray: ...
def create_silence(duration: float, sample_rate: int, dtype) -> np.ndarray: ...
def save_audio(audio_data: np.ndarray, filename: str, sample_rate: int): ...
def load_audio(filename: str, sample_rate: int, dtype) -> np.ndarray: ...
class AudioPlayer:
def __init__(self, sample_rate: int = 24000, channels: int = 1, dtype=np.int16):
self.sample_rate = sample_rate
self.channels = channels
self.dtype = dtype
self.stream: Optional[sd.OutputStream] = None
self._stop_event = threading.Event()
def __enter__(self): ...
def __exit__(self, exc_type, exc_val, exc_tb): ...
def add_audio(self, audio_data: np.ndarray): ...
# Task:
Write a Python method `stop` for the class `AudioPlayer` to stop the audio player. | def stop(self):
"""Stop the audio player."""
self._stop_event.set() | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "stop", "class_name": "AudioPlayer", "qualname": "AudioPlayer.stop", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/util.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py:ContextualAIRerankTool:class_doc | Write a class-level docstring for `ContextualAIRerankTool` (inherits from BaseTool) which has methods: `_run`. | Tool to rerank documents using Contextual AI's instruction-following reranker. | documentation | 0 | {"doc_type": "class", "class_name": "ContextualAIRerankTool", "file_path": "lib/crewai-tools/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py", "repo_id": "crewAIInc/crewAI", "char_length": 78, "methods": ["_run"]} |
browser-use/browser-use:browser_use/browser/watchdogs/downloads_watchdog.py:DownloadsWatchdog.trigger_pdf_download | # Context:
import asyncio
import json
import os
from urllib.parse import urlparse
import anyio
from cdp_use.cdp.target import SessionID, TargetID
from browser_use.browser.events import (
BrowserLaunchEvent,
BrowserStateRequestEvent,
BrowserStoppedEvent,
DownloadProgressEvent,
DownloadStartedEvent,
FileDownloadedEvent,
NavigationCompleteEvent,
TabClosedEvent,
TabCreatedEvent,
)
from browser_use.browser.events import FileDownloadedEvent
class DownloadsWatchdog(BaseWatchdog):
def register_download_callbacks(self, on_start: Any | None, on_progress: Any | None, on_complete: Any | None) -> None: ...
def unregister_download_callbacks(self, on_start: Any | None, on_progress: Any | None, on_complete: Any | None) -> None: ...
async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> None: ...
async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ...
async def on_TabClosedEvent(self, event: TabClosedEvent) -> None: ...
async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> None: ...
async def on_BrowserStoppedEvent(self, event: BrowserStoppedEvent) -> None: ...
async def on_NavigationCompleteEvent(self, event: NavigationCompleteEvent) -> None: ...
def _is_auto_download_enabled(self) -> bool: ...
async def attach_to_target(self, target_id: TargetID) -> None: ...
async def _setup_network_monitoring(self, target_id: TargetID) -> None: ...
async def download_file_from_url(self, url: str, target_id: TargetID, content_type: str | None, suggested_filename: str | None) -> str | None: ...
def _track_download(self, file_path: str, guid: str | None) -> None: ...
async def _handle_cdp_download(self, event: DownloadWillBeginEvent, target_id: TargetID, session_id: SessionID | None) -> None: ...
async def _handle_download(self, download: Any) -> None: ...
async def check_for_pdf_viewer(self, target_id: TargetID) -> bool: ...
def _check_url_for_pdf(self, url: str) -> bool: ...
def _is_chrome_pdf_viewer_url(self, url: str) -> bool: ...
async def _check_network_headers_for_pdf(self, target_id: TargetID) -> bool: ...
async def _get_unique_filename(directory: str, filename: str) -> str: ...
# Task:
Write a Python async method `trigger_pdf_download` for the class `DownloadsWatchdog` to trigger download of a PDF from Chrome's PDF viewer.
Parameters: target_id: TargetID
Returns: str | None | async def trigger_pdf_download(self, target_id: TargetID) -> str | None:
"""Trigger download of a PDF from Chrome's PDF viewer.
Returns the download path if successful, None otherwise.
"""
self.logger.debug(f'[DownloadsWatchdog] trigger_pdf_download called for target_id={target_id}')
if not self.browser_session.browser_profile.downloads_path:
self.logger.warning('[DownloadsWatchdog] ❌ No downloads path configured, cannot save PDF download')
return None
downloads_path = self.browser_session.browser_profile.downloads_path
self.logger.debug(f'[DownloadsWatchdog] Downloads path: {downloads_path}')
try:
# Create a temporary CDP session for this target without switching focus
import asyncio
self.logger.debug(f'[DownloadsWatchdog] Creating CDP session for PDF download from target {target_id}')
temp_session = await self.browser_session.get_or_create_cdp_session(target_id, focus=False)
# Try to get the PDF URL with timeout
result = await asyncio.wait_for(
temp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': """
(() => {
// For Chrome's PDF viewer, the actual URL is in window.location.href
// The embed element's src is often "about:blank"
const embedElement = document.querySelector('embed[type="application/x-google-chrome-pdf"]') ||
document.querySelector('embed[type="application/pdf"]');
if (embedElement) {
// Chrome PDF viewer detected - use the page URL
return { url: window.location.href };
}
// Fallback to window.location.href anyway
return { url: window.location.href };
})()
""",
'returnByValue': True,
},
session_id=temp_session.session_id,
),
timeout=5.0, # 5 second timeout to prevent hanging
)
pdf_info = result.get('result', {}).get('value', {})
pdf_url = pdf_info.get('url', '')
if not pdf_url:
self.logger.warning(f'[DownloadsWatchdog] ❌ Could not determine PDF URL for download {pdf_info}')
return None
# Generate filename from URL
pdf_filename = os.path.basename(pdf_url.split('?')[0]) # Remove query params
if not pdf_filename or not pdf_filename.endswith('.pdf'):
parsed = urlparse(pdf_url)
pdf_filename = os.path.basename(parsed.path) or 'document.pdf'
if not pdf_filename.endswith('.pdf'):
pdf_filename += '.pdf'
self.logger.debug(f'[DownloadsWatchdog] Generated filename: {pdf_filename}')
# Check if already downloaded in this session
self.logger.debug(f'[DownloadsWatchdog] PDF_URL: {pdf_url}, session_pdf_urls: {self._session_pdf_urls}')
if pdf_url in self._session_pdf_urls:
existing_path = self._session_pdf_urls[pdf_url]
self.logger.debug(f'[DownloadsWatchdog] PDF already downloaded in session: {existing_path}')
return existing_path
# Generate unique filename if file exists from previous run
downloads_dir = str(self.browser_session.browser_profile.downloads_path)
os.makedirs(downloads_dir, exist_ok=True)
final_filename = pdf_filename
existing_files = os.listdir(downloads_dir)
if pdf_filename in existing_files:
# Generate unique name with (1), (2), etc.
base, ext = os.path.splitext(pdf_filename)
counter = 1
while f'{base} ({counter}){ext}' in existing_files:
counter += 1
final_filename = f'{base} ({counter}){ext}'
self.logger.debug(f'[DownloadsWatchdog] File exists, using: {final_filename}')
self.logger.debug(f'[DownloadsWatchdog] Starting PDF download from: {pdf_url[:100]}...')
# Download using JavaScript fetch to leverage browser cache
try:
# Properly escape the URL to prevent JavaScript injection
escaped_pdf_url = json.dumps(pdf_url)
result = await asyncio.wait_for(
temp_session.cdp_client.send.Runtime.evaluate(
params={
'expression': f"""
(async () => {{
try {{
// Use fetch with cache: 'force-cache' to prioritize cached version
const response = await fetch({escaped_pdf_url}, {{
cache: 'force-cache'
}});
if (!response.ok) {{
throw new Error(`HTTP error! status: ${{response.status}}`);
}}
const blob = await response.blob();
const arrayBuffer = await blob.arrayBuffer();
const uint8Array = new Uint8Array(arrayBuffer);
// Check if served from cache
const fromCache = response.headers.has('age') ||
!response.headers.has('date');
return {{
data: Array.from(uint8Array),
fromCache: fromCache,
responseSize: uint8Array.length,
transferSize: response.headers.get('content-length') || 'unknown'
}};
}} catch (error) {{
throw new Error(`Fetch failed: ${{error.message}}`);
}}
}})()
""",
'awaitPromise': True,
'returnByValue': True,
},
session_id=temp_session.session_id,
),
timeout=10.0, # 10 second timeout for download operation
)
download_result = result.get('result', {}).get('value', {})
if download_result and download_result.get('data') and len(download_result['data']) > 0:
# Ensure downloads directory exists
downloads_dir = str(self.browser_session.browser_profile.downloads_path)
os.makedirs(downloads_dir, exist_ok=True)
download_path = os.path.join(downloads_dir, final_filename)
# Save the PDF asynchronously
async with await anyio.open_file(download_path, 'wb') as f:
await f.write(bytes(download_result['data']))
# Verify file was written successfully
if os.path.exists(download_path):
actual_size = os.path.getsize(download_path)
self.logger.debug(
f'[DownloadsWatchdog] PDF file written successfully: {download_path} ({actual_size} bytes)'
)
else:
self.logger.error(f'[DownloadsWatchdog] ❌ Failed to write PDF file to: {download_path}')
return None
# Log cache information
cache_status = 'from cache' if download_result.get('fromCache') else 'from network'
response_size = download_result.get('responseSize', 0)
self.logger.debug(
f'[DownloadsWatchdog] ✅ Auto-downloaded PDF ({cache_status}, {response_size:,} bytes): {download_path}'
)
# Store URL->path mapping for this session
self._session_pdf_urls[pdf_url] = download_path
# Emit file downloaded event
self.logger.debug(f'[DownloadsWatchdog] Dispatching FileDownloadedEvent for {final_filename}')
self.event_bus.dispatch(
FileDownloadedEvent(
url=pdf_url,
path=download_path,
file_name=final_filename,
file_size=response_size,
file_type='pdf',
mime_type='application/pdf',
from_cache=download_result.get('fromCache', False),
auto_download=True,
)
)
# No need to detach - session is cached
return download_path
else:
self.logger.warning(f'[DownloadsWatchdog] No data received when downloading PDF from {pdf_url}')
return None
except Exception as e:
self.logger.warning(f'[DownloadsWatchdog] Failed to auto-download PDF from {pdf_url}: {type(e).__name__}: {e}')
return None
except TimeoutError:
self.logger.debug('[DownloadsWatchdog] PDF download operation timed out')
return None
except Exception as e:
self.logger.error(f'[DownloadsWatchdog] Error in PDF download: {type(e).__name__}: {e}')
return None | function_complex | 0 | {"cognitive_complexity": 34, "loc": 192, "code_loc": 148, "docstring_loc": 4, "function_name": "trigger_pdf_download", "class_name": "DownloadsWatchdog", "qualname": "DownloadsWatchdog.trigger_pdf_download", "file_path": "browser_use/browser/watchdogs/downloads_watchdog.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
zhayujie/chatgpt-on-wechat:agent/protocol/agent.py:Agent.get_skills_prompt | # Context:
from common.log import logger
class Agent:
def __init__(self, system_prompt: str, description: str = "AI Agent", model: LLMModel = None,
tools=None, output_mode="print", max_steps=100, max_context_tokens=None,
context_reserve_tokens=None, memory_manager=None, name: str = None,
workspace_dir: str = None, skill_manager=None, enable_skills: bool = True,
runtime_info: dict = None):
"""
Initialize the Agent with system prompt, model, description.
:param system_prompt: The system prompt for the agent.
:param description: A description of the agent.
:param model: An instance of LLMModel to be used by the agent.
:param tools: Optional list of tools for the agent to use.
:param output_mode: Control how execution progress is displayed:
"print" for console output or "logger" for using logger
:param max_steps: Maximum number of steps the agent can take (default: 100)
:param max_context_tokens: Maximum tokens to keep in context (default: None, auto-calculated based on model)
:param context_reserve_tokens: Reserve tokens for new requests (default: None, auto-calculated)
:param memory_manager: Optional MemoryManager instance for memory operations
:param name: [Deprecated] The name of the agent (no longer used in single-agent system)
:param workspace_dir: Optional workspace directory for workspace-specific skills
:param skill_manager: Optional SkillManager instance (will be created if None and enable_skills=True)
:param enable_skills: Whether to enable skills support (default: True)
:param runtime_info: Optional runtime info dict (with _get_current_time callable for dynamic time)
"""
self.name = name or "Agent"
self.system_prompt = system_prompt
self.model: LLMModel = model # Instance of LLMModel
self.description = description
self.tools: list = []
self.max_steps = max_steps # max tool-call steps, default 100
self.max_context_tokens = max_context_tokens # max tokens in context
self.context_reserve_tokens = context_reserve_tokens # reserve tokens for new requests
self.captured_actions = [] # Initialize captured actions list
self.output_mode = output_mode
self.last_usage = None # Store last API response usage info
self.messages = [] # Unified message history for stream mode
self.messages_lock = threading.Lock() # Lock for thread-safe message operations
self.memory_manager = memory_manager # Memory manager for auto memory flush
self.workspace_dir = workspace_dir # Workspace directory
self.enable_skills = enable_skills # Skills enabled flag
self.runtime_info = runtime_info # Runtime info for dynamic time update
# Initialize skill manager
self.skill_manager = None
if enable_skills:
if skill_manager:
self.skill_manager = skill_manager
else:
# Auto-create skill manager
try:
from agent.skills import SkillManager
custom_dir = os.path.join(workspace_dir, "skills") if workspace_dir else None
self.skill_manager = SkillManager(custom_dir=custom_dir)
logger.debug(f"Initialized SkillManager with {len(self.skill_manager.skills)} skills")
except Exception as e:
logger.warning(f"Failed to initialize SkillManager: {e}")
if tools:
for tool in tools:
self.add_tool(tool)
def add_tool(self, tool: BaseTool): ...
def get_full_system_prompt(self, skill_filter) -> str: ...
def _rebuild_runtime_section(self, prompt: str) -> str: ...
def _rebuild_tool_list_section(self, prompt: str) -> str: ...
def refresh_skills(self): ...
def list_skills(self): ...
def _get_model_context_window(self) -> int: ...
def _get_context_reserve_tokens(self) -> int: ...
def _estimate_message_tokens(self, message: dict) -> int: ...
def _estimate_text_tokens(text: str) -> int: ...
def _find_tool(self, tool_name: str): ...
def output(self, message, end): ...
def _execute_post_process_tools(self): ...
def capture_tool_use(self, tool_name, input_params, output, status, thought, error_message, execution_time): ...
def run_stream(self, user_message: str, on_event, clear_history: bool, skill_filter) -> str: ...
def clear_history(self): ...
# Task:
Write a Python method `get_skills_prompt` for the class `Agent` to get the skills prompt to append to system prompt.
Parameters: skill_filter
Returns: str | def get_skills_prompt(self, skill_filter=None) -> str:
"""
Get the skills prompt to append to system prompt.
:param skill_filter: Optional list of skill names to include
:return: Formatted skills prompt or empty string
"""
if not self.skill_manager:
return ""
try:
return self.skill_manager.build_skills_prompt(skill_filter=skill_filter)
except Exception as e:
logger.warning(f"Failed to build skills prompt: {e}")
return "" | function_simple | 1 | {"cognitive_complexity": 2, "loc": 15, "code_loc": 7, "docstring_loc": 6, "function_name": "get_skills_prompt", "class_name": "Agent", "qualname": "Agent.get_skills_prompt", "file_path": "agent/protocol/agent.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:providers/teradata/tests/unit/teradata/operators/test_bteq.py:TestBteqOperator.test_invalid_file_path | # Context:
from unittest import mock
import pytest
from airflow.providers.teradata.operators.bteq import BteqOperator
class TestBteqOperator:
def test_execute(self, mock_hook_init, mock_execute_bteq): ...
def test_execute_sql_only(self, mock_hook_init, mock_execute_bteq): ...
def test_execute_sql_local(self, mock_hook_init, mock_execute_script): ...
def test_on_kill(self, mock_on_kill): ...
def test_on_kill_not_initialized(self): ...
def test_template_fields(self): ...
def test_execute_raises_if_no_sql_or_file(self): ...
def test_file_encoding_error(self, mock_encoding, mock_valid_file): ...
def test_execute_local_file(self, mock_read_file, mock_valid_encoding, mock_valid_file, mock_execute_bteq_script): ...
def test_on_kill_calls_hook(self): ...
def test_on_kill_logs_if_no_hook(self): ...
def test_remote_execution_with_sql(self, mock_bteq_hook_init, mock_ssh_hook_class, mock_get_conn, mock_execute_bteq_script): ...
def test_render_template_in_sql(self, mock_render): ...
def test_bteq_timeout_with_custom_rc(self, mock_hook_init, mock_exec): ...
def test_bteq_return_code_not_in_quit_rc(self, mock_hook_init, mock_exec): ...
# Task:
Write a Python test method `test_invalid_file_path` in test class `TestBteqOperator` to verify the behavior of `invalid_file_path`.
Module under test: __future__, airflow.providers.teradata.hooks.bteq, airflow.providers.teradata.operators.bteq | def test_invalid_file_path(self, mock_is_valid_file):
op = BteqOperator(
task_id="fail_invalid_file",
file_path="/invalid/path.sql",
teradata_conn_id="td_conn",
)
with pytest.raises(ValueError, match="Failed to execute BTEQ script due to invalid file path"):
op.execute({}) | test | 1 | {"function_name": "test_invalid_file_path", "class_name": "TestBteqOperator", "qualname": "TestBteqOperator.test_invalid_file_path", "file_path": "providers/teradata/tests/unit/teradata/operators/test_bteq.py", "repo_id": "apache/airflow", "loc": 8, "tested_modules": ["__future__", "airflow.providers.teradata.hooks.bteq", "airflow.providers.teradata.operators.bteq"], "has_docstring": false, "runnable_level": "project_runnable"} |
apache/airflow:providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py:TestEcsTaskCollection.test_get_all_task_keys | # Context:
class TestEcsQueuedTask: ...
class TestEcsTaskInfo: ...
class TestRunTaskKwargsConfigKeys: ...
class TestAllEcsConfigKeys: ...
class TestEcsExecutorException: ...
class TestEcsExecutorTask: ...
class TestRecursiveFlattenDict: ...
class TestParseAssignPublicIp: ...
class TestCamelizeDictKeys: ...
class TestEcsTaskCollection:
def setup_method(self): ...
def test_init(self): ...
def test_add_task(self): ...
def test_update_task(self): ...
def test_task_by_key(self): ...
def test_task_by_arn(self): ...
def test_pop_by_key(self): ...
def test_get_all_arns(self): ...
def test_failure_count_by_key(self): ...
def test_increment_failure_count(self): ...
def test_info_by_key(self): ...
def test_getitem(self): ...
def test_len(self): ...
# Task:
Write a Python test method `test_get_all_task_keys` in test class `TestEcsTaskCollection` to test getting all task keys from collection.
Module under test: __future__, airflow.models.taskinstance, airflow.providers.amazon.aws.executors.ecs.utils | def test_get_all_task_keys(self):
"""Test getting all task keys from collection."""
self.collection.add_task(
task=self.task,
airflow_task_key=self.task_key,
queue=self.queue,
airflow_cmd=self.cmd,
exec_config=self.exec_config,
attempt_number=1,
)
keys = self.collection.get_all_task_keys()
assert keys == [self.task_key] | test | 1 | {"function_name": "test_get_all_task_keys", "class_name": "TestEcsTaskCollection", "qualname": "TestEcsTaskCollection.test_get_all_task_keys", "file_path": "providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py", "repo_id": "apache/airflow", "loc": 13, "tested_modules": ["__future__", "airflow.models.taskinstance", "airflow.providers.amazon.aws.executors.ecs.utils", "airflow.utils.state"], "has_docstring": true, "runnable_level": "class_runnable"} |
ocrmypdf/OCRmyPDF:tests/test_null_ocr_engine.py:TestNullOcrEngineInterface.test_version_returns_none | # Context:
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
class TestNullOcrEngineExists: ...
class TestNullOcrEngineGenerateOcr: ...
class TestOcrEngineOption: ...
class TestNullOcrEngineInterface:
def test_creator_tag(self): ...
def test_languages_returns_empty_set(self): ...
def test_supports_generate_ocr_returns_true(self): ...
def test_get_orientation_returns_zero(self): ...
def test_get_deskew_returns_zero(self): ...
# Task:
Write a Python test method `test_version_returns_none` in test class `TestNullOcrEngineInterface` to nullOcrEngine.version() should return 'none'.
Module under test: __future__, pathlib, ocrmypdf.builtin_plugins | def test_version_returns_none(self):
"""NullOcrEngine.version() should return 'none'."""
from ocrmypdf.builtin_plugins.null_ocr import NullOcrEngine
assert NullOcrEngine.version() == "none" | test | 1 | {"function_name": "test_version_returns_none", "class_name": "TestNullOcrEngineInterface", "qualname": "TestNullOcrEngineInterface.test_version_returns_none", "file_path": "tests/test_null_ocr_engine.py", "repo_id": "ocrmypdf/OCRmyPDF", "loc": 5, "tested_modules": ["__future__", "pathlib", "ocrmypdf.builtin_plugins", "ocrmypdf.builtin_plugins.null_ocr", "ocrmypdf.builtin_plugins.null_ocr"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/tests/unit/test_deployment_rank_manager.py:TestDeploymentRankManagerMultiNode.test_complex_multi_node_lifecycle | # Context:
from ray.serve._private.deployment_state import DeploymentRankManager
def rank_manager() -> DeploymentRankManager: ...
class MockDeploymentReplica: ...
class TestDeploymentRankManager: ...
class TestDeploymentRankManagerEdgeCases: ...
class TestDeploymentRankManagerErrorHandling: ...
class TestDeploymentRankManagerMultiNode:
def test_assign_rank_multiple_nodes(self): ...
def test_local_rank_independence_across_nodes(self): ...
def test_release_rank_removes_node_when_last_replica(self): ...
def test_release_rank_keeps_node_when_replicas_remain(self): ...
def test_node_rank_reuse_after_complete_release(self): ...
def test_local_rank_reuse_within_node(self): ...
def test_recover_rank_multiple_nodes(self): ...
def test_recover_rank_preserves_node_rank_when_node_exists(self): ...
def test_check_rank_consistency_across_multiple_nodes(self): ...
def test_check_rank_consistency_local_ranks_per_node(self): ...
def test_check_rank_consistency_node_ranks(self): ...
def test_clear_with_multiple_nodes(self): ...
def test_get_replica_ranks_mapping_multiple_nodes(self): ...
def test_scaling_up_and_down_across_nodes(self): ...
def test_minimal_reassignment_preserves_node_assignments(self): ...
# Task:
Write a Python test method `test_complex_multi_node_lifecycle` in test class `TestDeploymentRankManagerMultiNode` to test a complex scenario with adds, releases, and consistency checks across nodes.
Module under test: ray.serve._private.common, ray.serve._private.deployment_state, ray.serve.schema | def test_complex_multi_node_lifecycle(self):
"""Test a complex scenario with adds, releases, and consistency checks across nodes."""
rank_manager = DeploymentRankManager()
# Phase 1: Initial deployment across 3 nodes
rank_manager.assign_rank("n1_r1", "node_1")
rank_manager.assign_rank("n1_r2", "node_1")
rank_manager.assign_rank("n2_r1", "node_2")
rank_manager.assign_rank("n3_r1", "node_3")
rank_manager.assign_rank("n3_r2", "node_3")
# Phase 2: Scale down - remove some replicas
rank_manager.release_rank("n1_r2") # Remove from node_1
rank_manager.release_rank("n2_r1") # Remove all from node_2
# Phase 3: Scale up - add replicas to new and existing nodes
rank_manager.assign_rank("n1_r3", "node_1") # Add to existing node_1
rank_manager.assign_rank("n4_r1", "node_4") # New node
# Verify state is consistent
mapping = rank_manager.get_replica_ranks_mapping()
assert len(mapping) == 5
# Verify node ranks - node_2 was removed, so node_4 should reuse its rank
assert mapping["n4_r1"].node_rank == 1 # Reused from node_2
# Verify local ranks per node
assert mapping["n1_r1"].local_rank == 0
assert mapping["n1_r3"].local_rank == 1 # Reused local rank
assert mapping["n3_r1"].local_rank == 0
assert mapping["n3_r2"].local_rank == 1
assert mapping["n4_r1"].local_rank == 0 | test | 0 | {"function_name": "test_complex_multi_node_lifecycle", "class_name": "TestDeploymentRankManagerMultiNode", "qualname": "TestDeploymentRankManagerMultiNode.test_complex_multi_node_lifecycle", "file_path": "python/ray/serve/tests/unit/test_deployment_rank_manager.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["ray.serve._private.common", "ray.serve._private.deployment_state", "ray.serve.schema"], "has_docstring": true, "runnable_level": "plib_runnable"} |
huggingface/transformers:src/transformers/trainer_optimizer.py:_get_adamw_torch | # Context:
from typing import TYPE_CHECKING, Any
from .training_args import OptimizerNames, ParallelMode
from torch.optim import AdamW
from bitsandbytes.optim import AdamW, Lion, RMSprop
from torch_xla.amp.syncfree import AdamW
class OptimizerContext: ...
def _parse_optim_args(optim_args_str: str | None) -> dict[str, str]: ...
def is_optimizer_factory(optimizer_cls_or_factory: Any) -> bool: ...
def _setup_low_rank_optimizer(args: TrainingArguments, model: PreTrainedModel, optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], optimizer_kwargs: dict[str, Any], is_layerwise_supported: bool) -> tuple[Any, dict[str, Any]]: ...
def _get_adafactor(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_adamw_torch_xla(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_adamw_torch_npu_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_adamw_apex_fused(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_bitsandbytes_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_adamw_anyprecision(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_sgd(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_adagrad(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_rmsprop(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_galore_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_apollo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_lomo_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_grokadamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_torchao_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_schedule_free_optimizer(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
def _get_stable_adamw(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]: ...
# Task:
Write a Python function `_get_adamw_torch` to get PyTorch AdamW optimizer (regular or fused).
Parameters: ctx: OptimizerContext
Returns: tuple[Any, dict[str, Any]] | def _get_adamw_torch(ctx: OptimizerContext) -> tuple[Any, dict[str, Any]]:
"""Get PyTorch AdamW optimizer (regular or fused)."""
from torch.optim import AdamW
ctx.optimizer_kwargs.update(ctx.adam_kwargs)
if ctx.args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
ctx.optimizer_kwargs.update({"fused": True})
return AdamW, ctx.optimizer_kwargs | function_simple | 0 | {"cognitive_complexity": 1, "loc": 8, "code_loc": 5, "docstring_loc": 1, "function_name": "_get_adamw_torch", "class_name": null, "qualname": "_get_adamw_torch", "file_path": "src/transformers/trainer_optimizer.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:tests/ci/interactions/test_autocomplete_interaction.py:TestAutocompleteInteraction.test_datalist_field_no_delay | # Context:
import asyncio
from browser_use.browser import BrowserSession
from browser_use.tools.service import Tools
import time
def http_server(): ...
def base_url(http_server): ...
async def browser_session(): ...
def tools(): ...
class TestAutocompleteInteraction:
async def test_value_mismatch_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_combobox_field_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_datalist_field_detected(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_normal_input_no_false_positive(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_sensitive_data_skips_value_verification(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_prefilled_input_cleared_by_default(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_prefilled_input_append_with_clear_false(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_concatenation_retry_on_sticky_field(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
async def test_combobox_field_adds_delay(self, tools: Tools, browser_session: BrowserSession, base_url: str): ...
# Task:
Write a Python test method `test_datalist_field_no_delay` in test class `TestAutocompleteInteraction` to native datalist fields should NOT get the 400ms delay — browser handles them instantly.
Module under test: browser_use.agent.views, browser_use.browser, browser_use.browser.profile | async def test_datalist_field_no_delay(self, tools: Tools, browser_session: BrowserSession, base_url: str):
"""Native datalist fields should NOT get the 400ms delay — browser handles them instantly."""
import time
await tools.navigate(url=f'{base_url}/datalist-field', new_tab=False, browser_session=browser_session)
await asyncio.sleep(0.3)
await browser_session.get_browser_state_summary()
city_idx = await browser_session.get_index_by_id('city')
assert city_idx is not None
t0 = time.monotonic()
await tools.input(index=city_idx, text='Chi', browser_session=browser_session)
duration = time.monotonic() - t0
# Datalist fields should complete without the 400ms tax.
# Normal typing for 3 chars takes well under 400ms.
assert duration < 0.4, f'Datalist field got unexpected delay: {duration:.3f}s (should be < 0.4s)' | test | 0 | {"function_name": "test_datalist_field_no_delay", "class_name": "TestAutocompleteInteraction", "qualname": "TestAutocompleteInteraction.test_datalist_field_no_delay", "file_path": "tests/ci/interactions/test_autocomplete_interaction.py", "repo_id": "browser-use/browser-use", "loc": 17, "tested_modules": ["browser_use.agent.views", "browser_use.browser", "browser_use.browser.profile", "browser_use.tools.service"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/components/processing/test_text_operations_component.py:TestTextOperationsOutputMethods.test_get_dataframe | # Context:
from lfx.components.processing.text_operations import TextOperations
from lfx.schema.dataframe import DataFrame
class TestTextOperationsComponent(ComponentTestBaseWithoutClient): ...
class TestTextOperationsWordCount: ...
class TestTextOperationsCaseConversion: ...
class TestTextOperationsReplace: ...
class TestTextOperationsExtract: ...
class TestTextOperationsHead: ...
class TestTextOperationsTail: ...
class TestTextOperationsStrip: ...
class TestTextOperationsJoin: ...
class TestTextOperationsClean: ...
class TestTextOperationsToDataFrame: ...
class TestTextOperationsUpdateBuildConfig: ...
class TestTextOperationsUpdateOutputs: ...
class TestBugFixWordCountEmptyText: ...
class TestBugFixTextJoinEmptyFirst: ...
class TestBugFixTextStripTabs: ...
class TestBugFixDataFrameHeaderValidation: ...
class TestBugFixInputValidation: ...
class TestTextOperationsOutputMethods:
def test_get_data_word_count(self): ...
def test_get_data_non_word_count(self): ...
def test_get_message(self): ...
def test_get_text(self): ...
# Task:
Write a Python test method `test_get_dataframe` in test class `TestTextOperationsOutputMethods` to test get_dataframe method.
Module under test: lfx.components.processing.text_operations, lfx.schema.data, lfx.schema.dataframe | def test_get_dataframe(self):
"""Test get_dataframe method."""
component = TextOperations()
component.operation = [{"name": "Text to DataFrame"}]
component.text_input = "| A | B |\n| 1 | 2 |"
component.table_separator = "|"
component.has_header = True
component.log = lambda _: None
result = component.get_dataframe()
assert isinstance(result, DataFrame) | test | 1 | {"function_name": "test_get_dataframe", "class_name": "TestTextOperationsOutputMethods", "qualname": "TestTextOperationsOutputMethods.test_get_dataframe", "file_path": "src/backend/tests/unit/components/processing/test_text_operations_component.py", "repo_id": "langflow-ai/langflow", "loc": 12, "tested_modules": ["lfx.components.processing.text_operations", "lfx.schema.data", "lfx.schema.dataframe", "lfx.schema.message", "tests.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
exo-explore/exo:src/exo/worker/engines/image/models/flux/kontext_adapter.py:FluxKontextModelAdapter.encode_prompt | # Context:
from mflux.models.flux.model.flux_text_encoder.prompt_encoder import PromptEncoder
from mflux.models.flux.variants.kontext.kontext_util import KontextUtil
class FluxKontextPromptData(PromptData): ...
class FluxKontextModelAdapter(ModelAdapter[Flux1Kontext, Transformer]):
def __init__(
self,
config: ImageModelConfig,
model_id: str,
local_path: Path,
quantize: int | None = None,
):
self._config = config
self._model = Flux1Kontext(
model_config=ModelConfig.from_name(model_name=model_id, base_model=None),
model_path=str(local_path),
quantize=quantize,
)
self._transformer = self._model.transformer
# Stores image path and computed dimensions after set_image_dimensions
self._image_path: str | None = None
self._output_height: int | None = None
self._output_width: int | None = None
def hidden_dim(self) -> int: ...
def needs_cfg(self) -> bool: ...
def _get_latent_creator(self) -> type: ...
def get_joint_block_wrappers(self, text_seq_len: int, encoder_hidden_states_mask: mx.array | None) -> list[JointBlockWrapper[Any]]: ...
def get_single_block_wrappers(self, text_seq_len: int) -> list[SingleBlockWrapper[Any]]: ...
def slice_transformer_blocks(self, start_layer: int, end_layer: int): ...
def set_image_dimensions(self, image_path: Path) -> tuple[int, int]: ...
def create_latents(self, seed: int, runtime_config: Config) -> mx.array: ...
def compute_embeddings(self, hidden_states: mx.array, prompt_embeds: mx.array) -> tuple[mx.array, mx.array]: ...
def compute_text_embeddings(self, t: int, runtime_config: Config, pooled_prompt_embeds: mx.array | None, hidden_states: mx.array | None) -> mx.array: ...
def compute_rotary_embeddings(self, prompt_embeds: mx.array, runtime_config: Config, encoder_hidden_states_mask: mx.array | None, cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] | None, kontext_image_ids: mx.array | None) -> RotaryEmbeddings: ...
def apply_guidance(self, noise_positive: mx.array, noise_negative: mx.array, guidance_scale: float) -> mx.array: ...
# Task:
Write a Python method `encode_prompt` for the class `FluxKontextModelAdapter` to encode prompt and create conditioning from stored input image.
Parameters: prompt: str, negative_prompt: str | None
Returns: FluxKontextPromptData | def encode_prompt(
self, prompt: str, negative_prompt: str | None = None
) -> FluxKontextPromptData:
"""Encode prompt and create conditioning from stored input image.
Must call set_image_dimensions() before this method.
Args:
prompt: Text prompt for editing
negative_prompt: Ignored (Kontext doesn't use CFG)
Returns:
FluxKontextPromptData with text embeddings and image conditioning
"""
del negative_prompt # Kontext doesn't support negative prompts or CFG
if (
self._image_path is None
or self._output_height is None
or self._output_width is None
):
raise RuntimeError(
"set_image_dimensions() must be called before encode_prompt() "
"for FluxKontextModelAdapter"
)
assert isinstance(self.model.prompt_cache, dict)
assert isinstance(self.model.tokenizers, dict)
# Encode text prompt
prompt_embeds, pooled_prompt_embeds = PromptEncoder.encode_prompt(
prompt=prompt,
prompt_cache=self.model.prompt_cache,
t5_tokenizer=self.model.tokenizers["t5"], # pyright: ignore[reportAny]
clip_tokenizer=self.model.tokenizers["clip"], # pyright: ignore[reportAny]
t5_text_encoder=self.model.t5_text_encoder,
clip_text_encoder=self.model.clip_text_encoder,
)
# Create conditioning latents from input image
conditioning_latents, kontext_image_ids = (
KontextUtil.create_image_conditioning_latents(
vae=self.model.vae,
height=self._output_height,
width=self._output_width,
image_path=self._image_path,
)
)
return FluxKontextPromptData(
prompt_embeds=prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
conditioning_latents=conditioning_latents,
kontext_image_ids=kontext_image_ids,
) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 55, "code_loc": 34, "docstring_loc": 11, "function_name": "encode_prompt", "class_name": "FluxKontextModelAdapter", "qualname": "FluxKontextModelAdapter.encode_prompt", "file_path": "src/exo/worker/engines/image/models/flux/kontext_adapter.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:common/data_source/utils.py:SlackTextCleaner._get_slack_name | # Context:
import logging
from slack_sdk.errors import SlackApiError
def datetime_from_string(datetime_string: str) -> datetime: ...
def is_valid_image_type(mime_type: str) -> bool: ...
def _handle_http_error(e: requests.HTTPError, attempt: int) -> int: ...
def update_param_in_path(path: str, param: str, value: str) -> str: ...
def build_confluence_document_id(base_url: str, content_url: str, is_cloud: bool) -> str: ...
def get_single_param_from_url(url: str, param: str) -> str | None: ...
def get_start_param_from_url(url: str) -> int: ...
def wrap_request_to_handle_ratelimiting(request_fn: R, default_wait_time_sec: int, max_waits: int) -> R: ...
class _RateLimitedRequest: ...
def create_s3_client(bucket_type: BlobType, credentials: dict[str, Any], european_residency: bool) -> S3Client: ...
def detect_bucket_region(s3_client: S3Client, bucket_name: str) -> str | None: ...
def download_object(s3_client: S3Client, bucket_name: str, key: str, size_threshold: int | None) -> bytes | None: ...
def read_stream_with_limit(body: Any, key: str, size_threshold: int) -> bytes | None: ...
def _extract_onyx_metadata(line: str) -> dict | None: ...
def read_text_file(file: IO, encoding: str, errors: str, ignore_onyx_metadata: bool) -> tuple[str, dict]: ...
def get_blob_link(bucket_type: BlobType, s3_client: S3Client, bucket_name: str, key: str, bucket_region: str | None) -> str: ...
def extract_size_bytes(obj: Mapping[str, Any]) -> int | None: ...
def get_file_ext(file_name: str) -> str: ...
def is_accepted_file_ext(file_ext: str, extension_type: OnyxExtensionType) -> bool: ...
def detect_encoding(file: IO[bytes]) -> str: ...
def get_markitdown_converter(): ...
def to_bytesio(stream: IO[bytes]) -> BytesIO: ...
def get_base_url(token: str) -> str: ...
def get_message_link(event: dict, client: WebClient, channel_id: str) -> str: ...
def make_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> SlackResponse: ...
def make_paginated_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> Generator[dict[str, Any], None, None]: ...
def _make_slack_api_call_paginated(call: Callable[..., SlackResponse]) -> Callable[..., Generator[dict[str, Any], None, None]]: ...
def is_atlassian_date_error(e: Exception) -> bool: ...
def expert_info_from_slack_id(user_id: str | None, client: WebClient, user_cache: dict[str, BasicExpertInfo | None]) -> BasicExpertInfo | None: ...
def is_mail_service_disabled_error(error: HttpError) -> bool: ...
def build_time_range_query(time_range_start: SecondsSinceUnixEpoch | None, time_range_end: SecondsSinceUnixEpoch | None) -> str | None: ...
def clean_email_and_extract_name(email: str) -> tuple[str, str | None]: ...
def get_message_body(payload: dict[str, Any]) -> str: ...
def time_str_to_utc(time_str: str): ...
def gmail_time_str_to_utc(time_str: str): ...
def batch_generator(items: Iterable[T], batch_size: int, pre_batch_yield: Callable[[list[T]], None] | None) -> Generator[list[T], None, None]: ...
def fetch_notion_data(url: str, headers: dict[str, str], method: str, json_data: Optional[dict]) -> dict[str, Any]: ...
def properties_to_str(properties: dict[str, Any]) -> str: ...
def filter_pages_by_time(pages: list[dict[str, Any]], start: float, end: float, filter_field: str) -> list[dict[str, Any]]: ...
def _load_all_docs(connector: CheckpointedConnector[CT], load: LoadFunction) -> list[Document]: ...
def load_all_docs_from_checkpoint_connector(connector: CheckpointedConnector[CT], start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> list[Document]: ...
def is_atlassian_cloud_url(url: str) -> bool: ...
def get_cloudId(base_url: str) -> str: ...
def scoped_url(url: str, product: str) -> str: ...
def process_confluence_user_profiles_override(confluence_user_email_override: list[dict[str, str]]) -> list[ConfluenceUser]: ...
def confluence_refresh_tokens(client_id: str, client_secret: str, cloud_id: str, refresh_token: str) -> dict[str, Any]: ...
class TimeoutThread(threading.Thread, Generic[R]): ...
def run_with_timeout(timeout: float, func: Callable[..., R], *args, **kwargs) -> R: ...
def validate_attachment_filetype(attachment: dict[str, Any]) -> bool: ...
class CallableProtocol(Protocol): ...
def run_functions_tuples_in_parallel(functions_with_args: Sequence[tuple[CallableProtocol, tuple[Any, ...]]], allow_failures: bool, max_workers: int | None) -> list[Any]: ...
def _next_or_none(ind: int, gen: Iterator[R]) -> tuple[int, R | None]: ...
def parallel_yield(gens: list[Iterator[R]], max_workers: int) -> Iterator[R]: ...
def sanitize_filename(name: str, extension: str) -> str: ...
class _RateLimitDecorator: ...
def retry_builder(tries: int, delay: float, max_delay: float | None, backoff: float, jitter: tuple[float, float] | float, exceptions: type[Exception] | tuple[type[Exception], ...]) -> Callable[[F], F]: ...
class SlackTextCleaner:
def __init__(self, client: WebClient) -> None:
self._client = client
self._id_to_name_map: dict[str, str] = {}
def _replace_user_ids_with_names(self, message: str) -> str: ...
def index_clean(self, message: str) -> str: ...
def replace_tags_basic(message: str) -> str: ...
def replace_channels_basic(message: str) -> str: ...
def replace_special_mentions(message: str) -> str: ...
def replace_special_catchall(message: str) -> str: ...
def add_zero_width_whitespace_after_tag(message: str) -> str: ...
# Task:
Write a Python method `_get_slack_name` for the class `SlackTextCleaner` to get Slack username.
Parameters: user_id: str
Returns: str | def _get_slack_name(self, user_id: str) -> str:
"""Get Slack username"""
if user_id not in self._id_to_name_map:
try:
response = self._client.users_info(user=user_id)
self._id_to_name_map[user_id] = response["user"]["profile"]["display_name"] or response["user"]["profile"]["real_name"]
except SlackApiError as e:
logging.exception(f"Error fetching data for user {user_id}: {e.response['error']}")
raise
return self._id_to_name_map[user_id] | function_simple | 1 | {"cognitive_complexity": 3, "loc": 11, "code_loc": 8, "docstring_loc": 1, "function_name": "_get_slack_name", "class_name": "SlackTextCleaner", "qualname": "SlackTextCleaner._get_slack_name", "file_path": "common/data_source/utils.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py:TestPrefixTreeEviction.test_eviction_insufficient_chars_evicts_all | # Context:
from ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree import (
Node,
PrefixTree,
PrefixTreeActor,
)
def tree() -> PrefixTree: ...
def tree_actor(): ...
def get_lru_texts_from_tree(tree: PrefixTree, tenant_id: str) -> List[str]: ...
async def get_lru_texts_from_tree_actor(tree_actor: PrefixTreeActor, tenant_id: str) -> List[str]: ...
class TestPrefixTreeInitialization: ...
class TestPrefixTreeInsert: ...
class TestPrefixTreeMatch: ...
class TestPrefixTreeRemove: ...
class TestPrefixTreeGetSmallestTenants: ...
class TestPrefixTreeComprehensive: ...
class TestPrefixTreeActorComprehensive: ...
class TestPrefixTreeActorEvictionLoop: ...
class TestPrefixTreeEviction:
def test_eviction_non_existent_tenant(self, tree: PrefixTree) -> None: ...
def test_eviction_exact_min_remove_size_single_node(self, tree: PrefixTree) -> None: ...
def test_eviction_exceed_min_remove_size_single_node(self, tree: PrefixTree) -> None: ...
def test_eviction_multiple_nodes(self, tree: PrefixTree) -> None: ...
def test_eviction_same_timestamps(self, tree: PrefixTree) -> None: ...
# Task:
Write a Python test method `test_eviction_insufficient_chars_evicts_all` in test class `TestPrefixTreeEviction` to test evicting when min_remove_size is larger than available; evicts all.
Module under test: typing, ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree | def test_eviction_insufficient_chars_evicts_all(self, tree: PrefixTree) -> None:
"""Test evicting when min_remove_size is larger than available; evicts all."""
tree.add_tenants(["tenant_1"], 0)
tree.insert("xyz", "tenant_1", 1) # 3 chars available
evicted_count = tree.evict_tenant_by_lru("tenant_1", 10)
assert evicted_count == 3
assert tree.tenant_to_char_count == {"tenant_1": 0}
assert get_lru_texts_from_tree(tree, "tenant_1") == [""] | test | 0 | {"function_name": "test_eviction_insufficient_chars_evicts_all", "class_name": "TestPrefixTreeEviction", "qualname": "TestPrefixTreeEviction.test_eviction_insufficient_chars_evicts_all", "file_path": "python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py", "repo_id": "ray-project/ray", "loc": 8, "tested_modules": ["typing", "ray.llm._internal.serve.routing_policies.prefix_aware.prefix_tree"], "has_docstring": true, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/base/langflow/agentic/utils/template_create.py:create_flow_from_template_and_get_link | # Context:
from typing import TYPE_CHECKING, Any
from fastapi import HTTPException
from langflow.agentic.utils.template_search import get_template_by_id
from langflow.api.v1.flows import _new_flow, _save_flow_to_fs
from langflow.initial_setup.setup import get_or_create_default_folder
from langflow.services.database.models.flow.model import FlowCreate
from langflow.services.database.models.folder.model import Folder
from langflow.services.deps import get_storage_service
from uuid import UUID
from sqlmodel.ext.asyncio.session import AsyncSession
# Task:
Write a Python async function `create_flow_from_template_and_get_link` to create a new flow from a starter template and return its id and UI link.
Returns: dict[str, Any] | async def create_flow_from_template_and_get_link(
*,
session: AsyncSession,
user_id: UUID,
template_id: str,
target_folder_id: UUID | None = None,
) -> dict[str, Any]:
"""Create a new flow from a starter template and return its id and UI link.
Args:
session: Active async DB session.
user_id: The owner user id for the new flow.
template_id: The string id field inside the starter template JSON.
target_folder_id: Optional folder id to place the flow. If not provided,
the user's default folder will be used.
Returns:
Dict with keys: {"id": str, "link": str}
"""
# 1) Load template JSON from starter_projects
template = get_template_by_id(template_id=template_id, fields=None)
if not template:
raise HTTPException(status_code=404, detail="Template not found")
# 2) Resolve target folder
if target_folder_id:
folder = await session.get(Folder, target_folder_id)
if not folder or folder.user_id != user_id:
raise HTTPException(status_code=400, detail="Invalid target folder")
folder_id = folder.id
else:
default_folder = await get_or_create_default_folder(session, user_id)
folder_id = default_folder.id
# 3) Build FlowCreate from template fields (ignore unknowns)
new_flow = FlowCreate(
name=template.get("name"),
description=template.get("description"),
icon=template.get("icon"),
icon_bg_color=template.get("icon_bg_color"),
gradient=template.get("gradient"),
data=template.get("data"),
is_component=template.get("is_component", False),
endpoint_name=template.get("endpoint_name"),
tags=template.get("tags"),
mcp_enabled=template.get("mcp_enabled"),
folder_id=folder_id,
user_id=user_id,
)
# 4) Use the same creation path as API
storage_service = get_storage_service()
db_flow = await _new_flow(session=session, flow=new_flow, user_id=user_id, storage_service=storage_service)
await session.commit()
await session.refresh(db_flow)
await _save_flow_to_fs(db_flow, user_id, storage_service)
# 5) Build relative UI link
link = f"/flow/{db_flow.id}/folder/{folder_id}"
return {"id": str(db_flow.id), "link": link} | function_complex | 1 | {"cognitive_complexity": 6, "loc": 60, "code_loc": 32, "docstring_loc": 12, "function_name": "create_flow_from_template_and_get_link", "class_name": null, "qualname": "create_flow_from_template_and_get_link", "file_path": "src/backend/base/langflow/agentic/utils/template_create.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"} |
Zie619/n8n-workflows:scripts/update_readme_stats.py:get_category_list | Write a Python function `get_category_list` to get formatted list of all categories (same logic as search index).
Parameters: categories | def get_category_list(categories):
"""Get formatted list of all categories (same logic as search index)."""
formatted_categories = set()
# Map technical categories to display names
category_mapping = {
"messaging": "Communication & Messaging",
"email": "Communication & Messaging",
"cloud_storage": "Cloud Storage & File Management",
"database": "Data Processing & Analysis",
"project_management": "Project Management",
"ai_ml": "AI Agent Development",
"social_media": "Social Media Management",
"ecommerce": "E-commerce & Retail",
"analytics": "Data Processing & Analysis",
"calendar_tasks": "Project Management",
"forms": "Data Processing & Analysis",
"development": "Technical Infrastructure & DevOps",
}
for category_key in categories.keys():
display_name = category_mapping.get(
category_key, category_key.replace("_", " ").title()
)
formatted_categories.add(display_name)
# Add categories from the create_categories.py system
additional_categories = [
"Business Process Automation",
"Web Scraping & Data Extraction",
"Marketing & Advertising Automation",
"Creative Content & Video Automation",
"Creative Design Automation",
"CRM & Sales",
"Financial & Accounting",
]
for cat in additional_categories:
formatted_categories.add(cat)
return sorted(list(formatted_categories)) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 41, "code_loc": 32, "docstring_loc": 1, "function_name": "get_category_list", "class_name": null, "qualname": "get_category_list", "file_path": "scripts/update_readme_stats.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "self_contained"} |
streamlit/streamlit:lib/tests/streamlit/web/server/component_file_utils_test.py:test_mixed_separators_not_rejected_early | # Context:
from pathlib import Path
from streamlit.web.server.component_file_utils import (
build_safe_abspath,
guess_content_type,
)
def root(tmp_path: Path) -> Path: ...
def test_path_security_cases(root: Path, candidate: str, expect_allowed: bool) -> None: ...
def test_rejects_symlink_escape(root: Path, tmp_path: Path) -> None: ...
def test_commonpath_valueerror_treated_as_forbidden(root: Path) -> None: ...
def test_symlink_within_root_allowed(root: Path) -> None: ...
def test_normalization_and_nonexistent_paths(root: Path, candidate: str, expect: Callable[[Path], str]) -> None: ...
def test_normalized_parent_segments_rejected(root: Path) -> None: ...
def test_component_root_is_symlink(tmp_path: Path) -> None: ...
def test_guess_content_type_gzip(path: str, expected: str) -> None: ...
def test_guess_content_type_other_encoding_bzip2() -> None: ...
def test_guess_content_type_basic_types(path: str, expected_prefix: str) -> None: ...
def test_guess_content_type_unknown_extension() -> None: ...
def test_rejects_unsafe_paths_before_realpath(root: Path, unsafe_path: str) -> None: ...
def test_realpath_not_called_for_unsafe_paths(root: Path, unsafe_path: str) -> None: ...
def test_rejects_windows_drive_paths(root: Path, unsafe_path: str) -> None: ...
def test_safe_path_still_resolves_correctly(root: Path) -> None: ...
def test_safe_nested_path_resolves(root: Path) -> None: ...
def test_url_decoded_paths_are_rejected(root: Path, decoded_path: str) -> None: ...
def test_rejects_null_bytes(root: Path, path_with_null: str) -> None: ...
def test_rejects_windows_special_path_prefixes(root: Path, windows_special_path: str) -> None: ...
def test_traversal_with_mixed_separators_rejected(root: Path) -> None: ...
# Task:
Write a Python test function `test_mixed_separators_not_rejected_early` to paths with mixed separators should not be rejected by the early validation.
Module under test: __future__, pathlib, typing | def test_mixed_separators_not_rejected_early(root: Path) -> None:
"""Paths with mixed separators should not be rejected by the early validation.
On Windows, backslashes are path separators. On Unix, they're valid filename
characters. Safe relative paths with backslashes should not be rejected.
"""
# On Unix, this would look for a directory literally named "sub\nested"
# On Windows, this would be equivalent to "sub/nested/file.js"
# Either way, build_safe_abspath should not reject it as unsafe
abspath = build_safe_abspath(str(root), "sub\\nested/file.js")
# The path passes validation (not None) - it just might not exist
assert abspath is not None | test | 1 | {"function_name": "test_mixed_separators_not_rejected_early", "class_name": null, "qualname": "test_mixed_separators_not_rejected_early", "file_path": "lib/tests/streamlit/web/server/component_file_utils_test.py", "repo_id": "streamlit/streamlit", "loc": 12, "tested_modules": ["__future__", "pathlib", "typing", "urllib.parse", "streamlit.web.server.component_file_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
zhayujie/chatgpt-on-wechat:agent/skills/manager.py:SkillManager.set_skill_enabled | # Context:
class SkillManager:
def __init__(
self,
builtin_dir: Optional[str] = None,
custom_dir: Optional[str] = None,
config: Optional[Dict] = None,
):
"""
Initialize the skill manager.
:param builtin_dir: Built-in skills directory (project root ``skills/``)
:param custom_dir: Custom skills directory (workspace ``skills/``)
:param config: Configuration dictionary
"""
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self.builtin_dir = builtin_dir or os.path.join(project_root, 'skills')
self.custom_dir = custom_dir or os.path.join(project_root, 'workspace', 'skills')
self.config = config or {}
self._skills_config_path = os.path.join(self.custom_dir, SKILLS_CONFIG_FILE)
# skills_config: full skill metadata keyed by name
# { "web-fetch": {"name": ..., "description": ..., "source": ..., "enabled": true}, ... }
self.skills_config: Dict[str, dict] = {}
self.loader = SkillLoader()
self.skills: Dict[str, SkillEntry] = {}
# Load skills on initialization
self.refresh_skills()
def refresh_skills(self): ...
def _load_skills_config(self) -> Dict[str, dict]: ...
def _save_skills_config(self): ...
def _sync_skills_config(self): ...
def is_skill_enabled(self, name: str) -> bool: ...
def get_skills_config(self) -> Dict[str, dict]: ...
def get_skill(self, name: str) -> Optional[SkillEntry]: ...
def list_skills(self) -> List[SkillEntry]: ...
def filter_skills(self, skill_filter: Optional[List[str]], include_disabled: bool) -> List[SkillEntry]: ...
def build_skills_prompt(self, skill_filter: Optional[List[str]]) -> str: ...
def build_skill_snapshot(self, skill_filter: Optional[List[str]], version: Optional[int]) -> SkillSnapshot: ...
def sync_skills_to_workspace(self, target_workspace_dir: str): ...
def get_skill_by_key(self, skill_key: str) -> Optional[SkillEntry]: ...
# Task:
Write a Python method `set_skill_enabled` for the class `SkillManager` to set a skill's enabled state and persist.
Parameters: name: str, enabled: bool | def set_skill_enabled(self, name: str, enabled: bool):
"""
Set a skill's enabled state and persist.
:param name: skill name
:param enabled: True to enable, False to disable
"""
if name not in self.skills_config:
raise ValueError(f"skill '{name}' not found in config")
self.skills_config[name]["enabled"] = enabled
self._save_skills_config() | function_simple | 1 | {"cognitive_complexity": 1, "loc": 11, "code_loc": 4, "docstring_loc": 6, "function_name": "set_skill_enabled", "class_name": "SkillManager", "qualname": "SkillManager.set_skill_enabled", "file_path": "agent/skills/manager.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "class_runnable"} |
PaddlePaddle/PaddleOCR:mcp_server/paddleocr_mcp/pipelines.py:_LayoutParsingHandler._parse_markdown_with_images | # Context:
import re
from typing import Any, Callable, Dict, List, NoReturn, Optional, Type, Union
from mcp.types import ImageContent, TextContent
def _is_file_path(s: str) -> bool: ...
def _is_base64(s: str) -> bool: ...
def _is_url(s: str) -> bool: ...
def _infer_file_type_from_bytes(data: bytes) -> Optional[str]: ...
def get_str_with_max_len(obj: object, max_len: int) -> str: ...
class _EngineWrapper: ...
class PipelineHandler(abc.ABC): ...
class SimpleInferencePipelineHandler(PipelineHandler): ...
class OCRHandler(SimpleInferencePipelineHandler): ...
class PPStructureV3Handler(_LayoutParsingHandler): ...
class PaddleOCRVLHandler(_LayoutParsingHandler): ...
def create_pipeline_handler(*args, **kwargs) -> PipelineHandler: ...
class _LayoutParsingHandler(SimpleInferencePipelineHandler):
def _get_service_endpoint(self) -> str: ...
def _transform_local_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: ...
def _transform_service_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]: ...
async def _parse_local_result(self, local_result: Dict, ctx: Context) -> Dict: ...
async def _parse_service_result(self, service_result: Dict, ctx: Context) -> Dict: ...
async def _process_image_data(self, img_data: str, ctx: Context) -> str: ...
async def _log_completion_stats(self, result: Dict, ctx: Context) -> None: ...
async def _format_output(self, result: Dict, detailed: bool, ctx: Context, **kwargs) -> Union[str, List[Union[TextContent, ImageContent]]]: ...
# Task:
Write a Python method `_parse_markdown_with_images` for the class `_LayoutParsingHandler` to parse markdown text and return mixed list of text and images.
Parameters: markdown_text: str, images_mapping: Dict[str, str]
Returns: List[Union[TextContent, ImageContent]] | def _parse_markdown_with_images(
self, markdown_text: str, images_mapping: Dict[str, str]
) -> List[Union[TextContent, ImageContent]]:
"""Parse markdown text and return mixed list of text and images."""
if not images_mapping:
return [TextContent(type="text", text=markdown_text)]
content_list = []
img_pattern = r'<img[^>]+src="([^"]+)"[^>]*>'
last_pos = 0
for match in re.finditer(img_pattern, markdown_text):
text_before = markdown_text[last_pos : match.start()]
if text_before.strip():
content_list.append(TextContent(type="text", text=text_before))
img_src = match.group(1)
if img_src in images_mapping:
content_list.append(
ImageContent(
type="image",
data=images_mapping[img_src],
mimeType="image/jpeg",
)
)
last_pos = match.end()
remaining_text = markdown_text[last_pos:]
if remaining_text.strip():
content_list.append(TextContent(type="text", text=remaining_text))
return content_list or [TextContent(type="text", text=markdown_text)] | function_complex | 0 | {"cognitive_complexity": 8, "loc": 33, "code_loc": 23, "docstring_loc": 1, "function_name": "_parse_markdown_with_images", "class_name": "_LayoutParsingHandler", "qualname": "_LayoutParsingHandler._parse_markdown_with_images", "file_path": "mcp_server/paddleocr_mcp/pipelines.py", "repo_id": "PaddlePaddle/PaddleOCR", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py:MoRIIOWriter.ensure_worker_started | # Context:
import threading
class MoRIIOWrapper: ...
class MoRIIOWriter:
def __init__(self, worker: "MoRIIOConnectorWorker"):
"""Initialize the writer.
Args:
worker: Reference to the parent worker
"""
self._worker_ref: weakref_ref[MoRIIOConnectorWorker] = weakref_ref(worker)
self._write_task_q: Queue[WriteTask] = Queue()
self._write_worker_started = False
self._write_worker_lock = threading.Lock()
self._deferred_tasks: list[WriteTask] = []
def worker(self) -> 'MoRIIOConnectorWorker': ...
def schedule_write(self, task: WriteTask) -> None: ...
def _write_worker_loop(self) -> None: ...
def _process_deferred_tasks(self) -> None: ...
def _is_remote_ready(self, task: WriteTask) -> bool: ...
def _get_remote_alloc_info(self, request_id: str) -> RemoteAllocInfo: ...
def _execute_write_task(self, task: WriteTask) -> None: ...
def _prepare_transfer_plan(self, task: WriteTask, request_info: RemoteAllocInfo, remote_moriio_meta: MoRIIOAgentMetadata) -> LayerTransferPlan: ...
def _do_layer_write(self, plan: LayerTransferPlan, sessions: list) -> None: ...
def _finalize_if_complete(self, task: WriteTask, request_info: RemoteAllocInfo) -> None: ...
# Task:
Write a Python method `ensure_worker_started` for the class `MoRIIOWriter` to ensure the background write worker is running.
Returns: None | def ensure_worker_started(self) -> None:
"""Ensure the background write worker is running."""
if self._write_worker_started:
return
self._write_worker_started = True
with self._write_worker_lock:
thread = threading.Thread(
target=self._write_worker_loop, daemon=True, name="moriio-write-worker"
)
thread.start()
logger.info("Started MoRIIO write worker thread") | function_simple | 1 | {"cognitive_complexity": 1, "loc": 11, "code_loc": 9, "docstring_loc": 1, "function_name": "ensure_worker_started", "class_name": "MoRIIOWriter", "qualname": "MoRIIOWriter.ensure_worker_started", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/moriio/moriio_engine.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
geekcomputers/Python:Street_Fighter/src/main.py:draw_gradient_text | # Context:
def resource_path(relative_path): ...
def draw_text(text, font, color, x, y): ...
def blur_bg(image): ...
def draw_bg(image, is_game_started): ...
def draw_button(text, font, text_col, button_col, x, y, width, height): ...
def victory_screen(winner_img): ...
def main_menu(): ...
def scores_screen(): ...
def reset_game(): ...
def draw_health_bar(health, x, y): ...
def countdown(): ...
def game_loop(): ...
# Task:
Write a Python function `draw_gradient_text` to draws a gradient text by layering multiple text surfaces with slight offsets.
Parameters: text, font, x, y, colors | def draw_gradient_text(text, font, x, y, colors):
"""
Draws a gradient text by layering multiple text surfaces with slight offsets.
"""
offset = 2
for i, color in enumerate(colors):
img = font.render(text, True, color)
screen.blit(img, (x + i * offset, y + i * offset)) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 8, "code_loc": 4, "docstring_loc": 3, "function_name": "draw_gradient_text", "class_name": null, "qualname": "draw_gradient_text", "file_path": "Street_Fighter/src/main.py", "repo_id": "geekcomputers/Python", "has_docstring": true, "runnable_level": "file_runnable"} |
streamlit/streamlit:lib/streamlit/components/v2/component_registry.py:module_doc | Write a module-level docstring for the Python module `component_registry` which contains class `BidiComponentDefinition`, class `BidiComponentRegistry`. | Component registry for Custom Components v2.
This module defines the data model and in-memory registry for Custom Components
v2. During development, component assets (JS/CSS/HTML) may change on disk as
build tools produce new outputs.
See Also
--------
- :class:`streamlit.components.v2.component_file_watcher.ComponentFileWatcher`
for directory watching and change notifications. | documentation | 1 | {"doc_type": "module", "module_name": "component_registry", "file_path": "lib/streamlit/components/v2/component_registry.py", "repo_id": "streamlit/streamlit", "char_length": 384} |
huggingface/transformers:src/transformers/utils/output_capturing.py:recursively_install_hooks | # Context:
from torch import nn
from ..modeling_utils import PreTrainedModel
class OutputRecorder: ...
class CompileableContextVar: ...
def install_output_capuring_hook(module: nn.Module, key: str, index: int) -> None: ...
def install_all_output_capturing_hooks(model: PreTrainedModel, prefix: str | None) -> None: ...
def maybe_install_capturing_hooks(model: PreTrainedModel) -> None: ...
def capture_outputs(func, tie_last_hidden_states): ...
# Task:
Write a Python function `recursively_install_hooks` to recursively install all output capturing hooks on all submodules of `parent_module`.
Parameters: parent_module: nn.Module, module_name: str, capture_tasks: list[tuple[str, OutputRecorder]]
Returns: None | def recursively_install_hooks(
parent_module: nn.Module, module_name: str, capture_tasks: list[tuple[str, OutputRecorder]]
) -> None:
"""
Recursively install all output capturing hooks on all submodules of `parent_module`.
Note that we need to use this recursive approach instead of simply iterating over all modules, because we want
to respect the `capture_tasks` of all individual submodels (`PreTrainedModel` instances) in the graph. That is, once
we reach a submodel in the graph, its children should use this submodel's `capture_tasks`, but other parts of the graph
should not.
"""
from ..modeling_utils import PreTrainedModel
# First dispatch to children if needed
for name, module in parent_module.named_children():
# Keep dispatching the same `capture_tasks`
if not isinstance(module, PreTrainedModel):
recursively_install_hooks(module, f"{module_name}.{name}", capture_tasks)
# New Submodel: we need to dispatch its own `capture_tasks`
else:
install_all_output_capturing_hooks(module, prefix=f"{module_name}.{name}")
# Potentially install the hook on current `parent_module`
for key, specs in capture_tasks:
# The second check is for multimodals where only backbone layer suffix is available
if (specs.target_class is not None and isinstance(parent_module, specs.target_class)) or (
specs.class_name is not None and module_name.endswith(specs.class_name)
):
if specs.layer_name is not None and specs.layer_name not in module_name:
continue
install_output_capuring_hook(parent_module, key, specs.index) | function_complex | 0 | {"cognitive_complexity": 14, "loc": 30, "code_loc": 13, "docstring_loc": 7, "function_name": "recursively_install_hooks", "class_name": null, "qualname": "recursively_install_hooks", "file_path": "src/transformers/utils/output_capturing.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/test_autoscaler_azure.py:TestAzureAvailabilityZonePrecedence.test_provider_auto_allows_auto_selection | # Context:
class TestAzureAvailabilityZones(unittest.TestCase): ...
class TestAzureAvailabilityZonePrecedence(unittest.TestCase):
def setUp(self): ...
def _create_mock_provider(self, provider_config): ...
def _extract_zone_logic(self, provider, node_config): ...
def test_node_availability_zone_overrides_provider(self): ...
def test_provider_availability_zone_used_when_no_node_override(self): ...
def test_none_disables_zones_at_node_level(self): ...
def test_no_zones_when_neither_provider_nor_node_specify(self): ...
def test_node_empty_string_overrides_provider_zones(self): ...
def test_node_auto_overrides_provider_zones(self): ...
def test_provider_none_disables_zones(self): ...
def test_provider_empty_string_allows_auto_selection(self): ...
def test_node_null_overrides_provider_zones(self): ...
def test_provider_null_disables_zones(self): ...
def test_complex_override_scenario(self): ...
def test_mixed_case_precedence(self): ...
def test_whitespace_handling_in_precedence(self): ...
# Task:
Write a Python test method `test_provider_auto_allows_auto_selection` in test class `TestAzureAvailabilityZonePrecedence` to test that provider-level 'auto' allows auto-selection.
Module under test: ray.autoscaler._private._azure.node_provider | def test_provider_auto_allows_auto_selection(self):
"""Test that provider-level 'auto' allows auto-selection."""
provider = self._create_mock_provider({"availability_zone": "auto"})
node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
zones, source = self._extract_zone_logic(provider, node_config)
self.assertEqual(zones, [])
self.assertEqual(source, "provider availability_zone") | test | 0 | {"function_name": "test_provider_auto_allows_auto_selection", "class_name": "TestAzureAvailabilityZonePrecedence", "qualname": "TestAzureAvailabilityZonePrecedence.test_provider_auto_allows_auto_selection", "file_path": "python/ray/tests/test_autoscaler_azure.py", "repo_id": "ray-project/ray", "loc": 9, "tested_modules": ["ray.autoscaler._private._azure.node_provider"], "has_docstring": true, "runnable_level": "class_runnable"} |
huggingface/transformers:tests/models/video_llama_3/test_image_processing_video_llama_3.py:VideoLlama3ImageProcessingTest.test_call_numpy | # Context:
import numpy as np
import torch
class VideoLlama3ImageProcessingTester: ...
class VideoLlama3ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = VideoLlama3ImageProcessor if is_vision_available() else None
fast_image_processing_class = VideoLlama3ImageProcessorFast if is_torchvision_available() else None
def setUp(self): ...
def image_processor_dict(self): ...
def test_image_processor_properties(self): ...
def test_image_processor_to_json_string(self): ...
def test_select_best_resolution(self): ...
def test_call_pil(self): ...
def test_call_pytorch(self): ...
def test_call_numpy_4_channels(self): ...
def test_nested_input(self): ...
def test_video_inputs(self): ...
def test_custom_image_size(self): ...
def test_custom_pixels(self): ...
def test_slow_fast_equivalence(self): ...
def test_slow_fast_equivalence_batched(self): ...
def test_get_num_patches_without_images(self): ...
# Task:
Write a Python test method `test_call_numpy` in test class `VideoLlama3ImageProcessingTest` to verify the behavior of `call_numpy`.
Module under test: transformers.image_utils, transformers.models.video_llama_3.image_processing_video_llama_3, transformers.testing_utils | def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, numpify=True)
for image in image_inputs:
self.assertIsInstance(image[0], np.ndarray)
# Test not batched input
process_out = image_processing(image_inputs[0], return_tensors="pt")
encoded_images = process_out.pixel_values
image_grid_thws = process_out.image_grid_thw
expected_output_image_shape = (5329, 588)
expected_image_grid_thws = torch.Tensor([[1, 73, 73]])
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
self.assertTrue((image_grid_thws == expected_image_grid_thws).all())
# Test batched
process_out = image_processing(image_inputs, return_tensors="pt")
encoded_images = process_out.pixel_values
image_grid_thws = process_out.image_grid_thw
expected_output_image_shape = (37303, 588)
expected_image_grid_thws = torch.Tensor([[1, 73, 73]] * 7)
self.assertEqual(tuple(encoded_images.shape), expected_output_image_shape)
self.assertTrue((image_grid_thws == expected_image_grid_thws).all()) | test | 0 | {"function_name": "test_call_numpy", "class_name": "VideoLlama3ImageProcessingTest", "qualname": "VideoLlama3ImageProcessingTest.test_call_numpy", "file_path": "tests/models/video_llama_3/test_image_processing_video_llama_3.py", "repo_id": "huggingface/transformers", "loc": 26, "tested_modules": ["transformers.image_utils", "transformers.models.video_llama_3.image_processing_video_llama_3", "transformers.testing_utils", "transformers.utils", "test_image_processing_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
huggingface/transformers:src/transformers/models/kosmos2_5/modeling_kosmos2_5.py:Kosmos2_5PreTrainedModel:class_doc | Write a class-level docstring for `Kosmos2_5PreTrainedModel` (inherits from PreTrainedModel) which has methods: `_init_weights`. | An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models. | documentation | 0 | {"doc_type": "class", "class_name": "Kosmos2_5PreTrainedModel", "file_path": "src/transformers/models/kosmos2_5/modeling_kosmos2_5.py", "repo_id": "huggingface/transformers", "char_length": 120, "methods": ["_init_weights"]} |
browser-use/browser-use:tests/ci/infrastructure/test_registry_validation.py:TestDecoratedFunctionBehavior:class_doc | Write a class-level docstring for `TestDecoratedFunctionBehavior` which has methods: `test_decorated_function_only_accepts_kwargs`, `test_decorated_function_accepts_params_model`, `test_decorated_function_ignores_extra_kwargs`. | Test behavior of decorated action functions (from normalization tests) | documentation | 0 | {"doc_type": "class", "class_name": "TestDecoratedFunctionBehavior", "file_path": "tests/ci/infrastructure/test_registry_validation.py", "repo_id": "browser-use/browser-use", "char_length": 70, "methods": ["test_decorated_function_only_accepts_kwargs", "test_decorated_function_accepts_params_model", "test_decorated_function_ignores_extra_kwargs"]} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py:EnterpriseActionKitToolAdapter.tools | # Context:
from crewai.tools import BaseTool
def get_enterprise_api_base_url() -> str: ...
class EnterpriseActionTool(BaseTool): ...
class EnterpriseActionKitToolAdapter:
def __init__(
self,
enterprise_action_token: str,
enterprise_api_base_url: str | None = None,
):
"""Initialize the adapter with an enterprise action token."""
self._set_enterprise_action_token(enterprise_action_token)
self._actions_schema = {} # type: ignore[var-annotated]
self._tools = None
self.enterprise_api_base_url = (
enterprise_api_base_url or get_enterprise_api_base_url()
)
def _fetch_actions(self): ...
def _generate_detailed_description(self, schema: dict[str, Any], indent: int) -> list[str]: ...
def _create_tools(self): ...
def _set_enterprise_action_token(self, enterprise_action_token: str | None): ...
def __enter__(self): ...
def __exit__(self, exc_type, exc_val, exc_tb): ...
# Task:
Write a Python method `tools` for the class `EnterpriseActionKitToolAdapter` to get the list of tools created from enterprise actions.
Returns: list[BaseTool] | def tools(self) -> list[BaseTool]:
"""Get the list of tools created from enterprise actions."""
if self._tools is None:
self._fetch_actions()
self._create_tools()
return self._tools or [] | function_simple | 0 | {"cognitive_complexity": 2, "loc": 6, "code_loc": 4, "docstring_loc": 1, "function_name": "tools", "class_name": "EnterpriseActionKitToolAdapter", "qualname": "EnterpriseActionKitToolAdapter.tools", "file_path": "lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/distributed/device_communicators/all2all.py:AgRsAll2AllManager:class_doc | Write a class-level docstring for `AgRsAll2AllManager` (inherits from All2AllManagerBase) which has methods: `__init__`, `dispatch_router_logits`, `dispatch`, `combine`, `destroy`. | An implementation of all2all communication based on
all-gather (dispatch) and reduce-scatter (combine). | documentation | 1 | {"doc_type": "class", "class_name": "AgRsAll2AllManager", "file_path": "vllm/distributed/device_communicators/all2all.py", "repo_id": "vllm-project/vllm", "char_length": 103, "methods": ["__init__", "dispatch_router_logits", "dispatch", "combine", "destroy"]} |
Comfy-Org/ComfyUI:comfy_api_nodes/nodes_kling.py:get_video_url_from_response | # Context:
def _generate_storyboard_inputs(count: int) -> list: ...
def normalize_omni_prompt_references(prompt: str) -> str: ...
async def finish_omni_video_task(cls: type[IO.ComfyNode], response: TaskStatusResponse) -> IO.NodeOutput: ...
def is_valid_camera_control_configs(configs: list[float]) -> bool: ...
def is_valid_task_creation_response(response: KlingText2VideoResponse) -> bool: ...
def is_valid_video_response(response: KlingText2VideoResponse) -> bool: ...
def is_valid_image_response(response: KlingVirtualTryOnResponse) -> bool: ...
def validate_prompts(prompt: str, negative_prompt: str, max_length: int) -> bool: ...
def validate_task_creation_response(response) -> None: ...
def validate_video_result_response(response) -> None: ...
def validate_image_result_response(response) -> None: ...
def validate_input_image(image: torch.Tensor) -> None: ...
def get_video_from_response(response) -> KlingVideoResult: ...
def get_images_from_response(response) -> list[KlingImageResult]: ...
def get_images_urls_from_response(response) -> str | None: ...
async def image_result_to_node_output(images: list[KlingImageResult]) -> torch.Tensor: ...
async def execute_text2video(cls: type[IO.ComfyNode], prompt: str, negative_prompt: str, cfg_scale: float, model_name: str, model_mode: str, duration: str, aspect_ratio: str, camera_control: KlingCameraControl | None) -> IO.NodeOutput: ...
async def execute_image2video(cls: type[IO.ComfyNode], start_frame: torch.Tensor, prompt: str, negative_prompt: str, model_name: str, cfg_scale: float, model_mode: str, aspect_ratio: str, duration: str, camera_control: KlingCameraControl | None, end_frame: torch.Tensor | None) -> IO.NodeOutput: ...
async def execute_video_effect(cls: type[IO.ComfyNode], dual_character: bool, effect_scene: KlingDualCharacterEffectsScene | KlingSingleImageEffectsScene, model_name: str, duration: KlingVideoGenDuration, image_1: torch.Tensor, image_2: torch.Tensor | None, model_mode: KlingVideoGenMode | None) -> tuple[InputImpl.VideoFromFile, str, str]: ...
async def execute_lipsync(cls: type[IO.ComfyNode], video: Input.Video, audio: Input.Audio | None, voice_language: str | None, model_mode: str | None, text: str | None, voice_speed: float | None, voice_id: str | None) -> IO.NodeOutput: ...
class KlingCameraControls(IO.ComfyNode): ...
class KlingTextToVideoNode(IO.ComfyNode): ...
class OmniProTextToVideoNode(IO.ComfyNode): ...
class OmniProFirstLastFrameNode(IO.ComfyNode): ...
class OmniProImageToVideoNode(IO.ComfyNode): ...
class OmniProVideoToVideoNode(IO.ComfyNode): ...
class OmniProEditVideoNode(IO.ComfyNode): ...
class OmniProImageNode(IO.ComfyNode): ...
class KlingCameraControlT2VNode(IO.ComfyNode): ...
class KlingImage2VideoNode(IO.ComfyNode): ...
class KlingCameraControlI2VNode(IO.ComfyNode): ...
class KlingStartEndFrameNode(IO.ComfyNode): ...
class KlingVideoExtendNode(IO.ComfyNode): ...
class KlingDualCharacterVideoEffectNode(IO.ComfyNode): ...
class KlingSingleImageVideoEffectNode(IO.ComfyNode): ...
class KlingLipSyncAudioToVideoNode(IO.ComfyNode): ...
class KlingLipSyncTextToVideoNode(IO.ComfyNode): ...
class KlingVirtualTryOnNode(IO.ComfyNode): ...
class KlingImageGenerationNode(IO.ComfyNode): ...
class TextToVideoWithAudio(IO.ComfyNode): ...
class ImageToVideoWithAudio(IO.ComfyNode): ...
class MotionControl(IO.ComfyNode): ...
class KlingVideoNode(IO.ComfyNode): ...
class KlingFirstLastFrameNode(IO.ComfyNode): ...
class KlingAvatarNode(IO.ComfyNode): ...
class KlingExtension(ComfyExtension): ...
async def comfy_entrypoint() -> KlingExtension: ...
# Task:
Write a Python function `get_video_url_from_response` to returns the first video url from the Kling video generation task result.
Parameters: response
Returns: str | None | def get_video_url_from_response(response) -> str | None:
"""Returns the first video url from the Kling video generation task result.
Will not raise an error if the response is not valid.
"""
if response and is_valid_video_response(response):
return str(get_video_from_response(response).url)
else:
return None | function_simple | 1 | {"cognitive_complexity": 3, "loc": 8, "code_loc": 4, "docstring_loc": 3, "function_name": "get_video_url_from_response", "class_name": null, "qualname": "get_video_url_from_response", "file_path": "comfy_api_nodes/nodes_kling.py", "repo_id": "Comfy-Org/ComfyUI", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:tests/ci/test_doctor_command.py:test_summarize_checks_with_errors | # Context:
from browser_use.skill_cli.commands import doctor
async def test_doctor_handle_returns_valid_structure(): ...
def test_check_package_installed(): ...
def test_check_browser_returns_valid_structure(): ...
def test_check_api_key_with_env_var(monkeypatch): ...
def test_check_api_key_missing(monkeypatch): ...
def test_check_cloudflared_returns_valid_structure(): ...
async def test_check_network_returns_valid_structure(): ...
def test_summarize_checks_all_ok(): ...
def test_summarize_checks_mixed(): ...
# Task:
Write a Python test function `test_summarize_checks_with_errors` to test _summarize_checks with errors.
Module under test: browser_use.skill_cli.commands, browser_use.skill_cli | def test_summarize_checks_with_errors():
"""Test _summarize_checks with errors."""
checks = {
'check1': {'status': 'ok'},
'check2': {'status': 'error'},
}
summary = doctor._summarize_checks(checks)
assert '1/2' in summary
assert '1 error' in summary | test | 0 | {"function_name": "test_summarize_checks_with_errors", "class_name": null, "qualname": "test_summarize_checks_with_errors", "file_path": "tests/ci/test_doctor_command.py", "repo_id": "browser-use/browser-use", "loc": 9, "tested_modules": ["browser_use.skill_cli.commands", "browser_use.skill_cli"], "has_docstring": true, "runnable_level": "project_runnable"} |
scrapy/scrapy:tests/test_downloader_handler_twisted_http11.py:module_doc | Write a module-level docstring for the Python module `test_downloader_handler_twisted_http11` which contains class `HTTP11DownloadHandlerMixin`, class `TestHttp11`, class `TestHttps11`, class `TestSimpleHttps`, class `TestHttps11WrongHostname`. | Tests for scrapy.core.downloader.handlers.http11.HTTP11DownloadHandler. | documentation | 1 | {"doc_type": "module", "module_name": "test_downloader_handler_twisted_http11", "file_path": "tests/test_downloader_handler_twisted_http11.py", "repo_id": "scrapy/scrapy", "char_length": 71} |
apache/airflow:task-sdk/tests/task_sdk/definitions/test_callback.py:TestCallback.test_callback_equality | # Context:
import pytest
from airflow.sdk.definitions.callback import AsyncCallback, Callback, SyncCallback
async def empty_async_callback_for_deadline_tests(): ...
def empty_sync_callback_for_deadline_tests(): ...
class TestAsyncCallback: ...
class TestSyncCallback: ...
class TestCallback:
def test_init_error_reserved_kwarg(self, subclass, callable): ...
def test_get_callback_path_happy_cases(self, callback_callable, expected_path): ...
def test_get_callback_path_error_cases(self, callback_callable, error_type): ...
def test_callback_hash_and_set_behavior(self, callback_class, args1, args2, should_be_same_hash): ...
# Task:
Write a Python test method `test_callback_equality` in test class `TestCallback` to verify the behavior of `callback_equality`.
Module under test: __future__, typing, airflow.sdk._shared.module_loading | def test_callback_equality(self, callback1_args, callback2_args, should_equal):
callback1 = AsyncCallback(*callback1_args)
callback2 = AsyncCallback(*callback2_args)
assert (callback1 == callback2) == should_equal | test | 1 | {"function_name": "test_callback_equality", "class_name": "TestCallback", "qualname": "TestCallback.test_callback_equality", "file_path": "task-sdk/tests/task_sdk/definitions/test_callback.py", "repo_id": "apache/airflow", "loc": 4, "tested_modules": ["__future__", "typing", "airflow.sdk._shared.module_loading", "airflow.sdk.definitions.callback", "airflow.serialization.serde"], "has_docstring": false, "runnable_level": "project_runnable"} |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/arxiv.py:fetch_arxiv_papers | # Context:
import httpx
import xml.etree.ElementTree as ET
from typing import List, Dict, Any
# Task:
Write a Python function `fetch_arxiv_papers` to fetch recent AI/ML papers from ArXiv.
Parameters: limit: int
Returns: List[Dict[str, Any]] | def fetch_arxiv_papers(limit: int = 5) -> List[Dict[str, Any]]:
"""
Fetch recent AI/ML papers from ArXiv.
Args:
limit: Maximum number of papers to return.
Returns:
List of signal dictionaries with standardized schema.
"""
base_url = "https://export.arxiv.org/api/query"
params = {
"search_query": "cat:cs.AI OR cat:cs.LG",
"start": 0,
"max_results": limit,
"sortBy": "submittedDate",
"sortOrder": "descending"
}
signals = []
try:
response = httpx.get(base_url, params=params, timeout=15.0)
response.raise_for_status()
# Parse Atom XML response
root = ET.fromstring(response.content)
ns = {"atom": "http://www.w3.org/2005/Atom"}
for entry in root.findall("atom:entry", ns):
title_elem = entry.find("atom:title", ns)
summary_elem = entry.find("atom:summary", ns)
id_elem = entry.find("atom:id", ns)
published_elem = entry.find("atom:published", ns)
title = title_elem.text.strip() if title_elem is not None else "Untitled"
summary = summary_elem.text.strip() if summary_elem is not None else ""
arxiv_id = id_elem.text.strip() if id_elem is not None else ""
published = published_elem.text if published_elem is not None else ""
# Get PDF link
pdf_link = arxiv_id
link_elem = entry.find("atom:link[@title='pdf']", ns)
if link_elem is not None:
pdf_link = link_elem.attrib.get("href", arxiv_id)
signal = {
"id": arxiv_id,
"source": "arxiv",
"title": title,
"description": summary[:500] + "..." if len(summary) > 500 else summary,
"url": arxiv_id,
"metadata": {
"pdf": pdf_link,
"published": published
}
}
signals.append(signal)
except httpx.HTTPError as e:
print(f"[ArXiv Adapter] HTTP error: {e}")
except ET.ParseError as e:
print(f"[ArXiv Adapter] XML parse error: {e}")
except Exception as e:
print(f"[ArXiv Adapter] Error: {e}")
return signals | function_complex | 0 | {"cognitive_complexity": 23, "loc": 67, "code_loc": 46, "docstring_loc": 9, "function_name": "fetch_arxiv_papers", "class_name": null, "qualname": "fetch_arxiv_papers", "file_path": "advanced_ai_agents/multi_agent_apps/devpulse_ai/adapters/arxiv.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "plib_runnable"} |
exo-explore/exo:src/exo/shared/models/model_cards.py:delete_custom_card | # Context:
from exo.shared.types.common import ModelId
async def _refresh_card_cache(): ...
def _is_image_card(card: 'ModelCard') -> bool: ...
async def get_model_cards() -> list['ModelCard']: ...
class ModelTask(str, Enum): ...
class ComponentInfo(CamelCaseModel): ...
class ModelCard(CamelCaseModel): ...
def is_custom_card(model_id: ModelId) -> bool: ...
class ConfigData(BaseModel): ...
async def fetch_config_data(model_id: ModelId) -> ConfigData: ...
async def fetch_safetensors_size(model_id: ModelId) -> Memory: ...
# Task:
Write a Python async function `delete_custom_card` to delete a user-added custom model card. Returns True if deleted.
Parameters: model_id: ModelId
Returns: bool | async def delete_custom_card(model_id: ModelId) -> bool:
"""Delete a user-added custom model card. Returns True if deleted."""
card_path = _custom_cards_dir / (ModelId(model_id).normalize() + ".toml")
if await card_path.exists():
await card_path.unlink()
_card_cache.pop(model_id, None)
return True
return False | function_simple | 0 | {"cognitive_complexity": 1, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "delete_custom_card", "class_name": null, "qualname": "delete_custom_card", "file_path": "src/exo/shared/models/model_cards.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:tests-unit/folder_paths_test/system_user_test.py:TestGetSystemUserDirectory.test_default_name | # Context:
from folder_paths import (
get_system_user_directory,
get_public_user_directory,
get_user_directory,
set_user_directory,
)
def mock_user_directory(): ...
class TestGetPublicUserDirectory: ...
class TestBackwardCompatibility: ...
class TestEdgeCases: ...
class TestGetSystemUserDirectory:
def test_custom_name(self, mock_user_directory): ...
def test_name_with_underscore(self, mock_user_directory): ...
def test_empty_name_raises(self): ...
def test_none_name_raises(self): ...
def test_name_starting_with_underscore_raises(self): ...
def test_path_traversal_raises(self): ...
def test_path_traversal_middle_raises(self): ...
def test_special_chars_raise(self): ...
def test_returns_absolute_path(self, mock_user_directory): ...
# Task:
Write a Python test method `test_default_name` in test class `TestGetSystemUserDirectory` to test default 'system' name.
Module under test: folder_paths | def test_default_name(self, mock_user_directory):
"""Test default 'system' name."""
path = get_system_user_directory()
assert path.endswith("__system")
assert mock_user_directory in path | test | 1 | {"function_name": "test_default_name", "class_name": "TestGetSystemUserDirectory", "qualname": "TestGetSystemUserDirectory.test_default_name", "file_path": "tests-unit/folder_paths_test/system_user_test.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 5, "tested_modules": ["folder_paths"], "has_docstring": true, "runnable_level": "project_runnable"} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.