sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
unclecode/crawl4ai:crawl4ai/script/c4a_result.py:ErrorDetail.to_json | # Context:
import json
class ErrorType(Enum): ...
class Severity(Enum): ...
class Suggestion: ...
class WarningDetail: ...
class CompilationResult: ...
class ValidationResult: ...
class ErrorDetail:
def to_dict(self) -> dict: ...
def formatted_message(self) -> str: ...
def simple_message(self) -> str: ...
# Task:
Write a Python method `to_json` for the class `ErrorDetail` to convert to JSON string.
Returns: str | def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps(self.to_dict(), indent=2) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "to_json", "class_name": "ErrorDetail", "qualname": "ErrorDetail.to_json", "file_path": "crawl4ai/script/c4a_result.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "class_runnable"} |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_agent_with_structured_output.py:test_astructured_fn_agent | # Context:
import pytest
from llama_index.core.agent.workflow import (
AgentWorkflow,
AgentOutput,
AgentStreamStructuredOutput,
)
from llama_index.core.agent.workflow import FunctionAgent
class TestLLM(LLM): ...
class Structure(BaseModel): ...
def function_agent_output_cls(): ...
def structured_function_fn(*args, **kwargs) -> dict: ...
async def astructured_function_fn(*args, **kwargs) -> dict: ...
def function_agent_struct_fn(): ...
def function_agent_astruct_fn(): ...
async def test_output_cls_agent(function_agent_output_cls: FunctionAgent): ...
async def test_structured_fn_agent(function_agent_struct_fn: FunctionAgent): ...
async def test_structured_output_agentworkflow(function_agent_output_cls: FunctionAgent) -> None: ...
async def test_structured_output_fn_agentworkflow(function_agent_output_cls: FunctionAgent) -> None: ...
async def test_astructured_output_fn_agentworkflow(function_agent_output_cls: FunctionAgent) -> None: ...
async def test_multi_agent_openai() -> None: ...
async def test_from_tools_or_functions() -> None: ...
async def test_multi_agent_openai_from_tools() -> None: ...
# Task:
Write a Python test function `test_astructured_fn_agent` to test single agent with state management.
Module under test: typing, typing_extensions, pydantic | async def test_astructured_fn_agent(function_agent_astruct_fn: FunctionAgent):
"""Test single agent with state management."""
handler = function_agent_astruct_fn.run(user_msg="test")
async for event in handler.stream_events():
if isinstance(event, AgentStreamStructuredOutput):
streaming_event = True
assert streaming_event
response = await handler
assert "Success with the FunctionAgent" in str(response.response)
assert response.get_pydantic_model(Structure) == Structure(
hello="guten tag", world=3
) | test | 1 | {"function_name": "test_astructured_fn_agent", "class_name": null, "qualname": "test_astructured_fn_agent", "file_path": "llama-index-core/tests/agent/workflow/test_agent_with_structured_output.py", "repo_id": "run-llama/llama_index", "loc": 12, "tested_modules": ["typing", "typing_extensions", "pydantic", "llama_index.core.types", "llama_index.core.llms"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/checkpoint/checkpoint_filter.py:CheckpointLoader.__init__ | # Context:
from typing import List, Optional
import pyarrow
from ray.data.datasource import PathPartitionFilter
class CheckpointFilter(abc.ABC): ...
def _combine_chunks(ckpt_block: pyarrow.Table) -> pyarrow.Table: ...
class IdColumnCheckpointLoader(CheckpointLoader): ...
class BatchBasedCheckpointFilter(CheckpointFilter): ...
class CheckpointLoader:
def load_checkpoint(self) -> ObjectRef[Block]: ...
def _preprocess_data_pipeline(self, checkpoint_ds: ray.data.Dataset) -> ray.data.Dataset: ...
def _postprocess_block(self, block_ref: ObjectRef[Block]) -> ObjectRef[Block]: ...
def _validate_loaded_checkpoint(self, schema: Schema, metadata: BlockMetadata) -> None: ...
# Task:
Write a Python method `__init__` for the class `CheckpointLoader` to initialize the CheckpointLoader.
Parameters: checkpoint_path: str, filesystem: pyarrow.fs.FileSystem, id_column: str, checkpoint_path_partition_filter: Optional[PathPartitionFilter] | def __init__(
self,
checkpoint_path: str,
filesystem: pyarrow.fs.FileSystem,
id_column: str,
checkpoint_path_partition_filter: Optional[PathPartitionFilter] = None,
):
"""Initialize the CheckpointLoader.
Args:
checkpoint_path: The path to the checkpoint
filesystem: The filesystem to use
id_column: The name of the ID column
checkpoint_path_partition_filter: Filter for checkpoint files to load during
restoration when reading from `checkpoint_path`.
"""
self.checkpoint_path = checkpoint_path
self.filesystem = filesystem
self.id_column = id_column
self.checkpoint_path_partition_filter = checkpoint_path_partition_filter | function_simple | 0 | {"cognitive_complexity": 0, "loc": 20, "code_loc": 4, "docstring_loc": 9, "function_name": "__init__", "class_name": "CheckpointLoader", "qualname": "CheckpointLoader.__init__", "file_path": "python/ray/data/checkpoint/checkpoint_filter.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"} |
unclecode/crawl4ai:tests/deep_crawling/test_deep_crawl_resume.py:TestDFSRegressions:class_doc | Write a class-level docstring for `TestDFSRegressions` which has methods: `test_inherits_bfs_params`, `test_dfs_seen_initialized`. | Ensure DFS works identically when new params not used. | documentation | 1 | {"doc_type": "class", "class_name": "TestDFSRegressions", "file_path": "tests/deep_crawling/test_deep_crawl_resume.py", "repo_id": "unclecode/crawl4ai", "char_length": 54, "methods": ["test_inherits_bfs_params", "test_dfs_seen_initialized"]} |
huggingface/transformers:src/transformers/models/pix2struct/image_processing_pix2struct_fast.py:Pix2StructImageProcessorFast.preprocess | # Context:
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_utils import ChannelDimension, ImageInput, SizeDict
from ...processing_utils import Unpack
from .image_processing_pix2struct import Pix2StructImageProcessorKwargs, render_text
def torch_extract_patches(image_tensor, patch_height, patch_width): ...
class Pix2StructImageProcessorFast(BaseImageProcessorFast):
rescale_factor = None
do_normalize = True
do_convert_rgb = True
patch_size = {"height": 16, "width": 16}
max_patches = 2048
is_vqa = False
valid_kwargs = Pix2StructImageProcessorKwargs
model_input_names = ["flattened_patches", "attention_mask"]
def _further_process_kwargs(self, patch_size: dict[str, int] | None, **kwargs) -> dict: ...
def _validate_preprocess_kwargs(self, **kwargs): ...
def render_header(self, image: torch.Tensor, header: str, font_bytes: bytes | None, font_path: str | None) -> torch.Tensor: ...
def normalize(self, images: torch.Tensor) -> torch.Tensor: ...
def extract_flattened_patches(self, images: torch.Tensor, max_patches: int, patch_size: SizeDict) -> torch.Tensor: ...
def _preprocess_image_like_inputs(self, images: ImageInput, header_text: str | list[str] | None, do_convert_rgb: bool, input_data_format: ChannelDimension, device: str | torch.device | None, **kwargs) -> BatchFeature: ...
def _preprocess(self, images: list[torch.Tensor], do_normalize: bool, max_patches: int, patch_size: SizeDict, return_tensors: str | TensorType | None, disable_grouping: bool, **kwargs) -> BatchFeature: ...
# Task:
Write a Python method `preprocess` for the class `Pix2StructImageProcessorFast` to header_text (`Union[str, list[str]]`, *optional*):.
Parameters: images: ImageInput, header_text: str | list[str] | None
Returns: BatchFeature | def preprocess(
self,
images: ImageInput,
header_text: str | list[str] | None = None,
**kwargs: Unpack[Pix2StructImageProcessorKwargs],
) -> BatchFeature:
r"""
header_text (`Union[str, list[str]]`, *optional*):
Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
"""
return super().preprocess(images, header_text=header_text, **kwargs) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 11, "code_loc": 1, "docstring_loc": 4, "function_name": "preprocess", "class_name": "Pix2StructImageProcessorFast", "qualname": "Pix2StructImageProcessorFast.preprocess", "file_path": "src/transformers/models/pix2struct/image_processing_pix2struct_fast.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-files/tests/test_upload_cache.py:TestCachedUpload.test_is_expired_no_expiry | # Context:
from datetime import datetime, timedelta, timezone
from crewai_files.cache.upload_cache import CachedUpload, UploadCache
class TestUploadCache: ...
class TestCachedUpload:
def test_cached_upload_creation(self): ...
def test_is_expired_false(self): ...
def test_is_expired_true(self): ...
# Task:
Write a Python test method `test_is_expired_no_expiry` in test class `TestCachedUpload` to test is_expired returns False when no expiry set.
Module under test: datetime, crewai_files, crewai_files.cache.upload_cache | def test_is_expired_no_expiry(self):
"""Test is_expired returns False when no expiry set."""
cached = CachedUpload(
file_id="file-123",
provider="anthropic",
file_uri=None,
content_type="image/png",
uploaded_at=datetime.now(timezone.utc),
expires_at=None,
)
assert cached.is_expired() is False | test | 0 | {"function_name": "test_is_expired_no_expiry", "class_name": "TestCachedUpload", "qualname": "TestCachedUpload.test_is_expired_no_expiry", "file_path": "lib/crewai-files/tests/test_upload_cache.py", "repo_id": "crewAIInc/crewAI", "loc": 12, "tested_modules": ["datetime", "crewai_files", "crewai_files.cache.upload_cache"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_sdk_api/test_agent_management/test_agent_crud_unit.py:test_delete_agent_success_and_error | # Context:
import pytest
from ragflow_sdk import RAGFlow
class _DummyResponse: ...
def auth(): ...
def set_tenant_info(): ...
def test_list_agents_success_and_error(monkeypatch): ...
def test_create_agent_payload_and_error(monkeypatch): ...
def test_update_agent_payload_matrix_and_error(monkeypatch): ...
def test_agent_and_dsl_default_initialization(): ...
def test_agent_session_methods_success_and_error_paths(monkeypatch): ...
# Task:
Write a Python test function `test_delete_agent_success_and_error` to verify the behavior of `delete_agent_success_and_error`.
Module under test: ragflow_sdk, ragflow_sdk.modules.agent, ragflow_sdk.modules.session | def test_delete_agent_success_and_error(monkeypatch):
client = RAGFlow("token", "http://localhost:9380")
calls = []
def _ok_delete(path, json):
calls.append((path, json))
return _DummyResponse({"code": 0, "message": "ok"})
monkeypatch.setattr(client, "delete", _ok_delete)
client.delete_agent("agent-1")
assert calls[-1] == ("/agents/agent-1", {})
monkeypatch.setattr(client, "delete", lambda *_args, **_kwargs: _DummyResponse({"code": 1, "message": "delete boom"}))
with pytest.raises(Exception) as exception_info:
client.delete_agent("agent-1")
assert "delete boom" in str(exception_info.value), str(exception_info.value) | test | 1 | {"function_name": "test_delete_agent_success_and_error", "class_name": null, "qualname": "test_delete_agent_success_and_error", "file_path": "test/testcases/test_sdk_api/test_agent_management/test_agent_crud_unit.py", "repo_id": "infiniflow/ragflow", "loc": 16, "tested_modules": ["ragflow_sdk", "ragflow_sdk.modules.agent", "ragflow_sdk.modules.session"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-files/tests/test_file_url.py:TestNormalizeSource.test_normalize_file_url_passthrough | # Context:
from crewai_files import FileBytes, FileUrl, ImageFile
from crewai_files.core.sources import FilePath, _normalize_source
class TestFileUrl: ...
class TestResolverUrlHandling: ...
class TestImageFileWithUrl: ...
class TestNormalizeSource:
def test_normalize_url_string(self): ...
def test_normalize_http_url_string(self): ...
def test_normalize_file_path_string(self, tmp_path): ...
def test_normalize_relative_path_is_not_url(self): ...
# Task:
Write a Python test method `test_normalize_file_url_passthrough` in test class `TestNormalizeSource` to test that FileUrl instances pass through unchanged.
Module under test: crewai_files, crewai_files.core.resolved, crewai_files.core.sources | def test_normalize_file_url_passthrough(self):
"""Test that FileUrl instances pass through unchanged."""
original = FileUrl(url="https://example.com/image.png")
result = _normalize_source(original)
assert result is original | test | 0 | {"function_name": "test_normalize_file_url_passthrough", "class_name": "TestNormalizeSource", "qualname": "TestNormalizeSource.test_normalize_file_url_passthrough", "file_path": "lib/crewai-files/tests/test_file_url.py", "repo_id": "crewAIInc/crewAI", "loc": 6, "tested_modules": ["crewai_files", "crewai_files.core.resolved", "crewai_files.core.sources", "crewai_files.resolution.resolver"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_files.py:TestFilePath.test_raises_for_missing_file | # Context:
from pathlib import Path
import pytest
from crewai_files import (
AudioFile,
File,
FileBytes,
FilePath,
FileSource,
FileStream,
ImageFile,
PDFFile,
TextFile,
VideoFile,
normalize_input_files,
wrap_file_source,
)
class TestDetectContentType: ...
class TestFileBytes: ...
class TestFileStream: ...
class TestTypedFileWrappers: ...
class TestWrapFileSource: ...
class TestNormalizeInputFiles: ...
class TestGenericFile: ...
class TestFilePath:
def test_create_from_existing_file(self, tmp_path: Path) -> None: ...
def test_content_is_cached(self, tmp_path: Path) -> None: ...
def test_raises_for_directory(self, tmp_path: Path) -> None: ...
def test_content_type_detection(self, tmp_path: Path) -> None: ...
# Task:
Write a Python test method `test_raises_for_missing_file` in test class `TestFilePath` to test that FilePath raises for non-existent files.
Module under test: pathlib, crewai_files, crewai_files.core.sources | def test_raises_for_missing_file(self, tmp_path: Path) -> None:
"""Test that FilePath raises for non-existent files."""
with pytest.raises(ValueError, match="File not found"):
FilePath(path=tmp_path / "nonexistent.txt") | test | 0 | {"function_name": "test_raises_for_missing_file", "class_name": "TestFilePath", "qualname": "TestFilePath.test_raises_for_missing_file", "file_path": "lib/crewai/tests/utilities/test_files.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["pathlib", "crewai_files", "crewai_files.core.sources"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/v1/core/kv_cache_coordinator.py:KVCacheCoordinator:class_doc | Write a class-level docstring for `KVCacheCoordinator` (inherits from ABC) which has methods: `__init__`, `get_num_blocks_to_allocate`, `allocate_new_computed_blocks`, `allocate_new_blocks`, `cache_blocks`. | Coordinate the KV cache of different KV cache groups. | documentation | 1 | {"doc_type": "class", "class_name": "KVCacheCoordinator", "file_path": "vllm/v1/core/kv_cache_coordinator.py", "repo_id": "vllm-project/vllm", "char_length": 53, "methods": ["__init__", "get_num_blocks_to_allocate", "allocate_new_computed_blocks", "allocate_new_blocks", "cache_blocks", "free", "get_num_common_prefix_blocks", "remove_skipped_blocks", "get_blocks", "find_longest_cache_hit"]} |
browser-use/browser-use:tests/ci/browser/test_navigation.py:TestNavigationEdgeCases.test_recovery_after_navigation_error | # Context:
import asyncio
import pytest
from browser_use.agent.service import Agent
from tests.ci.conftest import create_mock_llm
def http_server(): ...
def base_url(http_server): ...
async def browser_session(): ...
class TestNavigationEdgeCases:
async def test_broken_page_navigation(self, browser_session, base_url): ...
async def test_slow_loading_page(self, browser_session, base_url): ...
async def test_nonexisting_page_404(self, browser_session, base_url): ...
async def test_nonexisting_domain(self, browser_session): ...
# Task:
Write a Python test method `test_recovery_after_navigation_error` in test class `TestNavigationEdgeCases` to test that agent can recover and navigate to valid page after encountering error.
Module under test: werkzeug, browser_use.agent.service, browser_use.browser | async def test_recovery_after_navigation_error(self, browser_session, base_url):
"""Test that agent can recover and navigate to valid page after encountering error."""
actions = [
f"""
{{
"thinking": "First, I'll try the broken page",
"evaluation_previous_goal": "Starting task",
"memory": "Navigating to broken page",
"next_goal": "Navigate to broken page first",
"action": [
{{
"navigate": {{
"url": "{base_url}/broken"
}}
}}
]
}}
""",
f"""
{{
"thinking": "That page was broken, let me try a valid page now",
"evaluation_previous_goal": "Broken page loaded",
"memory": "Now navigating to valid page",
"next_goal": "Navigate to valid page",
"action": [
{{
"navigate": {{
"url": "{base_url}/valid"
}}
}}
]
}}
""",
"""
{
"thinking": "The valid page loaded successfully after the broken one",
"evaluation_previous_goal": "Valid page loaded",
"memory": "Successfully recovered from error",
"next_goal": "Complete task",
"action": [
{
"done": {
"text": "Successfully navigated to valid page after broken page",
"success": true
}
}
]
}
""",
]
mock_llm = create_mock_llm(actions=actions)
agent = Agent(
task=f'First navigate to {base_url}/broken, then navigate to {base_url}/valid',
llm=mock_llm,
browser_session=browser_session,
)
# Run with timeout - should complete within 2 minutes
try:
history = await asyncio.wait_for(agent.run(max_steps=3), timeout=120)
assert len(history) >= 2, 'Agent should have completed at least 2 steps (broken -> valid)'
# Verify final page is the valid one
final_url = await browser_session.get_current_page_url()
assert final_url.endswith('/valid'), f'Final URL should be /valid, got {final_url}'
# Verify agent completed successfully
final_result = history.final_result()
assert final_result is not None, 'Agent should return a final result'
except TimeoutError:
pytest.fail('Test timed out after 2 minutes - agent could not recover from broken page') | test | 0 | {"function_name": "test_recovery_after_navigation_error", "class_name": "TestNavigationEdgeCases", "qualname": "TestNavigationEdgeCases.test_recovery_after_navigation_error", "file_path": "tests/ci/browser/test_navigation.py", "repo_id": "browser-use/browser-use", "loc": 74, "tested_modules": ["werkzeug", "browser_use.agent.service", "browser_use.browser", "browser_use.browser.profile", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"} |
ManimCommunity/manim:scripts/release.py:get_release_body | # Context:
def run_gh(args: Sequence[str], check: bool, suppress_errors: bool) -> subprocess.CompletedProcess[str]: ...
def get_release_tags() -> list[str]: ...
def get_release_date(tag: str) -> str | None: ...
def generate_release_notes(head_tag: str, base_tag: str) -> str: ...
def normalize_tag(tag: str) -> str: ...
def version_from_tag(tag: str) -> str: ...
def parse_version(version: str) -> tuple[int, ...]: ...
def version_gte(version: str, min_version: str) -> bool: ...
def convert_to_myst(body: str) -> str: ...
def format_changelog(version: str, body: str, date: str | None, title: str | None) -> str: ...
def get_existing_versions() -> set[str]: ...
def save_changelog(version: str, content: str) -> Path: ...
def update_citation(version: str, date: str | None) -> Path: ...
def cli(ctx: click.Context, dry_run: bool) -> None: ...
def changelog(ctx: click.Context, base: str, version: str, head: str, title: str | None, also_update_citation: bool) -> None: ...
def citation(ctx: click.Context, version: str, date: str | None) -> None: ...
def fetch_releases(ctx: click.Context, tag: str | None, min_version: str, force: bool) -> None: ...
def _fetch_single_release(tag: str, version: str, prev_tag: str | None) -> bool: ...
def main() -> None: ...
# Task:
Write a Python function `get_release_body` to get the release body for a published release.
Parameters: tag: str
Returns: str | None | def get_release_body(tag: str) -> str | None:
"""Get the release body for a published release."""
result = run_gh(
["release", "view", tag, "--repo", REPO, "--json", "body", "--jq", ".body"],
check=False,
suppress_errors=True,
)
if result.returncode != 0:
return None
return result.stdout.strip() or None | function_simple | 1 | {"cognitive_complexity": 2, "loc": 10, "code_loc": 8, "docstring_loc": 1, "function_name": "get_release_body", "class_name": null, "qualname": "get_release_body", "file_path": "scripts/release.py", "repo_id": "ManimCommunity/manim", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:tests/ci/test_coordinate_clicking.py:TestCoordinateClickingTools.test_default_click_schema_has_only_index | # Context:
from browser_use.tools.service import Tools
class TestCoordinateClickingModelDetection: ...
class TestCoordinateClickingWithPassedTools: ...
class TestCoordinateClickingTools:
def test_default_coordinate_clicking_disabled(self): ...
def test_default_uses_index_only_action(self): ...
def test_enable_coordinate_clicking(self): ...
def test_enabled_click_schema_has_coordinates(self): ...
def test_disable_coordinate_clicking(self): ...
def test_set_coordinate_clicking_idempotent(self): ...
def test_schema_title_consistent(self): ...
# Task:
Write a Python test method `test_default_click_schema_has_only_index` in test class `TestCoordinateClickingTools` to default click action schema should only have index property.
Module under test: browser_use.tools.service, browser_use.tools.views | def test_default_click_schema_has_only_index(self):
"""Default click action schema should only have index property."""
tools = Tools()
click_action = tools.registry.registry.actions.get('click')
assert click_action is not None
schema = click_action.param_model.model_json_schema()
assert 'index' in schema['properties']
assert 'coordinate_x' not in schema['properties']
assert 'coordinate_y' not in schema['properties'] | test | 0 | {"function_name": "test_default_click_schema_has_only_index", "class_name": "TestCoordinateClickingTools", "qualname": "TestCoordinateClickingTools.test_default_click_schema_has_only_index", "file_path": "tests/ci/test_coordinate_clicking.py", "repo_id": "browser-use/browser-use", "loc": 11, "tested_modules": ["browser_use.tools.service", "browser_use.tools.views"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/expressions/test_predicate.py:module_doc | Write a module-level docstring for the Python module `test_predicate` which contains class `TestPredicateIntegration`. | Integration tests for predicate expression operations.
These tests require Ray and test end-to-end predicate expression evaluation. | documentation | 0 | {"doc_type": "module", "module_name": "test_predicate", "file_path": "python/ray/data/tests/expressions/test_predicate.py", "repo_id": "ray-project/ray", "char_length": 132} |
huggingface/transformers:tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py:DeepseekVLHybridIntegrationTest.test_model_text_generation | # Context:
from transformers import (
AutoProcessor,
DeepseekVLHybridConfig,
DeepseekVLHybridForConditionalGeneration,
DeepseekVLHybridModel,
is_torch_available,
)
from transformers.testing_utils import (
require_torch,
require_torch_accelerator,
slow,
torch_device,
)
class DeepseekVLHybridModelTester: ...
class DeepseekVLHybridModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): ...
class DeepseekVLHybridIntegrationTest(unittest.TestCase):
def setUp(self): ...
def test_model_text_generation_batched(self): ...
def test_model_text_generation_with_multi_image(self): ...
# Task:
Write a Python test method `test_model_text_generation` in test class `DeepseekVLHybridIntegrationTest` to verify the behavior of `model_text_generation`.
Module under test: transformers, transformers.testing_utils, generation.test_utils | def test_model_text_generation(self):
model = DeepseekVLHybridForConditionalGeneration.from_pretrained(
self.model_id, dtype="auto", device_map="auto"
)
model.to(torch_device)
model.eval()
processor = AutoProcessor.from_pretrained(self.model_id)
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
},
{"type": "text", "text": "Describe this image."},
],
}
]
EXPECTED_TEXT = 'You are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.\n\nUser: Describe this image.\n\nAssistant:The image depicts a fluffy, light brown animal with a white face and black markings on its face and' # fmt: skip
inputs = processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
)
inputs = inputs.to(model.device, dtype=model.dtype)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
text = processor.decode(output[0], skip_special_tokens=True)
self.assertEqual(
text,
EXPECTED_TEXT,
) | test | 0 | {"function_name": "test_model_text_generation", "class_name": "DeepseekVLHybridIntegrationTest", "qualname": "DeepseekVLHybridIntegrationTest.test_model_text_generation", "file_path": "tests/models/deepseek_vl_hybrid/test_modeling_deepseek_vl_hybrid.py", "repo_id": "huggingface/transformers", "loc": 33, "tested_modules": ["transformers", "transformers.testing_utils", "generation.test_utils", "test_configuration_common", "test_modeling_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
apache/airflow:providers/common/sql/src/airflow/providers/common/sql/decorators/analytics.py:_AnalyticsDecoratedOperator.execute | # Context:
from typing import TYPE_CHECKING, Any, ClassVar
from airflow.providers.common.compat.sdk import (
AIRFLOW_V_3_0_PLUS,
DecoratedOperator,
TaskDecorator,
context_merge,
task_decorator_factory,
)
from airflow.providers.common.sql.operators.analytics import AnalyticsOperator
from airflow.utils.operator_helpers import determine_kwargs
from airflow.providers.common.compat.sdk import Context
def analytics_task(python_callable, **kwargs) -> TaskDecorator: ...
class _AnalyticsDecoratedOperator(DecoratedOperator, AnalyticsOperator):
def __init__(
self,
python_callable: Callable,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
queries=SET_DURING_EXECUTION,
**kwargs,
)
def xcom_push(self) -> bool: ...
def xcom_push(self, value: bool) -> None: ...
# Task:
Write a Python method `execute` for the class `_AnalyticsDecoratedOperator` to build the SQL and execute the generated query (or queries).
Parameters: context: Context
Returns: Any | def execute(self, context: Context) -> Any:
"""
Build the SQL and execute the generated query (or queries).
:param context: Airflow context.
:return: Any
"""
context_merge(context, self.op_kwargs)
kwargs = determine_kwargs(self.python_callable, self.op_args, context)
# Set the queries using the Python callable
result = self.python_callable(*self.op_args, **kwargs)
# Only non-empty strings and non-empty lists of non-empty strings are acceptable return types
if (
not isinstance(result, (str, list))
or (isinstance(result, str) and not result.strip())
or (
isinstance(result, list)
and (not result or not all(isinstance(s, str) and s.strip() for s in result))
)
):
raise TypeError(
"The returned value from the @task.analytics callable must be a non-empty string "
"or a non-empty list of non-empty strings."
)
# AnalyticsOperator expects queries as a list of strings
self.queries = [result] if isinstance(result, str) else result
self.render_template_fields(context)
return AnalyticsOperator.execute(self, context) | function_complex | 1 | {"cognitive_complexity": 11, "loc": 33, "code_loc": 18, "docstring_loc": 6, "function_name": "execute", "class_name": "_AnalyticsDecoratedOperator", "qualname": "_AnalyticsDecoratedOperator.execute", "file_path": "providers/common/sql/src/airflow/providers/common/sql/decorators/analytics.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
666ghj/BettaFish:MindSpider/schema/db_manager.py:DatabaseManager.show_recent_data | # Context:
from sqlalchemy import create_engine, text, inspect
from datetime import datetime, timedelta
from loguru import logger
from datetime import date, timedelta
def main(): ...
class DatabaseManager:
def __init__(self):
self.engine: Engine = None
self.connect()
def connect(self): ...
def close(self): ...
def show_tables(self): ...
def show_statistics(self): ...
def cleanup_old_data(self, days, dry_run): ...
# Task:
Write a Python method `show_recent_data` for the class `DatabaseManager` to 显示最近几天的数据.
Parameters: days | def show_recent_data(self, days=7):
"""显示最近几天的数据"""
data_recent_message = ""
data_recent_message += "\n" + "=" * 60
data_recent_message += "最近" + str(days) + "天的数据"
data_recent_message += "=" * 60
from datetime import date, timedelta
start_date = date.today() - timedelta(days=days)
# 最近的新闻
with self.engine.connect() as conn:
news_data = conn.execute(
text(
"""
SELECT crawl_date, COUNT(*) as news_count, COUNT(DISTINCT source_platform) as platforms
FROM daily_news
WHERE crawl_date >= :start_date
GROUP BY crawl_date
ORDER BY crawl_date DESC
"""
),
{"start_date": start_date},
).all()
if news_data:
data_recent_message += "每日新闻统计:"
data_recent_message += "\n"
for date, count, platforms in news_data:
data_recent_message += f" {date}: {count} 条新闻, {platforms} 个平台"
data_recent_message += "\n"
# 最近的话题
with self.engine.connect() as conn:
topic_data = conn.execute(
text(
"""
SELECT extract_date, COUNT(*) as topic_count
FROM daily_topics
WHERE extract_date >= :start_date
GROUP BY extract_date
ORDER BY extract_date DESC
"""
),
{"start_date": start_date},
).all()
if topic_data:
data_recent_message += "每日话题统计:"
data_recent_message += "\n"
for date, count in topic_data:
data_recent_message += f" {date}: {count} 个话题"
data_recent_message += "\n"
logger.info(data_recent_message) | function_complex | 1 | {"cognitive_complexity": 6, "loc": 51, "code_loc": 45, "docstring_loc": 1, "function_name": "show_recent_data", "class_name": "DatabaseManager", "qualname": "DatabaseManager.show_recent_data", "file_path": "MindSpider/schema/db_manager.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:python/ray/serve/tests/test_deploy_app_2.py:test_deploy_one_app_failed | # Context:
import httpx
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve.schema import (
ApplicationStatus,
ServeApplicationSchema,
ServeDeploySchema,
ServeInstanceDetails,
)
def check_log_file(log_file: str, expected_regex: list): ...
def check_deployments_dead(deployment_ids: List[DeploymentID]): ...
class TestDeploywithLoggingConfig: ...
def test_deploy_with_no_applications(serve_instance): ...
def test_deployments_not_listed_in_config(serve_instance): ...
def test_redeploy_old_config_after_failed_deployment(serve_instance, rebuild): ...
def test_deploy_does_not_affect_dynamic_apps(serve_instance): ...
def test_change_route_prefix(serve_instance): ...
def test_num_replicas_auto_api(serve_instance): ...
def test_num_replicas_auto_basic(serve_instance): ...
def test_deploy_with_route_prefix_conflict(serve_instance): ...
def test_update_config_graceful_shutdown_timeout(serve_instance): ...
# Task:
Write a Python test function `test_deploy_one_app_failed` to deploy two applications with separate runtime envs.
Module under test: copy, functools, typing | def test_deploy_one_app_failed(serve_instance):
"""Deploy two applications with separate runtime envs."""
client = serve_instance
world_import_path = "ray.serve.tests.test_config_files.world.DagNode"
fail_import_path = "ray.serve.tests.test_config_files.fail.node"
config_template = {
"applications": [
{
"name": "app1",
"route_prefix": "/app1",
"import_path": world_import_path,
},
{
"name": "app2",
"route_prefix": "/app2",
"import_path": fail_import_path,
},
],
}
client.deploy_apps(ServeDeploySchema(**config_template))
wait_for_condition(
lambda: httpx.post("http://localhost:8000/app1").text == "wonderful world"
)
wait_for_condition(
lambda: serve.status().applications["app1"].status == ApplicationStatus.RUNNING
and serve.status().applications["app2"].status
== ApplicationStatus.DEPLOY_FAILED
)
# Ensure the request doesn't hang and actually returns a 503 error.
# The timeout is there to prevent the test from hanging and blocking
# the test suite if it does fail.
r = httpx.post("http://localhost:8000/app2", timeout=10)
assert r.status_code == 503 and "unavailable" in r.text.lower() | test | 0 | {"function_name": "test_deploy_one_app_failed", "class_name": null, "qualname": "test_deploy_one_app_failed", "file_path": "python/ray/serve/tests/test_deploy_app_2.py", "repo_id": "ray-project/ray", "loc": 37, "tested_modules": ["copy", "functools", "typing", "ray", "ray._common.test_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:devel-common/src/tests_common/test_utils/dag.py:sync_dags_to_db | # Context:
from collections.abc import Collection, Sequence
from airflow.utils.session import NEW_SESSION, provide_session
from tests_common.test_utils.compat import DagSerialization, SerializedDAG
from sqlalchemy.orm import Session
from airflow.sdk import DAG
from airflow.models.dagbundle import DagBundleModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.serialization.serialized_objects import LazyDeserializedDAG
def create_scheduler_dag(dag: DAG | SerializedDAG) -> SerializedDAG: ...
def sync_dag_to_db(dag: DAG, bundle_name: str, session: Session) -> SerializedDAG: ...
# Task:
Write a Python function `sync_dags_to_db` to sync dags into the database.
Parameters: dags: Collection[DAG], bundle_name: str, session: Session
Returns: Sequence[SerializedDAG] | def sync_dags_to_db(
dags: Collection[DAG],
bundle_name: str = "testing",
session: Session = NEW_SESSION,
) -> Sequence[SerializedDAG]:
"""
Sync dags into the database.
This serializes dags and saves the results to the database. The serialized
(scheduler-oriented) dags are returned. If the input is ordered (e.g. a list),
the returned sequence is guaranteed to be in the same order.
"""
from airflow.models.dagbundle import DagBundleModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.serialization.serialized_objects import LazyDeserializedDAG
session.merge(DagBundleModel(name=bundle_name))
session.flush()
def _write_dag(dag: DAG) -> SerializedDAG:
data = DagSerialization.to_dict(dag)
SerializedDagModel.write_dag(LazyDeserializedDAG(data=data), bundle_name, session=session)
return DagSerialization.from_dict(data)
SerializedDAG.bulk_write_to_db(bundle_name, None, dags, session=session)
scheduler_dags = [_write_dag(dag) for dag in dags]
session.flush()
return scheduler_dags | function_simple | 1 | {"cognitive_complexity": 0, "loc": 28, "code_loc": 13, "docstring_loc": 7, "function_name": "sync_dags_to_db", "class_name": null, "qualname": "sync_dags_to_db", "file_path": "devel-common/src/tests_common/test_utils/dag.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py:ToolCallLimitMiddleware.__init__ | # Context:
class ToolCallLimitState(AgentState[ResponseT]): ...
def _build_tool_message_content(tool_name: str | None) -> str: ...
def _build_final_ai_message_content(thread_count: int, run_count: int, thread_limit: int | None, run_limit: int | None, tool_name: str | None) -> str: ...
class ToolCallLimitExceededError(Exception): ...
class ToolCallLimitMiddleware(AgentMiddleware[ToolCallLimitState[ResponseT], ContextT, ResponseT]):
state_schema = ToolCallLimitState # type: ignore[assignment]
def name(self) -> str: ...
def _would_exceed_limit(self, thread_count: int, run_count: int) -> bool: ...
def _matches_tool_filter(self, tool_call: ToolCall) -> bool: ...
def _separate_tool_calls(self, tool_calls: list[ToolCall], thread_count: int, run_count: int) -> tuple[list[ToolCall], list[ToolCall], int, int]: ...
def after_model(self, state: ToolCallLimitState[ResponseT], runtime: Runtime[ContextT]) -> dict[str, Any] | None: ...
async def aafter_model(self, state: ToolCallLimitState[ResponseT], runtime: Runtime[ContextT]) -> dict[str, Any] | None: ...
# Task:
Write a Python method `__init__` for the class `ToolCallLimitMiddleware` to initialize the tool call limit middleware.
Returns: None | def __init__(
self,
*,
tool_name: str | None = None,
thread_limit: int | None = None,
run_limit: int | None = None,
exit_behavior: ExitBehavior = "continue",
) -> None:
"""Initialize the tool call limit middleware.
Args:
tool_name: Name of the specific tool to limit. If `None`, limits apply
to all tools.
thread_limit: Maximum number of tool calls allowed per thread.
`None` means no limit.
run_limit: Maximum number of tool calls allowed per run.
`None` means no limit.
exit_behavior: How to handle when limits are exceeded.
- `'continue'`: Block exceeded tools with error messages, let other
tools continue. Model decides when to end.
- `'error'`: Raise a `ToolCallLimitExceededError` exception
- `'end'`: Stop execution immediately with a `ToolMessage` + AI message
for the single tool call that exceeded the limit. Raises
`NotImplementedError` if there are multiple parallel tool
calls to other tools or multiple pending tool calls.
Raises:
ValueError: If both limits are `None`, if `exit_behavior` is invalid,
or if `run_limit` exceeds `thread_limit`.
"""
super().__init__()
if thread_limit is None and run_limit is None:
msg = "At least one limit must be specified (thread_limit or run_limit)"
raise ValueError(msg)
valid_behaviors = ("continue", "error", "end")
if exit_behavior not in valid_behaviors:
msg = f"Invalid exit_behavior: {exit_behavior!r}. Must be one of {valid_behaviors}"
raise ValueError(msg)
if thread_limit is not None and run_limit is not None and run_limit > thread_limit:
msg = (
f"run_limit ({run_limit}) cannot exceed thread_limit ({thread_limit}). "
"The run limit should be less than or equal to the thread limit."
)
raise ValueError(msg)
self.tool_name = tool_name
self.thread_limit = thread_limit
self.run_limit = run_limit
self.exit_behavior = exit_behavior | function_simple | 1 | {"cognitive_complexity": 5, "loc": 53, "code_loc": 18, "docstring_loc": 23, "function_name": "__init__", "class_name": "ToolCallLimitMiddleware", "qualname": "ToolCallLimitMiddleware.__init__", "file_path": "libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:tests/ci/test_variable_detection.py:test_detect_country_from_attributes | # Context:
from browser_use.agent.variable_detector import (
_detect_from_attributes,
_detect_from_value_pattern,
_detect_variable_type,
_ensure_unique_name,
detect_variables_in_history,
)
def create_test_element(attributes: dict[str, str] | None) -> DOMInteractedElement: ...
def create_mock_history(actions_with_elements: list[tuple[dict, DOMInteractedElement | None]]): ...
def test_detect_email_from_attributes(): ...
def test_detect_email_from_pattern(): ...
def test_detect_phone_from_attributes(): ...
def test_detect_phone_from_pattern(): ...
def test_detect_date_from_attributes(): ...
def test_detect_date_from_pattern(): ...
def test_detect_first_name_from_attributes(): ...
def test_detect_first_name_from_pattern(): ...
def test_detect_full_name_from_pattern(): ...
def test_detect_address_from_attributes(): ...
def test_detect_billing_address_from_attributes(): ...
def test_detect_comment_from_attributes(): ...
def test_detect_city_from_attributes(): ...
def test_detect_state_from_attributes(): ...
def test_detect_zip_code_from_attributes(): ...
def test_detect_company_from_attributes(): ...
def test_detect_number_from_pattern(): ...
def test_no_detection_for_random_text(): ...
def test_no_detection_for_short_text(): ...
def test_element_attributes_take_priority_over_pattern(): ...
def test_pattern_matching_used_when_no_element(): ...
def test_ensure_unique_name_no_conflict(): ...
def test_ensure_unique_name_with_conflict(): ...
def test_ensure_unique_name_with_multiple_conflicts(): ...
def test_detect_variables_in_empty_history(): ...
def test_detect_variables_in_history_with_input_action(): ...
def test_detect_variables_skips_duplicate_values(): ...
def test_detect_variables_handles_missing_state(): ...
def test_detect_variables_handles_missing_interacted_element(): ...
def test_detect_variables_multiple_types(): ...
# Task:
Write a Python test function `test_detect_country_from_attributes` to test country detection from element attributes.
Module under test: browser_use.agent.variable_detector, browser_use.agent.views, browser_use.dom.views | def test_detect_country_from_attributes():
"""Test country detection from element attributes"""
attributes = {'name': 'country', 'id': 'country-select'}
result = _detect_from_attributes(attributes)
assert result is not None
var_name, var_format = result
assert var_name == 'country'
assert var_format is None | test | 0 | {"function_name": "test_detect_country_from_attributes", "class_name": null, "qualname": "test_detect_country_from_attributes", "file_path": "tests/ci/test_variable_detection.py", "repo_id": "browser-use/browser-use", "loc": 9, "tested_modules": ["browser_use.agent.variable_detector", "browser_use.agent.views", "browser_use.dom.views", "types", "types"], "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_web_reader.py:test_search_mode_with_sdk_object_lists | # Context:
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
def _install_fake_firecrawl(FirecrawlClass) -> None: ...
class _Link: ...
class _MapResponse: ...
def test_class_name_returns_expected(): ...
def test_init_uses_api_key_and_url(): ...
def test_scrape_mode_with_dict_response_includes_text_and_metadata(): ...
def test_scrape_mode_with_object_response_includes_text_and_metadata(): ...
def test_crawl_mode_strips_maxDepth_and_maps_docs(): ...
def test_map_mode_success_yields_link_documents(): ...
def test_map_mode_error_returns_single_error_document(): ...
def test_search_mode_with_dict_success_and_markdown_fallbacks(): ...
def test_search_mode_with_dict_failure_returns_error_document(): ...
def test_extract_mode_success_with_sources_and_status(): ...
def test_extract_mode_success_no_data_yields_notice(): ...
def test_extract_mode_failure_returns_error_document(): ...
def test_invalid_mode_raises_value_error(): ...
def test_argument_validation_requires_exactly_one_of_url_query_urls(): ...
# Task:
Write a Python test function `test_search_mode_with_sdk_object_lists` to verify the behavior of `search_mode_with_sdk_object_lists`.
Module under test: llama_index.readers.web.firecrawl_web.base | def test_search_mode_with_sdk_object_lists():
class Item:
def __init__(self, url: str, title: str, description: str) -> None:
self.url = url
self.title = title
self.description = description
self.rank = 7
class SearchResp:
def __init__(self):
self.web = [Item("https://a", "T1", "D1")]
self.news = []
self.images = [Item("https://img", "", "image desc")]
class Firecrawl:
def __init__(self, *_, **__):
pass
def search(self, query: str, **kwargs):
return SearchResp()
_install_fake_firecrawl(Firecrawl)
reader = FireCrawlWebReader(api_key="k", mode="search")
docs = reader.load_data(query="q")
assert len(docs) == 2
types = {d.metadata.get("search_type") for d in docs}
assert types == {"web", "images"}
assert any(d.metadata.get("rank") == 7 for d in docs) | test | 1 | {"function_name": "test_search_mode_with_sdk_object_lists", "class_name": null, "qualname": "test_search_mode_with_sdk_object_lists", "file_path": "llama-index-integrations/readers/llama-index-readers-web/tests/test_firecrawl_web_reader.py", "repo_id": "run-llama/llama_index", "loc": 28, "tested_modules": ["llama_index.readers.web.firecrawl_web.base"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/superglue/image_processing_superglue_fast.py:is_grayscale | # Context:
import torch
def _is_valid_image(image): ...
def flatten_pair_images(images): ...
def convert_to_grayscale(image: 'torch.Tensor') -> 'torch.Tensor': ...
class SuperGlueImageProcessorFast(BaseImageProcessorFast): ...
# Task:
Write a Python function `is_grayscale` to check if an image is grayscale (all RGB channels are identical).
Parameters: image: 'torch.Tensor' | def is_grayscale(
image: "torch.Tensor",
):
"""Checks if an image is grayscale (all RGB channels are identical)."""
if image.ndim < 3 or image.shape[0 if image.ndim == 3 else 1] == 1:
return True
return torch.all(image[..., 0, :, :] == image[..., 1, :, :]) and torch.all(
image[..., 1, :, :] == image[..., 2, :, :]
) | function_simple | 0 | {"cognitive_complexity": 3, "loc": 9, "code_loc": 5, "docstring_loc": 1, "function_name": "is_grayscale", "class_name": null, "qualname": "is_grayscale", "file_path": "src/transformers/models/superglue/image_processing_superglue_fast.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "plib_runnable"} |
docling-project/docling:docling/datamodel/vlm_engine_options.py:ApiVlmEngineOptions.__init__ | # Context:
from docling.models.inference_engines.vlm.base import (
BaseVlmEngineOptions,
VlmEngineType,
)
class AutoInlineVlmEngineOptions(BaseVlmEngineOptions): ...
class TransformersVlmEngineOptions(BaseVlmEngineOptions): ...
class MlxVlmEngineOptions(BaseVlmEngineOptions): ...
class VllmVlmEngineOptions(BaseVlmEngineOptions): ...
class ApiVlmEngineOptions(BaseVlmEngineOptions):
# Task:
Write a Python method `__init__` for the class `ApiVlmEngineOptions` to initialize with default URLs based on engine type. | def __init__(self, **data):
"""Initialize with default URLs based on engine type."""
if "engine_type" in data and "url" not in data:
engine_type = data["engine_type"]
if engine_type == VlmEngineType.API_OLLAMA:
data["url"] = "http://localhost:11434/v1/chat/completions"
elif engine_type == VlmEngineType.API_LMSTUDIO:
data["url"] = "http://localhost:1234/v1/chat/completions"
elif engine_type == VlmEngineType.API_OPENAI:
data["url"] = "https://api.openai.com/v1/chat/completions"
super().__init__(**data) | function_complex | 1 | {"cognitive_complexity": 6, "loc": 12, "code_loc": 9, "docstring_loc": 1, "function_name": "__init__", "class_name": "ApiVlmEngineOptions", "qualname": "ApiVlmEngineOptions.__init__", "file_path": "docling/datamodel/vlm_engine_options.py", "repo_id": "docling-project/docling", "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:comfy_api_nodes/util/upload_helpers.py:upload_3d_model_to_comfyapi | # Context:
import uuid
from comfy_api.latest import IO, Input, Types
class UploadRequest(BaseModel): ...
class UploadResponse(BaseModel): ...
async def upload_images_to_comfyapi(cls: type[IO.ComfyNode], image: torch.Tensor | list[torch.Tensor], max_images: int, mime_type: str | None, wait_label: str | None, show_batch_index: bool, total_pixels: int | None) -> list[str]: ...
async def upload_image_to_comfyapi(cls: type[IO.ComfyNode], image: torch.Tensor, mime_type: str | None, wait_label: str | None, total_pixels: int | None) -> str: ...
async def upload_audio_to_comfyapi(cls: type[IO.ComfyNode], audio: Input.Audio, container_format: str, codec_name: str, mime_type: str) -> str: ...
async def upload_video_to_comfyapi(cls: type[IO.ComfyNode], video: Input.Video, container: Types.VideoContainer, codec: Types.VideoCodec, max_duration: int | None, wait_label: str | None) -> str: ...
async def upload_file_to_comfyapi(cls: type[IO.ComfyNode], file_bytes_io: BytesIO, filename: str, upload_mime_type: str | None, wait_label: str | None, progress_origin_ts: float | None) -> str: ...
async def upload_file(cls: type[IO.ComfyNode], upload_url: str, file: BytesIO | str, content_type: str | None, max_retries: int, retry_delay: float, retry_backoff: float, wait_label: str | None, progress_origin_ts: float | None) -> None: ...
def _generate_operation_id(method: str, url: str, attempt: int, op_uuid: str) -> str: ...
# Task:
Write a Python async function `upload_3d_model_to_comfyapi` to upload a 3D model file to ComfyUI API and return its download URL.
Parameters: model_3d: Types.File3D, file_format: str
Returns: str | async def upload_3d_model_to_comfyapi(
cls: type[IO.ComfyNode],
model_3d: Types.File3D,
file_format: str,
) -> str:
"""Uploads a 3D model file to ComfyUI API and returns its download URL."""
return await upload_file_to_comfyapi(
cls,
model_3d.get_data(),
f"{uuid.uuid4()}.{file_format}",
_3D_MIME_TYPES.get(file_format, "application/octet-stream"),
) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 6, "docstring_loc": 1, "function_name": "upload_3d_model_to_comfyapi", "class_name": null, "qualname": "upload_3d_model_to_comfyapi", "file_path": "comfy_api_nodes/util/upload_helpers.py", "repo_id": "Comfy-Org/ComfyUI", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/unit/test_concurrency_solver.py:TestAllocateResources.test_two_ops_different_resource_requirements | # Context:
from ray.data._internal.cluster_autoscaler.concurrency_solver import (
allocate_resources,
compute_optimal_throughput,
)
from ray.data._internal.execution.interfaces import ExecutionResources
class TestComputeOptimalThroughput: ...
class TestAllocateResources:
def test_empty_rates(self): ...
def test_zero_throughput(self): ...
def test_one_op(self): ...
def test_two_ops_different_rates(self): ...
# Task:
Write a Python test method `test_two_ops_different_resource_requirements` in test class `TestAllocateResources` to verify the behavior of `two_ops_different_resource_requirements`.
Module under test: ray.data._internal.cluster_autoscaler.concurrency_solver, ray.data._internal.execution.interfaces | def test_two_ops_different_resource_requirements(self):
result = allocate_resources(
1.0,
rates={"A": 1.0, "B": 1.0},
resource_requirements={
"A": ExecutionResources(cpu=1),
"B": ExecutionResources(cpu=2),
},
)
assert result["A"] == ExecutionResources(cpu=1)
assert result["B"] == ExecutionResources(cpu=2) | test | 0 | {"function_name": "test_two_ops_different_resource_requirements", "class_name": "TestAllocateResources", "qualname": "TestAllocateResources.test_two_ops_different_resource_requirements", "file_path": "python/ray/data/tests/unit/test_concurrency_solver.py", "repo_id": "ray-project/ray", "loc": 11, "tested_modules": ["ray.data._internal.cluster_autoscaler.concurrency_solver", "ray.data._internal.execution.interfaces"], "has_docstring": false, "runnable_level": "plib_runnable"} |
browser-use/browser-use:browser_use/tools/extraction/schema_utils.py:schema_dict_to_pydantic_model | # Context:
from pydantic import BaseModel, ConfigDict, Field, create_model
class _StrictBase(BaseModel): ...
def _check_unsupported(schema: dict) -> None: ...
def _resolve_type(schema: dict, name: str) -> Any: ...
def _build_model(schema: dict, name: str) -> type[BaseModel]: ...
# Task:
Write a Python function `schema_dict_to_pydantic_model` to convert a JSON Schema dict to a runtime Pydantic model.
Parameters: schema: dict
Returns: type[BaseModel] | def schema_dict_to_pydantic_model(schema: dict) -> type[BaseModel]:
"""Convert a JSON Schema dict to a runtime Pydantic model.
The schema must be ``{"type": "object", "properties": {...}, ...}``.
Unsupported keywords ($ref, allOf, anyOf, oneOf, etc.) raise ValueError.
Returns:
A dynamically-created Pydantic BaseModel subclass.
Raises:
ValueError: If the schema is invalid or uses unsupported features.
"""
_check_unsupported(schema)
top_type = schema.get('type')
if top_type != 'object':
raise ValueError(f'Top-level schema must have type "object", got {top_type!r}')
properties = schema.get('properties')
if not properties:
raise ValueError('Top-level schema must have at least one property')
model_name = schema.get('title', 'DynamicExtractionModel')
return _build_model(schema, model_name) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 24, "code_loc": 9, "docstring_loc": 11, "function_name": "schema_dict_to_pydantic_model", "class_name": null, "qualname": "schema_dict_to_pydantic_model", "file_path": "browser_use/tools/extraction/schema_utils.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "file_runnable"} |
vllm-project/vllm:vllm/model_executor/layers/rotary_embedding/xdrope.py:XDRotaryEmbedding.forward_cuda | # Context:
import torch
class XDRotaryEmbedding(DynamicNTKAlphaRotaryEmbedding):
def __init__(
self,
head_size: int,
rotary_dim: int,
max_position_embeddings: int,
base: float,
is_neox_style: bool,
scaling_alpha: float,
dtype: torch.dtype,
xdrope_section: list[int],
) -> None:
self.xdrope_section = xdrope_section
super().__init__(
head_size,
rotary_dim,
max_position_embeddings,
base,
is_neox_style,
scaling_alpha,
dtype,
)
def forward_native(self, positions: torch.Tensor, query: torch.Tensor, key: torch.Tensor | None, offsets: torch.Tensor | None) -> tuple[torch.Tensor, torch.Tensor | None]: ...
def get_next_input_positions(context_len: int, seq_len: int, xd_sections: int) -> list[list[int]]: ...
def get_next_input_positions_tensor(out: np.ndarray, out_offset: int, context_len: int, num_new_tokens: int): ...
# Task:
Write a Python method `forward_cuda` for the class `XDRotaryEmbedding` to provide a PyTorch-native implementation equivalent to forward().
Parameters: positions: torch.Tensor, query: torch.Tensor, key: torch.Tensor | None, offsets: torch.Tensor | None
Returns: tuple[torch.Tensor, torch.Tensor | None] | def forward_cuda(
self,
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor | None = None,
offsets: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
"""PyTorch-native implementation equivalent to forward().
Args:
positions:
[4, num_tokens] (P/W/H/T positions with multimodal inputs)
query: [num_tokens, num_heads * head_size]
key: [num_tokens, num_kv_heads * head_size]
"""
assert positions.ndim == 2
assert key is not None
num_tokens = positions.shape[-1]
cos_sin = self.cos_sin_cache[positions]
cos, sin = cos_sin.chunk(2, dim=-1)
cos = torch.cat(
[m[i] for i, m in enumerate(cos.split(self.xdrope_section, dim=-1))], dim=-1
)
sin = torch.cat(
[m[i] for i, m in enumerate(sin.split(self.xdrope_section, dim=-1))], dim=-1
)
query_shape = query.shape
query = query.view(num_tokens, -1, self.head_size)
query_rot = query[..., : self.rotary_dim]
query_pass = query[..., self.rotary_dim :]
query_rot = self.apply_rotary_emb(
query_rot,
cos,
sin,
)
query = torch.cat((query_rot, query_pass), dim=-1).reshape(query_shape)
key_shape = key.shape
key = key.view(num_tokens, -1, self.head_size)
key_rot = key[..., : self.rotary_dim]
key_pass = key[..., self.rotary_dim :]
key_rot = self.apply_rotary_emb(
key_rot,
cos,
sin,
)
key = torch.cat((key_rot, key_pass), dim=-1).reshape(key_shape)
return query, key | function_simple | 1 | {"cognitive_complexity": 0, "loc": 50, "code_loc": 32, "docstring_loc": 8, "function_name": "forward_cuda", "class_name": "XDRotaryEmbedding", "qualname": "XDRotaryEmbedding.forward_cuda", "file_path": "vllm/model_executor/layers/rotary_embedding/xdrope.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
vllm-project/vllm:vllm/logprobs.py:FlatLogprobs.append | # Context:
class Logprob: ...
def create_prompt_logprobs(flat_logprobs: bool) -> PromptLogprobs: ...
def create_sample_logprobs(flat_logprobs: bool) -> SampleLogprobs: ...
def append_logprobs_for_next_position(request_logprobs: PromptLogprobs | SampleLogprobs, token_ids: list[int], logprobs: list[float], decoded_tokens: Iterable[str | None], rank: int, num_logprobs: int) -> None: ...
class FlatLogprobs(MutableSequence[LogprobsOnePosition | None]):
def append_fast(self, token_ids: list[int], logprobs: list[float], ranks: itertools.chain[int], decoded_tokens: Iterable[str | None]) -> None: ...
def extend(self, logprobs_multi_positions) -> None: ...
def __len__(self) -> int: ...
def __getitem__(self, position: int) -> LogprobsOnePosition: ...
def __getitem__() -> 'FlatLogprobs': ...
def __getitem__(self, index: int | slice): ...
def __setitem__(self, item, value) -> None: ...
def __delitem__(self, item) -> None: ...
def insert(self, index: int, value: dict[int, Logprob] | None) -> None: ...
def __iter__(self) -> Iterator[LogprobsOnePosition]: ...
# Task:
Write a Python method `append` for the class `FlatLogprobs` to append the container with logprobs for the next position.
Parameters: logprobs_one_position: LogprobsOnePosition | None
Returns: None | def append(self, logprobs_one_position: LogprobsOnePosition | None) -> None:
"""Appends the container with logprobs for the next position"""
self.start_indices.append(len(self.logprobs))
if logprobs_one_position:
for token_id, logprob in logprobs_one_position.items():
self.token_ids.append(token_id)
self.logprobs.append(logprob.logprob)
self.ranks.append(logprob.rank)
self.decoded_tokens.append(logprob.decoded_token)
self.end_indices.append(len(self.logprobs)) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 10, "code_loc": 8, "docstring_loc": 1, "function_name": "append", "class_name": "FlatLogprobs", "qualname": "FlatLogprobs.append", "file_path": "vllm/logprobs.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/test_multimodal.py:TestBedrockMultimodal.test_supports_multimodal_claude3 | # Context:
from crewai.llm import LLM
def mock_api_keys(): ...
class TestLiteLLMMultimodal: ...
class TestAnthropicMultimodal: ...
class TestOpenAIMultimodal: ...
class TestGeminiMultimodal: ...
class TestAzureMultimodal: ...
class TestBaseLLMMultimodal: ...
class TestMultipleFilesFormatting: ...
class TestBedrockMultimodal:
def mock_bedrock_env(self): ...
def test_does_not_support_claude2(self) -> None: ...
def test_format_multimodal_content_image(self) -> None: ...
def test_format_multimodal_content_pdf(self) -> None: ...
# Task:
Write a Python test method `test_supports_multimodal_claude3` in test class `TestBedrockMultimodal` to test Bedrock Claude 3 supports multimodal.
Module under test: crewai.llm, crewai_files, crewai.llms.providers.anthropic.completion | def test_supports_multimodal_claude3(self) -> None:
"""Test Bedrock Claude 3 supports multimodal."""
llm = LLM(model="bedrock/anthropic.claude-3-sonnet")
assert llm.supports_multimodal() is True | test | 0 | {"function_name": "test_supports_multimodal_claude3", "class_name": "TestBedrockMultimodal", "qualname": "TestBedrockMultimodal.test_supports_multimodal_claude3", "file_path": "lib/crewai/tests/llms/test_multimodal.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["crewai.llm", "crewai_files", "crewai.llms.providers.anthropic.completion", "crewai.llms.providers.azure.completion", "crewai.llms.providers.bedrock.completion"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:shared/secrets_backend/tests/secrets_backend/test_base.py:TestBaseSecretsBackend.test_get_variable_not_implemented | # Context:
import pytest
from airflow_shared.secrets_backend.base import BaseSecretsBackend
class MockConnection: ...
class _TestBackend(BaseSecretsBackend): ...
class TestBaseSecretsBackend:
def test_build_path_with_separator(self, prefix, secret_id, sep, expected): ...
def test_get_conn_value_not_implemented(self): ...
def test_get_config_returns_none_by_default(self): ...
def test_implementation_get_conn_value(self, sample_conn_uri): ...
def test_concrete_implementation_get_conn_value_missing(self): ...
def test_concrete_implementation_get_variable(self): ...
def test_concrete_implementation_get_variable_missing(self): ...
def test_get_conn_value_with_various_conn_ids(self, conn_id, expected): ...
def test_deserialize_connection_json(self, sample_conn_json): ...
def test_deserialize_connection_uri(self, sample_conn_uri): ...
# Task:
Write a Python test method `test_get_variable_not_implemented` in test class `TestBaseSecretsBackend` to verify the behavior of `get_variable_not_implemented`.
Module under test: __future__, airflow_shared.secrets_backend.base | def test_get_variable_not_implemented(self):
backend = BaseSecretsBackend()
with pytest.raises(NotImplementedError):
backend.get_variable("test_var") | test | 1 | {"function_name": "test_get_variable_not_implemented", "class_name": "TestBaseSecretsBackend", "qualname": "TestBaseSecretsBackend.test_get_variable_not_implemented", "file_path": "shared/secrets_backend/tests/secrets_backend/test_base.py", "repo_id": "apache/airflow", "loc": 4, "tested_modules": ["__future__", "airflow_shared.secrets_backend.base"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/models/transformers/transformer_chroma.py:ChromaAdaLayerNormZeroSinglePruned:class_doc | Write a class-level docstring for `ChromaAdaLayerNormZeroSinglePruned` (inherits from nn.Module) which has methods: `__init__`, `forward`. | Norm layer adaptive layer norm zero (adaLN-Zero).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
num_embeddings (`int`): The size of the embeddings dictionary. | documentation | 1 | {"doc_type": "class", "class_name": "ChromaAdaLayerNormZeroSinglePruned", "file_path": "src/diffusers/models/transformers/transformer_chroma.py", "repo_id": "huggingface/diffusers", "char_length": 191, "methods": ["__init__", "forward"]} |
huggingface/transformers:tests/models/dinov3_vit/test_modeling_dinov3_vit.py:Dinov3ModelTest:class_doc | Write a class-level docstring for `Dinov3ModelTest` (inherits from ModelTesterMixin, PipelineTesterMixin, unittest.TestCase) which has methods: `setUp`, `test_backbone`, `test_config`, `test_inputs_embeds`, `test_model_get_set_embeddings`. | Here we also overwrite some of the tests of test_modeling_common.py, as Dinov3 does not use input_ids, inputs_embeds,
attention_mask and seq_length. | documentation | 0 | {"doc_type": "class", "class_name": "Dinov3ModelTest", "file_path": "tests/models/dinov3_vit/test_modeling_dinov3_vit.py", "repo_id": "huggingface/transformers", "char_length": 148, "methods": ["setUp", "test_backbone", "test_config", "test_inputs_embeds", "test_model_get_set_embeddings", "test_model", "test_feed_forward_chunking", "test_model_from_pretrained"]} |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py:TestFilesystemGrepSearch.test_grep_invalid_regex | # Context:
from langchain_anthropic.middleware.anthropic_tools import AnthropicToolsState
from langchain_anthropic.middleware.file_search import (
StateFileSearchMiddleware,
)
class TestSearchMiddlewareInitialization: ...
class TestGlobSearch: ...
class TestGrepSearch: ...
class TestSearchWithDifferentBackends: ...
class TestFilesystemGrepSearch:
def test_grep_content_mode(self) -> None: ...
def test_grep_count_mode(self) -> None: ...
def test_grep_with_include_filter(self) -> None: ...
def test_grep_with_brace_expansion_filter(self) -> None: ...
def test_grep_with_base_path(self) -> None: ...
def test_grep_no_matches(self) -> None: ...
# Task:
Write a Python test method `test_grep_invalid_regex` in test class `TestFilesystemGrepSearch` to test grep with invalid regex pattern.
Module under test: langchain_anthropic.middleware.anthropic_tools, langchain_anthropic.middleware.file_search | def test_grep_invalid_regex(self) -> None:
"""Test grep with invalid regex pattern."""
middleware = StateFileSearchMiddleware()
state: AnthropicToolsState = {
"messages": [],
"text_editor_files": {},
}
result = middleware._handle_grep_search(
pattern=r"[unclosed",
path="/",
include=None,
output_mode="files_with_matches",
state=state,
)
assert isinstance(result, str)
assert "Invalid regex pattern" in result | test | 1 | {"function_name": "test_grep_invalid_regex", "class_name": "TestFilesystemGrepSearch", "qualname": "TestFilesystemGrepSearch.test_grep_invalid_regex", "file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py", "repo_id": "langchain-ai/langchain", "loc": 19, "tested_modules": ["langchain_anthropic.middleware.anthropic_tools", "langchain_anthropic.middleware.file_search"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/kernels/helion/register.py:PresetConfigSearch:class_doc | Write a class-level docstring for `PresetConfigSearch` (inherits from BaseAutotuner) which has methods: `__init__`, `autotune`. | Custom autotuner that uses a preset config selector instead of autotuning. | documentation | 1 | {"doc_type": "class", "class_name": "PresetConfigSearch", "file_path": "vllm/kernels/helion/register.py", "repo_id": "vllm-project/vllm", "char_length": 74, "methods": ["__init__", "autotune"]} |
gradio-app/gradio:gradio/media.py:module_doc | Write a module-level docstring for the Python module `media` which contains function `_get_media_path`, function `get_image`, function `get_video`, function `get_audio`, function `get_model3d`. | Media Registry for Gradio Demos
This module provides a centralized way to access media files.
Usage:
from gradio.media import get_image, get_video, get_audio, get_model3d, get_file
# Get specific media files
cheetah_img = get_image("cheetah1.jpg")
world_video = get_video("world.mp4")
cantina_audio = get_audio("cantina.wav")
bunny_model = get_model3d("Bunny.obj")
titanic_data = get_file("titanic.csv")
# Get random media of a type
random_img = get_image()
random_video = get_video()
random_audio = get_audio() | documentation | 1 | {"doc_type": "module", "module_name": "media", "file_path": "gradio/media.py", "repo_id": "gradio-app/gradio", "char_length": 559} |
apache/airflow:providers/apache/impala/tests/unit/apache/impala/hooks/test_impala_sql.py:test_get_url | # Context:
import json
from unittest.mock import MagicMock, patch
from sqlalchemy.engine.url import make_url
def mock_connection(create_connection_without_db) -> Connection: ...
def impala_hook() -> ImpalaHook: ...
def get_cursor_descriptions(fields: list[str]) -> list[tuple[str]]: ...
def test_sqlalchemy_url_property(impala_hook, mock_connection, host, login, password, port, schema, extra_dict, expected_query): ...
def test_impala_run_query(impala_hook, mock_connection, sql, expected_rows): ...
def test_get_sqlalchemy_engine(impala_hook, mock_connection, mocker): ...
def test_run_with_empty_sql(impala_hook, sql): ...
def impala_hook_with_timeout(create_connection_without_db): ...
def test_execution_timeout_exceeded(impala_hook_with_timeout): ...
# Task:
Write a Python test function `test_get_url` to ensure get_uri() returns correct formatted URI for Impala connection.
Module under test: __future__, sqlalchemy.engine.url, airflow.models | def test_get_url(impala_hook, mock_connection):
"""Ensure get_uri() returns correct formatted URI for Impala connection"""
mock_connection.host = "impala.company.com"
mock_connection.port = 21050
mock_connection.login = "user"
mock_connection.password = "secret"
mock_connection.schema = "analytics"
mock_connection.extra = json.dumps({"use_ssl": "True", "auth_mechanism": "PLAIN"})
with patch.object(impala_hook, "get_connection", return_value=mock_connection):
uri = impala_hook.get_uri()
expected_uri = "impala://user:secret@impala.company.com:21050/analytics?use_ssl=True&auth_mechanism=PLAIN"
assert make_url(uri) == make_url(expected_uri) | test | 1 | {"function_name": "test_get_url", "class_name": null, "qualname": "test_get_url", "file_path": "providers/apache/impala/tests/unit/apache/impala/hooks/test_impala_sql.py", "repo_id": "apache/airflow", "loc": 16, "tested_modules": ["__future__", "sqlalchemy.engine.url", "airflow.models", "airflow.providers.apache.impala.hooks.impala"], "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/_private/authentication/grpc_authentication_server_interceptor.py:AsyncAuthenticationServerInterceptor.intercept_service | # Context:
from typing import Awaitable, Callable
import grpc
def _authenticate_request(metadata: tuple) -> bool: ...
class SyncAuthenticationServerInterceptor(grpc.ServerInterceptor): ...
class AsyncAuthenticationServerInterceptor(aiogrpc.ServerInterceptor):
# Task:
Write a Python async method `intercept_service` for the class `AsyncAuthenticationServerInterceptor` to intercept service calls to validate authentication.
Parameters: continuation: Callable[[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]], handler_call_details: grpc.HandlerCallDetails
Returns: grpc.RpcMethodHandler | async def intercept_service(
self,
continuation: Callable[
[grpc.HandlerCallDetails], Awaitable[grpc.RpcMethodHandler]
],
handler_call_details: grpc.HandlerCallDetails,
) -> grpc.RpcMethodHandler:
"""Intercept service calls to validate authentication.
This method is called once per RPC to get the handler. We wrap the handler
to validate authentication before executing the actual RPC method.
"""
# Get the actual handler
handler = await continuation(handler_call_details)
if handler is None:
return None
async def _abort_if_unauthenticated(context):
"""Abort the RPC if authentication fails."""
if not _authenticate_request(context.invocation_metadata()):
await context.abort(
grpc.StatusCode.UNAUTHENTICATED,
"Invalid or missing authentication token",
)
# Wrap the RPC behavior with authentication check
def wrap_unary_response(behavior):
"""Wrap a unary response RPC method to validate authentication first."""
if behavior is None:
return None
async def wrapped(request_or_iterator, context):
await _abort_if_unauthenticated(context)
return await behavior(request_or_iterator, context)
return wrapped
def wrap_stream_response(behavior):
"""Wrap a streaming response RPC method to validate authentication first."""
if behavior is None:
return None
async def wrapped(request_or_iterator, context):
await _abort_if_unauthenticated(context)
async for response in behavior(request_or_iterator, context):
yield response
return wrapped
# Create a wrapper class that implements RpcMethodHandler interface
class AuthenticatedHandler:
"""Wrapper handler that validates authentication."""
def __init__(
self, original_handler, unary_wrapper_func, stream_wrapper_func
):
self._original = original_handler
self._wrap_unary = unary_wrapper_func
self._wrap_stream = stream_wrapper_func
@property
def request_streaming(self):
return self._original.request_streaming
@property
def response_streaming(self):
return self._original.response_streaming
@property
def request_deserializer(self):
return self._original.request_deserializer
@property
def response_serializer(self):
return self._original.response_serializer
@property
def unary_unary(self):
return self._wrap_unary(self._original.unary_unary)
@property
def unary_stream(self):
return self._wrap_stream(self._original.unary_stream)
@property
def stream_unary(self):
return self._wrap_unary(self._original.stream_unary)
@property
def stream_stream(self):
return self._wrap_stream(self._original.stream_stream)
return AuthenticatedHandler(handler, wrap_unary_response, wrap_stream_response) | function_complex | 0 | {"cognitive_complexity": 10, "loc": 94, "code_loc": 60, "docstring_loc": 5, "function_name": "intercept_service", "class_name": "AsyncAuthenticationServerInterceptor", "qualname": "AsyncAuthenticationServerInterceptor.intercept_service", "file_path": "python/ray/_private/authentication/grpc_authentication_server_interceptor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:release/ray_release/tests/test_byod_build_context.py:test_decode_build_context | # Context:
from ray_release.byod.build_context import (
_INSTALL_PYTHON_DEPS_SCRIPT,
build_context_digest,
decode_build_context,
encode_build_context,
fill_build_context_dir,
make_build_context,
)
def test_make_build_context() -> None: ...
def test_make_build_context_partial() -> None: ...
def test_make_build_context_empty() -> None: ...
def test_encode_build_context() -> None: ...
def test_encode_decode_roundtrip() -> None: ...
def test_build_context_digest() -> None: ...
def test_fill_build_context_dir_empty() -> None: ...
def test_fill_build_context_dir() -> None: ...
# Task:
Write a Python test function `test_decode_build_context` to verify the behavior of `decode_build_context`.
Module under test: ray_release.byod.build_context | def test_decode_build_context() -> None:
data = '{"envs":{"FOO":"bar"},"post_build_script":"build.sh"}'
ctx = decode_build_context(data)
assert ctx["envs"] == {"FOO": "bar"}
assert ctx["post_build_script"] == "build.sh" | test | 0 | {"function_name": "test_decode_build_context", "class_name": null, "qualname": "test_decode_build_context", "file_path": "release/ray_release/tests/test_byod_build_context.py", "repo_id": "ray-project/ray", "loc": 6, "tested_modules": ["ray_release.byod.build_context"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/trainer/test_trainer_checkpointing.py:JITCheckpointTest.test_jit_checkpoint_callback_on_epoch_end | # Context:
from unittest.mock import Mock, patch
from transformers.trainer_jit_checkpoint import CheckpointManager, JITCheckpointCallback
class TrainerCheckpointSaveTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerResumeTrainingTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerAutoBatchSizeTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerCheckpointRotationTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerInterruptedTrainingTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerSavingTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerBestModelTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerIntegrationWithHubTester(unittest.TestCase): ...
class JITCheckpointTest(unittest.TestCase):
def setUp(self): ...
def tearDown(self): ...
def get_trainer(self, enable_jit): ...
def test_checkpoint_manager_initialization(self): ...
def test_signal_handler_setup(self): ...
def test_sigterm_handler_flow(self, mock_timer): ...
def test_toggle_checkpoint_flag(self): ...
def test_execute_jit_checkpoint(self): ...
def test_execute_jit_checkpoint_sentinel_file_cleanup(self): ...
def test_execute_jit_checkpoint_with_exception(self): ...
def test_jit_checkpoint_callback_initialization(self): ...
def test_jit_checkpoint_callback_set_trainer_enabled(self): ...
def test_jit_checkpoint_callback_set_trainer_disabled(self): ...
def test_jit_checkpoint_callback_on_pre_optimizer_step(self): ...
def test_jit_checkpoint_callback_on_step_begin(self): ...
def test_jit_checkpoint_callback_on_step_end(self): ...
def test_jit_checkpoint_callback_on_train_end(self): ...
def test_kill_wait_period(self, mock_timer): ...
def test_integration_with_trainer(self): ...
# Task:
Write a Python test method `test_jit_checkpoint_callback_on_epoch_end` in test class `JITCheckpointTest` to test callback behavior at epoch end.
Module under test: pathlib, typing, huggingface_hub | def test_jit_checkpoint_callback_on_epoch_end(self):
"""Test callback behavior at epoch end."""
trainer = self.get_trainer()
callback = JITCheckpointCallback()
callback.set_trainer(trainer)
# Mock control object
control = Mock()
control.should_save = True
control.should_training_stop = False
# Mock execute method
with patch.object(callback.jit_manager, "execute_jit_checkpoint") as mock_execute:
# Test when checkpoint not requested
callback.jit_manager.is_checkpoint_requested = False
callback.on_epoch_end(trainer.args, trainer.state, control)
# should_save should remain unchanged when checkpoint not requested
self.assertTrue(control.should_save)
self.assertFalse(control.should_training_stop)
mock_execute.assert_not_called()
# Reset control
control.should_save = True
control.should_training_stop = False
# Test when checkpoint requested
callback.jit_manager.is_checkpoint_requested = True
callback.on_epoch_end(trainer.args, trainer.state, control)
self.assertFalse(control.should_save)
self.assertTrue(control.should_training_stop)
mock_execute.assert_called_once() | test | 0 | {"function_name": "test_jit_checkpoint_callback_on_epoch_end", "class_name": "JITCheckpointTest", "qualname": "JITCheckpointTest.test_jit_checkpoint_callback_on_epoch_end", "file_path": "tests/trainer/test_trainer_checkpointing.py", "repo_id": "huggingface/transformers", "loc": 31, "tested_modules": ["pathlib", "typing", "huggingface_hub", "torch", "transformers"], "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py:PushNotificationHandler.execute | # Context:
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import (
CommonParams,
PushNotificationHandlerKwargs,
PushNotificationResultStore,
extract_common_params,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2APushNotificationRegisteredEvent,
A2APushNotificationTimeoutEvent,
A2AResponseReceivedEvent,
)
def _handle_push_error(error: Exception, error_msg: str, error_type: str, new_messages: list[Message], agent_branch: Any | None, params: CommonParams, task_id: str | None, status_code: int | None) -> TaskStateResult: ...
async def _wait_for_push_result(task_id: str, result_store: PushNotificationResultStore, timeout: float, poll_interval: float, agent_branch: Any | None, from_task: Any | None, from_agent: Any | None, context_id: str | None, endpoint: str | None, a2a_agent_name: str | None) -> A2ATask | None: ...
class PushNotificationHandler:
# Task:
Write a Python async method `execute` for the class `PushNotificationHandler` to execute A2A delegation using push notifications for updates.
Parameters: client: Client, message: Message, new_messages: list[Message], agent_card: AgentCard
Returns: TaskStateResult | async def execute(
client: Client,
message: Message,
new_messages: list[Message],
agent_card: AgentCard,
**kwargs: Unpack[PushNotificationHandlerKwargs],
) -> TaskStateResult:
"""Execute A2A delegation using push notifications for updates.
Args:
client: A2A client instance.
message: Message to send.
new_messages: List to collect messages.
agent_card: The agent card.
**kwargs: Push notification-specific parameters.
Returns:
Dictionary with status, result/error, and history.
Raises:
ValueError: If result_store or config not provided.
"""
config = kwargs.get("config")
result_store = kwargs.get("result_store")
polling_timeout = kwargs.get("polling_timeout", 300.0)
polling_interval = kwargs.get("polling_interval", 2.0)
agent_branch = kwargs.get("agent_branch")
task_id = kwargs.get("task_id")
params = extract_common_params(kwargs)
if config is None:
error_msg = (
"PushNotificationConfig is required for push notification handler"
)
crewai_event_bus.emit(
agent_branch,
A2AConnectionErrorEvent(
endpoint=params.endpoint,
error=error_msg,
error_type="configuration_error",
a2a_agent_name=params.a2a_agent_name,
operation="push_notification",
context_id=params.context_id,
task_id=task_id,
from_task=params.from_task,
from_agent=params.from_agent,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)
if result_store is None:
error_msg = (
"PushNotificationResultStore is required for push notification handler"
)
crewai_event_bus.emit(
agent_branch,
A2AConnectionErrorEvent(
endpoint=params.endpoint,
error=error_msg,
error_type="configuration_error",
a2a_agent_name=params.a2a_agent_name,
operation="push_notification",
context_id=params.context_id,
task_id=task_id,
from_task=params.from_task,
from_agent=params.from_agent,
),
)
return TaskStateResult(
status=TaskState.failed,
error=error_msg,
history=new_messages,
)
try:
result_or_task_id = await send_message_and_get_task_id(
event_stream=client.send_message(message),
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
from_task=params.from_task,
from_agent=params.from_agent,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
context_id=params.context_id,
)
if not isinstance(result_or_task_id, str):
return result_or_task_id
task_id = result_or_task_id
crewai_event_bus.emit(
agent_branch,
A2APushNotificationRegisteredEvent(
task_id=task_id,
context_id=params.context_id,
callback_url=str(config.url),
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
),
)
logger.debug(
"Push notification callback for task %s configured at %s (via initial request)",
task_id,
config.url,
)
final_task = await _wait_for_push_result(
task_id=task_id,
result_store=result_store,
timeout=polling_timeout,
poll_interval=polling_interval,
agent_branch=agent_branch,
from_task=params.from_task,
from_agent=params.from_agent,
context_id=params.context_id,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
)
if final_task is None:
return TaskStateResult(
status=TaskState.failed,
error=f"Push notification timeout after {polling_timeout}s",
history=new_messages,
)
result = process_task_state(
a2a_task=final_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
)
if result:
return result
return TaskStateResult(
status=TaskState.failed,
error=f"Unexpected task state: {final_task.status.state}",
history=new_messages,
)
except A2AClientHTTPError as e:
return _handle_push_error(
error=e,
error_msg=f"HTTP Error {e.status_code}: {e!s}",
error_type="http_error",
new_messages=new_messages,
agent_branch=agent_branch,
params=params,
task_id=task_id,
status_code=e.status_code,
)
except Exception as e:
return _handle_push_error(
error=e,
error_msg=f"Unexpected error during push notification: {e!s}",
error_type="unexpected_error",
new_messages=new_messages,
agent_branch=agent_branch,
params=params,
task_id=task_id,
) | function_complex | 0 | {"cognitive_complexity": 10, "loc": 180, "code_loc": 145, "docstring_loc": 15, "function_name": "execute", "class_name": "PushNotificationHandler", "qualname": "PushNotificationHandler.execute", "file_path": "lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
streamlit/streamlit:e2e_playwright/st_navigation_expanded_test.py:module_doc | Write a module-level docstring for the Python module `st_navigation_expanded_test` which contains function `test_expanded_int_shows_limited_pages_with_view_more_button`, function `test_expanded_int_view_button_expands_to_show_all_pages`, function `test_expanded_int_view_button_can_collapse_again`. | E2E tests for st.navigation with expanded parameter. | documentation | 1 | {"doc_type": "module", "module_name": "st_navigation_expanded_test", "file_path": "e2e_playwright/st_navigation_expanded_test.py", "repo_id": "streamlit/streamlit", "char_length": 52} |
sansan0/TrendRadar:mcp_server/tools/storage_sync.py:StorageSyncTools.__init__ | # Context:
from pathlib import Path
class StorageSyncTools:
def _load_config(self) -> dict: ...
def _get_storage_config(self) -> dict: ...
def _get_remote_config(self) -> dict: ...
def _has_remote_config(self) -> bool: ...
def _get_remote_backend(self): ...
def _get_local_data_dir(self) -> Path: ...
def _parse_date_folder_name(self, folder_name: str) -> Optional[datetime]: ...
def _get_local_dates(self, db_type: str) -> List[str]: ...
def _get_all_local_dates(self) -> Dict[str, List[str]]: ...
def _calculate_dir_size(self, path: Path) -> int: ...
def sync_from_remote(self, days: int) -> Dict: ...
def get_storage_status(self) -> Dict: ...
def list_available_dates(self, source: str) -> Dict: ...
# Task:
Write a Python method `__init__` for the class `StorageSyncTools` to 初始化存储同步工具.
Parameters: project_root: str | def __init__(self, project_root: str = None):
"""
初始化存储同步工具
Args:
project_root: 项目根目录
"""
if project_root:
self.project_root = Path(project_root)
else:
current_file = Path(__file__)
self.project_root = current_file.parent.parent.parent
self._config = None
self._remote_backend = None | function_simple | 1 | {"cognitive_complexity": 2, "loc": 15, "code_loc": 7, "docstring_loc": 6, "function_name": "__init__", "class_name": "StorageSyncTools", "qualname": "StorageSyncTools.__init__", "file_path": "mcp_server/tools/storage_sync.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "class_runnable"} |
run-llama/llama_index:llama-index-integrations/embeddings/llama-index-embeddings-heroku/tests/test_heroku_embeddings.py:TestHerokuEmbedding.test_get_query_embedding | # Context:
from unittest.mock import MagicMock, patch
from llama_index.embeddings.heroku.base import HerokuEmbedding
def fixture_heroku_embedding() -> HerokuEmbedding: ...
def fixture_mock_response() -> MagicMock: ...
class TestHerokuEmbedding:
def test_class_name(self, heroku_embedding: HerokuEmbedding) -> None: ...
def test_init_with_parameters(self) -> None: ...
def test_init_with_environment_variables(self) -> None: ...
def test_init_missing_api_key(self) -> None: ...
def test_init_missing_base_url(self) -> None: ...
def test_init_missing_model(self) -> None: ...
def test_get_text_embedding_success(self, heroku_embedding: HerokuEmbedding, mock_response: MagicMock) -> None: ...
def test_get_text_embedding_http_error(self, heroku_embedding: HerokuEmbedding) -> None: ...
def test_get_text_embedding_exception(self, heroku_embedding: HerokuEmbedding) -> None: ...
def test_get_text_embeddings(self, heroku_embedding: HerokuEmbedding) -> None: ...
async def test_aget_text_embedding_success(self, heroku_embedding: HerokuEmbedding, mock_response: MagicMock) -> None: ...
async def test_aget_text_embedding_http_error(self, heroku_embedding: HerokuEmbedding) -> None: ...
async def test_aget_text_embedding_exception(self, heroku_embedding: HerokuEmbedding) -> None: ...
async def test_aget_query_embedding(self, heroku_embedding: HerokuEmbedding) -> None: ...
async def test_aget_text_embeddings(self, heroku_embedding: HerokuEmbedding) -> None: ...
def test_cleanup_sync_client(self) -> None: ...
async def test_cleanup_async_client(self) -> None: ...
def test_embedding_dimensions(self, heroku_embedding: HerokuEmbedding) -> None: ...
def test_batch_embedding_consistency(self, heroku_embedding: HerokuEmbedding) -> None: ...
async def test_async_batch_embedding_consistency(self, heroku_embedding: HerokuEmbedding) -> None: ...
# Task:
Write a Python test method `test_get_query_embedding` in test class `TestHerokuEmbedding` to test query embedding.
Module under test: llama_index.embeddings.heroku.base | def test_get_query_embedding(self, heroku_embedding: HerokuEmbedding) -> None:
"""Test query embedding."""
with patch.object(
heroku_embedding, "_get_text_embedding", return_value=[0.1, 0.2, 0.3]
):
embedding = heroku_embedding.get_query_embedding("test query")
assert embedding == [0.1, 0.2, 0.3] | test | 1 | {"function_name": "test_get_query_embedding", "class_name": "TestHerokuEmbedding", "qualname": "TestHerokuEmbedding.test_get_query_embedding", "file_path": "llama-index-integrations/embeddings/llama-index-embeddings-heroku/tests/test_heroku_embeddings.py", "repo_id": "run-llama/llama_index", "loc": 7, "tested_modules": ["llama_index.embeddings.heroku.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
docling-project/docling:tests/test_asr_mlx_whisper.py:TestMlxWhisperIntegration.test_mlx_whisper_options_creation | # Context:
from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
from docling.datamodel.pipeline_options_asr_model import (
InferenceAsrFramework,
InlineAsrMlxWhisperOptions,
)
class TestMlxWhisperIntegration:
def test_whisper_models_auto_select_mlx(self): ...
def test_explicit_mlx_models_shape(self): ...
def test_model_selectors_mlx_and_native_paths(self, monkeypatch): ...
def test_selector_import_errors_force_native(self, monkeypatch): ...
def test_mlx_whisper_model_initialization(self, mock_import): ...
def test_mlx_whisper_model_import_error(self): ...
def test_mlx_whisper_transcribe(self, mock_import): ...
def test_asr_pipeline_with_mlx_whisper(self, mock_import): ...
# Task:
Write a Python test method `test_mlx_whisper_options_creation` in test class `TestMlxWhisperIntegration` to test that MLX Whisper options are created correctly.
Module under test: pathlib, docling.datamodel.accelerator_options, docling.datamodel.asr_model_specs | def test_mlx_whisper_options_creation(self):
"""Test that MLX Whisper options are created correctly."""
options = InlineAsrMlxWhisperOptions(
repo_id="mlx-community/whisper-tiny-mlx",
language="en",
task="transcribe",
)
assert options.inference_framework == InferenceAsrFramework.MLX
assert options.repo_id == "mlx-community/whisper-tiny-mlx"
assert options.language == "en"
assert options.task == "transcribe"
assert options.word_timestamps is True
assert AcceleratorDevice.MPS in options.supported_devices | test | 1 | {"function_name": "test_mlx_whisper_options_creation", "class_name": "TestMlxWhisperIntegration", "qualname": "TestMlxWhisperIntegration.test_mlx_whisper_options_creation", "file_path": "tests/test_asr_mlx_whisper.py", "repo_id": "docling-project/docling", "loc": 14, "tested_modules": ["pathlib", "docling.datamodel.accelerator_options", "docling.datamodel.asr_model_specs", "docling.datamodel.pipeline_options", "docling.datamodel.pipeline_options_asr_model"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:agent/sandbox/providers/e2b.py:E2BProvider.execute_code | # Context:
from .base import SandboxProvider, SandboxInstance, ExecutionResult
class E2BProvider(SandboxProvider):
def __init__(self):
self.api_key: str = ""
self.region: str = "us"
self.timeout: int = 30
self._initialized: bool = False
def initialize(self, config: Dict[str, Any]) -> bool: ...
def create_instance(self, template: str) -> SandboxInstance: ...
def destroy_instance(self, instance_id: str) -> bool: ...
def health_check(self) -> bool: ...
def get_supported_languages(self) -> List[str]: ...
def get_config_schema() -> Dict[str, Dict]: ...
def _normalize_language(self, language: str) -> str: ...
# Task:
Write a Python method `execute_code` for the class `E2BProvider` to execute code in the E2B instance.
Parameters: instance_id: str, code: str, language: str, timeout: int
Returns: ExecutionResult | def execute_code(
self,
instance_id: str,
code: str,
language: str,
timeout: int = 10
) -> ExecutionResult:
"""
Execute code in the E2B instance.
Args:
instance_id: ID of the sandbox instance
code: Source code to execute
language: Programming language (python, nodejs, go, bash)
timeout: Maximum execution time in seconds
Returns:
ExecutionResult containing stdout, stderr, exit_code, and metadata
Raises:
RuntimeError: If execution fails
TimeoutError: If execution exceeds timeout
"""
if not self._initialized:
raise RuntimeError("Provider not initialized. Call initialize() first.")
# TODO: Implement actual E2B API call
# POST /sandbox/{sandboxID}/execute
raise RuntimeError(
"E2B provider is not yet fully implemented. "
"Please use the self-managed provider or implement the E2B API integration. "
"See https://github.com/e2b-dev/e2b for API documentation."
) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 34, "code_loc": 7, "docstring_loc": 16, "function_name": "execute_code", "class_name": "E2BProvider", "qualname": "E2BProvider.execute_code", "file_path": "agent/sandbox/providers/e2b.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/test_with_column.py:test_with_column_mixed_udf_and_regular_expressions | # Context:
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pc
import pytest
from pkg_resources import parse_version
import ray
from ray.data._internal.utils.arrow_utils import get_pyarrow_version
from ray.data.datatype import DataType
from ray.data.expressions import col, lit, udf
def test_with_column(ray_start_regular_shared, column_name, expr, expected_value, target_max_block_size_infinite_or_default): ...
def test_with_column_nonexistent_column(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_with_column_multiple_expressions(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_with_column_udf_single_column(ray_start_regular_shared, udf_function, column_name, expected_result, target_max_block_size_infinite_or_default): ...
def test_with_column_udf_multi_column(ray_start_regular_shared, test_scenario, target_max_block_size_infinite_or_default): ...
def test_with_column_udf_in_complex_expressions(ray_start_regular_shared, expression_scenario, target_max_block_size_infinite_or_default): ...
def test_with_column_udf_multiple_udfs(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_with_column_udf_invalid_return_type_validation(ray_start_regular_shared, target_max_block_size_infinite_or_default): ...
def test_with_column_string_concat_combinations(ray_start_regular_shared, scenario): ...
def test_with_column_string_concat_type_mismatch_raises(ray_start_regular_shared): ...
def test_with_column_alias_expressions(ray_start_regular_shared, expr_factory, expected_columns, alias_name, expected_values): ...
def test_with_column_callable_class_udf_actor_semantics(ray_start_regular_shared): ...
def test_with_column_callable_class_udf_with_constructor_args(ray_start_regular_shared): ...
def test_with_column_multiple_callable_class_udfs(ray_start_regular_shared): ...
def test_with_column_same_callable_class_different_constructor_args(ray_start_regular_shared): ...
def test_with_column_callable_class_udf_with_compute_strategy(ray_start_regular_shared): ...
def test_with_column_async_callable_class_udf(ray_start_regular_shared): ...
def test_with_column_async_callable_class_udf_with_state(ray_start_regular_shared): ...
def test_with_column_multiple_async_callable_class_udfs(ray_start_regular_shared): ...
def test_with_column_async_generator_udf_multiple_yields(ray_start_regular_shared): ...
# Task:
Write a Python test function `test_with_column_mixed_udf_and_regular_expressions` to test mixing UDF expressions and regular expressions in with_column operations.
Module under test: pkg_resources, ray.data._internal.util, ray.data._internal.utils.arrow_utils | def test_with_column_mixed_udf_and_regular_expressions(
ray_start_regular_shared, target_max_block_size_infinite_or_default
):
"""Test mixing UDF expressions and regular expressions in with_column operations."""
ds = ray.data.range(5)
# Define a UDF for testing
@udf(DataType.int64())
def multiply_by_three(x: pa.Array) -> pa.Array:
return pc.multiply(x, 3)
# Mix regular expressions and UDF expressions
ds = ds.with_column("plus_ten", col("id") + 10) # Regular expression
ds = ds.with_column("times_three", multiply_by_three(col("id"))) # UDF expression
ds = ds.with_column("minus_five", col("id") - 5) # Regular expression
ds = ds.with_column(
"udf_plus_regular", multiply_by_three(col("id")) + col("plus_ten")
) # Mixed: UDF + regular
ds = ds.with_column(
"comparison", col("times_three") > col("plus_ten")
) # Regular expression using UDF result
# Convert to pandas and compare with expected result
result_df = ds.to_pandas()
expected_df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"plus_ten": [10, 11, 12, 13, 14], # id + 10
"times_three": [0, 3, 6, 9, 12], # id * 3
"minus_five": [-5, -4, -3, -2, -1], # id - 5
"udf_plus_regular": [10, 14, 18, 22, 26], # (id * 3) + (id + 10)
"comparison": [False, False, False, False, False], # times_three > plus_ten
}
)
pd.testing.assert_frame_equal(result_df, expected_df) | test | 0 | {"function_name": "test_with_column_mixed_udf_and_regular_expressions", "class_name": null, "qualname": "test_with_column_mixed_udf_and_regular_expressions", "file_path": "python/ray/data/tests/test_with_column.py", "repo_id": "ray-project/ray", "loc": 37, "tested_modules": ["pkg_resources", "ray.data._internal.util", "ray.data._internal.utils.arrow_utils", "ray.data.datatype", "ray.data.exceptions"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/tests/serve/gpu/deployments/llm/vllm/test_config_congruence.py:test_vllm_config_ray_serve_vs_cli_comparison | # Context:
from unittest.mock import MagicMock, patch
import pytest
from vllm.platforms.interface import DeviceCapability
def setup_placement_group_cleanup(): ...
def deep_compare(dict1: Any, dict2: Any) -> bool: ...
async def normalize_parallel_config(config_dict: Dict[str, Any]) -> None: ...
def get_config_differences(dict1: Dict[str, Any], dict2: Dict[str, Any]) -> list[str]: ...
async def get_ray_serve_llm_vllm_config() -> Tuple[Any, str]: ...
async def get_vllm_standalone_config() -> Tuple[Any, str]: ...
# Task:
Write a Python test function `test_vllm_config_ray_serve_vs_cli_comparison` to verify the behavior of `vllm_config_ray_serve_vs_cli_comparison`.
Module under test: typing, vllm.config, vllm.entrypoints.openai.api_server | async def test_vllm_config_ray_serve_vs_cli_comparison(
gpu_type: str, capability: DeviceCapability
):
with patch(
"vllm.platforms.cuda.NvmlCudaPlatform.get_device_capability",
return_value=capability,
):
ray_vllm_config = await get_ray_serve_llm_vllm_config()
cli_vllm_config = await get_vllm_standalone_config()
ray_config_dict = {
k: v
for k, v in vars(ray_vllm_config).items()
if k not in EXPECTED_DIFF_FIELDS
}
cli_config_dict = {
k: v
for k, v in vars(cli_vllm_config).items()
if k not in EXPECTED_DIFF_FIELDS
}
await normalize_parallel_config(ray_config_dict)
await normalize_parallel_config(cli_config_dict)
if not deep_compare(ray_config_dict, cli_config_dict):
differences = get_config_differences(ray_config_dict, cli_config_dict)
diff_msg = "\n".join(differences)
pytest.fail(
f"VllmConfig objects differ for {gpu_type} GPUs "
f"(compute capability {capability.major}.{capability.minor}):\n{diff_msg}"
) | test | 0 | {"function_name": "test_vllm_config_ray_serve_vs_cli_comparison", "class_name": null, "qualname": "test_vllm_config_ray_serve_vs_cli_comparison", "file_path": "python/ray/llm/tests/serve/gpu/deployments/llm/vllm/test_config_congruence.py", "repo_id": "ray-project/ray", "loc": 31, "tested_modules": ["typing", "vllm.config", "vllm.entrypoints.openai.api_server", "vllm.platforms.interface", "ray.llm._internal.serve.engines.vllm.vllm_engine"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py:LMCacheConnectorV1Impl.start_load_kv | # Context:
import torch
class LoadSpec: ...
class SaveSpec: ...
class DisaggSpec: ...
def extract_request_configs(sampling_params: SamplingParams) -> dict | None: ...
class RequestTracker: ...
class ReqMeta: ...
def need_gpu_interm_buffer(lmcache_config: LMCacheEngineConfig): ...
def _calculate_mtp_layers(vllm_config, model_config): ...
def _init_lmcache_engine(lmcache_config: LMCacheEngineConfig, vllm_config: 'VllmConfig') -> LMCacheEngine: ...
class LMCacheConnectorMetadata(KVConnectorMetadata): ...
class LMCacheConnectorV1Impl:
def __init__(
self,
vllm_config: "VllmConfig",
role: KVConnectorRole,
parent: KVConnectorBase_V1,
):
assert vllm_config.kv_transfer_config is not None
self._parent = parent
self._vllm_config = vllm_config
self.kv_role = vllm_config.kv_transfer_config.kv_role
self.worker_count = vllm_config.parallel_config.tensor_parallel_size
config = lmcache_get_or_create_config()
assert isinstance(config, LMCacheEngineConfig), (
"LMCache v1 configuration is should be passed for vLLM v1."
)
# Put the leading with "lmcache." and matched configs from
# vllm extra_config to the config
kv_connector_extra_config = (
vllm_config.kv_transfer_config.kv_connector_extra_config
)
if kv_connector_extra_config:
for key, value in kv_connector_extra_config.items():
if key.startswith("lmcache."):
config_key = key[8:] # Remove "lmcache." prefix
if _validate_and_set_config_value(config, config_key, value):
logger.info(
"Updated config %s from vLLM extra config: %s",
config_key,
value,
)
self.config = config
self.async_loading = config.enable_async_loading
self.layerwise_retrievers: list[Generator[torch.Tensor | None, None, None]] = []
self._stats_monitor = LMCStatsMonitor.GetOrCreate()
if role == KVConnectorRole.SCHEDULER:
# Create lookup client using factory
self.lookup_client = LookupClientFactory.create_lookup_client(
vllm_config, config
)
self._unfinished_requests: dict[str, Request] = {}
self._lookup_requests_in_step: list[str] = []
self.lmcache_engine = None
else:
self.lmcache_engine = _init_lmcache_engine(
config,
vllm_config,
)
self.use_layerwise = config.use_layerwise
self.enable_blending = config.enable_blending
if self.enable_blending:
self.blender = LMCBlenderBuilder.get_or_create(
ENGINE_NAME,
self.lmcache_engine,
self.lmcache_engine.gpu_connector,
config,
)
# Create lookup server using factory
assert self.lmcache_engine is not None
self.lookup_server = LookupClientFactory.create_lookup_server(
self.lmcache_engine, vllm_config
)
self.offload_server = ZMQOffloadServer(
self.lmcache_engine,
vllm_config,
get_tensor_model_parallel_rank(),
)
# In case of MLA, the lookup server is only created on worker 0
if self.async_loading and self.lookup_server is not None:
assert isinstance(self.lookup_server, LMCacheAsyncLookupServer)
self.lmcache_engine.post_init(async_lookup_server=self.lookup_server)
self.kv_caches: dict[str, torch.Tensor] = {}
self._block_size = vllm_config.cache_config.block_size
# request_id -> (vllm cached tokens, lmcache cached tokens)
self.load_specs: dict[str, LoadSpec] = {}
self.kv_cache_manager: KVCacheManager | None = None
# request_id -> full_token_ids
self._request_trackers: dict[str, RequestTracker] = {}
# Whether to discard partial chunks
self._discard_partial_chunks = (
vllm_config.kv_transfer_config.get_from_extra_config(
"discard_partial_chunks", False
)
or not config.save_unfull_chunk
)
self._lmcache_chunk_size = config.chunk_size
self._save_decode_cache = config.save_decode_cache
self.skip_last_n_tokens = vllm_config.kv_transfer_config.get_from_extra_config(
"skip_last_n_tokens", 0
)
self.num_layers = vllm_config.model_config.get_num_layers(
vllm_config.parallel_config
)
self.current_layer = 0
self.force_skip_save = bool(os.environ.get("LMCACHE_FORCE_SKIP_SAVE", False))
self._requests_priority: dict[str, int] = {}
# TODO(baoloongmao): Internal api server & plugin framework support
# dp > 1
if (
vllm_config.parallel_config.data_parallel_size_local == 1
or vllm_config.parallel_config.data_parallel_rank_local == 0
):
# Start internal API server if enabled
# The enabled check is in the InternalAPIServer constructor
self.api_server = InternalAPIServer(self)
self.api_server.start()
# Launch plugins
self.plugin_launcher = RuntimePluginLauncher(
self.config,
role,
self.worker_count,
-1
if self.lmcache_engine is None # scheduler side
else self.lmcache_engine.metadata.worker_id,
)
self.plugin_launcher.launch_plugins()
else:
self.api_server = None # type: ignore[assignment]
self.plugin_launcher = None # type: ignore[assignment]
logger.info(
"LMCache initialized for role %s with version %s, "
"vllm version %s, lmcache cache_engine metadata: %s",
role,
utils.get_version(),
VLLM_VERSION,
getattr(self.lmcache_engine, "metadata", None),
)
def get_inference_info(self) -> dict: ...
def get_inference_version(self) -> str: ...
def _init_kv_caches_from_forward_context(self, forward_context: 'ForwardContext'): ...
def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): ...
def wait_for_layer_load(self, layer_name: str) -> None: ...
def save_kv_layer(self, layer_name: str, kv_layer: torch.Tensor, attn_metadata: AttentionMetadata, **kwargs) -> None: ...
def wait_for_save(self): ...
def get_finished(self, finished_req_ids: set[str]) -> tuple[set[str] | None, set[str] | None]: ...
def get_num_new_matched_tokens(self, request: 'Request', num_computed_tokens: int) -> int | None: ...
def update_state_after_alloc(self, request: 'Request', num_external_tokens: int): ...
def build_connector_meta(self, scheduler_output: SchedulerOutput) -> KVConnectorMetadata: ...
def request_finished(self, request: 'Request', block_ids: list[int]) -> tuple[bool, dict[str, Any] | None]: ...
# Task:
Write a Python method `start_load_kv` for the class `LMCacheConnectorV1Impl` to start loading the KV cache from the connector buffer to vLLM's.
Parameters: forward_context: 'ForwardContext'
Returns: None | def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
"""Start loading the KV cache from the connector buffer to vLLM's
paged KV buffer.
Args:
forward_context (ForwardContext): the forward context.
Note:
The number of elements in kv_caches and layer_names should be
the same.
"""
self.current_layer = 0
if len(self.kv_caches) == 0:
self._init_kv_caches_from_forward_context(forward_context)
metadata = self._parent._get_connector_metadata()
assert isinstance(metadata, LMCacheConnectorMetadata)
assert len(self.kv_caches) > 0
kvcaches = list(self.kv_caches.values())
attn_metadata = forward_context.attn_metadata
if attn_metadata is None:
logger.debug("In connector.start_load_kv, but the attn_metadata is None")
return
assert self.lmcache_engine is not None
self.lmcache_engine.post_init(kvcaches=kvcaches)
self.layerwise_retrievers = []
for idx, request in enumerate(metadata.requests):
if request.load_spec is None:
continue
last_idx = idx
for idx, request in enumerate(metadata.requests):
if request.load_spec is None:
continue
tokens = request.token_ids
# TODO: have a pre-allocated buffer to hold the slot_mappings
slot_mapping = request.slot_mapping.cuda()
assert len(tokens) == len(slot_mapping)
self._stats_monitor.update_interval_vllm_hit_tokens(
request.load_spec.vllm_cached_tokens
)
token_mask = torch.ones(len(tokens), dtype=torch.bool)
masked_token_count = (
request.load_spec.vllm_cached_tokens
// self._lmcache_chunk_size
* self._lmcache_chunk_size
)
token_mask[:masked_token_count] = False
lmcache_cached_tokens = request.load_spec.lmcache_cached_tokens
if self.use_layerwise:
sync = idx == last_idx
# NOTE(Jiayi): Perform blending before layerwise prefix caching
if self.enable_blending:
# TODO(Jiayi): Need to make prefix caching and blending
# compatible
self.blender.blend(
tokens[:lmcache_cached_tokens],
token_mask[:lmcache_cached_tokens],
kvcaches=kvcaches,
slot_mapping=slot_mapping[:lmcache_cached_tokens],
)
else:
layerwise_retriever = self.lmcache_engine.retrieve_layer(
tokens[:lmcache_cached_tokens],
token_mask[:lmcache_cached_tokens],
kvcaches=kvcaches,
slot_mapping=slot_mapping[:lmcache_cached_tokens],
sync=sync,
)
# NOTE: retrieve for two layers at the first layer
next(layerwise_retriever)
next(layerwise_retriever)
self.layerwise_retrievers.append(layerwise_retriever)
else:
ret_token_mask = self.lmcache_engine.retrieve(
tokens[:lmcache_cached_tokens],
token_mask[:lmcache_cached_tokens],
kvcaches=kvcaches,
slot_mapping=slot_mapping[:lmcache_cached_tokens],
request_configs=request.request_configs,
req_id=request.req_id,
)
# Check the result
num_retrieved_tokens = ret_token_mask.sum().item()
num_expected_tokens = (
lmcache_cached_tokens - request.load_spec.vllm_cached_tokens
)
if num_retrieved_tokens < num_expected_tokens:
logger.error(
"The number of retrieved tokens is less than the "
"expected number of tokens! This should not happen!"
)
logger.error(
"Num retrieved tokens: %d, num expected tokens: %d",
num_retrieved_tokens,
num_expected_tokens,
) | function_complex | 1 | {"cognitive_complexity": 18, "loc": 108, "code_loc": 78, "docstring_loc": 10, "function_name": "start_load_kv", "class_name": "LMCacheConnectorV1Impl", "qualname": "LMCacheConnectorV1Impl.start_load_kv", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
browser-use/browser-use:browser_use/llm/oci_raw/chat.py:ChatOCIRaw._make_request | # Context:
import asyncio
from oci.generative_ai_inference.models import (
BaseChatRequest,
ChatDetails,
CohereChatRequest,
GenericChatRequest,
OnDemandServingMode,
)
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from .serializer import OCIRawMessageSerializer
class ChatOCIRaw(BaseChatModel):
def provider_name(self) -> str: ...
def name(self) -> str: ...
def model(self) -> str: ...
def model_name(self) -> str: ...
def _uses_cohere_format(self) -> bool: ...
def _get_supported_parameters(self) -> dict[str, bool]: ...
def _get_oci_client(self) -> GenerativeAiInferenceClient: ...
def _extract_usage(self, response) -> ChatInvokeUsage | None: ...
def _extract_content(self, response) -> str: ...
async def ainvoke(self, messages: list[BaseMessage], output_format: None, **kwargs) -> ChatInvokeCompletion[str]: ...
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs) -> ChatInvokeCompletion[T]: ...
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T] | None, **kwargs) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]: ...
# Task:
Write a Python async method `_make_request` for the class `ChatOCIRaw` to make async request to OCI API using proper OCI SDK models.
Parameters: messages: list[BaseMessage] | async def _make_request(self, messages: list[BaseMessage]):
"""Make async request to OCI API using proper OCI SDK models."""
# Create chat request based on provider type
if self._uses_cohere_format():
# Cohere models use CohereChatRequest with single message string
message_text = OCIRawMessageSerializer.serialize_messages_for_cohere(messages)
chat_request = CohereChatRequest()
chat_request.message = message_text
chat_request.max_tokens = self.max_tokens
chat_request.temperature = self.temperature
chat_request.frequency_penalty = self.frequency_penalty
chat_request.top_p = self.top_p
chat_request.top_k = self.top_k
else:
# Meta, xAI and other models use GenericChatRequest with messages array
oci_messages = OCIRawMessageSerializer.serialize_messages(messages)
chat_request = GenericChatRequest()
chat_request.api_format = BaseChatRequest.API_FORMAT_GENERIC
chat_request.messages = oci_messages
chat_request.max_tokens = self.max_tokens
chat_request.temperature = self.temperature
chat_request.top_p = self.top_p
# Provider-specific parameters
if self.provider.lower() == 'meta':
# Meta models support frequency_penalty and presence_penalty
chat_request.frequency_penalty = self.frequency_penalty
chat_request.presence_penalty = self.presence_penalty
elif self.provider.lower() == 'xai':
# xAI models support top_k but not frequency_penalty or presence_penalty
chat_request.top_k = self.top_k
else:
# Default: include all parameters for unknown providers
chat_request.frequency_penalty = self.frequency_penalty
chat_request.presence_penalty = self.presence_penalty
# Create serving mode
serving_mode = OnDemandServingMode(model_id=self.model_id)
# Create chat details
chat_details = ChatDetails()
chat_details.serving_mode = serving_mode
chat_details.chat_request = chat_request
chat_details.compartment_id = self.compartment_id
# Make the request in a thread to avoid blocking
def _sync_request():
try:
client = self._get_oci_client()
response = client.chat(chat_details)
return response # Return the raw response object
except Exception as e:
# Handle OCI-specific exceptions
status_code = getattr(e, 'status', 500)
if status_code == 429:
raise ModelRateLimitError(message=f'Rate limit exceeded: {str(e)}', model=self.name) from e
else:
raise ModelProviderError(message=str(e), status_code=status_code, model=self.name) from e
# Run in thread pool to make it async
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, _sync_request) | function_complex | 0 | {"cognitive_complexity": 11, "loc": 65, "code_loc": 43, "docstring_loc": 1, "function_name": "_make_request", "class_name": "ChatOCIRaw", "qualname": "ChatOCIRaw._make_request", "file_path": "browser_use/llm/oci_raw/chat.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/test_symmetric_run.py:test_symmetric_run_worker_node_behavior | # Context:
from unittest.mock import MagicMock, patch
from click.testing import CliRunner
from ray.scripts.symmetric_run import symmetric_run
def _setup_mock_network_utils(curr_ip, head_ip): ...
def cleanup_ray(): ...
def test_symmetric_run_basic_interface(monkeypatch, cleanup_ray): ...
def test_symmetric_run_arg_validation(monkeypatch, cleanup_ray): ...
# Task:
Write a Python test function `test_symmetric_run_worker_node_behavior` to test symmetric_run behavior when not on the head node.
Module under test: contextlib, click.testing, ray.scripts.symmetric_run | def test_symmetric_run_worker_node_behavior(monkeypatch, cleanup_ray):
"""Test symmetric_run behavior when not on the head node."""
from ray.scripts.symmetric_run import symmetric_run
runner = CliRunner()
with patch("subprocess.run") as mock_run:
mock_run.return_value.returncode = 0
with _setup_mock_network_utils("192.168.1.100", "192.168.1.101"):
# Mock socket connection check to simulate head node ready
with patch("socket.socket") as mock_socket:
mock_socket_instance = MagicMock()
mock_socket_instance.connect_ex.return_value = 0
mock_socket.return_value.__enter__.return_value = mock_socket_instance
# Test worker node behavior
args = ["--address", "192.168.1.100:6379", "--", "echo", "test"]
with patch("sys.argv", ["/bin/ray", "symmetric-run", *args]):
with patch(
"ray.scripts.symmetric_run.check_head_node_ready"
) as mock_check_head_node_ready:
mock_check_head_node_ready.return_value = True
result = runner.invoke(symmetric_run, args)
assert result.exit_code == 0
# Verify that subprocess.run was called
assert mock_run.called
calls = mock_run.call_args_list
# Should have called ray start with --address (worker mode)
ray_start_calls = [
call
for call in calls
if "ray" in str(call) and "start" in str(call)
]
assert len(ray_start_calls) > 0
# Check that it's in worker mode (--address instead of --head)
start_call = ray_start_calls[0]
start_args = start_call[0][0]
assert "--address" in start_args
assert "192.168.1.100:6379" in start_args
assert "--head" not in start_args
assert "--block" in start_args # Worker nodes should block | test | 0 | {"function_name": "test_symmetric_run_worker_node_behavior", "class_name": null, "qualname": "test_symmetric_run_worker_node_behavior", "file_path": "python/ray/tests/test_symmetric_run.py", "repo_id": "ray-project/ray", "loc": 45, "tested_modules": ["contextlib", "click.testing", "ray.scripts.symmetric_run", "ray.scripts.symmetric_run", "ray.scripts.symmetric_run"], "has_docstring": true, "runnable_level": "file_runnable"} |
docling-project/docling:tests/test_backend_image_native.py:test_crop_page_image_scaled | # Context:
from docling_core.types.doc import BoundingBox, CoordOrigin
from docling.backend.image_backend import ImageDocumentBackend, _ImagePageBackend
def _make_png_stream(width: int, height: int, color) -> DocumentStream: ...
def _make_multipage_tiff_stream(num_pages: int, size) -> DocumentStream: ...
def test_docs_builder_uses_image_backend_for_image_stream(): ...
def test_docs_builder_multipage_tiff_counts_frames(): ...
def test_converter_default_maps_image_to_image_backend(): ...
def test_extractor_default_maps_image_to_image_backend(): ...
def _get_backend_from_stream(stream: DocumentStream): ...
def test_num_pages_single(): ...
def test_num_pages_multipage(): ...
def test_get_size(): ...
def test_get_page_image_full(): ...
def test_get_page_image_scaled(): ...
def test_crop_page_image(): ...
def test_get_bitmap_rects(): ...
def test_get_bitmap_rects_scaled(): ...
def test_get_text_in_rect(): ...
def test_multipage_access(): ...
# Task:
Write a Python test function `test_crop_page_image_scaled` to test cropping and scaling page image.
Module under test: io, pathlib, docling_core.types.doc | def test_crop_page_image_scaled():
"""Test cropping and scaling page image."""
width, height = 200, 150
scale = 0.5
stream = _make_png_stream(width=width, height=height)
doc_backend = _get_backend_from_stream(stream)
page_backend: _ImagePageBackend = doc_backend.load_page(0)
cropbox = BoundingBox(l=50, t=30, r=150, b=120, coord_origin=CoordOrigin.TOPLEFT)
img = page_backend.get_page_image(scale=scale, cropbox=cropbox)
assert img.width == round(100 * scale) # cropped width * scale
assert img.height == round(90 * scale) # cropped height * scale | test | 1 | {"function_name": "test_crop_page_image_scaled", "class_name": null, "qualname": "test_crop_page_image_scaled", "file_path": "tests/test_backend_image_native.py", "repo_id": "docling-project/docling", "loc": 12, "tested_modules": ["io", "pathlib", "docling_core.types.doc", "PIL", "docling.backend.image_backend"], "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:tests/ci/browser/test_navigation_slow_pages.py:TestHeavyPageNavigation.test_slow_server_response_completes | # Context:
import asyncio
import time
from browser_use.agent.service import Agent
from tests.ci.conftest import create_mock_llm
def heavy_page_server(): ...
def heavy_base_url(heavy_page_server): ...
async def browser_session(): ...
def _nav_actions(url: str, msg: str) -> list[str]: ...
class TestHeavyPageNavigation:
async def test_redirect_chain_completes(self, browser_session, heavy_base_url): ...
async def test_navigate_event_accepts_domcontentloaded(self, browser_session, heavy_base_url): ...
async def test_recovery_after_slow_navigation(self, browser_session, heavy_base_url): ...
async def test_event_timeout_sufficient_for_heavy_pages(self, browser_session): ...
# Task:
Write a Python test method `test_slow_server_response_completes` in test class `TestHeavyPageNavigation` to navigation succeeds even when server takes 6s to respond.
Module under test: werkzeug, browser_use.agent.service, browser_use.browser | async def test_slow_server_response_completes(self, browser_session, heavy_base_url):
"""Navigation succeeds even when server takes 6s to respond."""
url = f'{heavy_base_url}/slow-server-pdp'
agent = Agent(
task=f'Navigate to {url}',
llm=create_mock_llm(actions=_nav_actions(url)),
browser_session=browser_session,
)
start = time.time()
history = await asyncio.wait_for(agent.run(max_steps=3), timeout=60)
assert len(history) > 0
assert history.final_result() is not None
assert time.time() - start >= 5, 'Should have waited for slow server' | test | 0 | {"function_name": "test_slow_server_response_completes", "class_name": "TestHeavyPageNavigation", "qualname": "TestHeavyPageNavigation.test_slow_server_response_completes", "file_path": "tests/ci/browser/test_navigation_slow_pages.py", "repo_id": "browser-use/browser-use", "loc": 13, "tested_modules": ["werkzeug", "browser_use.agent.service", "browser_use.browser", "browser_use.browser.events", "browser_use.browser.profile"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/unit/test_transform_pyarrow.py:test_nested_struct_with_mixed_tensor_types | # Context:
import pyarrow as pa
import pytest
from ray.data._internal.arrow_ops.transform_pyarrow import (
MIN_PYARROW_VERSION_TYPE_PROMOTION,
_align_struct_fields,
concat,
hash_partition,
shuffle,
try_combine_chunked_columns,
unify_schemas,
)
from ray.data._internal.tensor_extensions.arrow import (
ArrowTensorTypeV2,
_extension_array_concat_supported,
)
def test_try_defragment_table(): ...
def test_hash_partitioning(): ...
def test_shuffle(): ...
def test_arrow_concat_empty(simple_concat_data): ...
def test_arrow_concat_single_block(simple_concat_data): ...
def test_arrow_concat_basic(basic_concat_blocks, basic_concat_expected): ...
def test_arrow_concat_null_promotion(null_promotion_blocks, null_promotion_expected): ...
def test_arrow_concat_tensor_extension_uniform(uniform_tensor_blocks, uniform_tensor_expected): ...
def test_arrow_concat_tensor_extension_variable_shaped(variable_shaped_tensor_blocks, variable_shaped_tensor_expected): ...
def test_arrow_concat_tensor_extension_uniform_and_variable_shaped(mixed_tensor_blocks, mixed_tensor_expected): ...
def test_arrow_concat_tensor_extension_uniform_but_different(different_shape_tensor_blocks, different_shape_tensor_expected): ...
def test_arrow_concat_with_objects(object_concat_blocks, object_concat_expected): ...
def test_struct_with_different_field_names(struct_different_field_names_blocks, struct_different_field_names_expected): ...
def test_nested_structs(nested_structs_blocks, nested_structs_expected): ...
def test_struct_with_null_values(struct_null_values_blocks, struct_null_values_expected): ...
def test_struct_with_mismatched_lengths(struct_mismatched_lengths_blocks, struct_mismatched_lengths_expected): ...
def test_struct_with_empty_arrays(struct_empty_arrays_blocks, struct_empty_arrays_expected): ...
def test_struct_with_arrow_variable_shaped_tensor_type(struct_variable_shaped_tensor_blocks, struct_variable_shaped_tensor_expected): ...
def test_struct_with_diverging_primitive_types(): ...
def test_arrow_concat_object_with_tensor_fails(object_with_tensor_fails_blocks): ...
def test_unify_schemas(unify_schemas_basic_schemas, unify_schemas_multicol_schemas): ...
def test_unify_schemas_object_types(unify_schemas_object_types_schemas): ...
def test_unify_schemas_incompatible_tensor_dtypes(unify_schemas_incompatible_tensor_schemas): ...
def test_unify_schemas_objects_and_tensors(unify_schemas_objects_and_tensors_schemas): ...
def test_unify_schemas_missing_tensor_fields(unify_schemas_missing_tensor_fields_schemas): ...
def test_unify_schemas_nested_struct_tensors(unify_schemas_nested_struct_tensors_schemas): ...
def test_unify_schemas_edge_cases(unify_schemas_edge_cases_data): ...
def test_unify_schemas_mixed_tensor_types(unify_schemas_mixed_tensor_data): ...
def test_unify_schemas_type_promotion(unify_schemas_type_promotion_data): ...
def test_arrow_block_select(block_select_data): ...
def test_arrow_block_slice_copy(block_slice_data): ...
def test_arrow_block_slice_copy_empty(block_slice_data): ...
def test_mixed_tensor_types_same_dtype(mixed_tensor_types_same_dtype_blocks, mixed_tensor_types_same_dtype_expected): ...
def test_mixed_tensor_types_fixed_shape_different(mixed_tensor_types_fixed_shape_blocks, mixed_tensor_types_fixed_shape_expected): ...
def test_mixed_tensor_types_variable_shaped(mixed_tensor_types_variable_shaped_blocks, mixed_tensor_types_variable_shaped_expected): ...
def test_mixed_tensor_types_in_struct(struct_with_mixed_tensor_types_blocks, struct_with_mixed_tensor_types_expected): ...
def test_multiple_tensor_fields_in_struct(multiple_tensor_fields_struct_blocks, multiple_tensor_fields_struct_expected): ...
def test_struct_with_incompatible_tensor_dtypes_fails(): ...
def test_struct_with_additional_fields(struct_with_additional_fields_blocks, struct_with_additional_fields_expected): ...
def test_struct_with_null_tensor_values(struct_with_null_tensor_values_blocks, struct_with_null_tensor_values_expected): ...
def simple_struct_blocks(): ...
def simple_struct_schema(): ...
def nested_struct_blocks(): ...
def nested_struct_schema(): ...
def missing_column_blocks(): ...
def missing_column_schema(): ...
def multiple_struct_blocks(): ...
def multiple_struct_schema(): ...
def mixed_column_blocks(): ...
def mixed_column_schema(): ...
def empty_block_blocks(): ...
def empty_block_schema(): ...
def already_aligned_blocks(): ...
def already_aligned_schema(): ...
def no_struct_blocks(): ...
def no_struct_schema(): ...
def deep_nesting_blocks(): ...
def deep_nesting_schema(): ...
def test_align_struct_fields_simple(simple_struct_blocks, simple_struct_schema): ...
def test_align_struct_fields_nested(nested_struct_blocks, nested_struct_schema): ...
def test_align_struct_fields_missing_column(missing_column_blocks, missing_column_schema): ...
def test_align_struct_fields_multiple_structs(multiple_struct_blocks, multiple_struct_schema): ...
def test_align_struct_fields_non_struct_columns(mixed_column_blocks, mixed_column_schema): ...
def test_align_struct_fields_empty_blocks(empty_block_blocks, empty_block_schema): ...
def test_align_struct_fields_already_aligned(already_aligned_blocks, already_aligned_schema): ...
def test_align_struct_fields_no_struct_columns(no_struct_blocks, no_struct_schema): ...
def test_align_struct_fields_deep_nesting(deep_nesting_blocks, deep_nesting_schema): ...
def uniform_tensor_blocks(): ...
def uniform_tensor_expected(): ...
def variable_shaped_tensor_blocks(): ...
def variable_shaped_tensor_expected(): ...
def mixed_tensor_blocks(): ...
def mixed_tensor_expected(): ...
def different_shape_tensor_blocks(): ...
def different_shape_tensor_expected(): ...
def mixed_tensor_types_same_dtype_blocks(): ...
def mixed_tensor_types_same_dtype_expected(): ...
def mixed_tensor_types_fixed_shape_blocks(): ...
def mixed_tensor_types_fixed_shape_expected(): ...
def mixed_tensor_types_variable_shaped_blocks(): ...
def mixed_tensor_types_variable_shaped_expected(): ...
def struct_with_mixed_tensor_types_blocks(): ...
def struct_with_mixed_tensor_types_expected(): ...
def nested_struct_with_mixed_tensor_types_blocks(): ...
def nested_struct_with_mixed_tensor_types_expected(): ...
def multiple_tensor_fields_struct_blocks(): ...
def multiple_tensor_fields_struct_expected(): ...
def struct_with_additional_fields_blocks(): ...
def struct_with_additional_fields_expected(): ...
def struct_with_null_tensor_values_blocks(): ...
def struct_with_null_tensor_values_expected(): ...
def basic_concat_blocks(): ...
def basic_concat_expected(): ...
def null_promotion_blocks(): ...
def null_promotion_expected(): ...
def struct_different_field_names_blocks(): ...
def struct_different_field_names_expected(): ...
def nested_structs_blocks(): ...
def nested_structs_expected(): ...
def struct_null_values_blocks(): ...
def struct_null_values_expected(): ...
def struct_mismatched_lengths_blocks(): ...
def struct_mismatched_lengths_expected(): ...
def struct_empty_arrays_blocks(): ...
def struct_empty_arrays_expected(): ...
def unify_schemas_basic_schemas(): ...
def unify_schemas_multicol_schemas(): ...
def object_concat_blocks(): ...
def object_concat_expected(): ...
def struct_variable_shaped_tensor_blocks(): ...
def struct_variable_shaped_tensor_expected(): ...
def unify_schemas_object_types_schemas(): ...
def unify_schemas_incompatible_tensor_schemas(): ...
def unify_schemas_objects_and_tensors_schemas(): ...
def unify_schemas_missing_tensor_fields_schemas(): ...
def unify_schemas_nested_struct_tensors_schemas(): ...
def object_with_tensor_fails_blocks(): ...
def simple_concat_data(): ...
def _create_tensor_array(data, tensor_type): ...
def _create_expected_result(schema, length, **kwargs): ...
def _create_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, id_data1, id_data2, column_name): ...
def _create_struct_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, value_data1, value_data2, extra_data2, struct_name, id_data1, id_data2): ...
def _create_tensor_schema(tensor_type, dtype, ndim, include_id, struct_name, include_extra): ...
def _create_basic_struct_blocks(struct_data1, struct_data2, column_name, id_data1, id_data2, other_columns): ...
def _create_struct_schema(struct_fields, include_id, other_fields): ...
def _create_struct_blocks_with_columns(struct_data1, struct_data2, struct_type1, struct_type2, additional_columns1, additional_columns2, struct_column): ...
def _create_struct_expected_result(schema, length, content): ...
def _create_simple_struct_blocks(struct_data1, struct_data2, field_names, field_types, additional_columns1, additional_columns2, struct_column): ...
def _create_simple_struct_schema(field_names, field_types, additional_fields): ...
def unify_schemas_edge_cases_data(): ...
def unify_schemas_mixed_tensor_data(): ...
def unify_schemas_type_promotion_data(): ...
def block_select_data(): ...
def block_slice_data(): ...
# Task:
Write a Python test function `test_nested_struct_with_mixed_tensor_types` to test nested structs with mixed tensor types at different levels.
Module under test: typing, ray.data._internal.arrow_ops.transform_pyarrow, ray.data._internal.tensor_extensions.arrow | def test_nested_struct_with_mixed_tensor_types(
nested_struct_with_mixed_tensor_types_blocks,
nested_struct_with_mixed_tensor_types_expected,
):
"""Test nested structs with mixed tensor types at different levels."""
t1, t2 = nested_struct_with_mixed_tensor_types_blocks
t3 = concat([t1, t2])
assert isinstance(t3, pa.Table)
assert len(t3) == nested_struct_with_mixed_tensor_types_expected["length"]
# Verify the result has the expected structure
assert t3.schema == nested_struct_with_mixed_tensor_types_expected["schema"]
assert "id" in t3.column_names
assert "complex_struct" in t3.column_names
# Verify nested struct field contains both types of tensors
struct_data = t3.column("complex_struct").to_pylist()
assert len(struct_data) == nested_struct_with_mixed_tensor_types_expected["length"]
expected_fields = nested_struct_with_mixed_tensor_types_expected["expected_fields"]
# Check that nested structures are preserved
for field in expected_fields:
if field in ["nested", "outer_tensor", "outer_value"]:
assert field in struct_data[0]
elif field in ["inner_tensor", "inner_value"]:
assert field in struct_data[0]["nested"] | test | 0 | {"function_name": "test_nested_struct_with_mixed_tensor_types", "class_name": null, "qualname": "test_nested_struct_with_mixed_tensor_types", "file_path": "python/ray/data/tests/unit/test_transform_pyarrow.py", "repo_id": "ray-project/ray", "loc": 29, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarrow", "ray.data._internal.tensor_extensions.arrow", "ray.data._internal.utils.arrow_utils", "ray.data.block"], "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:comfy_extras/nodes_dataset.py:ImageGridNode:class_doc | Write a class-level docstring for `ImageGridNode` (inherits from ImageProcessingNode) which has methods: `_group_process`. | Combine multiple images into a single grid/collage. | documentation | 1 | {"doc_type": "class", "class_name": "ImageGridNode", "file_path": "comfy_extras/nodes_dataset.py", "repo_id": "Comfy-Org/ComfyUI", "char_length": 51, "methods": ["_group_process"]} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py:BraveSearchTool:class_doc | Write a class-level docstring for `BraveSearchTool` (inherits from BaseTool) which has methods: `__init__`, `_run`. | A tool that performs web searches using the Brave Search API. | documentation | 0 | {"doc_type": "class", "class_name": "BraveSearchTool", "file_path": "lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py", "repo_id": "crewAIInc/crewAI", "char_length": 61, "methods": ["__init__", "_run"]} |
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_test.py:test_websocket_rejects_auth_cookie_without_valid_xsrf | # Context:
import json
from pathlib import Path
import pytest
from starlette.testclient import TestClient
from streamlit import file_util
from streamlit.web.server.starlette import starlette_app_utils
from streamlit.web.server.starlette.starlette_app import (
_RESERVED_ROUTE_PREFIXES,
App,
create_starlette_app,
)
from tests.testutil import patch_config_options
class _DummyStatsManager: ...
class _DummyComponentRegistry: ...
class _DummyBidiComponentRegistry: ...
class _DummyRuntime: ...
def starlette_client(tmp_path: Path) -> Iterator[tuple[TestClient, _DummyRuntime]]: ...
def test_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_filters_single_family(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_filters_multiple_families(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_unknown_family_returns_eof(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_metrics_endpoint_protobuf(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_serves_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_download_headers(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_supports_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_rejects_invalid_ranges(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_supports_head_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_video(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_audio(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_media_endpoint_no_content_encoding_for_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_put_adds_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_put_enforces_max_size(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_component_endpoint_sets_content_type(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_bidi_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_script_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_websocket_rejects_text_frames(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_delete_removes_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ...
def test_upload_rejects_without_xsrf_token(tmp_path: Path) -> None: ...
def test_upload_accepts_with_valid_xsrf_token(tmp_path: Path) -> None: ...
def test_host_config_excludes_localhost_when_not_dev(tmp_path: Path) -> None: ...
def test_host_config_includes_localhost_in_dev(tmp_path: Path) -> None: ...
def test_host_config_custom_allowed_origins(tmp_path: Path) -> None: ...
def test_host_config_empty_allowed_origins(tmp_path: Path) -> None: ...
def test_host_config_custom_origins_with_dev_mode(tmp_path: Path) -> None: ...
def test_static_files_skipped_in_dev_mode(tmp_path: Path) -> None: ...
def test_websocket_auth_cookie_yields_user_info(tmp_path: Path) -> None: ...
def test_websocket_accepts_existing_session(tmp_path: Path) -> None: ...
def test_static_files_fall_back_to_index(tmp_path: Path) -> None: ...
def test_static_files_apply_cache_headers(tmp_path: Path) -> None: ...
def test_websocket_ignores_debug_disconnect_in_production(tmp_path: Path) -> None: ...
def test_websocket_ignores_debug_shutdown_in_production(tmp_path: Path) -> None: ...
def test_websocket_allows_debug_shutdown_in_dev_mode(tmp_path: Path) -> None: ...
class TestAppInit: ...
class TestAppRouteValidation: ...
class TestAppLifespan: ...
class TestAppServerModeTracking: ...
class TestAppScriptPathResolution: ...
class TestAppExports: ...
def simple_script(tmp_path: Path) -> Path: ...
def reset_runtime() -> Iterator[None]: ...
class TestAppAsgi: ...
# Task:
Write a Python test function `test_websocket_rejects_auth_cookie_without_valid_xsrf` to test that auth cookies are not parsed without valid XSRF token.
Module under test: __future__, contextlib, http | def test_websocket_rejects_auth_cookie_without_valid_xsrf(tmp_path: Path) -> None:
"""Test that auth cookies are not parsed without valid XSRF token."""
component_dir = tmp_path / "component"
component_dir.mkdir()
(component_dir / "index.html").write_text("component")
static_dir = tmp_path / "static"
static_dir.mkdir()
monkeypatch = pytest.MonkeyPatch()
monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir))
runtime = _DummyRuntime(component_dir)
app = create_starlette_app(runtime)
client = TestClient(app)
# Create a valid auth cookie using Starlette's signing (itsdangerous-based)
cookie_payload = json.dumps(
{
"origin": "http://testserver",
"is_logged_in": True,
"email": "user@example.com",
}
)
cookie_value = starlette_app_utils.create_signed_value(
"test-signing-secret",
"_streamlit_user",
cookie_payload,
)
# Set auth cookie but no XSRF cookie
client.cookies.set("_streamlit_user", cookie_value.decode("utf-8"))
# Connect without providing XSRF token in subprotocol
with client.websocket_connect(
"/_stcore/stream",
headers={"Origin": "http://testserver"},
subprotocols=["streamlit"], # No XSRF token in second position
) as websocket:
websocket.close(code=1000)
# User info should NOT include auth data because XSRF validation failed
assert runtime.last_user_info is not None
assert runtime.last_user_info.get("is_logged_in") is not True
assert runtime.last_user_info.get("email") is None
monkeypatch.undo() | test | 1 | {"function_name": "test_websocket_rejects_auth_cookie_without_valid_xsrf", "class_name": null, "qualname": "test_websocket_rejects_auth_cookie_without_valid_xsrf", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_test.py", "repo_id": "streamlit/streamlit", "loc": 46, "tested_modules": ["__future__", "contextlib", "http", "pathlib", "typing"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/tests/serve/cpu/deployments/llm/test_llm_server.py:TestLLMServer.test_embedding_llm_server | # Context:
from typing import AsyncGenerator, Optional
import pytest
from ray.llm.tests.serve.utils.testing_utils import LLMResponseValidator
def serve_handle(mock_llm_config, stream_batching_interval_ms): ...
def multiplexed_serve_handle(mock_llm_config, stream_batching_interval_ms): ...
async def count_tpot_ms_from_stream(stream: AsyncGenerator) -> list[float]: ...
class TestGetDeploymentOptions: ...
class TestLLMServer:
async def test_unified_llm_server(self, serve_handle, mock_llm_config, mock_chat_request, mock_completion_request, api_type: str, stream: bool, max_tokens: int, stream_batching_interval_ms: int): ...
async def test_transcription_llm_server(self, serve_handle, mock_llm_config, mock_transcription_request, stream: bool, temperature: float, language: Optional[str]): ...
async def test_score_llm_server(self, serve_handle, mock_llm_config, mock_score_request): ...
async def test_tokenize_llm_server(self, serve_handle, mock_llm_config, mock_tokenize_request, return_token_strs: bool): ...
async def test_detokenize_llm_server(self, serve_handle, mock_llm_config, mock_detokenize_request): ...
async def test_check_health(self, mock_llm_config): ...
async def test_reset_prefix_cache(self, mock_llm_config): ...
async def test_start_profile(self, mock_llm_config): ...
async def test_stop_profile(self, mock_llm_config): ...
async def test_llm_config_property(self, mock_llm_config): ...
async def test_request_id_handling(self, serve_handle, mock_llm_config, mock_chat_request, stream: bool, max_tokens: int): ...
async def test_multiplexed_request_handling(self, multiplexed_serve_handle, mock_chat_request, mock_completion_request, api_type: str, stream: bool, max_tokens: int, stream_batching_interval_ms: int): ...
async def test_push_telemetry(self, mock_llm_config): ...
async def test_stable_streaming_tpot(self, serve_handle, mock_llm_config, mock_chat_request, mock_completion_request, api_type: str, stream: bool, max_tokens: int, concurrency: int, stream_batching_interval_ms: int): ...
# Task:
Write a Python test method `test_embedding_llm_server` in test class `TestLLMServer` to test embedding API from LLMServer perspective.
Module under test: typing, ray, ray.llm._internal.serve.core.configs.llm_config | async def test_embedding_llm_server(
self,
serve_handle,
mock_llm_config,
mock_embedding_request,
dimensions: Optional[int],
):
"""Test embedding API from LLMServer perspective."""
# Create embedding request
request = mock_embedding_request
print(f"\n\n_____ EMBEDDING SERVER dimensions={dimensions} _____\n\n")
# Get the response
batched_chunks = serve_handle.embeddings.remote(request)
# Collect responses (should be just one)
chunks = []
async for batch in batched_chunks:
chunks.append(batch)
# Check that we got one response
assert len(chunks) == 1
# Validate embedding response
LLMResponseValidator.validate_embedding_response(chunks[0], dimensions) | test | 0 | {"function_name": "test_embedding_llm_server", "class_name": "TestLLMServer", "qualname": "TestLLMServer.test_embedding_llm_server", "file_path": "python/ray/llm/tests/serve/cpu/deployments/llm/test_llm_server.py", "repo_id": "ray-project/ray", "loc": 27, "tested_modules": ["typing", "ray", "ray.llm._internal.serve.core.configs.llm_config", "ray.llm._internal.serve.core.server.llm_server", "ray.llm.tests.serve.mocks.mock_vllm_engine"], "has_docstring": true, "runnable_level": "project_runnable"} |
ansible/ansible:test/units/_internal/_errors/test_alarm_timeout.py:test_alarm_timeout_bad_values | # Context:
import typing as t
import pytest
from ansible._internal._errors import _alarm_timeout
def assert_sigalrm_state() -> t.Iterator[None]: ...
def test_alarm_timeout_success(timeout: int | None) -> None: ...
def test_alarm_timeout_timeout() -> None: ...
def test_alarm_timeout_bad_state() -> None: ...
def test_alarm_timeout_raise(): ...
def test_alarm_timeout_escape_broad_exception(): ...
# Task:
Write a Python test function `test_alarm_timeout_bad_values` to validate behavior for invalid inputs.
Module under test: __future__, ansible._internal._errors, ansible._internal._errors._alarm_timeout | def test_alarm_timeout_bad_values(timeout: t.Any, expected_error_type: type[Exception], expected_error_pattern: str) -> None:
"""Validate behavior for invalid inputs."""
ran = False
with pytest.raises(expected_error_type, match=expected_error_pattern):
with _alarm_timeout.AnsibleTimeoutError.alarm_timeout(timeout):
ran = True # pragma: nocover
assert not ran | test | 1 | {"function_name": "test_alarm_timeout_bad_values", "class_name": null, "qualname": "test_alarm_timeout_bad_values", "file_path": "test/units/_internal/_errors/test_alarm_timeout.py", "repo_id": "ansible/ansible", "loc": 9, "tested_modules": ["__future__", "ansible._internal._errors", "ansible._internal._errors._alarm_timeout"], "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter_spec.py:test_class | # Context:
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.aws_bedrock_agentcore import AgentCoreCodeInterpreterToolSpec
# Task:
Write a Python test function `test_class` to verify the behavior of `class`.
Module under test: llama_index.core.tools.tool_spec.base, llama_index.tools.aws_bedrock_agentcore | def test_class():
names_of_base_classes = [
b.__name__ for b in AgentCoreCodeInterpreterToolSpec.__mro__
]
assert BaseToolSpec.__name__ in names_of_base_classes | test | 1 | {"function_name": "test_class", "class_name": null, "qualname": "test_class", "file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_code_interpreter_spec.py", "repo_id": "run-llama/llama_index", "loc": 5, "tested_modules": ["llama_index.core.tools.tool_spec.base", "llama_index.tools.aws_bedrock_agentcore"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/agentic/services/helpers/test_intent_classification.py:module_doc | Write a module-level docstring for the Python module `test_intent_classification` which contains class `TestClassifyIntent`, class `TestIntentResult`. | Tests for intent classification helper.
Tests the classify_intent function that translates text and
classifies user intent as component generation or question. | documentation | 1 | {"doc_type": "module", "module_name": "test_intent_classification", "file_path": "src/backend/tests/unit/agentic/services/helpers/test_intent_classification.py", "repo_id": "langflow-ai/langflow", "char_length": 160} |
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/a2a_events.py:A2AServerTaskCanceledEvent:class_doc | Write a class-level docstring for `A2AServerTaskCanceledEvent` (inherits from A2AEventBase) which has methods: various methods. | Event emitted when an A2A server task execution is canceled.
Attributes:
task_id: A2A task ID for this execution.
context_id: A2A context ID grouping related tasks.
metadata: Custom A2A metadata key-value pairs. | documentation | 0 | {"doc_type": "class", "class_name": "A2AServerTaskCanceledEvent", "file_path": "lib/crewai/src/crewai/events/types/a2a_events.py", "repo_id": "crewAIInc/crewAI", "char_length": 224, "methods": []} |
ray-project/ray:python/ray/data/tests/unit/expressions/test_core.py:TestStarExpr.test_star_creation | # Context:
from ray.data.expressions import (
BinaryExpr,
ColumnExpr,
Expr,
LiteralExpr,
Operation,
StarExpr,
UDFExpr,
UnaryExpr,
col,
download,
lit,
star,
udf,
)
class TestColumnExpr: ...
class TestLiteralExpr: ...
class TestBinaryExpr: ...
class TestUnaryExpr: ...
class TestAliasExpr: ...
class TestUDFExpr: ...
class TestCrossTypeEquality: ...
def _build_complex_expr(): ...
class TestExpressionRepr: ...
class TestStarExpr:
def test_star_structural_equality(self): ...
# Task:
Write a Python test method `test_star_creation` in test class `TestStarExpr` to test that star() creates a StarExpr.
Module under test: ray.data._internal.planner.plan_expression.expression_visitors, ray.data.datatype, ray.data.expressions | def test_star_creation(self):
"""Test that star() creates a StarExpr."""
expr = star()
assert isinstance(expr, StarExpr) | test | 0 | {"function_name": "test_star_creation", "class_name": "TestStarExpr", "qualname": "TestStarExpr.test_star_creation", "file_path": "python/ray/data/tests/unit/expressions/test_core.py", "repo_id": "ray-project/ray", "loc": 4, "tested_modules": ["ray.data._internal.planner.plan_expression.expression_visitors", "ray.data.datatype", "ray.data.expressions", "ray.data._internal.planner.plan_expression.expression_evaluator"], "has_docstring": true, "runnable_level": "plib_runnable"} |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_routes.py:create_script_health_routes | # Context:
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import BaseRoute
from streamlit.runtime import Runtime
from starlette.responses import PlainTextResponse, Response
from starlette.routing import Route
from starlette.responses import FileResponse, Response
def _with_base(path: str, base_url: str | None) -> str: ...
async def _set_cors_headers(request: Request, response: Response) -> None: ...
def _ensure_xsrf_cookie(request: Request, response: Response) -> None: ...
def _set_unquoted_cookie(response: Response, cookie_name: str, cookie_value: str, secure: bool) -> None: ...
def create_health_routes(runtime: Runtime, base_url: str | None) -> list[BaseRoute]: ...
def create_metrics_routes(runtime: Runtime, base_url: str | None) -> list[BaseRoute]: ...
def create_host_config_routes(base_url: str | None) -> list[BaseRoute]: ...
def create_media_routes(media_storage: MemoryMediaFileStorage, base_url: str | None) -> list[BaseRoute]: ...
def create_upload_routes(runtime: Runtime, upload_mgr: MemoryUploadedFileManager, base_url: str | None) -> list[BaseRoute]: ...
def create_component_routes(component_registry: BaseComponentRegistry, base_url: str | None) -> list[BaseRoute]: ...
def create_bidi_component_routes(bidi_component_manager: BidiComponentManager, base_url: str | None) -> list[BaseRoute]: ...
def create_app_static_serving_routes(main_script_path: str | None, base_url: str | None) -> list[BaseRoute]: ...
# Task:
Write a Python function `create_script_health_routes` to create script health check route handlers.
Parameters: runtime: Runtime, base_url: str | None
Returns: list[BaseRoute] | def create_script_health_routes(
runtime: Runtime, base_url: str | None
) -> list[BaseRoute]:
"""Create script health check route handlers."""
from starlette.responses import PlainTextResponse, Response
from starlette.routing import Route
async def _script_health_endpoint(request: Request) -> PlainTextResponse:
ok, message = await runtime.does_script_run_without_error()
status = 200 if ok else 503
response = PlainTextResponse(message, status_code=status)
response.headers["Cache-Control"] = "no-cache"
await _set_cors_headers(request, response)
_ensure_xsrf_cookie(request, response)
return response
async def _script_health_options(request: Request) -> Response:
response = Response(status_code=204)
response.headers["Cache-Control"] = "no-cache"
await _set_cors_headers(request, response)
return response
return [
Route(
_with_base(_ROUTE_SCRIPT_HEALTH, base_url),
_script_health_endpoint,
methods=["GET", "HEAD"],
),
Route(
_with_base(_ROUTE_SCRIPT_HEALTH, base_url),
_script_health_options,
methods=["OPTIONS"],
),
] | function_simple | 1 | {"cognitive_complexity": 2, "loc": 34, "code_loc": 27, "docstring_loc": 1, "function_name": "create_script_health_routes", "class_name": null, "qualname": "create_script_health_routes", "file_path": "lib/streamlit/web/server/starlette/starlette_routes.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/groq/test_groq_constants.py:TestBackwardCompatibility.test_no_duplicates_in_groq_models_detailed | # Context:
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, GROQ_PRODUCTION_MODELS
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS, GROQ_MODELS_DETAILED
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, UNSUPPORTED_GROQ_MODELS
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, GROQ_PREVIEW_MODELS
from lfx.base.models.groq_constants import (
GROQ_MODELS_DETAILED,
TOOL_CALLING_UNSUPPORTED_GROQ_MODELS,
)
from lfx.base.models.groq_constants import (
DEPRECATED_GROQ_MODELS,
GROQ_MODELS,
GROQ_MODELS_DETAILED,
UNSUPPORTED_GROQ_MODELS,
)
class TestGroqConstantsStructure: ...
class TestFallbackProductionModels: ...
class TestDeprecatedModels: ...
class TestUnsupportedModels: ...
class TestPreviewModels: ...
class TestToolCallingModels: ...
class TestModelCategorization: ...
class TestProviderMetadata: ...
class TestFallbackListMinimalSize: ...
class TestBackwardCompatibility:
def test_groq_models_is_list(self): ...
def test_groq_models_contains_strings(self): ...
def test_no_duplicates_in_groq_models(self): ...
# Task:
Write a Python test method `test_no_duplicates_in_groq_models_detailed` in test class `TestBackwardCompatibility` to test that GROQ_MODELS_DETAILED has no duplicate model names.
Module under test: lfx.base.models.groq_constants, lfx.base.models.groq_constants, lfx.base.models.groq_constants | def test_no_duplicates_in_groq_models_detailed(self):
"""Test that GROQ_MODELS_DETAILED has no duplicate model names."""
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED
model_names = [model["name"] for model in GROQ_MODELS_DETAILED]
assert len(model_names) == len(set(model_names)) | test | 1 | {"function_name": "test_no_duplicates_in_groq_models_detailed", "class_name": "TestBackwardCompatibility", "qualname": "TestBackwardCompatibility.test_no_duplicates_in_groq_models_detailed", "file_path": "src/backend/tests/unit/groq/test_groq_constants.py", "repo_id": "langflow-ai/langflow", "loc": 6, "tested_modules": ["lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:providers/teradata/tests/unit/teradata/hooks/test_ttu.py:TestTtuHook.test_hook_context_manager | # Context:
from unittest import mock
from airflow.providers.teradata.hooks.ttu import TtuHook
class TestTtuHook:
def test_get_conn_with_valid_params(self, mock_get_connection): ...
def test_get_conn_missing_params(self, mock_get_connection): ...
def test_close_conn_subprocess_running(self, mock_get_connection, mock_popen): ...
def test_close_conn_subprocess_timeout(self, mock_get_connection, mock_popen): ...
# Task:
Write a Python test method `test_hook_context_manager` in test class `TestTtuHook` to verify the behavior of `hook_context_manager`.
Module under test: __future__, airflow.providers.common.compat.sdk, airflow.providers.teradata.hooks.ttu | def test_hook_context_manager(self, mock_enter, mock_exit):
# Setup
hook = TtuHook()
mock_enter.return_value = hook
# Execute
with hook as h:
assert h == hook
# Assert
mock_exit.assert_called_once()
# Ensure the exit method was called with the correct parameters
# Context manager's __exit__ is called with (exc_type, exc_val, exc_tb)
args = mock_exit.call_args[0]
assert len(args) == 3 # Verify we have the correct number of arguments
assert args[0] is None # type should be None
assert args[1] is None # value should be None
assert args[2] is None # traceback should be None | test | 1 | {"function_name": "test_hook_context_manager", "class_name": "TestTtuHook", "qualname": "TestTtuHook.test_hook_context_manager", "file_path": "providers/teradata/tests/unit/teradata/hooks/test_ttu.py", "repo_id": "apache/airflow", "loc": 18, "tested_modules": ["__future__", "airflow.providers.common.compat.sdk", "airflow.providers.teradata.hooks.ttu"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/brightdata_webunlocker_tool_test.py:test_run_success_html | # Context:
from unittest.mock import Mock, patch
from crewai_tools.tools.brightdata_tool.brightdata_unlocker import (
BrightDataWebUnlockerTool,
)
def test_run_success_json(mock_post): ...
def test_run_http_error(mock_post): ...
# Task:
Write a Python test function `test_run_success_html` to verify the behavior of `run_success_html`.
Module under test: crewai_tools.tools.brightdata_tool.brightdata_unlocker | def test_run_success_html(mock_post):
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = "<html><body>Test</body></html>"
mock_response.raise_for_status = Mock()
mock_post.return_value = mock_response
tool = BrightDataWebUnlockerTool()
tool._run(url="https://example.com", format="html", save_file=False) | test | 0 | {"function_name": "test_run_success_html", "class_name": null, "qualname": "test_run_success_html", "file_path": "lib/crewai-tools/tests/tools/brightdata_webunlocker_tool_test.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["crewai_tools.tools.brightdata_tool.brightdata_unlocker"], "has_docstring": false, "runnable_level": "project_runnable"} |
browser-use/browser-use:tests/ci/test_fallback_llm.py:TestFallbackLLMParameter.test_public_properties | # Context:
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use import Agent
def create_mock_llm(model_name: str, should_fail: bool, fail_with: type[Exception] | None, fail_status_code: int, fail_message: str) -> BaseChatModel: ...
class TestFallbackLLMSwitching: ...
class TestFallbackLLMIntegration: ...
class TestFallbackLLMParameter:
def test_fallback_llm_none_by_default(self): ...
def test_fallback_llm_single_model(self): ...
# Task:
Write a Python test method `test_public_properties` in test class `TestFallbackLLMParameter` to test the public properties for fallback status.
Module under test: browser_use.agent.views, browser_use.llm, browser_use.llm.exceptions | def test_public_properties(self):
"""Test the public properties for fallback status."""
from browser_use import Agent
primary = create_mock_llm('primary-model')
fallback = create_mock_llm('fallback-model')
agent = Agent(task='Test task', llm=primary, fallback_llm=fallback)
# Before fallback
assert agent.is_using_fallback_llm is False
assert agent.current_llm_model == 'primary-model'
# Trigger fallback
error = ModelRateLimitError(message='Rate limit', status_code=429, model='primary')
agent._try_switch_to_fallback_llm(error)
# After fallback
assert agent.is_using_fallback_llm is True
assert agent.current_llm_model == 'fallback-model' | test | 0 | {"function_name": "test_public_properties", "class_name": "TestFallbackLLMParameter", "qualname": "TestFallbackLLMParameter.test_public_properties", "file_path": "tests/ci/test_fallback_llm.py", "repo_id": "browser-use/browser-use", "loc": 20, "tested_modules": ["browser_use.agent.views", "browser_use.llm", "browser_use.llm.exceptions", "browser_use.llm.views", "browser_use.tools.service"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/utils/gc_utils.py:_compute_detailed_type | # Context:
from contextlib import suppress
from typing import Any
class GCDebugConfig: ...
class GCDebugger: ...
def freeze_gc_heap() -> None: ...
def maybe_attach_gc_debug_callback() -> None: ...
def _compute_top_gc_collected_objects(objects: list[Any], top: int) -> str: ...
# Task:
Write a Python function `_compute_detailed_type` to detailed object type.
Parameters: o: Any
Returns: str | def _compute_detailed_type(o: Any) -> str:
"""
Detailed object type.
TODO(Jialin): Further enhance the detailed type with element types for
easier debugging. We tried but occasionally it would run into signals
which kills the engine.
"""
size_str: str = ""
# Object doesn't support len() - this can happen with type objects
# or other objects that don't implement __len__ properly
with suppress(Exception):
size_str = f"(size:{len(o)})"
return f"{str(type(o))}{size_str}" | function_simple | 1 | {"cognitive_complexity": 0, "loc": 14, "code_loc": 4, "docstring_loc": 7, "function_name": "_compute_detailed_type", "class_name": null, "qualname": "_compute_detailed_type", "file_path": "vllm/utils/gc_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "slib_runnable"} |
langflow-ai/langflow:src/lfx/tests/unit/inputs/test_model_input_fixes.py:TestModelInputPortVisibility.test_default_language_model_input_types | # Context:
from lfx.inputs.inputs import ModelInput
class TestModelInputValueNormalization: ...
class TestUnifiedModelsDefaults: ...
class TestModelInputRefreshButton: ...
class TestModelInputEmbeddingType: ...
class TestModelInputPortVisibility:
def test_default_embedding_model_input_types(self): ...
def test_input_types_with_model_value(self): ...
def test_string_value_normalization(self): ...
# Task:
Write a Python test method `test_default_language_model_input_types` in test class `TestModelInputPortVisibility` to by default, ModelInput should have input_types=['LanguageModel'] for language models.
Module under test: lfx.base.models.unified_models, lfx.inputs.inputs | def test_default_language_model_input_types(self):
"""By default, ModelInput should have input_types=['LanguageModel'] for language models."""
model_input = ModelInput(name="test_model")
assert model_input.input_types == ["LanguageModel"] | test | 1 | {"function_name": "test_default_language_model_input_types", "class_name": "TestModelInputPortVisibility", "qualname": "TestModelInputPortVisibility.test_default_language_model_input_types", "file_path": "src/lfx/tests/unit/inputs/test_model_input_fixes.py", "repo_id": "langflow-ai/langflow", "loc": 4, "tested_modules": ["lfx.base.models.unified_models", "lfx.inputs.inputs"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/cli/triggers/test_main.py:TestTriggersCommand.test_execute_with_trigger_invalid_format | # Context:
from unittest.mock import Mock, patch
class TestTriggersCommand(unittest.TestCase):
def setUp(self, mock_plus_api, mock_get_auth_token): ...
def test_list_triggers_success(self, mock_console_print): ...
def test_list_triggers_no_apps(self, mock_console_print): ...
def test_list_triggers_api_error(self, mock_console_print): ...
def test_execute_with_trigger_success(self, mock_run_crew, mock_console_print): ...
def test_execute_with_trigger_not_found(self, mock_console_print): ...
def test_execute_with_trigger_api_error(self, mock_console_print): ...
def test_run_crew_with_payload_success(self, mock_subprocess): ...
def test_run_crew_with_payload_failure(self, mock_subprocess): ...
def test_run_crew_with_payload_empty_payload(self, mock_subprocess): ...
def test_execute_with_trigger_with_default_error_message(self, mock_console_print): ...
# Task:
Write a Python test method `test_execute_with_trigger_invalid_format` in test class `TestTriggersCommand` to verify the behavior of `execute_with_trigger_invalid_format`.
Module under test: crewai.cli.triggers.main | def test_execute_with_trigger_invalid_format(self, mock_console_print):
with self.assertRaises(SystemExit):
self.triggers_command.execute_with_trigger("invalid-format")
mock_console_print.assert_called_with(
"[bold red]Error: Trigger must be in format 'app_slug/trigger_slug'[/bold red]"
) | test | 0 | {"function_name": "test_execute_with_trigger_invalid_format", "class_name": "TestTriggersCommand", "qualname": "TestTriggersCommand.test_execute_with_trigger_invalid_format", "file_path": "lib/crewai/tests/cli/triggers/test_main.py", "repo_id": "crewAIInc/crewAI", "loc": 7, "tested_modules": ["crewai.cli.triggers.main"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:release/ray_release/tests/test_custom_byod_build_init_helper.py:test_get_step_name | # Context:
from ray_release.custom_byod_build_init_helper import (
_get_step_name,
create_custom_build_yaml,
generate_custom_build_step_key,
get_prerequisite_step,
)
def test_create_custom_build_yaml(mock_get_images_from_tests): ...
def test_get_prerequisite_step(): ...
# Task:
Write a Python test function `test_get_step_name` to verify the behavior of `get_step_name`.
Module under test: ray_release.bazel, ray_release.configs.global_config, ray_release.custom_byod_build_init_helper | def test_get_step_name():
test_names = [
"test_1",
"test_2",
"test_3",
]
assert (
_get_step_name(
"ray-project/ray-ml:a1b2c3d4-py39-cpu-abcdef123456789abc123456789",
"abc123",
test_names,
)
== ":tapioca: build custom: ray-ml:py39-cpu (abc123) test_1 test_2"
) | test | 0 | {"function_name": "test_get_step_name", "class_name": null, "qualname": "test_get_step_name", "file_path": "release/ray_release/tests/test_custom_byod_build_init_helper.py", "repo_id": "ray-project/ray", "loc": 14, "tested_modules": ["ray_release.bazel", "ray_release.configs.global_config", "ray_release.custom_byod_build_init_helper", "ray_release.test", "ray_release.util"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/base/langflow/api/v2/workflow.py:check_developer_api_enabled | # Context:
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query, Request, status
from lfx.services.deps import get_settings_service, injectable_session_scope_readonly
async def execute_workflow(workflow_request: WorkflowExecutionRequest, background_tasks: BackgroundTasks, http_request: Request, api_key_user: Annotated[UserRead, Depends(api_key_security)]) -> WorkflowExecutionResponse | WorkflowJobResponse | StreamingResponse: ...
async def execute_sync_workflow_with_timeout(workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: UUID, api_key_user: UserRead, background_tasks: BackgroundTasks, http_request: Request) -> WorkflowExecutionResponse: ...
async def execute_sync_workflow(workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: UUID, api_key_user: UserRead, background_tasks: BackgroundTasks, http_request: Request) -> WorkflowExecutionResponse: ...
async def execute_workflow_background(workflow_request: WorkflowExecutionRequest, flow: FlowRead, job_id: JobId, api_key_user: UserRead, http_request: Request) -> WorkflowJobResponse: ...
async def get_workflow_status(api_key_user: Annotated[UserRead, Depends(api_key_security)], job_id: Annotated[JobId | None, Query(description='Job ID to query')], session: Annotated[object, Depends(injectable_session_scope_readonly)]) -> WorkflowExecutionResponse | WorkflowJobResponse: ...
async def stop_workflow(request: WorkflowStopRequest, api_key_user: Annotated[UserRead, Depends(api_key_security)]) -> WorkflowStopResponse: ...
# Task:
Write a Python function `check_developer_api_enabled` to check if developer API is enabled.
Returns: None | def check_developer_api_enabled() -> None:
"""Check if developer API is enabled.
This dependency function protects all workflow endpoints by verifying that
the developer API feature is enabled in the application settings.
Raises:
HTTPException: 403 Forbidden if developer_api_enabled setting is False
Note:
This is used as a router-level dependency to protect all workflow endpoints.
"""
settings = get_settings_service().settings
if not settings.developer_api_enabled:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail={
"error": "Developer API disabled",
"code": "DEVELOPER_API_DISABLED",
"message": "Developer API is not enabled. Contact administrator to enable this feature.",
},
) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 22, "code_loc": 10, "docstring_loc": 11, "function_name": "check_developer_api_enabled", "class_name": null, "qualname": "check_developer_api_enabled", "file_path": "src/backend/base/langflow/api/v2/workflow.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_prompts_no_thought_leakage.py:TestNoThoughtLeakagePatterns.test_no_job_depends_on_it_in_native_task | # Context:
from unittest.mock import MagicMock
from crewai.utilities.prompts import Prompts
class TestNoToolsPromptGeneration: ...
class TestRealLLMNoThoughtLeakage: ...
class TestNoThoughtLeakagePatterns:
def test_no_job_depends_on_it_in_no_tools(self) -> None: ...
# Task:
Write a Python test method `test_no_job_depends_on_it_in_native_task` in test class `TestNoThoughtLeakagePatterns` to test that 'your job depends on it' is not in native task prompts.
Module under test: __future__, crewai, crewai.llm | def test_no_job_depends_on_it_in_native_task(self) -> None:
"""Test that 'your job depends on it' is not in native task prompts."""
mock_agent = MagicMock()
mock_agent.role = "Test"
mock_agent.goal = "Test"
mock_agent.backstory = "Test"
prompts = Prompts(
has_tools=True,
use_native_tool_calling=True,
use_system_prompt=True,
agent=mock_agent,
)
result = prompts.task_execution()
full_prompt = result["prompt"]
assert "your job depends on it" not in full_prompt.lower() | test | 0 | {"function_name": "test_no_job_depends_on_it_in_native_task", "class_name": "TestNoThoughtLeakagePatterns", "qualname": "TestNoThoughtLeakagePatterns.test_no_job_depends_on_it_in_native_task", "file_path": "lib/crewai/tests/utilities/test_prompts_no_thought_leakage.py", "repo_id": "crewAIInc/crewAI", "loc": 18, "tested_modules": ["__future__", "crewai", "crewai.llm", "crewai.utilities.prompts"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_unsupported_providers.py:TestBedrockProviderInterceptor.test_bedrock_without_interceptor_works | # Context:
from crewai.llm import LLM
def setup_provider_api_keys(monkeypatch): ...
class DummyInterceptor(BaseInterceptor[httpx.Request, httpx.Response]): ...
class TestAzureProviderInterceptor: ...
class TestGeminiProviderInterceptor: ...
class TestUnsupportedProviderMessages: ...
class TestProviderSupportMatrix: ...
class TestBedrockProviderInterceptor:
def test_bedrock_llm_accepts_interceptor_parameter(self) -> None: ...
def test_bedrock_raises_not_implemented_on_initialization(self) -> None: ...
# Task:
Write a Python test method `test_bedrock_without_interceptor_works` in test class `TestBedrockProviderInterceptor` to test that Bedrock LLM works without interceptor.
Module under test: crewai.llm, crewai.llms.hooks.base | def test_bedrock_without_interceptor_works(self) -> None:
"""Test that Bedrock LLM works without interceptor."""
llm = LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
# Bedrock provider doesn't have interceptor attribute
assert not hasattr(llm, 'interceptor') or llm.interceptor is None | test | 0 | {"function_name": "test_bedrock_without_interceptor_works", "class_name": "TestBedrockProviderInterceptor", "qualname": "TestBedrockProviderInterceptor.test_bedrock_without_interceptor_works", "file_path": "lib/crewai/tests/llms/hooks/test_unsupported_providers.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["crewai.llm", "crewai.llms.hooks.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py:NixlConnectorWorker._pop_done_transfers | # Context:
class NixlAgentMetadata: ...
class NixlHandshakePayload(KVConnectorHandshakeMetadata): ...
def compute_nixl_compatibility_hash(vllm_config: VllmConfig, attn_backend_name: str, cross_layers_blocks: bool) -> str: ...
class RemoteMeta: ...
class ReqMeta: ...
class NixlConnectorMetadata(KVConnectorMetadata): ...
class NixlConnector(KVConnectorBase_V1): ...
class NixlConnectorScheduler: ...
def zmq_ctx(socket_type: Any, addr: str) -> Iterator[zmq.Socket]: ...
class NixlKVConnectorStats(KVConnectorStats): ...
class NixlPromMetrics(KVConnectorPromMetrics): ...
class NixlConnectorWorker:
def __init__(self, vllm_config: VllmConfig, engine_id: str):
if NixlWrapper is None:
logger.error("NIXL is not available")
raise RuntimeError("NIXL is not available")
logger.info("Initializing NIXL wrapper")
logger.info("Initializing NIXL worker %s", engine_id)
# Config.
self.vllm_config = vllm_config
self.block_size = vllm_config.cache_config.block_size
if vllm_config.kv_transfer_config is None:
raise ValueError("kv_transfer_config must be set for NixlConnector")
self.kv_transfer_config = vllm_config.kv_transfer_config
self.nixl_backends = vllm_config.kv_transfer_config.get_from_extra_config(
"backends", ["UCX"]
)
# Agent.
non_ucx_backends = [b for b in self.nixl_backends if b != "UCX"]
# Configure NIXL num_threads to avoid UAR exhaustion on Mellanox NICs.
# Each UCX thread allocates UARs (doorbell pages) via DevX, and
# excessive NIXL UAR usage can exhaust NIC UAR space. This can cause
# components like NVSHMEM (used by DeepEP kernels) to fail during RDMA
# initialization with "mlx5dv_devx_alloc_uar" errors.
# Ref: https://network.nvidia.com/files/doc-2020/ethernet-adapters-programming-manual.pdf#page=63
num_threads = vllm_config.kv_transfer_config.get_from_extra_config(
"num_threads", 4
)
if nixl_agent_config is None:
config = None
else:
# Enable telemetry by default for NIXL 0.7.1 and above.
config = (
nixl_agent_config(backends=self.nixl_backends, capture_telemetry=True)
if len(non_ucx_backends) > 0
else nixl_agent_config(num_threads=num_threads, capture_telemetry=True)
)
self.nixl_wrapper = NixlWrapper(str(uuid.uuid4()), config)
# Map of engine_id -> {rank0: agent_name0, rank1: agent_name1..}.
self._remote_agents: dict[EngineId, dict[int, str]] = defaultdict(dict)
# Metadata.
self.engine_id: EngineId = engine_id
self.tp_rank = get_tensor_model_parallel_rank()
self.world_size = get_tensor_model_parallel_world_size()
self.tp_group = get_tp_group()
self.num_blocks = 0
self.enable_permute_local_kv = False
# KV Caches and nixl tracking data.
self.device_type = current_platform.device_type
self.kv_buffer_device: str = vllm_config.kv_transfer_config.kv_buffer_device
if self.device_type not in _NIXL_SUPPORTED_DEVICE:
raise RuntimeError(f"{self.device_type} is not supported.")
elif self.kv_buffer_device not in _NIXL_SUPPORTED_DEVICE[self.device_type]:
raise RuntimeError(
f"{self.device_type} with {self.kv_buffer_device} kv_buffer "
"is not supported."
)
self.device_kv_caches: dict[str, torch.Tensor] = {}
# cpu kv buffer for xfer
# used when device memory can not be registered under nixl
self.host_xfer_buffers: dict[str, torch.Tensor] = {}
if self.device_type == "cpu":
self.use_host_buffer = False
else:
self.use_host_buffer = self.kv_buffer_device == "cpu"
# reserve different cores for start_load_kv() from model_forward()
if self.device_type == "cpu":
numa_core_list = current_platform.discover_numa_topology()
# setup one last core in each numa for kv transfer.
rsv_cores_for_kv = [
max(each_numa_core_list) for each_numa_core_list in numa_core_list
]
if rsv_cores_for_kv:
if not hasattr(os, "sched_setaffinity"):
raise NotImplementedError(
"os.sched_setaffinity is not available on this platform"
)
os.sched_setaffinity(0, rsv_cores_for_kv)
# support for oot platform which can't register nixl memory
# type based on kv_buffer_device
nixl_memory_type = current_platform.get_nixl_memory_type()
if nixl_memory_type is None:
if self.kv_buffer_device == "cuda":
nixl_memory_type = "VRAM"
elif self.kv_buffer_device == "cpu":
nixl_memory_type = "DRAM"
if nixl_memory_type is None:
raise RuntimeError(
f"{self.device_type} with {self.kv_buffer_device} kv_buffer "
"is not supported."
)
self.nixl_memory_type = nixl_memory_type
# Note: host xfer buffer ops when use_host_buffer is True
self.copy_blocks: CopyBlocksOp | None = None
# Map of engine_id -> kv_caches_base_addr. For TP case, each local
self.device_id: int = 0
# Current rank may pull from multiple remote TP workers.
# EngineId, dict[int, list[int]] -> engine_id, tp_rank, base_addr_for_layer
self.kv_caches_base_addr = defaultdict[EngineId, dict[int, list[int]]](dict)
# Number of NIXL regions. Currently one region per cache
# (so 1 per layer for MLA, otherwise 2 per layer)
self.num_regions = 0
self.num_layers = 0
# nixl_prepped_dlist_handle.
self.src_xfer_handles_by_block_size: dict[int, int] = {}
# Populated dynamically during handshake based on remote configuration.
# Keep track of regions at different tp_ratio values. tp_ratio->handles
self.src_xfer_handles_by_tp_ratio: dict[int, list[int]] = {}
# Map of engine_id -> {tp_rank: nixl_prepped_dlist_handle (int)}.
self.dst_xfer_side_handles = defaultdict[EngineId, dict[int, int]](dict)
# Map of engine_id -> num_blocks. All ranks in the same deployment will
# have the same number of blocks.
self.dst_num_blocks: dict[EngineId, int] = {}
self._registered_descs: list[Any] = []
# In progress transfers.
# [req_id -> list[handle]]
self._recving_metadata: dict[ReqId, ReqMeta] = {}
self._recving_transfers = defaultdict[ReqId, list[TransferHandle]](list)
# Track the expiration time of requests that are waiting to be sent.
self._reqs_to_send: dict[ReqId, float] = {}
# Set of requests that have been part of a batch, regardless of status.
self._reqs_to_process: set[ReqId] = set()
# invalid blocks from failed NIXL operations
self._invalid_block_ids: set[int] = set()
# requests that skipped transfer (handshake or transfer failures)
self._failed_recv_reqs: set[ReqId] = set()
# Handshake metadata of this worker for NIXL transfers.
self.xfer_handshake_metadata: NixlHandshakePayload | None = None
# Background thread for initializing new NIXL handshakes.
self._handshake_initiation_executor = ThreadPoolExecutor(
# NIXL is not guaranteed to be thread-safe, limit 1 worker.
max_workers=1,
thread_name_prefix="vllm-nixl-handshake-initiator",
)
self._ready_requests = queue.Queue[tuple[ReqId, ReqMeta]]()
self._handshake_futures: dict[EngineId, Future[dict[int, str]]] = {}
# Protects _handshake_futures and _remote_agents.
self._handshake_lock = threading.RLock()
self.block_size = vllm_config.cache_config.block_size
self.model_config = vllm_config.model_config
self.cache_config = vllm_config.cache_config
# TODO(mgoin): remove this once we have hybrid memory allocator
# Optimization for models with local attention (Llama 4)
# List of block window sizes for each layer for local attention
self.block_window_per_layer: list[int | None] = []
self.use_mla = self.model_config.use_mla
# Get the attention backend from the first layer
# NOTE (NickLucche) models with multiple backends are not supported yet
self.attn_backend = get_current_attn_backend(vllm_config)
self.backend_name = self.attn_backend.get_name()
self.kv_cache_layout = get_kv_cache_layout()
self.host_buffer_kv_cache_layout = self.kv_cache_layout
logger.debug("Detected attention backend %s", self.backend_name)
logger.debug("Detected kv cache layout %s", self.kv_cache_layout)
# lazy initialized in register_kv_caches
self.compat_hash: str | None = None
self.kv_topo: TpKVTopology | None = None
self._tp_size: dict[EngineId, int] = {self.engine_id: self.world_size}
self._block_size: dict[EngineId, int] = {self.engine_id: self.block_size}
# With heterogeneous TP, P must wait for all assigned D TP workers to
# finish reading before safely freeing the blocks.
self.consumer_notification_counts_by_req = defaultdict[ReqId, int](int)
self.xfer_stats = NixlKVConnectorStats()
self._physical_blocks_per_logical_kv_block = 1
self.enforce_compat_hash = self.kv_transfer_config.get_from_extra_config(
"enforce_handshake_compat", True
)
def _nixl_handshake(self, host: str, port: int, remote_tp_size: int, expected_engine_id: str) -> dict[int, str]: ...
def initialize_host_xfer_buffer(self, kv_caches: dict[str, torch.Tensor]) -> None: ...
def set_host_xfer_buffer_ops(self, copy_operation: CopyBlocksOp): ...
def _log_failure(self, failure_type: str, req_id: str | None, msg: str, error: Exception | None, meta: ReqMeta | None, **extra_context): ...
def _background_nixl_handshake(self, req_id: str, remote_engine_id: EngineId, meta: ReqMeta): ...
def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): ...
def register_local_xfer_handler(self, block_size: int) -> tuple[int, list[tuple[int, int, int]]]: ...
def add_remote_agent(self, nixl_agent_meta: NixlAgentMetadata, remote_tp_rank: int, remote_tp_size: int) -> str: ...
def _validate_remote_agent_handshake(self, nixl_agent_meta: NixlAgentMetadata, remote_tp_size: int): ...
def sync_recved_kv_to_device(self, req_id: str, meta: ReqMeta): ...
def save_kv_to_host(self, metadata: NixlConnectorMetadata): ...
def post_process_device_kv_on_receive(self, block_size_ratio: int, block_ids_list: list[list[int]]): ...
def get_finished(self) -> tuple[set[str], set[str]]: ...
def _get_new_notifs(self) -> set[str]: ...
def _handle_failed_transfer(self, req_id: str, handle: int): ...
def start_load_kv(self, metadata: NixlConnectorMetadata): ...
def _read_blocks_for_req(self, req_id: str, meta: ReqMeta): ...
def _read_blocks(self, local_block_ids: list[int], remote_block_ids: list[int], dst_engine_id: str, request_id: str, remote_request_id: str, remote_rank: int, local_xfer_side_handle: int, remote_xfer_side_handle: int): ...
def get_mapped_blocks(self, block_ids, block_size_ratio): ...
def _get_block_descs_ids(self, engine_id: str, block_ids: list[int], layer_idx: int | None, block_size_ratio: float | None) -> np.ndarray: ...
def _logical_to_kernel_block_ids(self, block_ids: list[int]) -> list[int]: ...
def get_backend_aware_kv_block_len(self, layer_idx: int) -> int: ...
def get_kv_connector_stats(self) -> KVConnectorStats | None: ...
def get_block_ids_with_load_errors(self) -> set[int]: ...
def __del__(self): ...
def shutdown(self): ...
# Task:
Write a Python method `_pop_done_transfers` for the class `NixlConnectorWorker` to pop completed xfers by checking for DONE state.
Parameters: transfers: dict[str, list[int]]
Returns: set[str] | def _pop_done_transfers(self, transfers: dict[str, list[int]]) -> set[str]:
"""
Pop completed xfers by checking for DONE state.
Args:
transfers: dict of req_id -> list[running_xfer]
Returns:
set of req_ids that have all done xfers
"""
done_req_ids: set[str] = set()
for req_id, handles in list(transfers.items()):
in_progress = []
for handle in handles:
try:
xfer_state = self.nixl_wrapper.check_xfer_state(handle)
if xfer_state == "DONE":
# Get telemetry from NIXL
res = self.nixl_wrapper.get_xfer_telemetry(handle)
self.xfer_stats.record_transfer(res)
self.nixl_wrapper.release_xfer_handle(handle)
elif xfer_state == "PROC":
in_progress.append(handle)
continue
else:
self._log_failure(
failure_type="transfer_failed",
msg="Marking blocks as invalid",
req_id=req_id,
xfer_state=xfer_state,
)
self._handle_failed_transfer(req_id, handle)
except Exception as e:
self._log_failure(
failure_type="transfer_exception",
msg="Marking blocks as invalid",
req_id=req_id,
error=e,
)
self._handle_failed_transfer(req_id, handle)
if not in_progress:
# Only report request as completed when all transfers are done.
done_req_ids.add(req_id)
del transfers[req_id]
else:
transfers[req_id] = in_progress
return done_req_ids | function_complex | 1 | {"cognitive_complexity": 13, "loc": 46, "code_loc": 35, "docstring_loc": 7, "function_name": "_pop_done_transfers", "class_name": "NixlConnectorWorker", "qualname": "NixlConnectorWorker._pop_done_transfers", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "class_runnable"} |
google/langextract:langextract/prompting.py:ContextAwarePromptBuilder._build_effective_context | # Context:
class PromptBuilderError(exceptions.LangExtractError): ...
class ParseError(PromptBuilderError): ...
class PromptTemplateStructured: ...
def read_prompt_template_structured_from_file(prompt_path: str, format_type: data.FormatType) -> PromptTemplateStructured: ...
class QAPromptGenerator: ...
class PromptBuilder: ...
class ContextAwarePromptBuilder(PromptBuilder):
_CONTEXT_PREFIX = "[Previous text]: ..."
def __init__(
self,
generator: QAPromptGenerator,
context_window_chars: int | None = None,
):
"""Initializes the builder with context tracking configuration.
Args:
generator: The underlying prompt generator to use.
context_window_chars: Number of characters from the previous chunk's
tail to include as context. Defaults to None (disabled).
"""
super().__init__(generator)
self._context_window_chars = context_window_chars
self._prev_chunk_by_doc_id: dict[str, str] = {}
def context_window_chars(self) -> int | None: ...
def build_prompt(self, chunk_text: str, document_id: str, additional_context: str | None) -> str: ...
def _update_state(self, document_id: str, chunk_text: str) -> None: ...
# Task:
Write a Python method `_build_effective_context` for the class `ContextAwarePromptBuilder` to combines previous chunk context with any additional context.
Parameters: document_id: str, additional_context: str | None
Returns: str | None | def _build_effective_context(
self,
document_id: str,
additional_context: str | None,
) -> str | None:
"""Combines previous chunk context with any additional context.
Args:
document_id: Identifier for the source document.
additional_context: Optional additional context from the document.
Returns:
Combined context string, or None if no context is available.
"""
context_parts: list[str] = []
if self._context_window_chars and document_id in self._prev_chunk_by_doc_id:
prev_text = self._prev_chunk_by_doc_id[document_id]
window = prev_text[-self._context_window_chars :]
context_parts.append(f"{self._CONTEXT_PREFIX}{window}")
if additional_context:
context_parts.append(additional_context)
return "\n\n".join(context_parts) if context_parts else None | function_simple | 1 | {"cognitive_complexity": 4, "loc": 25, "code_loc": 8, "docstring_loc": 9, "function_name": "_build_effective_context", "class_name": "ContextAwarePromptBuilder", "qualname": "ContextAwarePromptBuilder._build_effective_context", "file_path": "langextract/prompting.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "class_runnable"} |
666ghj/BettaFish:ReportEngine/ir/validator.py:IRValidator._validate_block | # Context:
from typing import Any, Dict, List, Tuple
from .schema import (
ALLOWED_BLOCK_TYPES,
ALLOWED_INLINE_MARKS,
ENGINE_AGENT_TITLES,
IR_VERSION,
)
class IRValidator:
def __init__(self, schema_version: str = IR_VERSION):
"""记录当前Schema版本,便于未来多版本并存"""
self.schema_version = schema_version
def validate_chapter(self, chapter: Dict[str, Any]) -> Tuple[bool, List[str]]: ...
def _validate_heading_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_paragraph_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_list_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_table_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_swotTable_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
ALLOWED_IMPACT_VALUES = {"低", "中低", "中", "中高", "高", "极高"}
def _validate_swot_item(self, item: Any, path: str, errors: List[str]): ...
def _validate_blockquote_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_engineQuote_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_callout_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_kpiGrid_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_widget_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_code_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_math_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_figure_block(self, block: Dict[str, Any], path: str, errors: List[str]): ...
def _validate_inline_run(self, run: Any, path: str, errors: List[str]): ...
# Task:
Write a Python method `_validate_block` for the class `IRValidator` to 根据block类型调用不同的校验器.
Parameters: block: Any, path: str, errors: List[str] | def _validate_block(self, block: Any, path: str, errors: List[str]):
"""根据block类型调用不同的校验器"""
if not isinstance(block, dict):
errors.append(f"{path} 必须是对象")
return
block_type = block.get("type")
if block_type not in ALLOWED_BLOCK_TYPES:
errors.append(f"{path}.type 不被支持: {block_type}")
return
validator = getattr(self, f"_validate_{block_type}_block", None)
if validator:
validator(block, path, errors) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 14, "code_loc": 10, "docstring_loc": 1, "function_name": "_validate_block", "class_name": "IRValidator", "qualname": "IRValidator._validate_block", "file_path": "ReportEngine/ir/validator.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-serpex/tests/test_serpex.py:test_search_no_results | # Context:
from unittest.mock import MagicMock, patch
from llama_index.tools.serpex import SerpexToolSpec
def test_serpex_init_with_key(): ...
def test_serpex_init_with_custom_engine(): ...
def test_serpex_init_without_key(): ...
def test_search(mock_get): ...
def test_search_with_engine(mock_get): ...
def test_search_with_time_range(mock_get): ...
def test_search_api_error(mock_get): ...
def test_real_search(): ...
def test_real_search_with_filters(): ...
# Task:
Write a Python test function `test_search_no_results` to test search with no results.
Module under test: llama_index.tools.serpex | def test_search_no_results(mock_get):
"""Test search with no results."""
mock_response = MagicMock()
mock_response.json.return_value = {
"results": [],
"metadata": {
"number_of_results": 0,
"response_time": 50,
},
}
mock_response.raise_for_status = MagicMock()
mock_get.return_value = mock_response
tool = SerpexToolSpec(api_key="test-key")
results = tool.search("nonexistent query")
assert len(results) == 0 | test | 1 | {"function_name": "test_search_no_results", "class_name": null, "qualname": "test_search_no_results", "file_path": "llama-index-integrations/tools/llama-index-tools-serpex/tests/test_serpex.py", "repo_id": "run-llama/llama_index", "loc": 17, "tested_modules": ["llama_index.tools.serpex"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/sam3/modeling_sam3.py:Sam3DotProductScoring._pool_text_features | # Context:
import torch
class Sam3VisionEncoderOutput(BaseModelOutputWithPooling): ...
class Sam3GeometryEncoderOutput(ModelOutput): ...
class Sam3DETREncoderOutput(ModelOutput): ...
class Sam3DETRDecoderOutput(ModelOutput): ...
class Sam3MaskDecoderOutput(ModelOutput): ...
class Sam3ImageSegmentationOutput(ModelOutput): ...
def inverse_sigmoid(x: torch.Tensor, eps: float) -> torch.Tensor: ...
def concat_padded_sequences(seq1, mask1, seq2, mask2, return_index: bool): ...
def box_cxcywh_to_xyxy(x): ...
class Sam3MLP(nn.Module): ...
def eager_attention_forward(module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: torch.Tensor | None, scaling: float | None, dropout: float, **kwargs): ...
class Sam3Attention(nn.Module): ...
class Sam3ViTRotaryEmbedding(nn.Module): ...
def rotate_pairwise(x): ...
def apply_rotary_pos_emb_2d(q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: ...
class Sam3ViTRoPEAttention(nn.Module): ...
class Sam3ViTPatchEmbeddings(nn.Module): ...
class Sam3ViTEmbeddings(nn.Module): ...
def window_partition(hidden_state, window_size): ...
def window_unpartition(windows, window_size, pad_height_width, height_width): ...
class Sam3ViTLayerScale(nn.Module): ...
class Sam3ViTLayer(GradientCheckpointingLayer): ...
class Sam3PreTrainedModel(PreTrainedModel): ...
class Sam3ViTModel(Sam3PreTrainedModel): ...
class Sam3SinePositionEmbedding(nn.Module): ...
class Sam3FPNLayer(nn.Module): ...
class Sam3VisionNeck(nn.Module): ...
class Sam3VisionModel(Sam3PreTrainedModel): ...
class Sam3GeometryEncoderLayer(nn.Module): ...
class Sam3GeometryEncoder(nn.Module): ...
class Sam3DetrEncoderLayer(nn.Module): ...
class Sam3DetrEncoder(Sam3PreTrainedModel): ...
class Sam3DecoderMLP(nn.Module): ...
class Sam3DetrDecoderLayer(nn.Module): ...
class Sam3DetrDecoder(Sam3PreTrainedModel): ...
class Sam3MaskEmbedder(nn.Module): ...
class Sam3PixelDecoder(nn.Module): ...
class Sam3MaskDecoder(Sam3PreTrainedModel): ...
class Sam3Model(Sam3PreTrainedModel): ...
class Sam3DotProductScoring(nn.Module):
def __init__(self, config: Sam3Config):
super().__init__()
self.config = config
hidden_size = config.detr_decoder_config.hidden_size
projection_dim = config.detr_decoder_config.hidden_size
self.text_mlp = Sam3DecoderMLP(
input_dim=hidden_size,
hidden_dim=config.detr_decoder_config.intermediate_size,
output_dim=hidden_size,
num_layers=2,
)
self.text_mlp_dropout = nn.Dropout(config.detr_decoder_config.dropout)
self.text_mlp_out_norm = nn.LayerNorm(hidden_size)
# Projections for text and query features
self.text_proj = nn.Linear(hidden_size, projection_dim)
self.query_proj = nn.Linear(hidden_size, projection_dim)
# Scale factor for dot product
self.scale = float(1.0 / np.sqrt(projection_dim))
# Clamping to avoid numerical issues
self.clamp_logits = True
self.clamp_max_val = 12.0
def forward(self, decoder_hidden_states: torch.Tensor, text_features: torch.Tensor, text_mask: torch.Tensor | None) -> torch.Tensor: ...
# Task:
Write a Python method `_pool_text_features` for the class `Sam3DotProductScoring` to mean pool text features, accounting for padding.
Parameters: text_features: torch.Tensor, text_mask: torch.Tensor | None
Returns: torch.Tensor | def _pool_text_features(self, text_features: torch.Tensor, text_mask: torch.Tensor | None) -> torch.Tensor:
"""
Mean pool text features, accounting for padding.
Args:
text_features: [batch_size, seq_len, hidden_size]
text_mask: [batch_size, seq_len] where True indicates valid tokens, False indicates padding
Returns:
pooled_text: [batch_size, hidden_size]
"""
if text_mask is None:
# No padding, simple mean
return text_features.mean(dim=1)
is_valid = text_mask.to(text_features.dtype).unsqueeze(-1) # [batch_size, seq_len, 1]
# Count valid tokens per batch
num_valid = is_valid.sum(dim=1).clamp(min=1.0) # [batch_size, 1]
# Mean pool only over valid tokens
pooled_text = (text_features * is_valid).sum(dim=1) / num_valid # [batch_size, hidden_size]
return pooled_text | function_simple | 0 | {"cognitive_complexity": 1, "loc": 24, "code_loc": 6, "docstring_loc": 10, "function_name": "_pool_text_features", "class_name": "Sam3DotProductScoring", "qualname": "Sam3DotProductScoring._pool_text_features", "file_path": "src/transformers/models/sam3/modeling_sam3.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "plib_runnable"} |
browser-use/browser-use:browser_use/actor/page.py:Page.mouse | # Context:
from .mouse import Mouse
class Page:
def __init__(
self, browser_session: 'BrowserSession', target_id: str, session_id: str | None = None, llm: 'BaseChatModel | None' = None
):
self._browser_session = browser_session
self._client = browser_session.cdp_client
self._target_id = target_id
self._session_id: str | None = session_id
self._mouse: 'Mouse | None' = None
self._llm = llm
async def _ensure_session(self) -> str: ...
async def session_id(self) -> str: ...
async def reload(self) -> None: ...
async def get_element(self, backend_node_id: int) -> 'Element': ...
async def evaluate(self, page_function: str, *args) -> str: ...
def _fix_javascript_string(self, js_code: str) -> str: ...
async def screenshot(self, format: str, quality: int | None) -> str: ...
async def press(self, key: str) -> None: ...
async def set_viewport_size(self, width: int, height: int) -> None: ...
async def get_target_info(self) -> 'TargetInfo': ...
async def get_url(self) -> str: ...
async def get_title(self) -> str: ...
async def goto(self, url: str) -> None: ...
async def navigate(self, url: str) -> None: ...
async def go_back(self) -> None: ...
async def go_forward(self) -> None: ...
async def get_elements_by_css_selector(self, selector: str) -> list['Element']: ...
def dom_service(self) -> 'DomService': ...
async def get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None') -> 'Element | None': ...
async def must_get_element_by_prompt(self, prompt: str, llm: 'BaseChatModel | None') -> 'Element': ...
async def extract_content(self, prompt: str, structured_output: type[T], llm: 'BaseChatModel | None') -> T: ...
async def _extract_clean_markdown(self, extract_links: bool) -> tuple[str, dict]: ...
# Task:
Write a Python async method `mouse` for the class `Page` to get the mouse interface for this target.
Returns: 'Mouse' | async def mouse(self) -> 'Mouse':
"""Get the mouse interface for this target."""
if not self._mouse:
session_id = await self._ensure_session()
from .mouse import Mouse
self._mouse = Mouse(self._browser_session, session_id, self._target_id)
return self._mouse | function_simple | 0 | {"cognitive_complexity": 1, "loc": 8, "code_loc": 5, "docstring_loc": 1, "function_name": "mouse", "class_name": "Page", "qualname": "Page.mouse", "file_path": "browser_use/actor/page.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:tests/ci/test_action_loop_detection.py:test_navigate_different_domain_different_hash | # Context:
from browser_use.agent.views import (
ActionLoopDetector,
PageFingerprint,
compute_action_hash,
)
def _get_context_messages(agent: Agent) -> list[str]: ...
def test_search_normalization_ignores_keyword_order(): ...
def test_search_normalization_ignores_case(): ...
def test_search_normalization_ignores_punctuation(): ...
def test_search_normalization_deduplicates_tokens(): ...
def test_search_different_queries_produce_different_hashes(): ...
def test_click_same_index_same_hash(): ...
def test_click_different_index_different_hash(): ...
def test_input_same_element_same_text(): ...
def test_input_different_text_different_hash(): ...
def test_navigate_same_url_same_hash(): ...
def test_navigate_different_paths_different_hash(): ...
def test_scroll_direction_matters(): ...
def test_scroll_different_elements_different_hash(): ...
def test_scroll_same_element_same_hash(): ...
def test_different_action_types_different_hashes(): ...
def test_detector_no_nudge_for_diverse_actions(): ...
def test_detector_nudge_at_5_repeats(): ...
def test_detector_no_nudge_at_4_repeats(): ...
def test_detector_nudge_escalates_at_8_repeats(): ...
def test_detector_nudge_escalates_at_12_repeats(): ...
def test_detector_critical_message_no_done_directive(): ...
def test_detector_first_nudge_no_cannot_complete(): ...
def test_detector_window_slides(): ...
def test_detector_search_variations_detected_as_same(): ...
def test_page_stagnation_no_nudge_when_pages_change(): ...
def test_page_stagnation_nudge_at_5_identical_pages(): ...
def test_page_stagnation_no_nudge_at_4_identical_pages(): ...
def test_page_stagnation_resets_on_change(): ...
def test_combined_loop_and_stagnation(): ...
def test_page_fingerprint_same_content_equal(): ...
def test_page_fingerprint_different_content_not_equal(): ...
def test_page_fingerprint_different_url_not_equal(): ...
def test_page_fingerprint_different_element_count_not_equal(): ...
async def test_loop_nudge_injected_into_context(): ...
async def test_no_loop_nudge_when_disabled(): ...
async def test_no_loop_nudge_for_diverse_actions(): ...
async def test_loop_detector_initialized_from_settings(): ...
async def test_loop_detector_default_window_size(): ...
# Task:
Write a Python test function `test_navigate_different_domain_different_hash` to navigate to different domains produces different hashes.
Module under test: browser_use.agent.service, browser_use.agent.views, browser_use.llm.messages | def test_navigate_different_domain_different_hash():
"""Navigate to different domains produces different hashes."""
h1 = compute_action_hash('navigate', {'url': 'https://example.com/page1'})
h2 = compute_action_hash('navigate', {'url': 'https://other.com/page1'})
assert h1 != h2 | test | 0 | {"function_name": "test_navigate_different_domain_different_hash", "class_name": null, "qualname": "test_navigate_different_domain_different_hash", "file_path": "tests/ci/test_action_loop_detection.py", "repo_id": "browser-use/browser-use", "loc": 5, "tested_modules": ["browser_use.agent.service", "browser_use.agent.views", "browser_use.llm.messages", "tests.ci.conftest"], "has_docstring": true, "runnable_level": "project_runnable"} |
fastapi/fastapi:tests/test_request_params/test_header/test_optional_list.py:test_optional_list_validation_alias_by_name | # Context:
import pytest
from fastapi.testclient import TestClient
async def read_optional_list_str(p: Annotated[list[str] | None, Header()]): ...
class HeaderModelOptionalListStr(BaseModel): ...
async def read_model_optional_list_str(p: Annotated[HeaderModelOptionalListStr, Header()]): ...
def test_optional_list_str_schema(path: str): ...
def test_optional_list_str_missing(path: str): ...
def test_optional_list_str(path: str): ...
async def read_optional_list_alias(p: Annotated[list[str] | None, Header(alias='p_alias')]): ...
class HeaderModelOptionalListAlias(BaseModel): ...
async def read_model_optional_list_alias(p: Annotated[HeaderModelOptionalListAlias, Header()]): ...
def test_optional_list_str_alias_schema(path: str): ...
def test_optional_list_alias_missing(path: str): ...
def test_optional_list_alias_by_name(path: str): ...
def test_optional_list_alias_by_alias(path: str): ...
def read_optional_list_validation_alias(p: Annotated[list[str] | None, Header(validation_alias='p_val_alias')]): ...
class HeaderModelOptionalListValidationAlias(BaseModel): ...
def read_model_optional_list_validation_alias(p: Annotated[HeaderModelOptionalListValidationAlias, Header()]): ...
def test_optional_list_validation_alias_schema(path: str): ...
def test_optional_list_validation_alias_missing(path: str): ...
def test_optional_list_validation_alias_by_validation_alias(path: str): ...
def read_optional_list_alias_and_validation_alias(p: Annotated[list[str] | None, Header(alias='p_alias', validation_alias='p_val_alias')]): ...
class HeaderModelOptionalListAliasAndValidationAlias(BaseModel): ...
def read_model_optional_list_alias_and_validation_alias(p: Annotated[HeaderModelOptionalListAliasAndValidationAlias, Header()]): ...
def test_optional_list_alias_and_validation_alias_schema(path: str): ...
def test_optional_list_alias_and_validation_alias_missing(path: str): ...
def test_optional_list_alias_and_validation_alias_by_name(path: str): ...
def test_optional_list_alias_and_validation_alias_by_alias(path: str): ...
def test_optional_list_alias_and_validation_alias_by_validation_alias(path: str): ...
# Task:
Write a Python test function `test_optional_list_validation_alias_by_name` to verify the behavior of `optional_list_validation_alias_by_name`.
Module under test: typing, fastapi, fastapi.testclient | def test_optional_list_validation_alias_by_name(path: str):
client = TestClient(app)
response = client.get(path, headers=[("p", "hello"), ("p", "world")])
assert response.status_code == 200
assert response.json() == {"p": None} | test | 1 | {"function_name": "test_optional_list_validation_alias_by_name", "class_name": null, "qualname": "test_optional_list_validation_alias_by_name", "file_path": "tests/test_request_params/test_header/test_optional_list.py", "repo_id": "fastapi/fastapi", "loc": 5, "tested_modules": ["typing", "fastapi", "fastapi.testclient", "inline_snapshot", "pydantic"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/errors.py:A2APollingTimeoutError:class_doc | Write a class-level docstring for `A2APollingTimeoutError` (inherits from A2AClientTimeoutError) which has methods: various methods. | Raised when polling exceeds the configured timeout. | documentation | 0 | {"doc_type": "class", "class_name": "A2APollingTimeoutError", "file_path": "lib/crewai/src/crewai/a2a/errors.py", "repo_id": "crewAIInc/crewAI", "char_length": 51, "methods": []} |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py:TestPathValidation.test_basic_path_normalization | # Context:
from langchain_anthropic.middleware.anthropic_tools import (
AnthropicToolsState,
StateClaudeMemoryMiddleware,
StateClaudeTextEditorMiddleware,
_validate_path,
)
class TestTextEditorMiddleware: ...
class TestMemoryMiddleware: ...
class TestFileOperations: ...
class TestSystemMessageHandling: ...
class TestPathValidation:
def test_path_traversal_blocked(self) -> None: ...
def test_allowed_prefixes(self) -> None: ...
def test_memories_prefix(self) -> None: ...
# Task:
Write a Python test method `test_basic_path_normalization` in test class `TestPathValidation` to test basic path normalization.
Module under test: langchain_core.messages, langgraph.types, langchain_anthropic.middleware.anthropic_tools | def test_basic_path_normalization(self) -> None:
"""Test basic path normalization."""
assert _validate_path("/foo/bar") == "/foo/bar"
assert _validate_path("foo/bar") == "/foo/bar"
assert _validate_path("/foo//bar") == "/foo/bar"
assert _validate_path("/foo/./bar") == "/foo/bar" | test | 1 | {"function_name": "test_basic_path_normalization", "class_name": "TestPathValidation", "qualname": "TestPathValidation.test_basic_path_normalization", "file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py", "repo_id": "langchain-ai/langchain", "loc": 6, "tested_modules": ["langchain_core.messages", "langgraph.types", "langchain_anthropic.middleware.anthropic_tools", "langchain.agents.middleware.types", "langchain.agents.middleware.types"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/gemini/completion.py:GeminiCompletion.get_file_uploader | # Context:
from typing import TYPE_CHECKING, Any, Literal, cast
from crewai_files.uploaders.gemini import GeminiFileUploader
class GeminiCompletion(BaseLLM):
def __init__(
self,
model: str = "gemini-2.0-flash-001",
api_key: str | None = None,
project: str | None = None,
location: str | None = None,
temperature: float | None = None,
top_p: float | None = None,
top_k: int | None = None,
max_output_tokens: int | None = None,
stop_sequences: list[str] | None = None,
stream: bool = False,
safety_settings: dict[str, Any] | None = None,
client_params: dict[str, Any] | None = None,
interceptor: BaseInterceptor[Any, Any] | None = None,
use_vertexai: bool | None = None,
response_format: type[BaseModel] | None = None,
**kwargs: Any,
):
"""Initialize Google Gemini chat completion client.
Args:
model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro')
api_key: Google API key for Gemini API authentication.
Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var.
NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead.
project: Google Cloud project ID for Vertex AI with ADC authentication.
Requires Application Default Credentials (gcloud auth application-default login).
NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC.
If both api_key and project are set, api_key takes precedence.
location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1')
temperature: Sampling temperature (0-2)
top_p: Nucleus sampling parameter
top_k: Top-k sampling parameter
max_output_tokens: Maximum tokens in response
stop_sequences: Stop sequences
stream: Enable streaming responses
safety_settings: Safety filter settings
client_params: Additional parameters to pass to the Google Gen AI Client constructor.
Supports parameters like http_options, credentials, debug_config, etc.
interceptor: HTTP interceptor (not yet supported for Gemini).
use_vertexai: Whether to use Vertex AI instead of Gemini API.
- True: Use Vertex AI (with ADC or Express mode with API key)
- False: Use Gemini API (explicitly override env var)
- None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var
When using Vertex AI with API key (Express mode), http_options with
api_version="v1" is automatically configured.
response_format: Pydantic model for structured output. Used as default when
response_model is not passed to call()/acall() methods.
**kwargs: Additional parameters
"""
if interceptor is not None:
raise NotImplementedError(
"HTTP interceptors are not yet supported for Google Gemini provider. "
"Interceptors are currently supported for OpenAI and Anthropic providers only."
)
super().__init__(
model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
)
# Store client params for later use
self.client_params = client_params or {}
# Get API configuration with environment variable fallbacks
self.api_key = (
api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
)
self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT")
self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1"
if use_vertexai is None:
use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true"
self.client = self._initialize_client(use_vertexai)
# Store completion parameters
self.top_p = top_p
self.top_k = top_k
self.max_output_tokens = max_output_tokens
self.stream = stream
self.safety_settings = safety_settings or {}
self.stop_sequences = stop_sequences or []
self.tools: list[dict[str, Any]] | None = None
self.response_format = response_format
# Model-specific settings
version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower())
self.supports_tools = bool(
version_match and float(version_match.group(1)) >= 1.5
)
self.is_gemini_2_0 = bool(
version_match and float(version_match.group(1)) >= 2.0
)
def stop(self) -> list[str]: ...
def stop(self, value: list[str] | str | None) -> None: ...
def _initialize_client(self, use_vertexai: bool) -> genai.Client: ...
def _get_client_params(self) -> dict[str, Any]: ...
def call(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _prepare_generation_config(self, system_instruction: str | None, tools: list[dict[str, Any]] | None, response_model: type[BaseModel] | None) -> types.GenerateContentConfig: ...
def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[types.Tool]: ...
def _format_messages_for_gemini(self, messages: str | list[LLMMessage]) -> tuple[list[types.Content], str | None]: ...
def _validate_and_emit_structured_output(self, content: str, response_model: type[BaseModel], messages_for_event: list[LLMMessage], from_task: Any | None, from_agent: Any | None) -> BaseModel: ...
def _finalize_completion_response(self, content: str, contents: list[types.Content], response_model: type[BaseModel] | None, from_task: Any | None, from_agent: Any | None) -> str | BaseModel: ...
def _handle_structured_output_tool_call(self, structured_data: dict[str, Any], response_model: type[BaseModel], contents: list[types.Content], from_task: Any | None, from_agent: Any | None) -> BaseModel: ...
def _process_response_with_tools(self, response: GenerateContentResponse, contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _process_stream_chunk(self, chunk: GenerateContentResponse, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], from_task: Any | None, from_agent: Any | None) -> tuple[str, dict[int, dict[str, Any]], dict[str, int]]: ...
def _finalize_streaming_response(self, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]]: ...
def _handle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _handle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]] | Any: ...
async def _ahandle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def _ahandle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def supports_function_calling(self) -> bool: ...
def supports_stop_words(self) -> bool: ...
def get_context_window_size(self) -> int: ...
def _extract_token_usage(response: GenerateContentResponse) -> dict[str, Any]: ...
def _extract_text_from_response(response: GenerateContentResponse) -> str: ...
def _add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]: ...
def _convert_contents_to_dict(contents: list[types.Content]) -> list[LLMMessage]: ...
def supports_multimodal(self) -> bool: ...
def format_text_content(self, text: str) -> dict[str, Any]: ...
# Task:
Write a Python method `get_file_uploader` for the class `GeminiCompletion` to get a Gemini file uploader using this LLM's client.
Returns: Any | def get_file_uploader(self) -> Any:
"""Get a Gemini file uploader using this LLM's client.
Returns:
GeminiFileUploader instance with pre-configured client.
"""
try:
from crewai_files.uploaders.gemini import GeminiFileUploader
return GeminiFileUploader(client=self.client)
except ImportError:
return None | function_simple | 0 | {"cognitive_complexity": 1, "loc": 12, "code_loc": 5, "docstring_loc": 5, "function_name": "get_file_uploader", "class_name": "GeminiCompletion", "qualname": "GeminiCompletion.get_file_uploader", "file_path": "lib/crewai/src/crewai/llms/providers/gemini/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py:EnterpriseActionTool._create_field_definition | # Context:
from typing import Any, Literal, Optional, Union, _SpecialForm, cast, get_origin
from pydantic import Field, create_model
def get_enterprise_api_base_url() -> str: ...
class EnterpriseActionKitToolAdapter: ...
class EnterpriseActionTool(BaseTool):
def __init__(
self,
name: str,
description: str,
enterprise_action_token: str,
action_name: str,
action_schema: dict[str, Any],
enterprise_api_base_url: str | None = None,
):
self._model_registry = {} # type: ignore[var-annotated]
self._base_name = self._sanitize_name(name)
schema_props, required = self._extract_schema_info(action_schema)
# Define field definitions for the model
field_definitions = {}
for param_name, param_details in schema_props.items():
param_desc = param_details.get("description", "")
is_required = param_name in required
try:
field_type = self._process_schema_type(
param_details, self._sanitize_name(param_name).title()
)
except Exception:
field_type = str
# Create field definition based on requirement
field_definitions[param_name] = self._create_field_definition(
field_type, is_required, param_desc
)
# Create the model
if field_definitions:
try:
args_schema = create_model( # type: ignore[call-overload]
f"{self._base_name}Schema", **field_definitions
)
except Exception:
args_schema = create_model(
f"{self._base_name}Schema",
input_text=(str, Field(description="Input for the action")),
)
else:
# Fallback for empty schema
args_schema = create_model(
f"{self._base_name}Schema",
input_text=(str, Field(description="Input for the action")),
)
super().__init__(name=name, description=description, args_schema=args_schema)
self.enterprise_action_token = enterprise_action_token
self.action_name = action_name
self.action_schema = action_schema
self.enterprise_api_base_url = (
enterprise_api_base_url or get_enterprise_api_base_url()
)
def _sanitize_name(self, name: str) -> str: ...
def _extract_schema_info(self, action_schema: dict[str, Any]) -> tuple[dict[str, Any], list[str]]: ...
def _process_schema_type(self, schema: dict[str, Any], type_name: str) -> type[Any] | _SpecialForm: ...
def _create_nested_model(self, schema: dict[str, Any], model_name: str) -> type[Any]: ...
def _map_json_type_to_python(self, json_type: str) -> type[Any]: ...
def _get_required_nullable_fields(self) -> list[str]: ...
def _is_nullable_type(self, schema: dict[str, Any]) -> bool: ...
def _run(self, **kwargs) -> str: ...
# Task:
Write a Python method `_create_field_definition` for the class `EnterpriseActionTool` to create Pydantic field definition based on type and requirement.
Parameters: field_type: type[Any] | _SpecialForm, is_required: bool, description: str
Returns: tuple | def _create_field_definition(
self, field_type: type[Any] | _SpecialForm, is_required: bool, description: str
) -> tuple:
"""Create Pydantic field definition based on type and requirement."""
if is_required:
return (field_type, Field(description=description))
if get_origin(field_type) is Union:
return (field_type, Field(default=None, description=description))
return (
Optional[field_type], # noqa: UP045
Field(default=None, description=description),
) | function_simple | 0 | {"cognitive_complexity": 2, "loc": 12, "code_loc": 8, "docstring_loc": 1, "function_name": "_create_field_definition", "class_name": "EnterpriseActionTool", "qualname": "EnterpriseActionTool._create_field_definition", "file_path": "lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "plib_runnable"} |
ray-project/ray:python/ray/data/tests/test_join.py:test_anti_join_multi_key | # Context:
import pandas as pd
import pytest
import ray
from ray.data._internal.util import MiB, rows_same
from ray.data.context import DataContext
from ray.data.dataset import Dataset
from ray.data._internal.util import MiB
def test_simple_inner_join(ray_start_regular_shared_2_cpus, num_rows_left: int, num_rows_right: int, partition_size_hint: Optional[int]): ...
def test_simple_left_right_outer_semi_anti_join(ray_start_regular_shared_2_cpus, join_type, num_rows_left, num_rows_right): ...
def test_simple_full_outer_join(ray_start_regular_shared_2_cpus, num_rows_left, num_rows_right): ...
def test_simple_self_join(ray_start_regular_shared_2_cpus, left_suffix, right_suffix): ...
def test_invalid_join_config(ray_start_regular_shared_2_cpus): ...
def test_invalid_join_not_matching_key_columns(ray_start_regular_shared_2_cpus, join_type): ...
def test_anti_join_no_matches(ray_start_regular_shared_2_cpus, join_type): ...
def test_anti_join_all_matches(ray_start_regular_shared_2_cpus, join_type): ...
def _assert_columns_match(result, expected_columns): ...
def _assert_list_values(result_by_id, expected_values): ...
def _assert_tensor_values(result_by_id, expected_values): ...
def _assert_none_values(result_by_id, none_checks): ...
def _assert_scalar_values(result_by_id, expected_values): ...
def test_join_with_unjoinable_non_key_columns(ray_start_regular_shared_2_cpus, join_type): ...
def test_join_with_predicate_pushdown(ray_start_regular_shared_2_cpus, join_type, filter_side, should_push): ...
def test_join_cross_side_column_comparison_no_pushdown(ray_start_regular_shared_2_cpus): ...
# Task:
Write a Python test function `test_anti_join_multi_key` to test anti-join with multiple join keys.
Module under test: typing, packaging.version, ray.data._internal.logical.operators | def test_anti_join_multi_key(
ray_start_regular_shared_2_cpus,
join_type,
):
"""Test anti-join with multiple join keys"""
DataContext.get_current().target_max_block_size = 1 * MiB
# Create left dataset using ray.data.range for consistency
left_ds = ray.data.range(32).map(
lambda row: {
"id": row["id"],
"oddness": row["id"] % 2, # Even
"10x": row["id"] * 10,
}
)
# Create right dataset with partial matches (16 vs 32 for partial overlap)
right_ds = ray.data.range(16).map(
lambda row: {
"id": row["id"] % 2,
"oddness": row["id"] % 2 + 1, # odd
"100x": row["id"] * 100,
}
)
# Anti-join should return rows that don't have matching key1,key2 in the other dataset
joined: Dataset = left_ds.join(
right_ds,
join_type=join_type,
num_partitions=4,
on=("id", "oddness"),
)
joined_pd = pd.DataFrame(joined.take_all())
# Create expected data for pandas comparison
left_pd = left_ds.to_pandas()
right_pd = right_ds.to_pandas()
# Calculate expected result using pandas
if join_type == "left_anti":
expected_cols = ["id", "oddness", "10x"]
merged = left_pd.merge(
right_pd, on=["id", "oddness"], how="left", indicator=True
)
expected_pd = merged[merged["_merge"] == "left_only"][expected_cols]
else:
expected_cols = ["id", "oddness", "100x"]
merged = left_pd.merge(
right_pd, on=["id", "oddness"], how="right", indicator=True
)
expected_pd = merged[merged["_merge"] == "right_only"][expected_cols]
# Sort resulting frames and reset index (to be able to compare with expected one)
expected_pd_sorted = expected_pd.sort_values(by=expected_cols).reset_index(
drop=True
)
joined_pd_sorted = joined_pd.sort_values(by=expected_cols).reset_index(drop=True)
pd.testing.assert_frame_equal(expected_pd_sorted, joined_pd_sorted) | test | 0 | {"function_name": "test_anti_join_multi_key", "class_name": null, "qualname": "test_anti_join_multi_key", "file_path": "python/ray/data/tests/test_join.py", "repo_id": "ray-project/ray", "loc": 62, "tested_modules": ["typing", "packaging.version", "ray.data._internal.logical.operators", "ray.data._internal.util", "ray.data._internal.utils.arrow_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/models/language/pooling/test_mm_classifier_conversion.py:test_idefics_multimodal | # Context:
def update_config(config): ...
def test_gemma_multimodal(vllm_runner) -> None: ...
# Task:
Write a Python test function `test_idefics_multimodal` to verify the behavior of `idefics_multimodal`.
Module under test: vllm.config.pooler | def test_idefics_multimodal(
vllm_runner,
) -> None:
prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
with vllm_runner(
model_name="HuggingFaceM4/Idefics3-8B-Llama3",
runner="pooling",
convert="classify",
load_format="dummy",
max_model_len=512,
enforce_eager=True,
tensor_parallel_size=1,
disable_log_stats=True,
dtype="bfloat16",
) as vllm_model:
llm = vllm_model.get_llm()
outputs = llm.classify(prompts)
for output in outputs:
assert len(output.outputs.probs) == 2 | test | 1 | {"function_name": "test_idefics_multimodal", "class_name": null, "qualname": "test_idefics_multimodal", "file_path": "tests/models/language/pooling/test_mm_classifier_conversion.py", "repo_id": "vllm-project/vllm", "loc": 25, "tested_modules": ["vllm.config.pooler"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_human_approval.py:TestLLMHookHumanInput.test_request_human_input_pauses_and_resumes_live_updates | # Context:
from unittest.mock import Mock, patch
from crewai.hooks.llm_hooks import LLMCallHookContext
def mock_executor(): ...
def mock_tool(): ...
def mock_agent(): ...
def mock_task(): ...
class TestToolHookHumanInput: ...
class TestApprovalHookIntegration: ...
class TestCostControlApproval: ...
class TestLLMHookHumanInput:
def test_request_human_input_returns_user_response(self, mock_event_listener, mock_input, mock_executor): ...
def test_request_human_input_returns_empty_string_on_enter(self, mock_event_listener, mock_input, mock_executor): ...
def test_request_human_input_resumes_on_exception(self, mock_event_listener, mock_input, mock_executor): ...
def test_request_human_input_strips_whitespace(self, mock_event_listener, mock_input, mock_executor): ...
# Task:
Write a Python test method `test_request_human_input_pauses_and_resumes_live_updates` in test class `TestLLMHookHumanInput` to test that live updates are paused and resumed.
Module under test: __future__, crewai.hooks.llm_hooks, crewai.hooks.tool_hooks | def test_request_human_input_pauses_and_resumes_live_updates(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that live updates are paused and resumed."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
context.request_human_input(prompt="Test")
# Verify pause was called
mock_formatter.pause_live_updates.assert_called_once()
# Verify resume was called
mock_formatter.resume_live_updates.assert_called_once() | test | 0 | {"function_name": "test_request_human_input_pauses_and_resumes_live_updates", "class_name": "TestLLMHookHumanInput", "qualname": "TestLLMHookHumanInput.test_request_human_input_pauses_and_resumes_live_updates", "file_path": "lib/crewai/tests/hooks/test_human_approval.py", "repo_id": "crewAIInc/crewAI", "loc": 16, "tested_modules": ["__future__", "crewai.hooks.llm_hooks", "crewai.hooks.tool_hooks"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:common/data_source/utils.py:run_with_timeout | # Context:
import contextvars
from collections.abc import Callable, Generator, Iterator, Mapping, Sequence
from typing import IO, Any, Generic, Iterable, Optional, Protocol, TypeVar, cast
def datetime_from_string(datetime_string: str) -> datetime: ...
def is_valid_image_type(mime_type: str) -> bool: ...
def _handle_http_error(e: requests.HTTPError, attempt: int) -> int: ...
def update_param_in_path(path: str, param: str, value: str) -> str: ...
def build_confluence_document_id(base_url: str, content_url: str, is_cloud: bool) -> str: ...
def get_single_param_from_url(url: str, param: str) -> str | None: ...
def get_start_param_from_url(url: str) -> int: ...
def wrap_request_to_handle_ratelimiting(request_fn: R, default_wait_time_sec: int, max_waits: int) -> R: ...
class _RateLimitedRequest: ...
def create_s3_client(bucket_type: BlobType, credentials: dict[str, Any], european_residency: bool) -> S3Client: ...
def detect_bucket_region(s3_client: S3Client, bucket_name: str) -> str | None: ...
def download_object(s3_client: S3Client, bucket_name: str, key: str, size_threshold: int | None) -> bytes | None: ...
def read_stream_with_limit(body: Any, key: str, size_threshold: int) -> bytes | None: ...
def _extract_onyx_metadata(line: str) -> dict | None: ...
def read_text_file(file: IO, encoding: str, errors: str, ignore_onyx_metadata: bool) -> tuple[str, dict]: ...
def get_blob_link(bucket_type: BlobType, s3_client: S3Client, bucket_name: str, key: str, bucket_region: str | None) -> str: ...
def extract_size_bytes(obj: Mapping[str, Any]) -> int | None: ...
def get_file_ext(file_name: str) -> str: ...
def is_accepted_file_ext(file_ext: str, extension_type: OnyxExtensionType) -> bool: ...
def detect_encoding(file: IO[bytes]) -> str: ...
def get_markitdown_converter(): ...
def to_bytesio(stream: IO[bytes]) -> BytesIO: ...
def get_base_url(token: str) -> str: ...
def get_message_link(event: dict, client: WebClient, channel_id: str) -> str: ...
def make_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> SlackResponse: ...
def make_paginated_slack_api_call(call: Callable[..., SlackResponse], **kwargs) -> Generator[dict[str, Any], None, None]: ...
def _make_slack_api_call_paginated(call: Callable[..., SlackResponse]) -> Callable[..., Generator[dict[str, Any], None, None]]: ...
def is_atlassian_date_error(e: Exception) -> bool: ...
def expert_info_from_slack_id(user_id: str | None, client: WebClient, user_cache: dict[str, BasicExpertInfo | None]) -> BasicExpertInfo | None: ...
class SlackTextCleaner: ...
def is_mail_service_disabled_error(error: HttpError) -> bool: ...
def build_time_range_query(time_range_start: SecondsSinceUnixEpoch | None, time_range_end: SecondsSinceUnixEpoch | None) -> str | None: ...
def clean_email_and_extract_name(email: str) -> tuple[str, str | None]: ...
def get_message_body(payload: dict[str, Any]) -> str: ...
def time_str_to_utc(time_str: str): ...
def gmail_time_str_to_utc(time_str: str): ...
def batch_generator(items: Iterable[T], batch_size: int, pre_batch_yield: Callable[[list[T]], None] | None) -> Generator[list[T], None, None]: ...
def fetch_notion_data(url: str, headers: dict[str, str], method: str, json_data: Optional[dict]) -> dict[str, Any]: ...
def properties_to_str(properties: dict[str, Any]) -> str: ...
def filter_pages_by_time(pages: list[dict[str, Any]], start: float, end: float, filter_field: str) -> list[dict[str, Any]]: ...
def _load_all_docs(connector: CheckpointedConnector[CT], load: LoadFunction) -> list[Document]: ...
def load_all_docs_from_checkpoint_connector(connector: CheckpointedConnector[CT], start: SecondsSinceUnixEpoch, end: SecondsSinceUnixEpoch) -> list[Document]: ...
def is_atlassian_cloud_url(url: str) -> bool: ...
def get_cloudId(base_url: str) -> str: ...
def scoped_url(url: str, product: str) -> str: ...
def process_confluence_user_profiles_override(confluence_user_email_override: list[dict[str, str]]) -> list[ConfluenceUser]: ...
def confluence_refresh_tokens(client_id: str, client_secret: str, cloud_id: str, refresh_token: str) -> dict[str, Any]: ...
class TimeoutThread(threading.Thread, Generic[R]): ...
def validate_attachment_filetype(attachment: dict[str, Any]) -> bool: ...
class CallableProtocol(Protocol): ...
def run_functions_tuples_in_parallel(functions_with_args: Sequence[tuple[CallableProtocol, tuple[Any, ...]]], allow_failures: bool, max_workers: int | None) -> list[Any]: ...
def _next_or_none(ind: int, gen: Iterator[R]) -> tuple[int, R | None]: ...
def parallel_yield(gens: list[Iterator[R]], max_workers: int) -> Iterator[R]: ...
def sanitize_filename(name: str, extension: str) -> str: ...
class _RateLimitDecorator: ...
def retry_builder(tries: int, delay: float, max_delay: float | None, backoff: float, jitter: tuple[float, float] | float, exceptions: type[Exception] | tuple[type[Exception], ...]) -> Callable[[F], F]: ...
# Task:
Write a Python function `run_with_timeout` to executes a function with a timeout. If the function doesn't complete within the specified.
Parameters: timeout: float, func: Callable[..., R]
Returns: R | def run_with_timeout(timeout: float, func: Callable[..., R], *args: Any, **kwargs: Any) -> R:
"""
Executes a function with a timeout. If the function doesn't complete within the specified
timeout, raises TimeoutError.
"""
context = contextvars.copy_context()
task = TimeoutThread(timeout, context.run, func, *args, **kwargs)
task.start()
task.join(timeout)
if task.exception is not None:
raise task.exception
if task.is_alive():
task.end()
return task.result # type: ignore | function_simple | 1 | {"cognitive_complexity": 2, "loc": 16, "code_loc": 9, "docstring_loc": 4, "function_name": "run_with_timeout", "class_name": null, "qualname": "run_with_timeout", "file_path": "common/data_source/utils.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "file_runnable"} |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/trust.py:DelegationChain.add_delegation | # Context:
import json
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional
from llama_index.agent.agentmesh.identity import CMVKIdentity, CMVKSignature
class TrustPolicy: ...
class TrustVerificationResult: ...
class TrustedAgentCard: ...
class TrustHandshake: ...
class Delegation: ...
class DelegationChain:
def __init__(self, root_identity: CMVKIdentity):
self.root_identity = root_identity
self.delegations: List[Delegation] = []
self._known_identities: Dict[str, CMVKIdentity] = {
root_identity.did: root_identity
}
def verify(self) -> bool: ...
# Task:
Write a Python method `add_delegation` for the class `DelegationChain` to add a delegation to the chain.
Parameters: delegatee: TrustedAgentCard, capabilities: List[str], expires_in_hours: Optional[int], delegator_identity: Optional[CMVKIdentity]
Returns: Delegation | def add_delegation(
self,
delegatee: TrustedAgentCard,
capabilities: List[str],
expires_in_hours: Optional[int] = None,
delegator_identity: Optional[CMVKIdentity] = None,
) -> Delegation:
"""Add a delegation to the chain."""
if not delegatee.identity:
raise ValueError("Delegatee must have identity")
delegator = delegator_identity or self.root_identity
delegatee_did = delegatee.identity.did
expires_at = None
if expires_in_hours:
expires_at = datetime.now(timezone.utc) + timedelta(hours=expires_in_hours)
delegation_data = json.dumps(
{
"delegator": delegator.did,
"delegatee": delegatee_did,
"capabilities": sorted(capabilities),
"expires_at": expires_at.isoformat() if expires_at else None,
},
sort_keys=True,
)
signature = delegator.sign(delegation_data)
delegation = Delegation(
delegator=delegator.did,
delegatee=delegatee_did,
capabilities=capabilities,
signature=signature,
expires_at=expires_at,
)
self.delegations.append(delegation)
self._known_identities[delegatee_did] = delegatee.identity
return delegation | function_simple | 1 | {"cognitive_complexity": 4, "loc": 41, "code_loc": 27, "docstring_loc": 1, "function_name": "add_delegation", "class_name": "DelegationChain", "qualname": "DelegationChain.add_delegation", "file_path": "llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/trust.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:release/nightly_tests/dataset/model_inference_pipeline_benchmark.py:module_doc | Write a module-level docstring for the Python module `model_inference_pipeline_benchmark` which contains class `WorkerConfig`, class `PipelineConfig`, function `parse_args`, function `preprocessing_task_pandas`, class `InferenceActor`. | Model Inference Pipeline Benchmark
This benchmark mimics a production ML inference pipeline with the following structure:
1. Read parquet data with configurable columns
2. Preprocessing with map_batches (CPU tasks) using Pandas
3. Inference with map_batches using actors (GPU) with concurrency control
4. Consume output
Key features mirrored from production:
- Separate worker configurations for preprocessing and inference
- Metadata column passthrough
- Extra output columns added during inference | documentation | 0 | {"doc_type": "module", "module_name": "model_inference_pipeline_benchmark", "file_path": "release/nightly_tests/dataset/model_inference_pipeline_benchmark.py", "repo_id": "ray-project/ray", "char_length": 501} |
ccxt/ccxt:python/ccxt/static_dependencies/bip/ecc/common/ikeys.py:IPublicKey.IsValidPoint | # Context:
from .ipoint import IPoint
class IPrivateKey(ABC): ...
class IPublicKey(ABC):
def FromBytes(cls, key_bytes: bytes) -> IPublicKey: ...
def FromPoint(cls, key_point: IPoint) -> IPublicKey: ...
def CurveType() -> EllipticCurveTypes: ...
def IsValidBytes(cls, key_bytes: bytes) -> bool: ...
def CompressedLength() -> int: ...
def UncompressedLength() -> int: ...
def UnderlyingObject(self) -> Any: ...
def RawCompressed(self) -> DataBytes: ...
def RawUncompressed(self) -> DataBytes: ...
def Point(self) -> IPoint: ...
# Task:
Write a Python method `IsValidPoint` for the class `IPublicKey` to return if the specified point represents a valid public key.
Parameters: key_point: IPoint
Returns: bool | def IsValidPoint(cls,
key_point: IPoint) -> bool:
"""
Return if the specified point represents a valid public key.
Args:
key_point (IPoint object): Key point
Returns:
bool: True if valid, false otherwise
"""
try:
cls.FromPoint(key_point)
return True
except ValueError:
return False | function_simple | 1 | {"cognitive_complexity": 1, "loc": 16, "code_loc": 5, "docstring_loc": 9, "function_name": "IsValidPoint", "class_name": "IPublicKey", "qualname": "IPublicKey.IsValidPoint", "file_path": "python/ccxt/static_dependencies/bip/ecc/common/ikeys.py", "repo_id": "ccxt/ccxt", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/sam2_video/test_modeling_sam2_video.py:Sam2VideoModelIntegrationTest.test_inference_mask_generation_video_multi_objects_multi_points | # Context:
from transformers.testing_utils import (
backend_empty_cache,
is_torch_bf16_available_on_device,
is_torch_fp16_available_on_device,
slow,
torch_device,
)
import torch
def prepare_image(): ...
def prepare_groceries_image(): ...
def prepare_dog_img(): ...
def prepare_video(): ...
class Sam2VideoModelIntegrationTest(unittest.TestCase):
def setUp(self): ...
def tearDown(self): ...
def test_inference_mask_generation_video_one_point(self): ...
def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self): ...
def test_inference_mask_generation_video_multi_points(self): ...
def test_inference_mask_generation_video_one_bb(self): ...
def test_inference_mask_generation_video_one_point_one_bb(self): ...
def test_inference_mask_generation_video_batched_bb(self): ...
def test_inference_propagate_video_from_mask_input(self): ...
def test_inference_propagate_on_streamed_video(self): ...
def test_inference_with_different_dtypes(self): ...
# Task:
Write a Python test method `test_inference_mask_generation_video_multi_objects_multi_points` in test class `Sam2VideoModelIntegrationTest` to verify the behavior of `inference_mask_generation_video_multi_objects_multi_points`.
Module under test: transformers.testing_utils, transformers.utils, transformers.video_utils | def test_inference_mask_generation_video_multi_objects_multi_points(self):
raw_video = prepare_video()
inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device)
ann_frame_idx = 0 # the frame index we interact with
ann_obj_ids = [2, 3] # give a unique id to each object we interact with (it can be any integers)
self.processor.add_inputs_to_inference_session(
inference_session=inference_session,
frame_idx=ann_frame_idx,
obj_ids=ann_obj_ids,
input_points=[[[[200, 300], [230, 250], [275, 175]], [[400, 150]]]],
input_labels=[[[1, 1, 0], [1]]],
)
outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx)
low_res_masks = outputs.pred_masks
video_res_masks = self.processor.post_process_masks(
[outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
self.assertEqual(low_res_masks.shape, (2, 1, 256, 256))
self.assertEqual(video_res_masks.shape, (2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
video_res_masks[:, 0, :2, :2], # first object
torch.tensor(
[[[-12.6294, -12.6294], [-13.3659, -13.3659]], [[-20.3319, -20.3319], [-22.0491, -22.0491]]]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
)
# test propagate in video frames
frames = []
for sam2_video_output in self.video_model.propagate_in_video_iterator(
inference_session=inference_session,
start_frame_idx=ann_frame_idx,
max_frame_num_to_track=2,
):
video_res_masks = self.processor.post_process_masks(
[sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False
)[0]
frames.append(video_res_masks)
frames = torch.stack(frames, dim=0)
self.assertEqual(frames.shape, (3, 2, 1, raw_video.shape[-3], raw_video.shape[-2]))
torch.testing.assert_close(
frames[:3, :, :, :2, :2],
torch.tensor(
[
[[[[-12.6294, -12.6294], [-13.3659, -13.3659]]], [[[-20.3319, -20.3319], [-22.0491, -22.0491]]]],
[[[[-18.5249, -18.5249], [-19.5830, -19.5830]]], [[[-17.5537, -17.5537], [-19.2259, -19.2259]]]],
[[[[-14.2722, -14.2722], [-15.4622, -15.4622]]], [[[-18.3185, -18.3185], [-20.0314, -20.0314]]]],
]
).to(torch_device),
atol=1e-4,
rtol=1e-4,
) | test | 0 | {"function_name": "test_inference_mask_generation_video_multi_objects_multi_points", "class_name": "Sam2VideoModelIntegrationTest", "qualname": "Sam2VideoModelIntegrationTest.test_inference_mask_generation_video_multi_objects_multi_points", "file_path": "tests/models/sam2_video/test_modeling_sam2_video.py", "repo_id": "huggingface/transformers", "loc": 54, "tested_modules": ["transformers.testing_utils", "transformers.utils", "transformers.video_utils", "transformers", "PIL"], "has_docstring": false, "runnable_level": "file_runnable"} |
Textualize/textual:src/textual/compose.py:compose | # Context:
from textual.app import App, ComposeResult
from textual.widget import Widget
from textual.widget import MountError, Widget
# Task:
Write a Python function `compose` to compose child widgets from a generator in the same way as [compose][textual.widget.Widget.compose].
Parameters: node: App | Widget, compose_result: ComposeResult | None
Returns: list[Widget] | def compose(
node: App | Widget, compose_result: ComposeResult | None = None
) -> list[Widget]:
"""Compose child widgets from a generator in the same way as [compose][textual.widget.Widget.compose].
Example:
```python
def on_key(self, event:events.Key) -> None:
def add_key(key:str) -> ComposeResult:
with containers.HorizontalGroup():
yield Label("You pressed:")
yield Label(key)
self.mount_all(
compose(self, add_key(event.key)),
)
```
Args:
node: The parent node.
compose_result: A compose result, or `None` to call `node.compose()`.
Returns:
A list of widgets.
"""
_rich_traceback_omit = True
from textual.widget import MountError, Widget
app = node.app
nodes: list[Widget] = []
compose_stack: list[Widget] = []
composed: list[Widget] = []
app._compose_stacks.append(compose_stack)
app._composed.append(composed)
iter_compose = iter(
compose_result if compose_result is not None else node.compose()
)
is_generator = hasattr(iter_compose, "throw")
try:
while True:
try:
child = next(iter_compose)
except StopIteration:
break
if not isinstance(child, Widget):
mount_error = MountError(
f"Can't mount {type(child)}; expected a Widget instance."
)
if is_generator:
iter_compose.throw(mount_error) # type: ignore
else:
raise mount_error from None
try:
child.id
except AttributeError:
mount_error = MountError(
"Widget is missing an 'id' attribute; did you forget to call super().__init__()?"
)
if is_generator:
iter_compose.throw(mount_error) # type: ignore
else:
raise mount_error from None
if composed:
nodes.extend(composed)
composed.clear()
if compose_stack:
try:
compose_stack[-1].compose_add_child(child)
except Exception as error:
if is_generator:
# So the error is raised inside the generator
# This will generate a more sensible traceback for the dev
iter_compose.throw(error) # type: ignore
else:
raise
else:
nodes.append(child)
if composed:
nodes.extend(composed)
composed.clear()
finally:
app._compose_stacks.pop()
app._composed.pop()
return nodes | function_complex | 1 | {"cognitive_complexity": 34, "loc": 88, "code_loc": 56, "docstring_loc": 23, "function_name": "compose", "class_name": null, "qualname": "compose", "file_path": "src/textual/compose.py", "repo_id": "Textualize/textual", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/kernels/test_fla_layernorm_guard.py:test_layer_norm_fwd_with_groups | # Context:
import pytest
import torch
from vllm.model_executor.layers.fla.ops.layernorm_guard import (
layer_norm_fwd,
layernorm_fn,
rms_norm_ref,
)
from vllm.utils.torch_utils import set_random_seed
def layer_norm_ref(x, weight, bias, z, eps, group_size, norm_before_gate, is_rms_norm): ...
def test_layer_norm_fwd_basic(num_tokens: int, hidden_size: int, dtype: torch.dtype, seed: int, is_rms_norm: bool) -> None: ...
def test_layer_norm_fwd_with_gate(num_tokens: int, hidden_size: int, dtype: torch.dtype, norm_before_gate: bool, is_rms_norm: bool) -> None: ...
def test_layer_norm_rows_per_block(num_tokens: int, dtype: torch.dtype) -> None: ...
def test_strided_input(dtype: torch.dtype) -> None: ...
def test_output_buffer_provided(num_tokens: int, hidden_size: int, dtype: torch.dtype) -> None: ...
def test_multidimensional_input(shape: tuple, dtype: torch.dtype) -> None: ...
def test_rmsnorm_gated_forward_native_dtype(default_vllm_config, num_tokens: int, hidden_size: int, dtype: torch.dtype, has_gate: bool, group_size: int | None, norm_before_gate: bool): ...
# Task:
Write a Python test function `test_layer_norm_fwd_with_groups` to test layer norm forward pass with group normalization.
Module under test: vllm.model_executor.layers.fla.ops.layernorm_guard, vllm.utils.torch_utils, vllm.model_executor.layers.layernorm | def test_layer_norm_fwd_with_groups(
num_tokens: int,
hidden_size: int,
group_size: int,
dtype: torch.dtype,
is_rms_norm: bool,
) -> None:
"""Test layer norm forward pass with group normalization."""
if hidden_size % group_size != 0:
pytest.skip(
f"hidden_size {hidden_size} not divisible by group_size {group_size}"
)
set_random_seed(42)
device = torch.device("cuda:0")
# Create inputs
x = torch.randn(num_tokens, hidden_size, dtype=dtype, device=device)
weight = torch.randn(hidden_size, dtype=dtype, device=device)
bias = None if is_rms_norm else torch.randn(hidden_size, dtype=dtype, device=device)
eps = 1e-6
ngroups = hidden_size // group_size
# Run the triton kernel
out, mean, rstd = layer_norm_fwd(
x, weight, bias, eps, z=None, group_size=group_size, is_rms_norm=is_rms_norm
)
# Run reference implementation
ref_out = layer_norm_ref(
x, weight, bias, z=None, eps=eps, group_size=group_size, is_rms_norm=is_rms_norm
)
# Check outputs
assert out.shape == x.shape
assert out.dtype == x.dtype
torch.testing.assert_close(out, ref_out, atol=1e-2, rtol=1e-2)
# Check mean and rstd shapes for groups
if not is_rms_norm:
assert mean.shape == (ngroups * num_tokens,)
assert rstd.shape == (ngroups * num_tokens,) | test | 1 | {"function_name": "test_layer_norm_fwd_with_groups", "class_name": null, "qualname": "test_layer_norm_fwd_with_groups", "file_path": "tests/kernels/test_fla_layernorm_guard.py", "repo_id": "vllm-project/vllm", "loc": 43, "tested_modules": ["vllm.model_executor.layers.fla.ops.layernorm_guard", "vllm.utils.torch_utils", "vllm.model_executor.layers.layernorm", "einops"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/test_auth_jwt_algorithms.py:TestJWTKeyHelpers.test_verification_and_signing_keys_work_together_rs256 | # Context:
import tempfile
import jwt
from langflow.services.auth.utils import get_jwt_verification_key
from langflow.services.auth.utils import JWTKeyError, get_jwt_verification_key
from langflow.services.auth.utils import get_jwt_signing_key
from langflow.services.auth.utils import get_jwt_signing_key, get_jwt_verification_key
class TestAuthSettingsAlgorithms: ...
class TestRSAKeyGeneration: ...
class TestTokenCreation: ...
class TestTokenVerification: ...
class TestAuthenticationFailures: ...
class TestRefreshTokenVerification: ...
class TestAlgorithmMismatch: ...
class TestKeyPersistence: ...
class TestEdgeCases: ...
class TestJWTKeyHelpers:
def _create_mock_settings_service(self, algorithm, tmpdir): ...
def test_get_jwt_verification_key_hs256_returns_secret_key(self): ...
def test_get_jwt_verification_key_rs256_returns_public_key(self): ...
def test_get_jwt_verification_key_rs512_returns_public_key(self): ...
def test_get_jwt_verification_key_missing_public_key_raises_error(self): ...
def test_get_jwt_verification_key_missing_secret_key_raises_error(self): ...
def test_get_jwt_signing_key_hs256_returns_secret_key(self): ...
def test_get_jwt_signing_key_rs256_returns_private_key(self): ...
def test_get_jwt_signing_key_rs512_returns_private_key(self): ...
def test_verification_and_signing_keys_work_together_hs256(self): ...
# Task:
Write a Python test method `test_verification_and_signing_keys_work_together_rs256` in test class `TestJWTKeyHelpers` to verification and signing keys should work together for RS256.
Module under test: datetime, pathlib, fastapi | def test_verification_and_signing_keys_work_together_rs256(self):
"""Verification and signing keys should work together for RS256."""
from langflow.services.auth.utils import get_jwt_signing_key, get_jwt_verification_key
with tempfile.TemporaryDirectory() as tmpdir:
mock_service = self._create_mock_settings_service("RS256", tmpdir)
signing_key = get_jwt_signing_key(mock_service)
verification_key = get_jwt_verification_key(mock_service)
# For asymmetric algorithms, keys are different
assert signing_key != verification_key
# Sign and verify a token
payload = {"sub": "test-user", "type": "access"}
token = jwt.encode(payload, signing_key, algorithm="RS256")
decoded = jwt.decode(token, verification_key, algorithms=["RS256"])
assert decoded["sub"] == "test-user" | test | 1 | {"function_name": "test_verification_and_signing_keys_work_together_rs256", "class_name": "TestJWTKeyHelpers", "qualname": "TestJWTKeyHelpers.test_verification_and_signing_keys_work_together_rs256", "file_path": "src/backend/tests/unit/test_auth_jwt_algorithms.py", "repo_id": "langflow-ai/langflow", "loc": 19, "tested_modules": ["datetime", "pathlib", "fastapi", "jwt", "pydantic"], "has_docstring": true, "runnable_level": "project_runnable"} |
sansan0/TrendRadar:mcp_server/server.py:get_system_status | # Context:
import asyncio
import json
def _get_tools(project_root: Optional[str]): ...
async def get_platforms_resource() -> str: ...
async def get_rss_feeds_resource() -> str: ...
async def get_available_dates_resource() -> str: ...
async def get_keywords_resource() -> str: ...
async def resolve_date_range(expression: str) -> str: ...
async def get_latest_news(platforms: Optional[List[str]], limit: int, include_url: bool) -> str: ...
async def get_trending_topics(top_n: int, mode: str, extract_mode: str) -> str: ...
async def get_latest_rss(feeds: Optional[List[str]], days: int, limit: int, include_summary: bool) -> str: ...
async def search_rss(keyword: str, feeds: Optional[List[str]], days: int, limit: int, include_summary: bool) -> str: ...
async def get_rss_feeds_status() -> str: ...
async def get_news_by_date(date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], limit: int, include_url: bool) -> str: ...
async def analyze_topic_trend(topic: str, analysis_type: str, date_range: Optional[Union[Dict[str, str], str]], granularity: str, spike_threshold: float, time_window: int, lookahead_hours: int, confidence_threshold: float) -> str: ...
async def analyze_data_insights(insight_type: str, topic: Optional[str], date_range: Optional[Union[Dict[str, str], str]], min_frequency: int, top_n: int) -> str: ...
async def analyze_sentiment(topic: Optional[str], platforms: Optional[List[str]], date_range: Optional[Union[Dict[str, str], str]], limit: int, sort_by_weight: bool, include_url: bool) -> str: ...
async def find_related_news(reference_title: str, date_range: Optional[Union[Dict[str, str], str]], threshold: float, limit: int, include_url: bool) -> str: ...
async def generate_summary_report(report_type: str, date_range: Optional[Union[Dict[str, str], str]]) -> str: ...
async def aggregate_news(date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], similarity_threshold: float, limit: int, include_url: bool) -> str: ...
async def compare_periods(period1: Union[Dict[str, str], str], period2: Union[Dict[str, str], str], topic: Optional[str], compare_type: str, platforms: Optional[List[str]], top_n: int) -> str: ...
async def search_news(query: str, search_mode: str, date_range: Optional[Union[Dict[str, str], str]], platforms: Optional[List[str]], limit: int, sort_by: str, threshold: float, include_url: bool, include_rss: bool, rss_limit: int) -> str: ...
async def get_current_config(section: str) -> str: ...
async def check_version(proxy_url: Optional[str]) -> str: ...
async def trigger_crawl(platforms: Optional[List[str]], save_to_local: bool, include_url: bool) -> str: ...
async def sync_from_remote(days: int) -> str: ...
async def get_storage_status() -> str: ...
async def list_available_dates(source: str) -> str: ...
async def read_article(url: str, timeout: int) -> str: ...
async def read_articles_batch(urls: List[str], timeout: int) -> str: ...
async def get_channel_format_guide(channel: Optional[str]) -> str: ...
async def get_notification_channels() -> str: ...
async def send_notification(message: str, title: str, channels: Optional[List[str]]) -> str: ...
def run_server(project_root: Optional[str], transport: str, host: str, port: int): ...
# Task:
Write a Python async function `get_system_status` to 获取系统运行状态和健康检查信息.
Returns: str | async def get_system_status() -> str:
"""
获取系统运行状态和健康检查信息
返回系统版本、数据统计、缓存状态等信息
Returns:
JSON格式的系统状态信息
"""
tools = _get_tools()
result = await asyncio.to_thread(tools['system'].get_system_status)
return json.dumps(result, ensure_ascii=False, indent=2) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 3, "docstring_loc": 8, "function_name": "get_system_status", "class_name": null, "qualname": "get_system_status", "file_path": "mcp_server/server.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "file_runnable"} |
hiyouga/LlamaFactory:src/llamafactory/v1/utils/helper.py:set_seed | # Context:
from transformers import set_seed as hf_set_seed
def is_tokenizer(processor: Processor) -> bool: ...
def get_tokenizer(processor: Processor) -> PreTrainedTokenizer: ...
def _pad_and_truncate(tensor: Tensor, max_seqlen: int, pad_value: int) -> Tensor: ...
def pad_and_truncate(samples: list[ModelInput], max_seqlen: int) -> list[BatchInput]: ...
def compute_valid_tokens(batches: list[BatchInput]) -> int: ...
# Task:
Write a Python function `set_seed` to set seed for reproducibility.
Parameters: seed: int
Returns: None | def set_seed(seed: int) -> None:
"""Set seed for reproducibility.
Args:
seed: Random seed.
"""
hf_set_seed(seed) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 7, "code_loc": 1, "docstring_loc": 5, "function_name": "set_seed", "class_name": null, "qualname": "set_seed", "file_path": "src/llamafactory/v1/utils/helper.py", "repo_id": "hiyouga/LlamaFactory", "has_docstring": true, "runnable_level": "plib_runnable"} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.