sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/multi_agent_trust_layer/multi_agent_trust_layer.py:MultiAgentPolicyEngine.add_role_policy | # Context:
from typing import Dict, Any, List, Optional, Set
class TrustLevel(Enum): ...
class AgentIdentity: ...
class TrustScore: ...
class DelegationScope: ...
class Delegation: ...
class AuditEntry: ...
class IdentityRegistry: ...
class TrustScoringEngine: ...
class DelegationManager: ...
class TrustLayer: ...
class GovernedAgent: ...
def main(): ...
class MultiAgentPolicyEngine:
def __init__(self, trust_engine: TrustScoringEngine, delegation_manager: DelegationManager):
self.trust_engine = trust_engine
self.delegation_manager = delegation_manager
self.role_policies: Dict[str, Dict[str, Any]] = {}
def evaluate(self, agent_id: str, action: str, roles: List[str], delegation_id: Optional[str]) -> tuple[bool, str]: ...
# Task:
Write a Python method `add_role_policy` for the class `MultiAgentPolicyEngine` to add a policy for a specific role.
Parameters: role: str, policy: Dict[str, Any] | def add_role_policy(self, role: str, policy: Dict[str, Any]):
"""Add a policy for a specific role"""
self.role_policies[role] = policy | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "add_role_policy", "class_name": "MultiAgentPolicyEngine", "qualname": "MultiAgentPolicyEngine.add_role_policy", "file_path": "advanced_ai_agents/multi_agent_apps/multi_agent_trust_layer/multi_agent_trust_layer.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "class_runnable"} |
666ghj/BettaFish:ReportEngine/utils/test_json_parser.py:module_doc | Write a module-level docstring for the Python module `test_json_parser` which contains class `TestRobustJSONParser`, function `run_manual_test`. | 测试RobustJSONParser的各种修复能力。
验证解析器能够处理:
1. 基本的markdown包裹
2. 思考内容清理
3. 缺少逗号的修复
4. 括号不平衡的修复
5. 控制字符转义
6. 尾随逗号移除 | documentation | 1 | {"doc_type": "module", "module_name": "test_json_parser", "file_path": "ReportEngine/utils/test_json_parser.py", "repo_id": "666ghj/BettaFish", "char_length": 108} |
ray-project/ray:python/ray/data/tests/test_ranker.py:module_doc | Write a module-level docstring for the Python module `test_ranker` which contains function `test_default_ranker`, class `IntRanker`, function `test_generic_types`. | Comprehensive tests for the generic ranker type system. | documentation | 0 | {"doc_type": "module", "module_name": "test_ranker", "file_path": "python/ray/data/tests/test_ranker.py", "repo_id": "ray-project/ray", "char_length": 55} |
infiniflow/ragflow:test/testcases/test_http_api/test_session_management/test_session_sdk_routes_unit.py:test_list_agent_session_projection_unit | # Context:
import inspect
from types import ModuleType, SimpleNamespace
import pytest
class _DummyManager: ...
class _AwaitableValue: ...
class _Args(dict): ...
class _StubHeaders: ...
class _StubResponse: ...
class _DummyUploadFile: ...
def _run(coro): ...
async def _collect_stream(body): ...
def auth(): ...
def set_tenant_info(): ...
def _load_session_module(monkeypatch): ...
def test_create_and_update_guard_matrix(monkeypatch): ...
def test_chat_completion_metadata_and_stream_paths(monkeypatch): ...
def test_openai_chat_validation_matrix_unit(monkeypatch): ...
def test_openai_stream_generator_branches_unit(monkeypatch): ...
def test_openai_nonstream_branch_unit(monkeypatch): ...
def test_agents_openai_compatibility_unit(monkeypatch): ...
def test_agent_completions_stream_and_nonstream_unit(monkeypatch): ...
def test_list_session_projection_unit(monkeypatch): ...
def test_delete_routes_partial_duplicate_unit(monkeypatch): ...
def test_delete_agent_session_error_matrix_unit(monkeypatch): ...
def test_sessions_ask_route_validation_and_stream_unit(monkeypatch): ...
def test_sessions_related_questions_prompt_build_unit(monkeypatch): ...
def test_chatbot_routes_auth_stream_nonstream_unit(monkeypatch): ...
def test_agentbot_routes_auth_stream_nonstream_unit(monkeypatch): ...
def test_searchbots_ask_embedded_auth_and_stream_unit(monkeypatch): ...
def test_searchbots_retrieval_test_embedded_matrix_unit(monkeypatch): ...
def test_searchbots_related_questions_embedded_matrix_unit(monkeypatch): ...
def test_searchbots_detail_share_embedded_matrix_unit(monkeypatch): ...
def test_searchbots_mindmap_embedded_matrix_unit(monkeypatch): ...
def test_sequence2txt_embedded_validation_and_stream_matrix_unit(monkeypatch): ...
def test_tts_embedded_stream_and_error_matrix_unit(monkeypatch): ...
def test_build_reference_chunks_metadata_matrix_unit(monkeypatch): ...
# Task:
Write a Python test function `test_list_agent_session_projection_unit` to verify the behavior of `list_agent_session_projection_unit`.
Module under test: pathlib, types | def test_list_agent_session_projection_unit(monkeypatch):
module = _load_session_module(monkeypatch)
monkeypatch.setattr(module, "request", SimpleNamespace(args=_Args({})))
monkeypatch.setattr(module.UserCanvasService, "query", lambda **_kwargs: [SimpleNamespace(id="agent-1")])
conv_non_list_reference = {
"id": "session-1",
"dialog_id": "agent-1",
"message": [{"role": "assistant", "content": "hello", "prompt": "internal"}],
"reference": {"unexpected": "shape"},
}
monkeypatch.setattr(module.API4ConversationService, "get_list", lambda *_args, **_kwargs: (1, [conv_non_list_reference]))
res = _run(inspect.unwrap(module.list_agent_session)("tenant-1", "agent-1"))
assert res["data"][0]["agent_id"] == "agent-1"
assert "prompt" not in res["data"][0]["messages"][0]
conv_with_chunks = {
"id": "session-2",
"dialog_id": "agent-1",
"message": [
{"role": "user", "content": "question"},
{"role": "assistant", "content": "answer", "prompt": "internal"},
],
"reference": [
{
"chunks": [
"not-a-dict",
{
"chunk_id": "chunk-2",
"content_with_weight": "weighted",
"doc_id": "doc-2",
"docnm_kwd": "doc-name-2",
"kb_id": "kb-2",
"image_id": "img-2",
"positions": [9],
},
]
}
],
}
monkeypatch.setattr(module.API4ConversationService, "get_list", lambda *_args, **_kwargs: (1, [conv_with_chunks]))
res = _run(inspect.unwrap(module.list_agent_session)("tenant-1", "agent-1"))
projected_chunk = res["data"][0]["messages"][1]["reference"][0]
assert projected_chunk["image_id"] == "img-2"
assert projected_chunk["positions"] == [9] | test | 1 | {"function_name": "test_list_agent_session_projection_unit", "class_name": null, "qualname": "test_list_agent_session_projection_unit", "file_path": "test/testcases/test_http_api/test_session_management/test_session_sdk_routes_unit.py", "repo_id": "infiniflow/ragflow", "loc": 46, "tested_modules": ["pathlib", "types"], "has_docstring": false, "runnable_level": "file_runnable"} |
google/langextract:tests/test_live_api.py:TestLiveAPIOpenAI.test_medication_relationship_extraction | # Context:
import textwrap
import langextract as lx
def has_vertex_ai_credentials(): ...
def retry_on_transient_errors(max_retries, backoff_factor): ...
def add_delay_between_tests(): ...
def get_basic_medication_examples(): ...
def get_relationship_examples(): ...
def extract_by_class(result, extraction_class): ...
def assert_extractions_contain(test_case, result, expected_classes): ...
def assert_valid_char_intervals(test_case, result): ...
class TestLiveAPIGemini(unittest.TestCase): ...
class TestCrossChunkContext(unittest.TestCase): ...
class TestLiveAPIOpenAI(unittest.TestCase):
def test_medication_extraction(self): ...
def test_explicit_provider_selection(self): ...
# Task:
Write a Python test method `test_medication_relationship_extraction` in test class `TestLiveAPIOpenAI` to test relationship extraction for medications with OpenAI.
Module under test: typing, langextract, langextract.core | def test_medication_relationship_extraction(self):
"""Test relationship extraction for medications with OpenAI."""
input_text = """
The patient was prescribed Lisinopril and Metformin last month.
He takes the Lisinopril 10mg daily for hypertension, but often misses
his Metformin 500mg dose which should be taken twice daily for diabetes.
"""
prompt = textwrap.dedent("""
Extract medications with their details, using attributes to group related information:
1. Extract entities in the order they appear in the text
2. Each entity must have a 'medication_group' attribute linking it to its medication
3. All details about a medication should share the same medication_group value
""")
examples = get_relationship_examples()
result = lx.extract(
text_or_documents=input_text,
prompt_description=prompt,
examples=examples,
model_id=DEFAULT_OPENAI_MODEL,
api_key=OPENAI_API_KEY,
use_schema_constraints=False,
language_model_params=OPENAI_MODEL_PARAMS,
)
assert result is not None
assert len(result.extractions) > 0
assert_valid_char_intervals(self, result)
medication_groups = {}
for extraction in result.extractions:
assert (
extraction.attributes is not None
), f"Missing attributes for {extraction.extraction_text}"
assert (
"medication_group" in extraction.attributes
), f"Missing medication_group for {extraction.extraction_text}"
group_name = extraction.attributes["medication_group"]
medication_groups.setdefault(group_name, []).append(extraction)
assert (
len(medication_groups) >= 2
), f"Expected at least 2 medications, found {len(medication_groups)}"
# Allow flexible matching for dosage field (could be "dosage" or "dose")
for med_name, extractions in medication_groups.items():
extraction_classes = {e.extraction_class for e in extractions}
# At minimum, each group should have the medication itself
assert (
_CLASS_MEDICATION in extraction_classes
), f"{med_name} group missing medication entity"
# Dosage is expected but might be formatted differently
assert any(
c in extraction_classes for c in [_CLASS_DOSAGE, "dose"]
), f"{med_name} group missing dosage" | test | 1 | {"function_name": "test_medication_relationship_extraction", "class_name": "TestLiveAPIOpenAI", "qualname": "TestLiveAPIOpenAI.test_medication_relationship_extraction", "file_path": "tests/test_live_api.py", "repo_id": "google/langextract", "loc": 59, "tested_modules": ["typing", "langextract", "langextract.core", "langextract.providers"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:doc/source/ray-overview/examples/mcp-ray-serve/build-mcp-docker-image/weather.py:format_alert | Write a Python function `format_alert` to format an alert feature into a readable string.
Parameters: feature: dict
Returns: str | def format_alert(feature: dict) -> str:
"""Format an alert feature into a readable string."""
props = feature.get("properties", {})
return f"""
Event: {props.get('event', 'Unknown')}
Area: {props.get('areaDesc', 'Unknown')}
Severity: {props.get('severity', 'Unknown')}
Description: {props.get('description', 'No description available')}
Instructions: {props.get('instruction', 'No specific instructions provided')}
""" | function_simple | 0 | {"cognitive_complexity": 0, "loc": 10, "code_loc": 8, "docstring_loc": 1, "function_name": "format_alert", "class_name": null, "qualname": "format_alert", "file_path": "doc/source/ray-overview/examples/mcp-ray-serve/build-mcp-docker-image/weather.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "self_contained"} |
apache/airflow:airflow-core/src/airflow/serialization/definitions/taskgroup.py:SerializedTaskGroup.iter_mapped_task_groups | # Context:
from collections.abc import Generator, Iterator
class SerializedMappedTaskGroup(SerializedTaskGroup): ...
class SerializedTaskGroup(DAGNode):
def __repr__(self) -> str: ...
def _iter_child(child): ...
def __iter__(self): ...
def group_id(self) -> str | None: ...
def label(self) -> str: ...
def node_id(self) -> str: ...
def is_root(self) -> bool: ...
def task_group(self) -> SerializedTaskGroup | None: ...
def child_id(self, label: str) -> str: ...
def upstream_join_id(self) -> str: ...
def downstream_join_id(self) -> str: ...
def roots(self) -> list[DAGNode]: ...
def leaves(self) -> list[DAGNode]: ...
def get_roots(self) -> Generator[SerializedOperator, None, None]: ...
def get_leaves(self) -> Generator[SerializedOperator, None, None]: ...
def get_task_group_dict(self) -> dict[str | None, SerializedTaskGroup]: ...
def iter_tasks(self) -> Iterator[SerializedOperator]: ...
def topological_sort(self) -> list[DAGNode]: ...
def add(self, node: DAGNode) -> DAGNode: ...
# Task:
Write a Python method `iter_mapped_task_groups` for the class `SerializedTaskGroup` to find mapped task groups in the hierarchy.
Returns: Iterator[SerializedMappedTaskGroup] | def iter_mapped_task_groups(self) -> Iterator[SerializedMappedTaskGroup]:
"""
Find mapped task groups in the hierarchy.
Groups are returned from the closest to the outmost. If *self* is a
mapped task group, it is returned first.
"""
group: SerializedTaskGroup | None = self
while group is not None:
if isinstance(group, SerializedMappedTaskGroup):
yield group
group = group.parent_group | function_simple | 1 | {"cognitive_complexity": 3, "loc": 12, "code_loc": 5, "docstring_loc": 6, "function_name": "iter_mapped_task_groups", "class_name": "SerializedTaskGroup", "qualname": "SerializedTaskGroup.iter_mapped_task_groups", "file_path": "airflow-core/src/airflow/serialization/definitions/taskgroup.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "file_runnable"} |
apache/airflow:airflow-core/tests/unit/timetables/test_partitioned_timetable.py:TestPartitionedAssetTimetable.test_serialize | # Context:
from airflow.partition_mappers.identity import IdentityMapper as IdentityMapper
from airflow.sdk import Asset
from airflow.serialization.encoders import ensure_serialized_asset
from airflow.serialization.enums import DagAttributeTypes
from airflow.timetables.simple import PartitionedAssetTimetable
class Key1Mapper(IdentityMapper): ...
def _find_registered_custom_partition_mapper(import_string: str) -> type[PartitionMapper]: ...
def custom_partition_mapper_patch() -> Callable[[], ExitStack]: ...
class TestPartitionedAssetTimetable:
def test_get_partition_mapper_without_mapping(self, asset_obj): ...
def test_get_partition_mapper_with_mapping(self, asset_obj): ...
def test_deserialize(self): ...
# Task:
Write a Python test method `test_serialize` in test class `TestPartitionedAssetTimetable` to verify the behavior of `serialize`.
Module under test: __future__, collections.abc, contextlib | def test_serialize(self):
ser_asset = ensure_serialized_asset(Asset("test"))
timetable = PartitionedAssetTimetable(
assets=ser_asset, partition_mapper_config={ser_asset: IdentityMapper()}
)
assert timetable.serialize() == {
"asset_condition": {
"__type": DagAttributeTypes.ASSET,
"name": "test",
"uri": "test",
"group": "asset",
"extra": {},
},
"partition_mapper_config": [
(
{
"__type": DagAttributeTypes.ASSET,
"name": "test",
"uri": "test",
"group": "asset",
"extra": {},
},
{
"__type": "airflow.partition_mappers.identity.IdentityMapper",
"__var": {},
},
)
],
"default_partition_mapper": {
"__type": "airflow.partition_mappers.identity.IdentityMapper",
"__var": {},
},
} | test | 1 | {"function_name": "test_serialize", "class_name": "TestPartitionedAssetTimetable", "qualname": "TestPartitionedAssetTimetable.test_serialize", "file_path": "airflow-core/tests/unit/timetables/test_partitioned_timetable.py", "repo_id": "apache/airflow", "loc": 33, "tested_modules": ["__future__", "collections.abc", "contextlib", "typing", "airflow._shared.module_loading"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:tests/renderers/test_process_multi_modal_uuids.py:test_multi_modal_uuids_missing_modality_raises | # Context:
import pytest
from vllm.multimodal.parse import parse_mm_uuids
def _build_renderer(mm_cache_gb: float, enable_prefix_caching: bool) -> HfRenderer: ...
def test_multi_modal_uuids_length_mismatch_raises(): ...
def test_multi_modal_uuids_accepts_none_and_passes_through(mm_cache_gb: float, enable_prefix_caching: bool): ...
def test_multi_modal_uuids_accepts_empty(mm_cache_gb: float, enable_prefix_caching: bool): ...
def test_multi_modal_uuids_ignored_when_caching_disabled(): ...
# Task:
Write a Python test function `test_multi_modal_uuids_missing_modality_raises` to verify the behavior of `multi_modal_uuids_missing_modality_raises`.
Module under test: vllm.assets.image, vllm.assets.video, vllm.config | def test_multi_modal_uuids_missing_modality_raises():
renderer = _build_renderer()
mm_data = {
"image": [cherry_pil_image],
"video": None,
}
# Only image uuids provided; video missing should raise
mm_uuids = {"image": ["hash_cherry"]}
mm_processor = renderer.get_mm_processor()
mm_data_items = mm_processor.info.parse_mm_data(mm_data)
mm_uuid_items = parse_mm_uuids(mm_uuids)
with pytest.raises(ValueError, match="is empty but .* is missing"):
renderer._process_mm_uuids(mm_data, mm_data_items, mm_uuid_items, "req-2") | test | 1 | {"function_name": "test_multi_modal_uuids_missing_modality_raises", "class_name": null, "qualname": "test_multi_modal_uuids_missing_modality_raises", "file_path": "tests/renderers/test_process_multi_modal_uuids.py", "repo_id": "vllm-project/vllm", "loc": 17, "tested_modules": ["vllm.assets.image", "vllm.assets.video", "vllm.config", "vllm.multimodal.parse", "vllm.renderers.hf"], "has_docstring": false, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:comfy_api/latest/_io.py:ComfyTypeIO:class_doc | Write a class-level docstring for `ComfyTypeIO` (inherits from ComfyTypeI) which has methods: various methods. | ComfyType subclass that has default Input and Output classes; useful for types with both Inputs and Outputs. | documentation | 1 | {"doc_type": "class", "class_name": "ComfyTypeIO", "file_path": "comfy_api/latest/_io.py", "repo_id": "Comfy-Org/ComfyUI", "char_length": 108, "methods": []} |
apache/airflow:dev/breeze/src/airflow_breeze/utils/release_validator.py:ReleaseValidator:class_doc | Write a class-level docstring for `ReleaseValidator` (inherits from ABC) which has methods: `__init__`, `get_distribution_name`, `get_svn_directory`, `get_expected_files`, `build_packages`. | Base class for release validators with common functionality for PMC verification. | documentation | 1 | {"doc_type": "class", "class_name": "ReleaseValidator", "file_path": "dev/breeze/src/airflow_breeze/utils/release_validator.py", "repo_id": "apache/airflow", "char_length": 81, "methods": ["__init__", "get_distribution_name", "get_svn_directory", "get_expected_files", "build_packages", "validate_svn_files", "validate_reproducible_build", "validate_licenses", "get_svn_directories", "validate_signatures"]} |
infiniflow/ragflow:tools/es-to-oceanbase-migration/src/es_ob_migration/progress.py:ProgressManager.save_progress | # Context:
import json
from dataclasses import dataclass, field, asdict
from datetime import datetime
class MigrationProgress: ...
class ProgressManager:
def __init__(self, progress_dir: str = ".migration_progress"):
"""
Initialize progress manager.
Args:
progress_dir: Directory to store progress files
"""
self.progress_dir = Path(progress_dir)
self.progress_dir.mkdir(parents=True, exist_ok=True)
def _get_progress_file(self, es_index: str, ob_table: str) -> Path: ...
def load_progress(self, es_index: str, ob_table: str) -> MigrationProgress | None: ...
def delete_progress(self, es_index: str, ob_table: str): ...
def create_progress(self, es_index: str, ob_table: str, total_documents: int) -> MigrationProgress: ...
def update_progress(self, progress: MigrationProgress, migrated_count: int, last_sort_values: list[Any] | None, last_batch_ids: list[str] | None): ...
def mark_completed(self, progress: MigrationProgress): ...
def mark_failed(self, progress: MigrationProgress, error: str): ...
def mark_paused(self, progress: MigrationProgress): ...
def can_resume(self, es_index: str, ob_table: str) -> bool: ...
def get_resume_info(self, es_index: str, ob_table: str) -> dict[str, Any] | None: ...
# Task:
Write a Python method `save_progress` for the class `ProgressManager` to save progress to file.
Parameters: progress: MigrationProgress | def save_progress(self, progress: MigrationProgress):
"""
Save progress to file.
Args:
progress: MigrationProgress instance
"""
progress.updated_at = datetime.utcnow().isoformat()
progress_file = self._get_progress_file(progress.es_index, progress.ob_table)
try:
with open(progress_file, "w") as f:
json.dump(asdict(progress), f, indent=2, default=str)
logger.debug(f"Saved progress to {progress_file}")
except Exception as e:
logger.error(f"Failed to save progress: {e}") | function_simple | 1 | {"cognitive_complexity": 1, "loc": 16, "code_loc": 8, "docstring_loc": 6, "function_name": "save_progress", "class_name": "ProgressManager", "qualname": "ProgressManager.save_progress", "file_path": "tools/es-to-oceanbase-migration/src/es_ob_migration/progress.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "file_runnable"} |
unclecode/crawl4ai:test_webhook_implementation.py:test_imports | Write a Python test function `test_imports` to test that all webhook-related modules can be imported.
Module under test: datetime, webhook, schemas | def test_imports():
"""Test that all webhook-related modules can be imported"""
print("=" * 60)
print("TEST 1: Module Imports")
print("=" * 60)
try:
from webhook import WebhookDeliveryService
print("✅ webhook.WebhookDeliveryService imported successfully")
except Exception as e:
print(f"❌ Failed to import webhook module: {e}")
return False
try:
from schemas import WebhookConfig, WebhookPayload
print("✅ schemas.WebhookConfig imported successfully")
print("✅ schemas.WebhookPayload imported successfully")
except Exception as e:
print(f"❌ Failed to import schemas: {e}")
return False
return True | test | 1 | {"function_name": "test_imports", "class_name": null, "qualname": "test_imports", "file_path": "test_webhook_implementation.py", "repo_id": "unclecode/crawl4ai", "loc": 22, "tested_modules": ["datetime", "webhook", "schemas", "webhook", "schemas"], "has_docstring": true, "runnable_level": "self_contained"} |
crewAIInc/crewAI:lib/crewai/tests/llms/azure/test_azure.py:test_azure_token_usage_tracking | # Context:
from unittest.mock import patch, MagicMock, Mock
from crewai.llm import LLM
def mock_azure_credentials(): ...
def test_azure_completion_is_used_when_azure_provider(): ...
def test_azure_completion_is_used_when_azure_openai_provider(): ...
def test_azure_tool_use_conversation_flow(): ...
def test_azure_completion_module_is_imported(): ...
def test_native_azure_raises_error_when_initialization_fails(): ...
def test_azure_completion_initialization_parameters(): ...
def test_azure_specific_parameters(): ...
def test_azure_completion_call(): ...
def test_azure_completion_called_during_crew_execution(): ...
def test_azure_completion_call_arguments(): ...
def test_multiple_azure_calls_in_crew(): ...
def test_azure_completion_with_tools(): ...
def test_azure_raises_error_when_endpoint_missing(): ...
def test_azure_raises_error_when_api_key_missing(): ...
def test_azure_endpoint_configuration(): ...
def test_azure_api_key_configuration(): ...
def test_azure_model_capabilities(): ...
def test_azure_completion_params_preparation(): ...
def test_azure_model_detection(): ...
def test_azure_supports_stop_words(): ...
def test_azure_gpt5_models_do_not_support_stop_words(): ...
def test_azure_o_series_models_do_not_support_stop_words(): ...
def test_azure_responses_api_models_do_not_support_stop_words(): ...
def test_azure_stop_words_not_included_for_unsupported_models(): ...
def test_azure_context_window_size(): ...
def test_azure_message_formatting(): ...
def test_azure_streaming_parameter(): ...
def test_azure_tool_conversion(): ...
def test_azure_environment_variable_endpoint(): ...
def test_azure_http_error_handling(): ...
def test_azure_streaming_completion(): ...
def test_azure_api_version_default(): ...
def test_azure_function_calling_support(): ...
def test_azure_openai_endpoint_url_construction(): ...
def test_azure_openai_endpoint_url_with_trailing_slash(): ...
def test_azure_openai_endpoint_already_complete(): ...
def test_non_azure_openai_endpoint_unchanged(): ...
def test_azure_openai_model_parameter_excluded(): ...
def test_non_azure_openai_model_parameter_included(): ...
def test_azure_message_formatting_with_role(): ...
def test_azure_message_formatting_default_role(): ...
def test_azure_endpoint_detection_flags(): ...
def test_azure_improved_error_messages(): ...
def test_azure_api_version_properly_passed(): ...
def test_azure_timeout_and_max_retries_stored(): ...
def test_azure_complete_params_include_optional_params(): ...
def test_azure_endpoint_validation_with_azure_prefix(): ...
def test_azure_message_formatting_preserves_all_roles(): ...
def test_azure_deepseek_model_support(): ...
def test_azure_mistral_and_other_models(): ...
def test_azure_completion_params_preparation_with_drop_params(): ...
def test_azure_streaming_returns_usage_metrics(): ...
def test_azure_agent_kickoff_structured_output_without_tools(): ...
def test_azure_agent_kickoff_structured_output_with_tools(): ...
def test_azure_stop_words_not_applied_to_structured_output(): ...
def test_azure_stop_words_still_applied_to_regular_responses(): ...
# Task:
Write a Python test function `test_azure_token_usage_tracking` to test that token usage is properly tracked for Azure responses.
Module under test: crewai.llm, crewai.crew, crewai.agent | def test_azure_token_usage_tracking():
"""
Test that token usage is properly tracked for Azure responses
"""
llm = LLM(model="azure/gpt-4")
# Mock the Azure response with usage information
with patch.object(llm.client, 'complete') as mock_complete:
mock_message = MagicMock()
mock_message.content = "test response"
mock_message.tool_calls = None
mock_choice = MagicMock()
mock_choice.message = mock_message
mock_response = MagicMock()
mock_response.choices = [mock_choice]
mock_response.usage = MagicMock(
prompt_tokens=50,
completion_tokens=25,
total_tokens=75
)
mock_complete.return_value = mock_response
result = llm.call("Hello")
# Verify the response
assert result == "test response"
# Verify token usage was extracted
usage = llm._extract_azure_token_usage(mock_response)
assert usage["prompt_tokens"] == 50
assert usage["completion_tokens"] == 25
assert usage["total_tokens"] == 75 | test | 0 | {"function_name": "test_azure_token_usage_tracking", "class_name": null, "qualname": "test_azure_token_usage_tracking", "file_path": "lib/crewai/tests/llms/azure/test_azure.py", "repo_id": "crewAIInc/crewAI", "loc": 34, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.azure.completion"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/utils/test_mcp_cleanup.py:TestTerminateOrphanedMcpProcesses.test_skips_non_orphaned_processes | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
from langflow.utils.mcp_cleanup import (
_kill_mcp_processes,
_terminate_child_mcp_processes,
_terminate_orphaned_mcp_processes,
_try_terminate_mcp_process,
cleanup_mcp_sessions,
)
class TestCleanupMcpSessions: ...
class TestKillMcpProcesses: ...
class TestTerminateChildMcpProcesses: ...
class TestTryTerminateMcpProcess: ...
class TestMcpCleanupIntegration: ...
class TestTerminateOrphanedMcpProcesses:
async def test_terminates_orphaned_mcp_processes(self): ...
async def test_handles_access_denied(self): ...
# Task:
Write a Python test method `test_skips_non_orphaned_processes` in test class `TestTerminateOrphanedMcpProcesses` to test that non-orphaned processes are skipped.
Module under test: langflow.utils.mcp_cleanup | async def test_skips_non_orphaned_processes(self):
"""Test that non-orphaned processes are skipped."""
mock_psutil = MagicMock()
mock_proc = MagicMock()
mock_proc.info = {
"pid": 12345,
"ppid": 1000, # Not orphaned
"cmdline": ["python", "mcp-server-filesystem"],
}
mock_psutil.process_iter.return_value = [mock_proc]
mock_psutil.NoSuchProcess = Exception
mock_psutil.AccessDenied = Exception
mock_psutil.ZombieProcess = Exception
with patch(
"langflow.utils.mcp_cleanup._try_terminate_mcp_process",
new_callable=AsyncMock,
) as mock_terminate:
count = await _terminate_orphaned_mcp_processes(mock_psutil)
assert count == 0
mock_terminate.assert_not_called() | test | 1 | {"function_name": "test_skips_non_orphaned_processes", "class_name": "TestTerminateOrphanedMcpProcesses", "qualname": "TestTerminateOrphanedMcpProcesses.test_skips_non_orphaned_processes", "file_path": "src/backend/tests/unit/utils/test_mcp_cleanup.py", "repo_id": "langflow-ai/langflow", "loc": 24, "tested_modules": ["langflow.utils.mcp_cleanup"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_http_api/test_chat_assistant_management/test_list_chat_assistants.py:TestChatAssistantsList.test_desc_false_parse_branch_p2 | # Context:
import pytest
from common import delete_datasets, list_chat_assistants
from utils import is_sorted
class TestAuthorization: ...
class TestChatAssistantsList:
def test_default(self, HttpApiAuth): ...
def test_page(self, HttpApiAuth, params, expected_code, expected_page_size, expected_message): ...
def test_page_size(self, HttpApiAuth, params, expected_code, expected_page_size, expected_message): ...
def test_orderby(self, HttpApiAuth, params, expected_code, assertions, expected_message): ...
def test_desc(self, HttpApiAuth, params, expected_code, assertions, expected_message): ...
def test_name(self, HttpApiAuth, params, expected_code, expected_num, expected_message): ...
def test_id(self, HttpApiAuth, add_chat_assistants, chat_assistant_id, expected_code, expected_num, expected_message): ...
def test_name_and_id(self, HttpApiAuth, add_chat_assistants, chat_assistant_id, name, expected_code, expected_num, expected_message): ...
def test_concurrent_list(self, HttpApiAuth): ...
def test_invalid_params(self, HttpApiAuth): ...
def test_list_chats_after_deleting_associated_dataset(self, HttpApiAuth, add_chat_assistants): ...
# Task:
Write a Python test method `test_desc_false_parse_branch_p2` in test class `TestChatAssistantsList` to verify the behavior of `desc_false_parse_branch_p2`.
Module under test: concurrent.futures, common, configs | def test_desc_false_parse_branch_p2(self, HttpApiAuth):
res = list_chat_assistants(HttpApiAuth, params={"desc": "False", "orderby": "create_time"})
assert res["code"] == 0
assert is_sorted(res["data"], "create_time", False) | test | 1 | {"function_name": "test_desc_false_parse_branch_p2", "class_name": "TestChatAssistantsList", "qualname": "TestChatAssistantsList.test_desc_false_parse_branch_p2", "file_path": "test/testcases/test_http_api/test_chat_assistant_management/test_list_chat_assistants.py", "repo_id": "infiniflow/ragflow", "loc": 4, "tested_modules": ["concurrent.futures", "common", "configs", "libs.auth", "utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/models/attention_dispatch.py:_maybe_pad_qkv_head | # Context:
import torch
import torch.distributed as dist
import torch.nn.functional as F
class AttentionBackendName(str, Enum): ...
class _AttentionBackendRegistry: ...
class _HubKernelConfig: ...
def attention_backend(backend: str | AttentionBackendName): ...
def dispatch_attention_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, attention_kwargs: dict[str, Any] | None, backend: AttentionBackendName | None, parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _check_attn_mask_or_causal(attn_mask: torch.Tensor | None, is_causal: bool, **kwargs) -> None: ...
def _check_device(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: ...
def _check_device_cuda(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: ...
def _check_device_cuda_atleast_smXY(major: int, minor: int) -> Callable: ...
def _check_qkv_dtype_match(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: ...
def _check_qkv_dtype_bf16_or_fp16(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, **kwargs) -> None: ...
def _check_shape(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, **kwargs) -> None: ...
def _check_attention_backend_requirements(backend: AttentionBackendName) -> None: ...
def _prepare_for_flash_attn_or_sage_varlen_without_mask(batch_size: int, seq_len_q: int, seq_len_kv: int, device: torch.device | None): ...
def _prepare_for_flash_attn_or_sage_varlen_with_mask(batch_size: int, seq_len_q: int, attn_mask: torch.Tensor, device: torch.device | None): ...
def _prepare_for_flash_attn_or_sage_varlen(batch_size: int, seq_len_q: int, seq_len_kv: int, attn_mask: torch.Tensor | None, device: torch.device | None) -> None: ...
def _normalize_attn_mask(attn_mask: torch.Tensor, batch_size: int, seq_len_k: int) -> torch.Tensor: ...
def _flex_attention_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx): ...
def _resolve_kernel_attr(module, attr_path: str): ...
def _maybe_download_kernel_for_backend(backend: AttentionBackendName) -> None: ...
def _wrapped_flash_attn_3(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, softmax_scale: float | None, causal: bool, qv: torch.Tensor | None, q_descale: torch.Tensor | None, k_descale: torch.Tensor | None, v_descale: torch.Tensor | None, attention_chunk: int, softcap: float, num_splits: int, pack_gqa: bool | None, deterministic: bool, sm_margin: int) -> tuple[torch.Tensor, torch.Tensor]: ...
def _(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, softmax_scale: float | None, causal: bool, qv: torch.Tensor | None, q_descale: torch.Tensor | None, k_descale: torch.Tensor | None, v_descale: torch.Tensor | None, attention_chunk: int, softcap: float, num_splits: int, pack_gqa: bool | None, deterministic: bool, sm_margin: int) -> tuple[torch.Tensor, torch.Tensor]: ...
def _native_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _native_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _cudnn_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _cudnn_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _native_flash_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _native_flash_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _flash_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _flash_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _flash_attention_hub_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _flash_attention_hub_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _flash_attention_3_hub_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None, window_size: tuple[int, int], softcap: float, num_splits: int, pack_gqa: bool | None, deterministic: bool, sm_margin: int): ...
def _flash_attention_3_hub_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _sage_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _sage_attention_hub_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _sage_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args): ...
def _maybe_modify_attn_mask_npu(query: torch.Tensor, key: torch.Tensor, attn_mask: torch.Tensor | None): ...
def _npu_attention_forward_op(ctx: torch.autograd.function.FunctionCtx, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _save_ctx: bool, _parallel_config: 'ParallelConfig' | None): ...
def _npu_attention_backward_op(ctx: torch.autograd.function.FunctionCtx, grad_out: torch.Tensor, *args, **kwargs): ...
def _wait_tensor(tensor): ...
def _all_to_all_single(x: torch.Tensor, group) -> torch.Tensor: ...
def _all_to_all_dim_exchange(x: torch.Tensor, scatter_idx: int, gather_idx: int, group) -> torch.Tensor: ...
class SeqAllToAllDim(torch.autograd.Function): ...
def _maybe_unpad_qkv_head(x: torch.Tensor, H_PAD: int, group: dist.ProcessGroup) -> torch.Tensor: ...
def _maybe_pad_o_head(x: torch.Tensor, H: int, group: dist.ProcessGroup) -> tuple[torch.Tensor, int]: ...
def _maybe_unpad_o_head(x: torch.Tensor, H_PAD: int, group: dist.ProcessGroup) -> torch.Tensor: ...
def ulysses_anything_metadata(query: torch.Tensor, **kwargs) -> dict: ...
def all_to_all_single_any_qkv_async(x: torch.Tensor, group: dist.ProcessGroup, **kwargs) -> Callable[..., torch.Tensor]: ...
def all_to_all_single_any_o_async(x: torch.Tensor, group: dist.ProcessGroup, **kwargs) -> Callable[..., torch.Tensor]: ...
class TemplatedRingAttention(torch.autograd.Function): ...
class TemplatedUlyssesAttention(torch.autograd.Function): ...
class TemplatedUlyssesAnythingAttention(torch.autograd.Function): ...
def _templated_unified_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor, dropout_p: float, is_causal: bool, scale: float, enable_gqa: bool, return_lse: bool, forward_op, backward_op, _parallel_config: 'ParallelConfig' | None, scatter_idx: int, gather_idx: int): ...
def _templated_context_parallel_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, forward_op, backward_op, _parallel_config: 'ParallelConfig' | None): ...
def _flash_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_attention_hub(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_varlen_attention_hub(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, scale: float | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_varlen_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, scale: float | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_attention_3(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, scale: float | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_attention_3_hub(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, scale: float | None, is_causal: bool, window_size: tuple[int, int], softcap: float, deterministic: bool, return_attn_probs: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_attention_3_varlen_hub(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, scale: float | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _flash_varlen_attention_3(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, scale: float | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _aiter_flash_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_flex_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | 'flex_attention.BlockMask' | None, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _prepare_additive_attn_mask(attn_mask: torch.Tensor, target_dtype: torch.dtype, reshape_4d: bool) -> torch.Tensor: ...
def _native_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_cudnn_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_efficient_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_flash_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_math_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_npu_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _native_xla_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_attention_hub(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_varlen_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_qk_int8_pv_fp8_cuda_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_qk_int8_pv_fp8_cuda_sm90_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_qk_int8_pv_fp16_cuda_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _sage_qk_int8_pv_fp16_triton_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, is_causal: bool, scale: float | None, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
def _xformers_attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: torch.Tensor | None, dropout_p: float, is_causal: bool, scale: float | None, enable_gqa: bool, return_lse: bool, _parallel_config: 'ParallelConfig' | None) -> torch.Tensor: ...
# Task:
Write a Python function `_maybe_pad_qkv_head` to maybe pad the head dimension to be divisible by world_size.
Parameters: x: torch.Tensor, H: int, group: dist.ProcessGroup
Returns: tuple[torch.Tensor, int] | def _maybe_pad_qkv_head(x: torch.Tensor, H: int, group: dist.ProcessGroup) -> tuple[torch.Tensor, int]:
r"""Maybe pad the head dimension to be divisible by world_size.
x: torch.Tensor, shape (B, S_LOCAL, H, D) H: int, original global head num return: tuple[torch.Tensor, int], padded
tensor (B, S_LOCAL, H + H_PAD, D) and H_PAD
"""
world_size = dist.get_world_size(group=group)
H_PAD = 0
if H % world_size != 0:
H_PAD = world_size - (H % world_size)
NEW_H_LOCAL = (H + H_PAD) // world_size
# e.g., Allow: H=30, world_size=8 -> NEW_H_LOCAL=4, H_PAD=2.
# NOT ALLOW: H=30, world_size=16 -> NEW_H_LOCAL=2, H_PAD=14.
assert H_PAD < NEW_H_LOCAL, f"Padding head num {H_PAD} should be less than new local head num {NEW_H_LOCAL}"
x = F.pad(x, (0, 0, 0, H_PAD)).contiguous()
return x, H_PAD | function_simple | 1 | {"cognitive_complexity": 1, "loc": 15, "code_loc": 8, "docstring_loc": 4, "function_name": "_maybe_pad_qkv_head", "class_name": null, "qualname": "_maybe_pad_qkv_head", "file_path": "src/diffusers/models/attention_dispatch.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "plib_runnable"} |
commaai/openpilot:selfdrive/controls/tests/test_torqued_lat_accel_offset.py:test_straight_road_roll_bias | # Context:
import numpy as np
def generate_inputs(torque_tune, la_err_std, input_noise_std): ...
def get_warmed_up_estimator(steer_torques, lat_accels): ...
def simulate_straight_road_msgs(est): ...
def test_estimated_offset(): ...
# Task:
Write a Python test function `test_straight_road_roll_bias` to verify the behavior of `straight_road_roll_bias`.
Module under test: cereal, opendbc.car, opendbc.car | def test_straight_road_roll_bias():
steer_torques, lat_accels = generate_inputs(TORQUE_TUNE, la_err_std=LA_ERR_STD, input_noise_std=INPUT_NOISE_STD)
est = get_warmed_up_estimator(steer_torques, lat_accels)
simulate_straight_road_msgs(est)
msg = est.get_msg()
assert (msg.liveTorqueParameters.latAccelOffsetRaw < -0.05) and np.isfinite(msg.liveTorqueParameters.latAccelOffsetRaw) | test | 0 | {"function_name": "test_straight_road_roll_bias", "class_name": null, "qualname": "test_straight_road_roll_bias", "file_path": "selfdrive/controls/tests/test_torqued_lat_accel_offset.py", "repo_id": "commaai/openpilot", "loc": 6, "tested_modules": ["cereal", "opendbc.car", "opendbc.car", "opendbc.car.lateral", "openpilot.common.realtime"], "has_docstring": false, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/anthropic/test_anthropic.py:test_anthropic_agent_kickoff_structured_output_with_tools | # Context:
import pytest
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.tools import tool
from pydantic import BaseModel, Field
def mock_anthropic_api_key(): ...
def test_anthropic_completion_is_used_when_anthropic_provider(): ...
def test_anthropic_completion_is_used_when_claude_provider(): ...
def test_anthropic_completion_module_is_imported(): ...
def test_native_anthropic_raises_error_when_initialization_fails(): ...
def test_anthropic_completion_initialization_parameters(): ...
def test_anthropic_specific_parameters(): ...
def test_anthropic_completion_call(): ...
def test_anthropic_completion_called_during_crew_execution(): ...
def test_anthropic_completion_call_arguments(): ...
def test_multiple_anthropic_calls_in_crew(): ...
def test_anthropic_completion_with_tools(): ...
def test_anthropic_raises_error_when_model_not_supported(): ...
def test_anthropic_client_params_setup(): ...
def test_anthropic_client_params_override_defaults(): ...
def test_anthropic_client_params_none(): ...
def test_anthropic_client_params_empty_dict(): ...
def test_anthropic_model_detection(): ...
def test_anthropic_supports_stop_words(): ...
def test_anthropic_context_window_size(): ...
def test_anthropic_message_formatting(): ...
def test_anthropic_streaming_parameter(): ...
def test_anthropic_tool_conversion(): ...
def test_anthropic_environment_variable_api_key(): ...
def test_anthropic_token_usage_tracking(): ...
def test_anthropic_stop_sequences_sync(): ...
def test_anthropic_stop_sequences_sent_to_api(): ...
def test_anthropic_thinking(): ...
def test_anthropic_thinking_blocks_preserved_across_turns(): ...
def test_anthropic_function_calling(): ...
def test_anthropic_tool_execution_with_available_functions(): ...
def test_anthropic_tool_execution_returns_tool_result_directly(): ...
def test_anthropic_agent_kickoff_structured_output_without_tools(): ...
def test_anthropic_cached_prompt_tokens(): ...
def test_anthropic_streaming_cached_prompt_tokens(): ...
def test_anthropic_cached_prompt_tokens_with_tools(): ...
# Task:
Write a Python test function `test_anthropic_agent_kickoff_structured_output_with_tools` to test that agent kickoff returns structured output after using tools.
Module under test: crewai.llm, crewai.crew, crewai.agent | def test_anthropic_agent_kickoff_structured_output_with_tools():
"""
Test that agent kickoff returns structured output after using tools.
This tests post-tool-call structured output handling for Anthropic models.
"""
from pydantic import BaseModel, Field
from crewai.tools import tool
class CalculationResult(BaseModel):
"""Structured output for calculation results."""
operation: str = Field(description="The mathematical operation performed")
result: int = Field(description="The result of the calculation")
explanation: str = Field(description="Brief explanation of the calculation")
@tool
def add_numbers(a: int, b: int) -> int:
"""Add two numbers together and return the sum."""
return a + b
agent = Agent(
role="Calculator",
goal="Perform calculations using available tools",
backstory="You are a calculator assistant that uses tools to compute results.",
llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
tools=[add_numbers],
verbose=True,
)
result = agent.kickoff(
messages="Calculate 15 + 27 using your add_numbers tool. Report the result.",
response_format=CalculationResult,
)
assert result.pydantic is not None, "Expected pydantic output but got None"
assert isinstance(result.pydantic, CalculationResult), f"Expected CalculationResult but got {type(result.pydantic)}"
assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
assert result.pydantic.operation, "Operation should not be empty"
assert result.pydantic.explanation, "Explanation should not be empty" | test | 0 | {"function_name": "test_anthropic_agent_kickoff_structured_output_with_tools", "class_name": null, "qualname": "test_anthropic_agent_kickoff_structured_output_with_tools", "file_path": "lib/crewai/tests/llms/anthropic/test_anthropic.py", "repo_id": "crewAIInc/crewAI", "loc": 39, "tested_modules": ["crewai.llm", "crewai.crew", "crewai.agent", "crewai.task", "crewai.llms.providers.anthropic.completion"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py:TestMixedProviderInterceptors:class_doc | Write a class-level docstring for `TestMixedProviderInterceptors` which has methods: `test_openai_and_anthropic_different_interceptors`, `test_same_interceptor_different_providers`. | Test suite for using interceptors with different providers. | documentation | 0 | {"doc_type": "class", "class_name": "TestMixedProviderInterceptors", "file_path": "lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py", "repo_id": "crewAIInc/crewAI", "char_length": 59, "methods": ["test_openai_and_anthropic_different_interceptors", "test_same_interceptor_different_providers"]} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestEndToEndMCPSchema:class_doc | Write a class-level docstring for `TestEndToEndMCPSchema` which has methods: `test_model_creation`, `test_valid_input_accepted`, `test_invalid_enum_rejected`, `test_model_name_for_mcp_tool`, `test_enriched_descriptions_for_mcp`. | Realistic MCP tool schema exercising multiple features simultaneously. | documentation | 0 | {"doc_type": "class", "class_name": "TestEndToEndMCPSchema", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "char_length": 70, "methods": ["test_model_creation", "test_valid_input_accepted", "test_invalid_enum_rejected", "test_model_name_for_mcp_tool", "test_enriched_descriptions_for_mcp", "test_optional_fields_accept_none", "test_nested_filters_validated"]} |
zhayujie/chatgpt-on-wechat:agent/tools/scheduler/task_store.py:TaskStore.load_tasks | # Context:
import json
import os
from typing import Dict, List, Optional
class TaskStore:
def __init__(self, store_path: str = None):
"""
Initialize task store
Args:
store_path: Path to tasks.json file. Defaults to ~/cow/scheduler/tasks.json
"""
if store_path is None:
# Default to ~/cow/scheduler/tasks.json
home = expand_path("~")
store_path = os.path.join(home, "cow", "scheduler", "tasks.json")
self.store_path = store_path
self.lock = threading.Lock()
self._ensure_store_dir()
def _ensure_store_dir(self): ...
def save_tasks(self, tasks: Dict[str, dict]): ...
def add_task(self, task: dict) -> bool: ...
def update_task(self, task_id: str, updates: dict) -> bool: ...
def delete_task(self, task_id: str) -> bool: ...
def get_task(self, task_id: str) -> Optional[dict]: ...
def list_tasks(self, enabled_only: bool) -> List[dict]: ...
def enable_task(self, task_id: str, enabled: bool) -> bool: ...
# Task:
Write a Python method `load_tasks` for the class `TaskStore` to load all tasks from storage.
Returns: Dict[str, dict] | def load_tasks(self) -> Dict[str, dict]:
"""
Load all tasks from storage
Returns:
Dictionary of task_id -> task_data
"""
with self.lock:
if not os.path.exists(self.store_path):
return {}
try:
with open(self.store_path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data.get("tasks", {})
except Exception as e:
print(f"Error loading tasks: {e}")
return {} | function_simple | 1 | {"cognitive_complexity": 3, "loc": 18, "code_loc": 10, "docstring_loc": 6, "function_name": "load_tasks", "class_name": "TaskStore", "qualname": "TaskStore.load_tasks", "file_path": "agent/tools/scheduler/task_store.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/tests/test_generate_tool_specs.py:test_extract_init_params_schema | # Context:
class MockToolSchema(BaseModel): ...
class MockTool(BaseTool): ...
def extractor(): ...
def test_unwrap_schema(extractor): ...
def mock_tool_extractor(extractor): ...
def test_extract_basic_tool_info(mock_tool_extractor): ...
def test_extract_env_vars(mock_tool_extractor): ...
def test_extract_run_params_schema(mock_tool_extractor): ...
def test_extract_package_dependencies(mock_tool_extractor): ...
def test_save_to_json(extractor, tmp_path): ...
# Task:
Write a Python test function `test_extract_init_params_schema` to verify the behavior of `extract_init_params_schema`.
Module under test: crewai.tools.base_tool, crewai_tools.generate_tool_specs, pydantic | def test_extract_init_params_schema(mock_tool_extractor):
tool_info = mock_tool_extractor
init_params_schema = tool_info["init_params_schema"]
assert init_params_schema.keys() == {
"$defs",
"properties",
"title",
"type",
}
another_parameter = init_params_schema["properties"]["another_parameter"]
assert another_parameter["description"] == ""
assert another_parameter["default"] == "Another way to define a default value"
assert another_parameter["type"] == "string"
my_parameter = init_params_schema["properties"]["my_parameter"]
assert my_parameter["description"] == "What a description"
assert my_parameter["default"] == "This is default value"
assert my_parameter["type"] == "string"
my_parameter_bool = init_params_schema["properties"]["my_parameter_bool"]
assert not my_parameter_bool["default"]
assert my_parameter_bool["type"] == "boolean" | test | 0 | {"function_name": "test_extract_init_params_schema", "class_name": null, "qualname": "test_extract_init_params_schema", "file_path": "lib/crewai-tools/tests/test_generate_tool_specs.py", "repo_id": "crewAIInc/crewAI", "loc": 24, "tested_modules": ["crewai.tools.base_tool", "crewai_tools.generate_tool_specs", "pydantic"], "has_docstring": false, "runnable_level": "file_runnable"} |
huggingface/transformers:src/transformers/generation/continuous_batching/cache.py:PagedAttentionMemoryHandler.compute_num_blocks | # Context:
from math import floor, gcd, sqrt
import torch
from .requests import RequestState, RequestStatus, get_device_and_memory_breakdown, logger
def group_layers_by_attn_type(config: PreTrainedConfig) -> tuple[list[list[int]], list[str]]: ...
class PagedAttentionCache: ...
class PagedAttentionMemoryHandler:
_activation_dtype = torch.bfloat16
_input_dtype = torch.int32
_upper_bound_max_batch_tokens = 256
_upper_bound_num_blocks = 4096
def __init__(
self,
block_size: int,
page_size: int,
num_groups: int,
group_size: int,
peak_activation_per_token: int,
num_attention_masks: int,
) -> None:
"""Initialize the memory handler with the parameters that cannot be automatically inferred.
Args:
block_size: Size of the cache blocks
page_size: Size of the cache pages
num_groups: Number of layer groups
group_size: Number of layers per layer group
peak_activation_per_token: Maximum size of activation tensor per token, = hidden_size + vocab_size
num_attention_masks: Number of attention masks, 0 if no attention mask is used, 2 if hybrid model, else 1
"""
self.block_size = block_size
self.page_size = page_size
self.num_groups = num_groups
self.group_size = group_size
self.peak_activation_per_token = peak_activation_per_token
self.num_attention_masks = num_attention_masks
def get_available_memory(max_memory_percent: float) -> int: ...
def infer_num_blocks_and_max_batch_tokens(self, num_blocks: int | None, max_batch_tokens: int | None, max_memory_percent: float, cache_dtype: torch.dtype) -> tuple[int, int]: ...
def compute_num_blocks_and_max_batch_tokens(self, max_memory_percent: float, cache_dtype: torch.dtype, m: float) -> tuple[int, int]: ...
def compute_max_batch_tokens(self, num_blocks: int, max_memory_percent: float, cache_dtype: torch.dtype) -> int: ...
def compute_memory_footprint(self, num_blocks: int, max_batch_tokens: int, cache_dtype: torch.dtype) -> int: ...
# Task:
Write a Python method `compute_num_blocks` for the class `PagedAttentionMemoryHandler` to calculate the number of cache blocks N given a fixed maximum number of batch tokens M (the closed-form formula for N is given in the docstring).
Parameters: max_batch_tokens: int, max_memory_percent: float, cache_dtype: torch.dtype
Returns: int | def compute_num_blocks(
self,
max_batch_tokens: int,
max_memory_percent: float,
cache_dtype: torch.dtype = torch.float16,
) -> int:
"""Calculate number of cache blocks N given a fixed maximum token per token M. The formula for N is given by:
N = (available_memory - M * (peak_activation_per_token * activation_dtype + 28 + 4 * num_group))
/ (2 * (layer_group_size * page_size * cache_dtype + 2 * num_group) + M * (num_attention_masks * activation_dtype_size))
"""
cache_memory = self.get_available_memory(max_memory_percent)
# Compute numerator
num = cache_memory
num -= max_batch_tokens * self.peak_activation_per_token * self._activation_dtype.itemsize
num -= max_batch_tokens * (28 + 4 * self.num_groups)
# Compute denominator
denum = 2 * (self.group_size * self.page_size * cache_dtype.itemsize + 2 * self.num_groups)
denum += max_batch_tokens * (self.num_attention_masks * self._activation_dtype.itemsize)
denum += max_batch_tokens * self._activation_dtype.itemsize
# Compute cache size and return number of blocks
num_pages = floor(num / denum)
num_blocks = num_pages // self.block_size
if num_blocks > self._upper_bound_num_blocks:
logger.info(f"{num_blocks = } is too large, setting to {self._upper_bound_num_blocks = }")
num_blocks = self._upper_bound_num_blocks
return num_blocks | function_simple | 0 | {"cognitive_complexity": 1, "loc": 27, "code_loc": 13, "docstring_loc": 5, "function_name": "compute_num_blocks", "class_name": "PagedAttentionMemoryHandler", "qualname": "PagedAttentionMemoryHandler.compute_num_blocks", "file_path": "src/transformers/generation/continuous_batching/cache.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/tests/test_tracing_utils.py:test_disable_tracing_exporter | # Context:
from ray.serve._private.common import ServeComponentType
from ray.serve._private.tracing_utils import (
DEFAULT_TRACING_EXPORTER_IMPORT_PATH,
TRACE_STACK,
_append_trace_stack,
_load_span_processors,
_validate_tracing_exporter,
_validate_tracing_exporter_processors,
set_trace_status,
setup_tracing,
)
def use_custom_tracing_exporter(): ...
def serve_and_ray_shutdown(): ...
class FakeSpan: ...
def test_validate_tracing_exporter_with_string(): ...
def test_validate_tracing_exporter_with_args(): ...
def test_validate_tracing_exporter_processors_list(): ...
def test_validate_tracing_exporter_processors_full_output(): ...
def test_missing_dependencies(): ...
def test_default_tracing_exporter(ray_start_cluster): ...
def test_custom_tracing_exporter(use_custom_tracing_exporter): ...
def test_tracing_sampler(use_custom_tracing_exporter): ...
def test_tracing_e2e(serve_and_ray_shutdown, serve_application, expected_proxy_spans_path, expected_replica_spans_path, expected_upstream_spans_path): ...
def test_tracing_e2e_with_errors(serve_and_ray_shutdown, protocol, expected_status_code, expected_span_status): ...
def custom_tracing_exporter(): ...
def load_json_fixture(file_path): ...
def load_spans(file_path): ...
def sanitize_spans(spans): ...
def validate_span_associations_in_trace(spans): ...
def test_set_trace_status_empty_stack(): ...
def test_set_trace_status_error(): ...
def test_set_trace_status_ok(caplog): ...
def test_append_trace_stack_multithread(): ...
def test_batched_span_attached_to_first_request_trace(): ...
# Task:
Write a Python test function `test_disable_tracing_exporter` to test that setting `tracing_exporter_import_path`.
Module under test: pathlib, threading, typing | def test_disable_tracing_exporter():
"""Test that setting `tracing_exporter_import_path`
to an empty string disables tracing.
"""
is_tracing_setup_successful = setup_tracing(
component_type=ServeComponentType.REPLICA,
component_name="component_name",
component_id="component_id",
tracing_exporter_import_path="",
tracing_sampling_ratio=1.0,
)
assert is_tracing_setup_successful is False | test | 0 | {"function_name": "test_disable_tracing_exporter", "class_name": null, "qualname": "test_disable_tracing_exporter", "file_path": "python/ray/serve/tests/test_tracing_utils.py", "repo_id": "ray-project/ray", "loc": 13, "tested_modules": ["pathlib", "threading", "typing", "starlette.requests", "starlette.responses"], "has_docstring": true, "runnable_level": "plib_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_get_nodes.py:test_get_nodes_with_filters_generates_correct_sql_and_params | # Context:
from google.cloud import bigquery
from llama_index.core.vector_stores import MetadataFilter, MetadataFilters
from llama_index.vector_stores.bigquery import BigQueryVectorStore
from sql_assertions import assert_equivalent_sql_statements
def test_get_nodes_constructs_nodes_from_valid_metadata_row(vector_store: BigQueryVectorStore): ...
def test_get_nodes_falls_back_to_manual_textnode_on_metadata_parse_error(mock_metadata_dict_to_node, vector_store: BigQueryVectorStore): ...
def test_get_nodes_without_arguments_raises_value_error(vector_store: BigQueryVectorStore): ...
# Task:
Write a Python test function `test_get_nodes_with_filters_generates_correct_sql_and_params` to it should execute a parameterized query to get nodes based on filter criteria.
Module under test: google.cloud, llama_index.core.schema, llama_index.core.vector_stores | def test_get_nodes_with_filters_generates_correct_sql_and_params(
vector_store: BigQueryVectorStore,
):
"""It should execute a parameterized query to get nodes based on filter criteria"""
# Given filtering criteria
filters = MetadataFilters(
filters=[
MetadataFilter(key="author", value="ceo@company.com"),
MetadataFilter(key="author", value="cfo@company.com"),
],
condition="or",
)
node_ids = ["node1", "node2"]
# When `get_nodes` is called with the filtering criteria
vector_store.get_nodes(node_ids, filters)
# Then it should call BigQuery with the correct query parameters
vector_store.client.query_and_wait.assert_called_once()
args, kwargs = vector_store.client.query_and_wait.call_args
actual_query = args[0]
job_config = kwargs["job_config"]
expected_query_params = [
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="ceo@company.com"
),
bigquery.ScalarQueryParameter(
name=None, type_="STRING", value="cfo@company.com"
),
bigquery.ArrayQueryParameter(
name="node_ids", array_type="STRING", values=["node1", "node2"]
),
]
assert isinstance(job_config, bigquery.QueryJobConfig)
assert job_config.query_parameters == expected_query_params
# And the actual SQL query should match the expected SQL query
expected_query = """
SELECT node_id,
text,
embedding,
metadata
FROM `mock-project.mock_dataset.mock_table`
WHERE (SAFE.JSON_VALUE(metadata, '$."author"') = ? OR SAFE.JSON_VALUE(metadata, '$."author"') = ?)
AND node_id IN UNNEST(@node_ids);
"""
assert_equivalent_sql_statements(actual_query, expected_query) | test | 1 | {"function_name": "test_get_nodes_with_filters_generates_correct_sql_and_params", "class_name": null, "qualname": "test_get_nodes_with_filters_generates_correct_sql_and_params", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-bigquery/tests/test_get_nodes.py", "repo_id": "run-llama/llama_index", "loc": 48, "tested_modules": ["google.cloud", "llama_index.core.schema", "llama_index.core.vector_stores", "llama_index.vector_stores.bigquery", "sql_assertions"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_mongodb_vector_search_tool.py:test_provide_config | # Context:
from unittest.mock import patch
from crewai_tools import MongoDBVectorSearchConfig, MongoDBVectorSearchTool
def mongodb_vector_search_tool(): ...
def test_successful_query_execution(mongodb_vector_search_tool): ...
def test_cleanup_on_deletion(mongodb_vector_search_tool): ...
def test_create_search_index(mongodb_vector_search_tool): ...
def test_add_texts(mongodb_vector_search_tool): ...
# Task:
Write a Python test function `test_provide_config` to verify the behavior of `provide_config`.
Module under test: crewai_tools | def test_provide_config():
query_config = MongoDBVectorSearchConfig(limit=10)
tool = MongoDBVectorSearchTool(
connection_string="foo",
database_name="bar",
collection_name="test",
query_config=query_config,
vector_index_name="foo",
embedding_model="bar",
)
tool._embed_texts = lambda x: [[0.1]]
with patch.object(tool._coll, "aggregate") as mock_aggregate:
mock_aggregate.return_value = [dict(text="foo", score=0.1, _id=1)]
tool._run(query="sandwiches")
assert mock_aggregate.mock_calls[-1].args[0][0]["$vectorSearch"]["limit"] == 10
mock_aggregate.return_value = [dict(text="foo", score=0.1, _id=1)] | test | 0 | {"function_name": "test_provide_config", "class_name": null, "qualname": "test_provide_config", "file_path": "lib/crewai-tools/tests/tools/test_mongodb_vector_search_tool.py", "repo_id": "crewAIInc/crewAI", "loc": 18, "tested_modules": ["crewai_tools"], "has_docstring": false, "runnable_level": "project_runnable"} |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_dynamic_tools.py:MultipleDynamicToolsMiddleware:class_doc | Write a class-level docstring for `MultipleDynamicToolsMiddleware` (inherits from AgentMiddleware) which has methods: `wrap_model_call`, `awrap_model_call`, `_handle_tool`, `wrap_tool_call`, `awrap_tool_call`. | Middleware that dynamically adds multiple tools (sync and async). | documentation | 1 | {"doc_type": "class", "class_name": "MultipleDynamicToolsMiddleware", "file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_dynamic_tools.py", "repo_id": "langchain-ai/langchain", "char_length": 65, "methods": ["wrap_model_call", "awrap_model_call", "_handle_tool", "wrap_tool_call", "awrap_tool_call"]} |
ray-project/ray:python/ray/data/namespace_expressions/arr_namespace.py:module_doc | Write a module-level docstring for the Python module `arr_namespace` which contains class `_ArrayNamespace`. | Array namespace for expression operations on array-typed columns. | documentation | 0 | {"doc_type": "module", "module_name": "arr_namespace", "file_path": "python/ray/data/namespace_expressions/arr_namespace.py", "repo_id": "ray-project/ray", "char_length": 65} |
huggingface/transformers:tests/models/solar_open/test_modeling_solar_open.py:SolarOpenModelTest.test_rope_parameters_partially_initialized | # Context:
from transformers import AutoTokenizer, SolarOpenConfig, SolarOpenForCausalLM, SolarOpenModel
class SolarOpenModelTester(CausalLMModelTester): ...
class SolarOpenIntegrationTest(unittest.TestCase): ...
class SolarOpenModelTest(CausalLMModelTest, unittest.TestCase):
model_tester_class = SolarOpenModelTester
model_split_percents = [0.5, 0.85, 0.9] # it tries to offload everything with the default value
# Task:
Write a Python test method `test_rope_parameters_partially_initialized` in test class `SolarOpenModelTest` to test for SolarOpenConfig when rope_parameters is partially initialized.
Module under test: transformers, transformers.testing_utils, causal_lm_tester | def test_rope_parameters_partially_initialized(self):
"""
Test for SolarOpenConfig when rope_parameters is partially initialized
"""
config = SolarOpenConfig(
rope_parameters={
"rope_type": "yarn",
"factor": 2.0,
"original_max_position_embeddings": 65536,
}
)
# ensure SolarOpenConfig overrides the parent's default partial_rotary_factor to 1.0
self.assertEqual(config.rope_parameters["partial_rotary_factor"], 1.0)
self.assertEqual(config.rope_parameters["rope_theta"], 1_000_000) | test | 0 | {"function_name": "test_rope_parameters_partially_initialized", "class_name": "SolarOpenModelTest", "qualname": "SolarOpenModelTest.test_rope_parameters_partially_initialized", "file_path": "tests/models/solar_open/test_modeling_solar_open.py", "repo_id": "huggingface/transformers", "loc": 15, "tested_modules": ["transformers", "transformers.testing_utils", "causal_lm_tester", "transformers"], "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_tool_hooks.py:TestToolHooksIntegration.test_hooks_with_validation_and_sanitization | # Context:
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
register_after_tool_call_hook,
register_before_tool_call_hook,
)
def mock_tool(): ...
def mock_agent(): ...
def mock_task(): ...
def mock_crew(): ...
def clear_hooks(): ...
class TestToolCallHookContext: ...
class TestBeforeToolCallHooks: ...
class TestAfterToolCallHooks: ...
class TestNativeToolCallingHooksIntegration: ...
class TestToolHooksIntegration:
def test_multiple_before_hooks_execute_in_order(self, mock_tool): ...
def test_first_blocking_hook_stops_execution(self, mock_tool): ...
def test_multiple_after_hooks_chain_modifications(self, mock_tool): ...
def test_unregister_before_hook(self): ...
def test_unregister_after_hook(self): ...
def test_clear_all_tool_call_hooks(self): ...
def test_lite_agent_hooks_integration_with_real_tool(self): ...
# Task:
Write a Python test method `test_hooks_with_validation_and_sanitization` in test class `TestToolHooksIntegration` to test a realistic scenario with validation and sanitization hooks.
Module under test: __future__, crewai.hooks, crewai.hooks.tool_hooks | def test_hooks_with_validation_and_sanitization(self, mock_tool):
"""Test a realistic scenario with validation and sanitization hooks."""
# Validation hook (before)
def validate_file_path(context):
if context.tool_name == "write_file":
file_path = context.tool_input.get("file_path", "")
if ".env" in file_path:
return False # Block sensitive files
return None
# Sanitization hook (after)
def sanitize_secrets(context):
if context.tool_result and "SECRET_KEY" in context.tool_result:
return context.tool_result.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
return None
register_before_tool_call_hook(validate_file_path)
register_after_tool_call_hook(sanitize_secrets)
# Test blocking
blocked_context = ToolCallHookContext(
tool_name="write_file",
tool_input={"file_path": ".env"},
tool=mock_tool,
)
before_hooks = get_before_tool_call_hooks()
blocked = False
for hook in before_hooks:
if hook(blocked_context) is False:
blocked = True
break
assert blocked is True
# Test sanitization
sanitize_context = ToolCallHookContext(
tool_name="read_file",
tool_input={"file_path": "config.txt"},
tool=mock_tool,
tool_result="Content: SECRET_KEY=abc123",
)
after_hooks = get_after_tool_call_hooks()
result = sanitize_context.tool_result
for hook in after_hooks:
sanitize_context.tool_result = result
modified = hook(sanitize_context)
if modified is not None:
result = modified
assert "SECRET_KEY=[REDACTED]" in result
assert "abc123" not in result | test | 0 | {"function_name": "test_hooks_with_validation_and_sanitization", "class_name": "TestToolHooksIntegration", "qualname": "TestToolHooksIntegration.test_hooks_with_validation_and_sanitization", "file_path": "lib/crewai/tests/hooks/test_tool_hooks.py", "repo_id": "crewAIInc/crewAI", "loc": 53, "tested_modules": ["__future__", "crewai.hooks", "crewai.hooks.tool_hooks", "crewai.hooks", "crewai.lite_agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:examples/features/add_image_context.py:main | # Context:
from browser_use import Agent
from browser_use.llm import ChatOpenAI
def image_to_base64(image_path: str) -> str: ...
def create_sample_images() -> list[ContentPartTextParam | ContentPartImageParam]: ...
# Task:
Write a Python async function `main` to main function to run the browser agent with image context.
Returns: None | async def main() -> None:
"""
Main function to run the browser agent with image context.
"""
# Task configuration
task_str = 'goto https://www.google.com/ and click image button'
# Initialize the language model
model = ChatOpenAI(model='gpt-4.1')
# Create sample images for context
try:
sample_images = create_sample_images()
except (FileNotFoundError, OSError) as e:
print(f'Error loading sample images: {e}')
print('Continuing without sample images...')
sample_images = []
# Initialize and run the agent
agent = Agent(task=task_str, llm=model, sample_images=sample_images)
await agent.run() | function_simple | 0 | {"cognitive_complexity": 1, "loc": 21, "code_loc": 10, "docstring_loc": 3, "function_name": "main", "class_name": null, "qualname": "main", "file_path": "examples/features/add_image_context.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/events/test_async_event_bus.py:module_doc | Write a module-level docstring for the Python module `test_async_event_bus` which contains class `AsyncTestEvent`. | Tests for async event handling in CrewAI event bus.
This module tests async handler registration, execution, and the aemit method. | documentation | 0 | {"doc_type": "module", "module_name": "test_async_event_bus", "file_path": "lib/crewai/tests/utilities/events/test_async_event_bus.py", "repo_id": "crewAIInc/crewAI", "char_length": 131} |
ray-project/ray:python/ray/tests/test_runtime_env_get_wheel_names.py:test_get_master_wheel_url | # Context:
import requests
import ray._private.ray_constants as ray_constants
from ray._private.utils import (
get_master_wheel_url,
get_release_wheel_url,
get_wheel_filename,
)
def test_get_wheel_filename(): ...
def test_get_release_wheel_url(): ...
# Task:
Write a Python test function `test_get_master_wheel_url` to test the code that generates the filenames of `master` commit wheels.
Module under test: ray._private.utils | def test_get_master_wheel_url():
"""Test the code that generates the filenames of `master` commit wheels."""
# NOTE: These should not be changed for releases.
ray_version = "3.0.0.dev0"
# This should be a commit for which wheels have already been built for
# all platforms and python versions at
# `s3://ray-wheels/master/<test_commit>/`.
#
# Link to commit:
# https://github.com/ray-project/ray/commit/faf06e09e55558fb36c72e91a5cf8a7e3da8b8c6
test_commit = "faf06e09e55558fb36c72e91a5cf8a7e3da8b8c6"
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
url = get_master_wheel_url(
test_commit, sys_platform, ray_version, py_version
)
assert requests.head(url).status_code == 200, url | test | 0 | {"function_name": "test_get_master_wheel_url", "class_name": null, "qualname": "test_get_master_wheel_url", "file_path": "python/ray/tests/test_runtime_env_get_wheel_names.py", "repo_id": "ray-project/ray", "loc": 17, "tested_modules": ["ray._private.utils"], "has_docstring": true, "runnable_level": "plib_runnable"} |
jax-ml/jax:jaxlib/tools/build_mosaic_wheel.py:license_header | Add a Apache-2.0 license header comment for the project 'jax', authored by The JAX Authors, year 2025. | # Copyright 2025 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script that builds a jax cuda/rocm plugin wheel, intended to be run via bazel run
# as part of the jax cuda/rocm plugin build process.
# Most users should not run this script directly; use build.py instead. | license | 1 | {"license_type": "Apache-2.0", "author": "The JAX Authors", "year": "2025", "source": "header", "repo_id": "jax-ml/jax"} |
run-llama/llama_index:llama-index-core/tests/test_rate_limiter.py:test_llm_sync_complete_calls_acquire | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
from llama_index.core.llms.mock import MockLLM
def test_base_rate_limiter_is_abstract() -> None: ...
def test_token_bucket_is_subclass_of_base() -> None: ...
def test_rate_limiter_alias_is_token_bucket() -> None: ...
def test_instance_is_base_rate_limiter() -> None: ...
def test_custom_rate_limiter_subclass() -> None: ...
def test_creation_rpm_only() -> None: ...
def test_creation_tpm_only() -> None: ...
def test_creation_both() -> None: ...
def test_validation_rejects_zero() -> None: ...
def test_validation_rejects_negative() -> None: ...
def test_burst_within_limit() -> None: ...
def test_acquire_blocks_when_exhausted() -> None: ...
def test_refill_caps_at_max() -> None: ...
async def test_async_acquire_burst_within_limit() -> None: ...
async def test_async_acquire_tpm_limiting() -> None: ...
async def test_concurrent_async_rate_limiting() -> None: ...
def test_llm_sync_chat_calls_acquire() -> None: ...
async def test_llm_async_chat_calls_async_acquire() -> None: ...
async def test_llm_async_complete_calls_async_acquire() -> None: ...
def test_llm_without_rate_limiter_works() -> None: ...
def test_embedding_single_calls_acquire() -> None: ...
def test_embedding_batch_calls_acquire_per_batch() -> None: ...
def test_embedding_without_rate_limiter_works() -> None: ...
def test_shared_rate_limiter_across_instances() -> None: ...
def test_shared_limiter_between_llm_and_embedding() -> None: ...
# Task:
Write a Python test function `test_llm_sync_complete_calls_acquire` to verify the behavior of `llm_sync_complete_calls_acquire`.
Module under test: llama_index.core.base.llms.types, llama_index.core.llms.mock, llama_index.core.rate_limiter | def test_llm_sync_complete_calls_acquire() -> None:
mock_limiter = MagicMock()
llm = MockLLM()
llm.rate_limiter = mock_limiter
llm.complete("hello")
mock_limiter.acquire.assert_called_once() | test | 1 | {"function_name": "test_llm_sync_complete_calls_acquire", "class_name": null, "qualname": "test_llm_sync_complete_calls_acquire", "file_path": "llama-index-core/tests/test_rate_limiter.py", "repo_id": "run-llama/llama_index", "loc": 6, "tested_modules": ["llama_index.core.base.llms.types", "llama_index.core.llms.mock", "llama_index.core.rate_limiter", "llama_index.core.embeddings.mock_embed_model", "llama_index.core.embeddings.mock_embed_model"], "has_docstring": false, "runnable_level": "project_runnable"} |
deepspeedai/DeepSpeed:deepspeed/runtime/superoffload/superoffload_stage3.py:SuperOffloadOptimizer_Stage3.step | # Context:
import torch
class SuperOffloadOptimizer_Stage3(DeepSpeedZeroOptimizer_Stage3):
def __init__(
self,
module,
init_optimizer,
param_names,
timers,
ds_config,
**kwargs,
):
self.sub_group_to_param_num = {}
self.params_in_ipg_bucket_buffer = deque()
self._cur_bucket_index = -1
self.async_cpuadam_num = 0
self.max_grad_numel = 0
super().__init__(module, init_optimizer, param_names, timers, ds_config, **kwargs)
optimizer_config = {
"lr": self.optimizer.param_groups[0]["lr"],
"betas": self.optimizer.param_groups[0]["betas"],
"eps": self.optimizer.param_groups[0]["eps"],
"weight_decay": self.optimizer.param_groups[0]["weight_decay"],
"amsgrad": self.optimizer.param_groups[0]["amsgrad"]
}
cpuadam_cores_perc = kwargs.get("cpuadam_cores_perc", 0.8)
self.superoffload_cpu_optimizer = SuperOffloadCPUOptimizer(optimizer_config=optimizer_config,
cpuadam_cores_perc=cpuadam_cores_perc,
max_grad_numel=self.max_grad_numel)
def _create_fp16_sub_groups(self, params_group): ...
def _optimizer_step(self, sub_group_id): ...
def reduce_independent_p_g_buckets_and_remove_grads(self, param): ...
def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id): ...
def _reassign_or_swap_out_partitioned_parameters_async(self, sub_group_id, updated_param): ...
def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: ...
def _wait_for_async_operations(self, timeout_seconds): ...
def _wait_for_single_async_result(self, event_type: str, timeout_seconds): ...
def _sync_cpu_optimizer_step(self, param_group_id: int, sub_group_id: int, fp32_param_data, fp32_grad_data, rollback: bool, timeout_seconds: int): ...
def _handle_overflow_rollback(self): ...
def _handle_gradient_clipping(self, scaled_global_grad_norm): ...
def check_clip_grads(self, total_norm): ...
# Task:
Write a Python method `step` for the class `SuperOffloadOptimizer_Stage3` to not supporting closure.
Parameters: closure | def step(self, closure=None):
"""
Not supporting closure.
"""
# Wait for any pending asynchronous CPU optimizer operations
self._wait_for_async_operations()
self._pre_step()
self._partition_all_parameters()
if self._overflow_check_and_loss_scale_update():
self._handle_overflow_rollback()
return
norm_groups = self._get_norm_groups()
scaled_global_grad_norm = torch.linalg.vector_norm(torch.stack(norm_groups))
self._global_grad_norm = scaled_global_grad_norm / self.loss_scale
timer_names = set()
timer_names.add(OPTIMIZER_STEP_TIMER)
self.timers(OPTIMIZER_STEP_TIMER).start()
if self.check_clip_grads(scaled_global_grad_norm):
self._handle_gradient_clipping(scaled_global_grad_norm)
for sub_group_id, group in enumerate(self.fp16_groups):
# Prepare optimizer states, gradients and fp32 parameters for update
self._prepare_sub_group(sub_group_id, timer_names)
# Scale the fp32 gradients
self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm)
# Apply the optimizer step on the sub group and copy fp32 parameters to fp16
self._optimizer_step(sub_group_id)
# Put fp16 parameters in appropriate location
self._reassign_or_swap_out_partitioned_parameters(sub_group_id)
# Release memory or swap out optimizer states of fp32 parameters
self._release_sub_group(sub_group_id, timer_names)
self.timers(OPTIMIZER_STEP_TIMER).stop()
self._post_step(timer_names) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 43, "code_loc": 22, "docstring_loc": 3, "function_name": "step", "class_name": "SuperOffloadOptimizer_Stage3", "qualname": "SuperOffloadOptimizer_Stage3.step", "file_path": "deepspeed/runtime/superoffload/superoffload_stage3.py", "repo_id": "deepspeedai/DeepSpeed", "has_docstring": true, "runnable_level": "file_runnable"} |
vllm-project/vllm:vllm/v1/attention/backends/mamba_attn.py:BaseMambaAttentionMetadataBuilder.build | # Context:
from typing import Any, ClassVar, TypeVar
import torch
from vllm.v1.attention.backend import (
AttentionCGSupport,
AttentionMetadataBuilder,
CommonAttentionMetadata,
)
class BaseMambaAttentionMetadata: ...
class BaseMambaAttentionMetadataBuilder(AttentionMetadataBuilder[M], abc.ABC):
def __init__(
self,
kv_cache_spec: AttentionSpec,
layer_names: list[str],
vllm_config: VllmConfig,
device: torch.device,
):
super().__init__(kv_cache_spec, layer_names, vllm_config, device)
# Enable speculative decoding support
self.speculative_config = vllm_config.speculative_config
self.compilation_config = vllm_config.compilation_config
self.num_spec_tokens: int = vllm_config.num_speculative_tokens
self.use_spec_decode = self.num_spec_tokens > 0
assert isinstance(kv_cache_spec, MambaSpec)
self.compilation_config = vllm_config.compilation_config
self.decode_cudagraph_max_bs = self.vllm_config.scheduler_config.max_num_seqs
if self.compilation_config.max_cudagraph_capture_size is not None:
self.decode_cudagraph_max_bs = min(
self.decode_cudagraph_max_bs,
self.compilation_config.max_cudagraph_capture_size,
)
if self.vllm_config.cache_config.mamba_cache_mode == "all":
max_num_blocks = cdiv(
self.vllm_config.model_config.max_model_len,
self.kv_cache_spec.block_size,
)
# Speculative decoding not supported with prefix caching,
# so keep shape consistent with prefill buffer
# TODO: reduce this size as needed for decode-only cudagraph capture
self.state_indices_tensor_d = torch.empty(
(
self.decode_cudagraph_max_bs,
max_num_blocks,
),
dtype=torch.int32,
device=device,
)
self.block_idx_last_scheduled_token = torch.empty(
(self.decode_cudagraph_max_bs,),
dtype=torch.int32,
device=device,
)
self.block_idx_last_computed_token = torch.empty(
(self.decode_cudagraph_max_bs,),
dtype=torch.int32,
device=device,
)
else:
self.state_indices_tensor_d = torch.empty(
(self.decode_cudagraph_max_bs, 1 + self.num_spec_tokens),
dtype=torch.int32,
device=device,
)
# For speculative decoding, we need to store the following buffers
# for CUDA graph capture during decode
if self.num_spec_tokens > 0:
self.decode_num_accepted_tokens = torch.empty(
(self.decode_cudagraph_max_bs,),
dtype=torch.int32,
device=device,
)
self._init_reorder_batch_threshold(1, self.use_spec_decode)
if self.use_spec_decode:
self.supports_update_block_table = False
def build_for_cudagraph_capture(self, common_attn_metadata: CommonAttentionMetadata) -> M: ...
def _compute_chunk_metadata(self, chunk_size: int, num_prefills: int, num_computed_tokens_p_cpu: torch.Tensor, query_start_loc_p_cpu: torch.Tensor) -> tuple[list[int], list[int], list[int]]: ...
def _build_chunk_metadata_tensors(self, chunk_size: int, common: M, common_attn_metadata: CommonAttentionMetadata) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: ...
def _compute_prefix_caching_block_indices(self, common_attn_metadata: CommonAttentionMetadata, mamba_block_size: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: ...
def _compute_common_metadata(self, common_attn_metadata: CommonAttentionMetadata, num_accepted_tokens: torch.Tensor | None) -> M: ...
def _update_metadata_for_cudagraph_capture(self, metadata: M) -> M: ...
def update_block_table(self, metadata: M, blk_table: torch.Tensor, slot_mapping: torch.Tensor) -> M: ...
# Task:
Write a Python method `build` for the class `BaseMambaAttentionMetadataBuilder` to default build implementation for Mamba-like attention backends.
Parameters: common_prefix_len: int, common_attn_metadata: CommonAttentionMetadata, fast_build: bool
Returns: M | def build(
self,
common_prefix_len: int,
common_attn_metadata: CommonAttentionMetadata,
fast_build: bool = False,
*,
num_accepted_tokens: torch.Tensor | None = None,
**kwargs: Any,
) -> M:
"""
Default build implementation for Mamba-like attention backends.
Subclasses (e.g., Mamba2) can override to add additional metadata.
"""
return self._compute_common_metadata(
common_attn_metadata, num_accepted_tokens=num_accepted_tokens
) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 16, "code_loc": 3, "docstring_loc": 4, "function_name": "build", "class_name": "BaseMambaAttentionMetadataBuilder", "qualname": "BaseMambaAttentionMetadataBuilder.build", "file_path": "vllm/v1/attention/backends/mamba_attn.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/tests/test_generate_tool_specs.py:test_save_to_json | # Context:
import json
class MockToolSchema(BaseModel): ...
class MockTool(BaseTool): ...
def extractor(): ...
def test_unwrap_schema(extractor): ...
def mock_tool_extractor(extractor): ...
def test_extract_basic_tool_info(mock_tool_extractor): ...
def test_extract_init_params_schema(mock_tool_extractor): ...
def test_extract_env_vars(mock_tool_extractor): ...
def test_extract_run_params_schema(mock_tool_extractor): ...
def test_extract_package_dependencies(mock_tool_extractor): ...
# Task:
Write a Python test function `test_save_to_json` to verify the behavior of `save_to_json`.
Module under test: crewai.tools.base_tool, crewai_tools.generate_tool_specs, pydantic | def test_save_to_json(extractor, tmp_path):
extractor.tools_spec = [
{
"name": "TestTool",
"humanized_name": "Test Tool",
"description": "A test tool",
"run_params_schema": [
{"name": "param1", "description": "Test parameter", "type": "str"}
],
}
]
file_path = tmp_path / "output.json"
extractor.save_to_json(str(file_path))
assert file_path.exists()
with open(file_path, "r") as f:
data = json.load(f)
assert "tools" in data
assert len(data["tools"]) == 1
assert data["tools"][0]["humanized_name"] == "Test Tool"
assert data["tools"][0]["run_params_schema"][0]["name"] == "param1" | test | 0 | {"function_name": "test_save_to_json", "class_name": null, "qualname": "test_save_to_json", "file_path": "lib/crewai-tools/tests/test_generate_tool_specs.py", "repo_id": "crewAIInc/crewAI", "loc": 24, "tested_modules": ["crewai.tools.base_tool", "crewai_tools.generate_tool_specs", "pydantic"], "has_docstring": false, "runnable_level": "file_runnable"} |
exo-explore/exo:src/exo/worker/tests/unittests/test_runner/test_dsml_e2e.py:TestE2EEdgeCases.test_empty_model_response | # Context:
from exo.shared.types.worker.runner_response import (
GenerationResponse,
ToolCallResponse,
)
from exo.worker.runner.llm_inference.runner import parse_deepseek_v32
def _simulate_tokens(texts: list[str], finish_on_last: bool) -> Generator[GenerationResponse]: ...
class TestE2EStandardResponse: ...
class TestE2EToolCallResponse: ...
class TestE2EMultiTurnToolUse: ...
class TestE2EThinkingAndToolCall: ...
class TestE2ERoundTrip: ...
class TestE2EFullRoundTrip: ...
class TestE2EEdgeCases:
def test_dsml_marker_split_at_fullwidth_pipe(self): ...
def test_tool_call_with_nested_json_object(self): ...
def test_text_with_angle_brackets_not_mistaken_for_dsml(self): ...
# Task:
Write a Python test method `test_empty_model_response` in test class `TestE2EEdgeCases` to model produces only EOS (empty response).
Module under test: collections.abc, typing, exo.shared.types.worker.runner_response | def test_empty_model_response(self):
"""Model produces only EOS (empty response)."""
model_tokens = [""]
results = list(parse_deepseek_v32(_simulate_tokens(model_tokens)))
gen_results = [r for r in results if isinstance(r, GenerationResponse)]
assert len(gen_results) == 1
assert gen_results[0].text == ""
assert gen_results[0].finish_reason == "stop" | test | 0 | {"function_name": "test_empty_model_response", "class_name": "TestE2EEdgeCases", "qualname": "TestE2EEdgeCases.test_empty_model_response", "file_path": "src/exo/worker/tests/unittests/test_runner/test_dsml_e2e.py", "repo_id": "exo-explore/exo", "loc": 8, "tested_modules": ["collections.abc", "typing", "exo.shared.types.worker.runner_response", "exo.worker.engines.mlx.dsml_encoding", "exo.worker.runner.llm_inference.runner"], "has_docstring": true, "runnable_level": "project_runnable"} |
paperless-ngx/paperless-ngx:src/paperless/tests/settings/test_environment_parsers.py:TestGetEnvChoice.test_raises_error_when_env_value_invalid | # Context:
import pytest
from pytest_mock import MockerFixture
from paperless.settings.parsers import get_choice_from_env
class TestStringToBool: ...
class TestParseDictFromString: ...
class TestGetIntFromEnv: ...
class TestGetEnvChoice:
def valid_choices(self) -> set[str]: ...
def test_returns_valid_env_value(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_returns_default_when_env_not_set(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_raises_error_when_env_not_set_and_no_default(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_raises_error_when_default_invalid(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_case_sensitive_validation(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_empty_string_env_value(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_whitespace_env_value(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
def test_single_choice_set(self, mocker: MockerFixture) -> None: ...
def test_large_choice_set(self, mocker: MockerFixture) -> None: ...
def test_different_env_keys(self, mocker: MockerFixture, valid_choices: set[str]) -> None: ...
# Task:
Write a Python test method `test_raises_error_when_env_value_invalid` in test class `TestGetEnvChoice` to test that function raises ValueError when env value is not in choices.
Module under test: pathlib, paperless.settings.parsers, paperless.settings.parsers | def test_raises_error_when_env_value_invalid(
self,
mocker: MockerFixture,
valid_choices: set[str],
) -> None:
"""Test that function raises ValueError when env value is not in choices."""
mocker.patch.dict("os.environ", {"TEST_ENV": "invalid_value"})
with pytest.raises(ValueError) as exc_info:
get_choice_from_env("TEST_ENV", valid_choices)
error_msg = str(exc_info.value)
assert (
"Environment variable 'TEST_ENV' has invalid value 'invalid_value'"
in error_msg
)
assert "Valid choices are:" in error_msg
assert "development" in error_msg
assert "staging" in error_msg
assert "production" in error_msg | test | 1 | {"function_name": "test_raises_error_when_env_value_invalid", "class_name": "TestGetEnvChoice", "qualname": "TestGetEnvChoice.test_raises_error_when_env_value_invalid", "file_path": "src/paperless/tests/settings/test_environment_parsers.py", "repo_id": "paperless-ngx/paperless-ngx", "loc": 20, "tested_modules": ["pathlib", "paperless.settings.parsers", "paperless.settings.parsers", "paperless.settings.parsers", "paperless.settings.parsers"], "has_docstring": true, "runnable_level": "project_runnable"} |
fastapi/fastapi:scripts/tests/test_translation_fixer/test_code_blocks/test_code_blocks_lines_number_mismatch.py:test_lt | # Context:
from pathlib import Path
import pytest
from typer.testing import CliRunner
from scripts.translation_fixer import cli
def test_gt(runner: CliRunner, root_dir: Path, copy_test_files): ...
# Task:
Write a Python test function `test_lt` to verify the behavior of `lt`.
Module under test: pathlib, typer.testing, scripts.translation_fixer | def test_lt(runner: CliRunner, root_dir: Path, copy_test_files):
result = runner.invoke(
cli,
["fix-pages", "docs/lang/docs/doc.md"],
)
# assert result.exit_code == 1, result.output
fixed_content = (root_dir / "docs" / "lang" / "docs" / "doc.md").read_text("utf-8")
expected_content = Path(f"{data_path}/translated_doc_lines_number_lt.md").read_text(
"utf-8"
)
assert fixed_content == expected_content # Translated doc remains unchanged
assert "Error processing docs/lang/docs/doc.md" in result.output
assert (
"Code block (lines 16-18) has different number of lines than the original block (3 vs 4)"
) in result.output | test | 1 | {"function_name": "test_lt", "class_name": null, "qualname": "test_lt", "file_path": "scripts/tests/test_translation_fixer/test_code_blocks/test_code_blocks_lines_number_mismatch.py", "repo_id": "fastapi/fastapi", "loc": 17, "tested_modules": ["pathlib", "typer.testing", "scripts.translation_fixer"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestStripNullFromTypes.test_strips_null_from_anyof | # Context:
from copy import deepcopy
from crewai.utilities.pydantic_schema_utils import (
build_rich_field_description,
convert_oneof_to_anyof,
create_model_from_schema,
ensure_all_properties_required,
ensure_type_in_schemas,
force_additional_properties_false,
resolve_refs,
strip_null_from_types,
strip_unsupported_formats,
)
class TestSimpleTypes: ...
class TestRequiredOptional: ...
class TestEnumLiteral: ...
class TestFormatMapping: ...
class TestNestedObjects: ...
class TestTypedArrays: ...
class TestUnionTypes: ...
class TestAllOfMerging: ...
class TestRefResolution: ...
class TestModelName: ...
class TestEnrichDescriptions: ...
class TestEdgeCases: ...
class TestBuildRichFieldDescription: ...
class TestResolveRefs: ...
class TestForceAdditionalPropertiesFalse: ...
class TestStripUnsupportedFormats: ...
class TestEnsureTypeInSchemas: ...
class TestConvertOneofToAnyof: ...
class TestEnsureAllPropertiesRequired: ...
class TestEndToEndMCPSchema: ...
class TestStripNullFromTypes:
def test_strips_null_from_type_array(self) -> None: ...
def test_multiple_non_null_in_anyof(self) -> None: ...
def test_no_null_unchanged(self) -> None: ...
# Task:
Write a Python test method `test_strips_null_from_anyof` in test class `TestStripNullFromTypes` to verify the behavior of `strips_null_from_anyof`.
Module under test: __future__, copy, typing | def test_strips_null_from_anyof(self) -> None:
schema = {
"anyOf": [{"type": "string"}, {"type": "null"}],
}
result = strip_null_from_types(deepcopy(schema))
assert "anyOf" not in result
assert result["type"] == "string" | test | 0 | {"function_name": "test_strips_null_from_anyof", "class_name": "TestStripNullFromTypes", "qualname": "TestStripNullFromTypes.test_strips_null_from_anyof", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 7, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
paperless-ngx/paperless-ngx:src/documents/workflows/webhooks.py:WebhookTransport._format_ip_for_url | # Context:
import ipaddress
def send_webhook(url: str, data: str | dict, headers: dict, files: dict, as_json: bool): ...
class WebhookTransport(httpx.HTTPTransport):
def __init__(
self,
hostname: str,
*args,
allow_internal: bool = False,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.hostname = hostname
self.allow_internal = allow_internal
def handle_request(self, request: httpx.Request) -> httpx.Response: ...
def is_public_ip(ip: str | int) -> bool: ...
def resolve_first_ip(host: str) -> str | None: ...
# Task:
Write a Python method `_format_ip_for_url` for the class `WebhookTransport` to format IP address for use in URL (wrap IPv6 in brackets).
Parameters: ip: str
Returns: str | def _format_ip_for_url(self, ip: str) -> str:
"""
Format IP address for use in URL (wrap IPv6 in brackets)
"""
try:
ip_obj = ipaddress.ip_address(ip)
if ip_obj.version == 6:
return f"[{ip}]"
return ip
except ValueError:
return ip | function_simple | 1 | {"cognitive_complexity": 3, "loc": 11, "code_loc": 7, "docstring_loc": 3, "function_name": "_format_ip_for_url", "class_name": "WebhookTransport", "qualname": "WebhookTransport._format_ip_for_url", "file_path": "src/documents/workflows/webhooks.py", "repo_id": "paperless-ngx/paperless-ngx", "has_docstring": true, "runnable_level": "slib_runnable"} |
browser-use/browser-use:browser_use/mcp/server.py:handle_call_tool | # Context:
import time
from typing import Any
from browser_use.telemetry import MCPServerTelemetryEvent, ProductTelemetry
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
import mcp.types as types
def _configure_mcp_server_logging(): ...
def _ensure_all_loggers_use_stderr(): ...
def get_parent_process_cmdline() -> str | None: ...
class BrowserUseServer: ...
async def main(session_timeout_minutes: int): ...
# Task:
Write a Python async function `handle_call_tool` to handle tool execution.
Parameters: name: str, arguments: dict[str, Any] | None
Returns: list[types.TextContent | types.ImageContent] | async def handle_call_tool(name: str, arguments: dict[str, Any] | None) -> list[types.TextContent | types.ImageContent]:
"""Handle tool execution."""
start_time = time.time()
error_msg = None
try:
result = await self._execute_tool(name, arguments or {})
if isinstance(result, list):
return result
return [types.TextContent(type='text', text=result)]
except Exception as e:
error_msg = str(e)
logger.error(f'Tool execution failed: {e}', exc_info=True)
return [types.TextContent(type='text', text=f'Error: {str(e)}')]
finally:
# Capture telemetry for tool calls
duration = time.time() - start_time
self._telemetry.capture(
MCPServerTelemetryEvent(
version=get_browser_use_version(),
action='tool_call',
tool_name=name,
duration_seconds=duration,
error_message=error_msg,
)
) | function_simple | 0 | {"cognitive_complexity": 3, "loc": 25, "code_loc": 22, "docstring_loc": 1, "function_name": "handle_call_tool", "class_name": null, "qualname": "handle_call_tool", "file_path": "browser_use/mcp/server.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py:ExecuteCommandTool:class_doc | Write a class-level docstring for `ExecuteCommandTool` (inherits from BaseTool) which has methods: `__init__`, `_run`, `_arun`. | Tool for running shell commands in the code interpreter environment. | documentation | 0 | {"doc_type": "class", "class_name": "ExecuteCommandTool", "file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py", "repo_id": "crewAIInc/crewAI", "char_length": 68, "methods": ["__init__", "_run", "_arun"]} |
Zie619/n8n-workflows:run.py:start_server | # Context:
import os
import uvicorn
def print_banner(): ...
def check_requirements() -> bool: ...
def setup_directories(): ...
def setup_database(force_reindex: bool, skip_index: bool) -> str: ...
def main(): ...
# Task:
Write a Python function `start_server` to start the FastAPI server.
Parameters: host: str, port: int, reload: bool | def start_server(host: str = "127.0.0.1", port: int = 8000, reload: bool = False):
"""Start the FastAPI server."""
print(f"🌐 Starting server at http://{host}:{port}")
print(f"📊 API Documentation: http://{host}:{port}/docs")
print(f"🔍 Workflow Search: http://{host}:{port}/api/workflows")
print()
print("Press Ctrl+C to stop the server")
print("-" * 50)
# Configure database path
os.environ["WORKFLOW_DB_PATH"] = "database/workflows.db"
# Start uvicorn with better configuration
import uvicorn
uvicorn.run(
"api_server:app",
host=host,
port=port,
reload=reload,
log_level="info",
access_log=False, # Reduce log noise
) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 23, "code_loc": 16, "docstring_loc": 1, "function_name": "start_server", "class_name": null, "qualname": "start_server", "file_path": "run.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "plib_runnable"} |
ray-project/ray:python/ray/data/tests/test_issue_detection.py:TestHangingExecutionIssueDetector.test_hanging_detector_configuration | # Context:
from ray.data._internal.issue_detection.detectors.hanging_detector import (
DEFAULT_OP_TASK_STATS_MIN_COUNT,
DEFAULT_OP_TASK_STATS_STD_FACTOR,
HangingExecutionIssueDetector,
HangingExecutionIssueDetectorConfig,
)
from ray.data.context import DataContext
class FakeOpTask(OpTask): ...
class FakeOperator(PhysicalOperator): ...
def test_high_memory_detection(configured_memory, actual_memory, should_return_issue, restore_data_context): ...
class TestHangingExecutionIssueDetector:
def test_basic_hanging_detection(self, mock_stats_cls, ray_start_regular_shared, restore_data_context): ...
def test_hanging_deitector_detects_issues(self, mock_perf_counter, ray_start_regular_shared): ...
# Task:
Write a Python test method `test_hanging_detector_configuration` in test class `TestHangingExecutionIssueDetector` to test hanging detector configuration and initialization.
Module under test: ray.data._internal.execution.interfaces.physical_operator, ray.data._internal.execution.operators.input_data_buffer, ray.data._internal.execution.operators.task_pool_map_operator | def test_hanging_detector_configuration(self, restore_data_context):
"""Test hanging detector configuration and initialization."""
# Test default configuration from DataContext
ctx = DataContext.get_current()
default_config = ctx.issue_detectors_config.hanging_detector_config
assert default_config.op_task_stats_min_count == DEFAULT_OP_TASK_STATS_MIN_COUNT
assert (
default_config.op_task_stats_std_factor == DEFAULT_OP_TASK_STATS_STD_FACTOR
)
# Test custom configuration
min_count = 5
std_factor = 3.0
custom_config = HangingExecutionIssueDetectorConfig(
op_task_stats_min_count=min_count,
op_task_stats_std_factor=std_factor,
)
ctx.issue_detectors_config.hanging_detector_config = custom_config
detector = HangingExecutionIssueDetector(
dataset_id="id", operators=[], config=custom_config
)
assert detector._op_task_stats_min_count == min_count
assert detector._op_task_stats_std_factor_threshold == std_factor | test | 0 | {"function_name": "test_hanging_detector_configuration", "class_name": "TestHangingExecutionIssueDetector", "qualname": "TestHangingExecutionIssueDetector.test_hanging_detector_configuration", "file_path": "python/ray/data/tests/test_issue_detection.py", "repo_id": "ray-project/ray", "loc": 24, "tested_modules": ["ray.data._internal.execution.interfaces.physical_operator", "ray.data._internal.execution.operators.input_data_buffer", "ray.data._internal.execution.operators.task_pool_map_operator", "ray.data._internal.issue_detection.detectors.hanging_detector", "ray.data._internal.issue_detection.detectors.high_memory_detector"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/agentic/helpers/test_error_handling.py:TestExtractFriendlyError.test_should_return_friendly_message_for_timeout_error | # Context:
from langflow.agentic.helpers.error_handling import (
ERROR_PATTERNS,
MAX_ERROR_MESSAGE_LENGTH,
MIN_MEANINGFUL_PART_LENGTH,
_truncate_error_message,
extract_friendly_error,
)
class TestTruncateErrorMessage: ...
class TestErrorPatterns: ...
class TestConstants: ...
class TestExtractFriendlyError:
def test_should_return_friendly_message_for_rate_limit_error(self): ...
def test_should_return_friendly_message_for_authentication_error(self): ...
def test_should_return_friendly_message_for_quota_error(self): ...
def test_should_return_friendly_message_for_connection_error(self): ...
def test_should_return_friendly_message_for_server_error(self): ...
def test_should_return_friendly_message_for_model_not_found(self): ...
def test_should_return_friendly_message_for_content_policy_error(self): ...
def test_should_truncate_unknown_error_messages(self): ...
def test_should_return_original_for_short_unknown_errors(self): ...
def test_should_handle_empty_string(self): ...
def test_should_be_case_insensitive(self): ...
# Task:
Write a Python test method `test_should_return_friendly_message_for_timeout_error` in test class `TestExtractFriendlyError` to should return user-friendly message for timeout errors.
Module under test: langflow.agentic.helpers.error_handling | def test_should_return_friendly_message_for_timeout_error(self):
"""Should return user-friendly message for timeout errors."""
error_messages = [
"Request timeout",
"Connection timed out",
"Operation timed out after 30 seconds",
]
for error in error_messages:
result = extract_friendly_error(error)
assert "timeout" in result.lower() or "timed out" in result.lower() | test | 1 | {"function_name": "test_should_return_friendly_message_for_timeout_error", "class_name": "TestExtractFriendlyError", "qualname": "TestExtractFriendlyError.test_should_return_friendly_message_for_timeout_error", "file_path": "src/backend/tests/unit/agentic/helpers/test_error_handling.py", "repo_id": "langflow-ai/langflow", "loc": 11, "tested_modules": ["langflow.agentic.helpers.error_handling"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/diffusers:tests/models/test_models_auto.py:TestAutoModel.test_load_from_model_index | # Context:
from transformers import CLIPTextModel, LongformerModel
from diffusers.models import AutoModel, UNet2DConditionModel
class TestAutoModelFromConfig(unittest.TestCase): ...
class TestRegisterForAutoClass(unittest.TestCase): ...
class TestAutoModel(unittest.TestCase):
def test_load_from_config_diffusers_with_subfolder(self, mock_load_config): ...
def test_load_from_config_transformers_with_subfolder(self, mock_load_config): ...
def test_load_from_config_without_subfolder(self): ...
def test_load_dynamic_module_from_local_path_with_subfolder(self): ...
# Task:
Write a Python test method `test_load_from_model_index` in test class `TestAutoModel` to verify the behavior of `load_from_model_index`.
Module under test: transformers, diffusers, diffusers.models | def test_load_from_model_index(self):
model = AutoModel.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-torch", subfolder="text_encoder", use_safetensors=False
)
assert isinstance(model, CLIPTextModel) | test | 1 | {"function_name": "test_load_from_model_index", "class_name": "TestAutoModel", "qualname": "TestAutoModel.test_load_from_model_index", "file_path": "tests/models/test_models_auto.py", "repo_id": "huggingface/diffusers", "loc": 5, "tested_modules": ["transformers", "diffusers", "diffusers.models", "diffusers.models.modeling_utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/lfx/tests/unit/events/test_event_manager.py:TestEventManagerFactories.test_default_manager_event_execution | # Context:
from unittest.mock import MagicMock
from lfx.events.event_manager import (
EventManager,
create_default_event_manager,
create_stream_tokens_event_manager,
)
class TestEventManager: ...
class TestEventManagerAsync: ...
class TestEventManagerFactories:
def test_create_default_event_manager(self): ...
def test_create_default_event_manager_without_queue(self): ...
def test_create_stream_tokens_event_manager(self): ...
def test_create_stream_tokens_event_manager_without_queue(self): ...
def test_stream_manager_event_execution(self): ...
# Task:
Write a Python test method `test_default_manager_event_execution` in test class `TestEventManagerFactories` to test that events in default manager can be executed.
Module under test: lfx.events.event_manager | def test_default_manager_event_execution(self):
"""Test that events in default manager can be executed."""
queue = MagicMock()
manager = create_default_event_manager(queue)
# Test executing different events
test_events = [
("on_token", {"chunk": "test"}),
("on_error", {"error": "test error"}),
("on_message", {"text": "test message"}),
]
for event_name, data in test_events:
event_callback = getattr(manager, event_name)
event_callback(data=data)
# Verify all events were sent to queue
assert queue.put_nowait.call_count == len(test_events) | test | 1 | {"function_name": "test_default_manager_event_execution", "class_name": "TestEventManagerFactories", "qualname": "TestEventManagerFactories.test_default_manager_event_execution", "file_path": "src/lfx/tests/unit/events/test_event_manager.py", "repo_id": "langflow-ai/langflow", "loc": 18, "tested_modules": ["lfx.events.event_manager"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2i.py:whitespace_clean | # Context:
import regex as re
def basic_clean(text): ...
def prompt_clean(text): ...
class Kandinsky5I2IPipeline(DiffusionPipeline, KandinskyLoraLoaderMixin): ...
# Task:
Write a Python function `whitespace_clean` to copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py.
Parameters: text | def whitespace_clean(text):
"""
Copied from https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/wan/pipeline_wan.py
Normalize whitespace in text by replacing multiple spaces with single space.
"""
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text | function_simple | 1 | {"cognitive_complexity": 0, "loc": 9, "code_loc": 3, "docstring_loc": 5, "function_name": "whitespace_clean", "class_name": null, "qualname": "whitespace_clean", "file_path": "src/diffusers/pipelines/kandinsky5/pipeline_kandinsky_i2i.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/test_crew_multimodal.py:TestCrewMultimodalBedrock.test_pdf_file | # Context:
import pytest
from crewai import Agent, Crew, LLM, Task
from crewai_files import AudioFile, File, ImageFile, PDFFile, TextFile, VideoFile
def image_file() -> ImageFile: ...
def image_bytes() -> bytes: ...
def text_file() -> TextFile: ...
def text_bytes() -> bytes: ...
def pdf_file() -> PDFFile: ...
def video_file() -> VideoFile: ...
def audio_file() -> AudioFile: ...
def _create_analyst_crew(llm: LLM) -> Crew: ...
class TestCrewMultimodalOpenAI: ...
class TestCrewMultimodalOpenAIResponses: ...
class TestCrewMultimodalAnthropic: ...
class TestCrewMultimodalGemini: ...
class TestCrewMultimodalFileTypes: ...
class TestCrewMultimodalUnsupportedTypes: ...
class TestCrewMultimodalFileUpload: ...
class TestCrewMultimodalBedrock:
def test_image_file(self, model: str, image_file: ImageFile) -> None: ...
# Task:
Write a Python test method `test_pdf_file` in test class `TestCrewMultimodalBedrock` to test crew can process a PDF file.
Module under test: pathlib, crewai, crewai_files | def test_pdf_file(self, model: str, pdf_file: PDFFile) -> None:
"""Test crew can process a PDF file."""
llm = LLM(model=model)
crew = _create_analyst_crew(llm)
result = crew.kickoff(input_files={"document": pdf_file})
assert result.raw
assert len(result.raw) > 0 | test | 0 | {"function_name": "test_pdf_file", "class_name": "TestCrewMultimodalBedrock", "qualname": "TestCrewMultimodalBedrock.test_pdf_file", "file_path": "lib/crewai/tests/test_crew_multimodal.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["pathlib", "crewai", "crewai_files"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:airflow-core/tests/unit/serialization/definitions/test_assets.py:test_asset_alias_as_expression | # Context:
from airflow.serialization.definitions.assets import (
SerializedAsset,
SerializedAssetAlias,
SerializedAssetAll,
SerializedAssetAny,
SerializedAssetUniqueKey,
)
def test_asset_iter_assets(): ...
def test_asset_iter_asset_aliases(): ...
class TestSerializedAssetUniqueKey: ...
# Task:
Write a Python test function `test_asset_alias_as_expression` to verify the behavior of `asset_alias_as_expression`.
Module under test: __future__, airflow.api_fastapi.execution_api.datamodels.asset, airflow.serialization.definitions.assets | def test_asset_alias_as_expression():
alias = SerializedAssetAlias(name="test_name", group="test")
assert alias.as_expression() == {"alias": {"name": "test_name", "group": "test"}} | test | 1 | {"function_name": "test_asset_alias_as_expression", "class_name": null, "qualname": "test_asset_alias_as_expression", "file_path": "airflow-core/tests/unit/serialization/definitions/test_assets.py", "repo_id": "apache/airflow", "loc": 3, "tested_modules": ["__future__", "airflow.api_fastapi.execution_api.datamodels.asset", "airflow.serialization.definitions.assets"], "has_docstring": false, "runnable_level": "project_runnable"} |
mem0ai/mem0:mem0/vector_stores/neptune_analytics.py:NeptuneAnalyticsVector.get | # Context:
class OutputData(BaseModel): ...
class NeptuneAnalyticsVector(VectorStoreBase):
_COLLECTION_PREFIX = "MEM0_VECTOR_"
_FIELD_N = 'n'
_FIELD_ID = '~id'
_FIELD_PROP = '~properties'
_FIELD_SCORE = 'score'
_FIELD_LABEL = 'label'
_TIMEZONE = "UTC"
def __init__(
self,
endpoint: str,
collection_name: str,
):
"""
Initialize the Neptune Analytics vector store.
Args:
endpoint (str): Neptune Analytics endpoint in format 'neptune-graph://<graphid>'.
collection_name (str): Name of the collection to store vectors.
Raises:
ValueError: If endpoint format is invalid.
ImportError: If langchain_aws is not installed.
"""
if not endpoint.startswith("neptune-graph://"):
raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.")
graph_id = endpoint.replace("neptune-graph://", "")
self.graph = NeptuneAnalyticsGraph(graph_id)
self.collection_name = self._COLLECTION_PREFIX + collection_name
def create_col(self, name, vector_size, distance): ...
def insert(self, vectors: List[list], payloads: Optional[List[Dict]], ids: Optional[List[str]]): ...
def search(self, query: str, vectors: List[float], limit: int, filters: Optional[Dict]) -> List[OutputData]: ...
def delete(self, vector_id: str): ...
def update(self, vector_id: str, vector: Optional[List[float]], payload: Optional[Dict]): ...
def list_cols(self): ...
def delete_col(self): ...
def col_info(self): ...
def list(self, filters: Optional[Dict], limit: int) -> List[OutputData]: ...
def reset(self): ...
def _parse_query_responses(self, response: dict, with_score: bool): ...
def execute_query(self, query_string: str, params): ...
def _get_where_clause(filters: dict): ...
def _get_node_filter_clause(filters: dict): ...
def _process_success_message(response, context): ...
# Task:
Write a Python method `get` for the class `NeptuneAnalyticsVector` to retrieve a vector by its ID.
Parameters: vector_id: str | def get(self, vector_id: str):
"""
Retrieve a vector by its ID.
Fetches the node data including metadata for the specified vector ID.
Args:
vector_id (str): ID of the vector to retrieve.
Returns:
OutputData: Vector data with metadata, or None if not found.
"""
params = dict(node_id=vector_id)
query_string = f"""
MATCH (n :{self.collection_name})
WHERE id(n) = $node_id
RETURN n
"""
# Composite the query
result = self.execute_query(query_string, params)
if len(result) != 0:
return self._parse_query_responses(result)[0] | function_simple | 1 | {"cognitive_complexity": 1, "loc": 24, "code_loc": 9, "docstring_loc": 11, "function_name": "get", "class_name": "NeptuneAnalyticsVector", "qualname": "NeptuneAnalyticsVector.get", "file_path": "mem0/vector_stores/neptune_analytics.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "class_runnable"} |
Shubhamsaboo/awesome-llm-apps:advanced_llm_apps/llm_optimization_tools/headroom_context_optimization/headroom_demo.py:module_doc | Write a module-level docstring for the Python module `headroom_demo` which contains function `generate_test_logs`, function `demo_without_headroom`, function `demo_with_headroom`, function `calculate_savings`, function `demo_langchain_integration`. | Headroom Context Optimization Demo
==================================
This demo shows how Headroom reduces token usage by 50-90% while preserving
accuracy. It recreates the "needle in haystack" test from the Headroom repo.
Run: python headroom_demo.py | documentation | 0 | {"doc_type": "module", "module_name": "headroom_demo", "file_path": "advanced_llm_apps/llm_optimization_tools/headroom_context_optimization/headroom_demo.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "char_length": 253} |
ansible/ansible:lib/ansible/_internal/_ssh/_agent_launch.py:launch_ssh_agent | # Context:
from ansible.errors import AnsibleError
def _launch_ssh_agent() -> None: ...
# Task:
Write a Python function `launch_ssh_agent` to if configured via `SSH_AGENT`, launch an ssh-agent for Ansible's use and/or verify access to an existing one.
Returns: None | def launch_ssh_agent() -> None:
"""If configured via `SSH_AGENT`, launch an ssh-agent for Ansible's use and/or verify access to an existing one."""
try:
_launch_ssh_agent()
except Exception as ex:
raise AnsibleError("Failed to launch ssh agent.") from ex | function_simple | 1 | {"cognitive_complexity": 1, "loc": 6, "code_loc": 4, "docstring_loc": 1, "function_name": "launch_ssh_agent", "class_name": null, "qualname": "launch_ssh_agent", "file_path": "lib/ansible/_internal/_ssh/_agent_launch.py", "repo_id": "ansible/ansible", "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/lfx/tests/unit/base/data/test_docling_utils.py:TestDocumentConverterCaching.test_cached_converter_function_exists | # Context:
from lfx.base.data.docling_utils import _get_cached_converter
class TestExtractDoclingDocuments: ...
class TestDocumentConverterCaching:
def test_cached_converter_cache_key(self): ...
def test_cached_converter_lru_eviction(self): ...
def test_cached_converter_performance_improvement(self): ...
def test_cache_clear(self): ...
def test_different_ocr_engines_create_different_caches(self): ...
def test_different_pipelines_create_different_caches(self): ...
# Task:
Write a Python test method `test_cached_converter_function_exists` in test class `TestDocumentConverterCaching` to test that _get_cached_converter function exists and is properly decorated.
Module under test: lfx.base.data.docling_utils, lfx.schema.data, lfx.schema.dataframe | def test_cached_converter_function_exists(self):
"""Test that _get_cached_converter function exists and is properly decorated."""
from lfx.base.data.docling_utils import _get_cached_converter
# Verify function exists
assert callable(_get_cached_converter)
# Verify it has cache_info method (indicates lru_cache decorator)
assert hasattr(_get_cached_converter, "cache_info")
assert callable(_get_cached_converter.cache_info) | test | 1 | {"function_name": "test_cached_converter_function_exists", "class_name": "TestDocumentConverterCaching", "qualname": "TestDocumentConverterCaching.test_cached_converter_function_exists", "file_path": "src/lfx/tests/unit/base/data/test_docling_utils.py", "repo_id": "langflow-ai/langflow", "loc": 10, "tested_modules": ["lfx.base.data.docling_utils", "lfx.schema.data", "lfx.schema.dataframe", "docling_core.types.doc", "lfx.base.data.docling_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
mem0ai/mem0:tests/vector_stores/test_valkey.py:test_delete | # Context:
def mock_valkey_client(): ...
def valkey_db(mock_valkey_client): ...
def test_search_filter_syntax(valkey_db, mock_valkey_client): ...
def test_search_without_filters(valkey_db, mock_valkey_client): ...
def test_insert(valkey_db, mock_valkey_client): ...
def test_insert_handles_missing_created_at(valkey_db, mock_valkey_client): ...
def test_update(valkey_db, mock_valkey_client): ...
def test_update_handles_missing_created_at(valkey_db, mock_valkey_client): ...
def test_get(valkey_db, mock_valkey_client): ...
def test_get_not_found(valkey_db, mock_valkey_client): ...
def test_list_cols(valkey_db, mock_valkey_client): ...
def test_delete_col(valkey_db, mock_valkey_client): ...
def test_context_aware_logging(valkey_db, mock_valkey_client): ...
def test_col_info(valkey_db, mock_valkey_client): ...
def test_create_col(valkey_db, mock_valkey_client): ...
def test_list(valkey_db, mock_valkey_client): ...
def test_search_error_handling(valkey_db, mock_valkey_client): ...
def test_drop_index_error_handling(valkey_db, mock_valkey_client): ...
def test_reset(valkey_db, mock_valkey_client): ...
def test_build_list_query(valkey_db): ...
def test_process_document_fields(valkey_db): ...
def test_init_connection_error(): ...
def test_build_search_query(valkey_db): ...
def test_get_error_handling(valkey_db, mock_valkey_client): ...
def test_list_error_handling(valkey_db, mock_valkey_client): ...
def test_create_index_other_error(): ...
def test_create_col_error(valkey_db, mock_valkey_client): ...
def test_list_cols_error(valkey_db, mock_valkey_client): ...
def test_col_info_error(valkey_db, mock_valkey_client): ...
def test_invalid_index_type(): ...
def test_index_existence_check_error(mock_valkey_client): ...
def test_flat_index_creation(mock_valkey_client): ...
def test_index_creation_error(mock_valkey_client): ...
def test_insert_missing_required_field(valkey_db, mock_valkey_client): ...
def test_insert_general_error(valkey_db, mock_valkey_client): ...
def test_search_with_invalid_metadata(valkey_db, mock_valkey_client): ...
def test_search_with_hnsw_ef_runtime(valkey_db, mock_valkey_client): ...
def test_delete_error(valkey_db, mock_valkey_client): ...
def test_update_missing_required_field(valkey_db, mock_valkey_client): ...
def test_update_general_error(valkey_db, mock_valkey_client): ...
def test_get_with_binary_data_and_unicode_error(valkey_db, mock_valkey_client): ...
def test_get_with_invalid_timestamps(valkey_db, mock_valkey_client): ...
def test_get_with_invalid_metadata_json(valkey_db, mock_valkey_client): ...
def test_list_with_missing_fields_and_defaults(valkey_db, mock_valkey_client): ...
# Task:
Write a Python test function `test_delete` to test deleting a vector.
Module under test: datetime, valkey.exceptions, mem0.vector_stores.valkey | def test_delete(valkey_db, mock_valkey_client):
"""Test deleting a vector."""
# Call delete
valkey_db.delete("test_id")
# Check that delete was called with the correct key
mock_valkey_client.delete.assert_called_once_with("mem0:test_collection:test_id") | test | 1 | {"function_name": "test_delete", "class_name": null, "qualname": "test_delete", "file_path": "tests/vector_stores/test_valkey.py", "repo_id": "mem0ai/mem0", "loc": 7, "tested_modules": ["datetime", "valkey.exceptions", "mem0.vector_stores.valkey"], "has_docstring": true, "runnable_level": "file_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/anthropic/completion.py:AnthropicCompletion._extract_anthropic_token_usage | # Context:
from typing import TYPE_CHECKING, Any, Final, Literal, TypeGuard, cast
from anthropic.types import Message, TextBlock, ThinkingBlock, ToolUseBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
def _supports_native_structured_outputs(model: str) -> bool: ...
def _is_pydantic_model_class(obj: Any) -> TypeGuard[type[BaseModel]]: ...
def _contains_file_id_reference(messages: list[dict[str, Any]]) -> bool: ...
class AnthropicThinkingConfig(BaseModel): ...
class AnthropicCompletion(BaseLLM):
def __init__(
self,
model: str = "claude-3-5-sonnet-20241022",
api_key: str | None = None,
base_url: str | None = None,
timeout: float | None = None,
max_retries: int = 2,
temperature: float | None = None,
max_tokens: int = 4096, # Required for Anthropic
top_p: float | None = None,
stop_sequences: list[str] | None = None,
stream: bool = False,
client_params: dict[str, Any] | None = None,
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
thinking: AnthropicThinkingConfig | None = None,
response_format: type[BaseModel] | None = None,
**kwargs: Any,
):
"""Initialize Anthropic chat completion client.
Args:
model: Anthropic model name (e.g., 'claude-3-5-sonnet-20241022')
api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var)
base_url: Custom base URL for Anthropic API
timeout: Request timeout in seconds
max_retries: Maximum number of retries
temperature: Sampling temperature (0-1)
max_tokens: Maximum tokens in response (required for Anthropic)
top_p: Nucleus sampling parameter
stop_sequences: Stop sequences (Anthropic uses stop_sequences, not stop)
stream: Enable streaming responses
client_params: Additional parameters for the Anthropic client
interceptor: HTTP interceptor for modifying requests/responses at transport level.
response_format: Pydantic model for structured output. When provided, responses
will be validated against this model schema.
**kwargs: Additional parameters
"""
super().__init__(
model=model, temperature=temperature, stop=stop_sequences or [], **kwargs
)
# Client params
self.interceptor = interceptor
self.client_params = client_params
self.base_url = base_url
self.timeout = timeout
self.max_retries = max_retries
self.client = Anthropic(**self._get_client_params())
async_client_params = self._get_client_params()
if self.interceptor:
async_transport = AsyncHTTPTransport(interceptor=self.interceptor)
async_http_client = httpx.AsyncClient(transport=async_transport)
async_client_params["http_client"] = async_http_client
self.async_client = AsyncAnthropic(**async_client_params)
# Store completion parameters
self.max_tokens = max_tokens
self.top_p = top_p
self.stream = stream
self.stop_sequences = stop_sequences or []
self.thinking = thinking
self.previous_thinking_blocks: list[ThinkingBlock] = []
self.response_format = response_format
# Model-specific settings
self.is_claude_3 = "claude-3" in model.lower()
self.supports_tools = True
def stop(self) -> list[str]: ...
def stop(self, value: list[str] | str | None) -> None: ...
def _get_client_params(self) -> dict[str, Any]: ...
def call(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _prepare_completion_params(self, messages: list[LLMMessage], system_message: str | None, tools: list[dict[str, Any]] | None, available_functions: dict[str, Any] | None) -> dict[str, Any]: ...
def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]: ...
def _extract_thinking_block(self, content_block: Any) -> ThinkingBlock | dict[str, Any] | None: ...
def _format_messages_for_anthropic(self, messages: str | list[LLMMessage]) -> tuple[list[LLMMessage], str | None]: ...
def _handle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _handle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
def _execute_tools_and_collect_results(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> list[dict[str, Any]]: ...
def _execute_first_tool(self, tool_uses: list[ToolUseBlock | BetaToolUseBlock], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> Any | None: ...
def _handle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ...
async def _ahandle_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def _ahandle_streaming_completion(self, params: dict[str, Any], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ...
async def _ahandle_tool_use_conversation(self, initial_response: Message | BetaMessage, tool_uses: list[ToolUseBlock | BetaToolUseBlock], params: dict[str, Any], available_functions: dict[str, Any], from_task: Any | None, from_agent: Any | None) -> str: ...
def supports_function_calling(self) -> bool: ...
def supports_stop_words(self) -> bool: ...
def get_context_window_size(self) -> int: ...
def supports_multimodal(self) -> bool: ...
def get_file_uploader(self) -> Any: ...
# Task:
Write a Python method `_extract_anthropic_token_usage` for the class `AnthropicCompletion` to extract token usage from Anthropic response.
Parameters: response: Message | BetaMessage
Returns: dict[str, Any] | def _extract_anthropic_token_usage(
response: Message | BetaMessage,
) -> dict[str, Any]:
"""Extract token usage from Anthropic response."""
if hasattr(response, "usage") and response.usage:
usage = response.usage
input_tokens = getattr(usage, "input_tokens", 0)
output_tokens = getattr(usage, "output_tokens", 0)
cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
return {
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"total_tokens": input_tokens + output_tokens,
"cached_prompt_tokens": cache_read_tokens,
}
return {"total_tokens": 0} | function_simple | 0 | {"cognitive_complexity": 3, "loc": 16, "code_loc": 12, "docstring_loc": 1, "function_name": "_extract_anthropic_token_usage", "class_name": "AnthropicCompletion", "qualname": "AnthropicCompletion._extract_anthropic_token_usage", "file_path": "lib/crewai/src/crewai/llms/providers/anthropic/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
karpathy/nanochat:nanochat/core_eval.py:forward_model | # Context:
import torch
def render_prompts_mc(item, continuation_delimiter, fewshot_examples): ...
def render_prompts_schema(item, continuation_delimiter, fewshot_examples): ...
def render_prompts_lm(item, continuation_delimiter, fewshot_examples): ...
def find_common_length(token_sequences, direction): ...
def stack_sequences(tokens, pad_token_id): ...
def batch_sequences_mc(tokenizer, prompts): ...
def batch_sequences_schema(tokenizer, prompts): ...
def batch_sequences_lm(tokenizer, prompts): ...
def evaluate_example(idx, model, tokenizer, data, device, task_meta): ...
def evaluate_task(model, tokenizer, data, device, task_meta): ...
# Task:
Write a Python function `forward_model` to take BxT tensor of token ids, return BxT tensor of losses and argmax predictions.
Parameters: model, input_ids | def forward_model(model, input_ids):
"""
Take BxT tensor of token ids, return BxT tensor of losses and argmax predictions.
The last column of losses is set to nan because we don't have autoregressive targets there.
"""
batch_size, seq_len = input_ids.size()
outputs = model(input_ids)
# Roll the tensor to the left by one position to get the (autoregressive) target ids
target_ids = torch.roll(input_ids, shifts=-1, dims=1)
# Calculate cross entropy at all positions
losses = torch.nn.functional.cross_entropy(
outputs.view(batch_size * seq_len, -1),
target_ids.view(batch_size * seq_len),
reduction='none'
).view(batch_size, seq_len)
# Set the last column to be nan because there is no autoregressive loss there
losses[:, -1] = float('nan')
# Get the argmax predictions at each position
predictions = outputs.argmax(dim=-1)
return losses, predictions | function_simple | 0 | {"cognitive_complexity": 0, "loc": 20, "code_loc": 11, "docstring_loc": 4, "function_name": "forward_model", "class_name": null, "qualname": "forward_model", "file_path": "nanochat/core_eval.py", "repo_id": "karpathy/nanochat", "has_docstring": true, "runnable_level": "plib_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py:test_openai_responses_api_auto_chain_reasoning_adds_include | # Context:
from crewai.llms.providers.openai.completion import OpenAICompletion, ResponsesAPIResult
from crewai.llms.providers.openai.completion import OpenAICompletion
def test_openai_completion_is_used_when_openai_provider(): ...
def test_openai_completion_is_used_when_no_provider_prefix(): ...
def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): ...
def test_openai_completion_module_is_imported(): ...
def test_native_openai_raises_error_when_initialization_fails(): ...
def test_openai_completion_initialization_parameters(): ...
def test_openai_completion_call(): ...
def test_openai_completion_called_during_crew_execution(): ...
def test_openai_completion_call_arguments(): ...
def test_multiple_openai_calls_in_crew(): ...
def test_openai_completion_with_tools(): ...
def test_openai_completion_call_returns_usage_metrics(): ...
def test_openai_raises_error_when_model_not_supported(): ...
def test_openai_client_setup_with_extra_arguments(): ...
def test_extra_arguments_are_passed_to_openai_completion(): ...
def test_openai_get_client_params_with_api_base(): ...
def test_openai_get_client_params_with_base_url_priority(): ...
def test_openai_get_client_params_with_env_var(): ...
def test_openai_get_client_params_priority_order(): ...
def test_openai_get_client_params_no_base_url(monkeypatch): ...
def test_openai_streaming_with_response_model(): ...
def test_openai_response_format_with_pydantic_model(): ...
def test_openai_response_format_with_dict(): ...
def test_openai_response_format_none(): ...
def test_openai_streaming_returns_usage_metrics(): ...
def test_openai_responses_api_initialization(): ...
def test_openai_responses_api_default_is_completions(): ...
def test_openai_responses_api_prepare_params(): ...
def test_openai_responses_api_tool_format(): ...
def test_openai_completions_api_tool_format(): ...
def test_openai_responses_api_structured_output_format(): ...
def test_openai_responses_api_with_previous_response_id(): ...
def test_openai_responses_api_call_routing(): ...
def test_openai_responses_api_basic_call(): ...
def test_openai_responses_api_with_structured_output(): ...
def test_openai_responses_api_with_system_message_extraction(): ...
def test_openai_responses_api_streaming(): ...
def test_openai_responses_api_returns_usage_metrics(): ...
def test_openai_responses_api_builtin_tools_param(): ...
def test_openai_responses_api_builtin_tools_with_custom_tools(): ...
def test_openai_responses_api_with_web_search(): ...
def test_responses_api_result_dataclass(): ...
def test_responses_api_result_has_tool_outputs(): ...
def test_responses_api_result_has_reasoning(): ...
def test_openai_responses_api_parse_tool_outputs_param(): ...
def test_openai_responses_api_parse_tool_outputs_default_false(): ...
def test_openai_responses_api_with_parse_tool_outputs(): ...
def test_openai_responses_api_parse_tool_outputs_basic_call(): ...
def test_openai_responses_api_auto_chain_param(): ...
def test_openai_responses_api_auto_chain_default_false(): ...
def test_openai_responses_api_last_response_id_property(): ...
def test_openai_responses_api_reset_chain(): ...
def test_openai_responses_api_auto_chain_prepare_params(): ...
def test_openai_responses_api_explicit_previous_response_id_takes_precedence(): ...
def test_openai_responses_api_auto_chain_disabled_no_tracking(): ...
def test_openai_responses_api_auto_chain_integration(): ...
def test_openai_responses_api_auto_chain_with_reset(): ...
def test_openai_responses_api_auto_chain_reasoning_param(): ...
def test_openai_responses_api_auto_chain_reasoning_default_false(): ...
def test_openai_responses_api_last_reasoning_items_property(): ...
def test_openai_responses_api_reset_reasoning_chain(): ...
def test_openai_responses_api_auto_chain_reasoning_preserves_existing_include(): ...
def test_openai_responses_api_auto_chain_reasoning_no_duplicate_include(): ...
def test_openai_responses_api_auto_chain_reasoning_prepends_to_input(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_include(): ...
def test_openai_responses_api_auto_chain_reasoning_disabled_no_prepend(): ...
def test_openai_responses_api_both_auto_chains_work_together(): ...
def test_openai_agent_kickoff_structured_output_without_tools(): ...
def test_openai_agent_kickoff_structured_output_with_tools(): ...
def test_openai_stop_words_not_applied_to_structured_output(): ...
def test_openai_stop_words_still_applied_to_regular_responses(): ...
def test_openai_structured_output_preserves_json_with_stop_word_patterns(): ...
def test_openai_completions_cached_prompt_tokens(): ...
def test_openai_responses_api_cached_prompt_tokens(): ...
def test_openai_streaming_cached_prompt_tokens(): ...
def test_openai_completions_cached_prompt_tokens_with_tools(): ...
def test_openai_responses_api_cached_prompt_tokens_with_tools(): ...
def test_openai_streaming_returns_tool_calls_without_available_functions(): ...
async def test_openai_async_streaming_returns_tool_calls_without_available_functions(): ...
# Task:
Write a Python test function `test_openai_responses_api_auto_chain_reasoning_adds_include` to test that auto_chain_reasoning adds reasoning.encrypted_content to include.
Module under test: typing, crewai.llm, crewai.llms.providers.openai.completion | def test_openai_responses_api_auto_chain_reasoning_adds_include():
"""Test that auto_chain_reasoning adds reasoning.encrypted_content to include."""
llm = OpenAICompletion(
model="gpt-4o",
api="responses",
auto_chain_reasoning=True,
)
params = llm._prepare_responses_params(messages=[{"role": "user", "content": "test"}])
assert "include" in params
assert "reasoning.encrypted_content" in params["include"] | test | 0 | {"function_name": "test_openai_responses_api_auto_chain_reasoning_adds_include", "class_name": null, "qualname": "test_openai_responses_api_auto_chain_reasoning_adds_include", "file_path": "lib/crewai/tests/llms/openai/test_openai.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["typing", "crewai.llm", "crewai.llms.providers.openai.completion", "crewai.crew", "crewai.agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_files.py:TestGenericFile.test_file_read_text | # Context:
from crewai_files import (
AudioFile,
File,
FileBytes,
FilePath,
FileSource,
FileStream,
ImageFile,
PDFFile,
TextFile,
VideoFile,
normalize_input_files,
wrap_file_source,
)
class TestDetectContentType: ...
class TestFilePath: ...
class TestFileBytes: ...
class TestFileStream: ...
class TestTypedFileWrappers: ...
class TestWrapFileSource: ...
class TestNormalizeInputFiles: ...
class TestGenericFile:
def test_file_from_text_bytes(self) -> None: ...
def test_file_from_png_bytes(self) -> None: ...
def test_file_from_pdf_bytes(self) -> None: ...
def test_file_from_path(self, tmp_path: Path) -> None: ...
def test_file_from_path_object(self, tmp_path: Path) -> None: ...
def test_file_dict_unpacking(self, tmp_path: Path) -> None: ...
def test_file_dict_unpacking_no_filename(self) -> None: ...
def test_file_keys_method(self, tmp_path: Path) -> None: ...
def test_file_getitem(self, tmp_path: Path) -> None: ...
def test_file_getitem_invalid_key(self, tmp_path: Path) -> None: ...
def test_file_with_stream(self) -> None: ...
def test_file_default_mode(self) -> None: ...
def test_file_custom_mode(self) -> None: ...
def test_file_chunk_mode(self) -> None: ...
def test_image_file_with_mode(self) -> None: ...
# Task:
Write a Python test method `test_file_read_text` in test class `TestGenericFile` to test File.read_text method.
Module under test: pathlib, crewai_files, crewai_files.core.sources | def test_file_read_text(self) -> None:
"""Test File.read_text method."""
f = File(source=b"Text content here")
assert f.read_text() == "Text content here" | test | 0 | {"function_name": "test_file_read_text", "class_name": "TestGenericFile", "qualname": "TestGenericFile.test_file_read_text", "file_path": "lib/crewai/tests/utilities/test_files.py", "repo_id": "crewAIInc/crewAI", "loc": 5, "tested_modules": ["pathlib", "crewai_files", "crewai_files.core.sources"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:release/ray_release/tests/test_custom_byod_build.py:test_custom_byod_build_with_env | # Context:
from unittest.mock import patch
from click.testing import CliRunner
from ray_release.scripts.custom_byod_build import main
def test_custom_byod_build(mock_build_anyscale_custom_byod_image): ...
def test_custom_byod_build_without_lock_file(mock_build_anyscale_custom_byod_image): ...
def test_custom_byod_build_missing_arg(mock_build_anyscale_custom_byod_image): ...
def test_custom_byod_build_with_env_and_script(mock_build_anyscale_custom_byod_image): ...
# Task:
Write a Python test function `test_custom_byod_build_with_env` to verify the behavior of `custom_byod_build_with_env`.
Module under test: click.testing, ray_release.scripts.custom_byod_build | def test_custom_byod_build_with_env(mock_build_anyscale_custom_byod_image):
mock_build_anyscale_custom_byod_image.return_value = None
runner = CliRunner()
result = runner.invoke(
main,
[
"--image-name",
"test-image",
"--base-image",
"test-base-image",
"--env",
"FOO=bar",
"--env",
"BAZ=qux",
],
)
assert result.exit_code == 0
build_context = mock_build_anyscale_custom_byod_image.call_args[0][2]
assert build_context["envs"] == {"FOO": "bar", "BAZ": "qux"}
assert "post_build_script" not in build_context
assert "python_depset" not in build_context | test | 0 | {"function_name": "test_custom_byod_build_with_env", "class_name": null, "qualname": "test_custom_byod_build_with_env", "file_path": "release/ray_release/tests/test_custom_byod_build.py", "repo_id": "ray-project/ray", "loc": 21, "tested_modules": ["click.testing", "ray_release.scripts.custom_byod_build"], "has_docstring": false, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/unit_test/utils/test_oceanbase_health.py:TestOBConnectionPerformanceMetrics.test_get_performance_metrics_connection_error | # Context:
from unittest.mock import Mock, patch
class TestOceanBaseHealthCheck: ...
class TestOBConnectionPerformanceMetrics:
def _create_mock_connection(self): ...
def test_get_performance_metrics_success(self): ...
def test_get_storage_info_success(self): ...
def test_get_storage_info_fallback(self): ...
def test_get_connection_pool_stats(self): ...
def test_get_slow_query_count(self): ...
def test_estimate_qps(self): ...
# Task:
Write a Python test method `test_get_performance_metrics_connection_error` in test class `TestOBConnectionPerformanceMetrics` to test performance metrics when connection fails.
Module under test: api.utils.health_utils, rag.utils | def test_get_performance_metrics_connection_error(self):
"""Test performance metrics when connection fails."""
# Create mock connection with actual methods
conn = self._create_mock_connection()
mock_client = Mock()
conn.client = mock_client
conn.uri = "localhost:2881"
conn.logger = Mock()
mock_client.perform_raw_text_sql.side_effect = Exception("Connection failed")
result = conn.get_performance_metrics()
assert result["connection"] == "disconnected"
assert "error" in result | test | 1 | {"function_name": "test_get_performance_metrics_connection_error", "class_name": "TestOBConnectionPerformanceMetrics", "qualname": "TestOBConnectionPerformanceMetrics.test_get_performance_metrics_connection_error", "file_path": "test/unit_test/utils/test_oceanbase_health.py", "repo_id": "infiniflow/ragflow", "loc": 15, "tested_modules": ["api.utils.health_utils", "rag.utils"], "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/utils/agent_card_signing.py:module_doc | Write a module-level docstring for the Python module `agent_card_signing` which contains function `_normalize_private_key`, function `_serialize_agent_card`, function `_base64url_encode`, function `sign_agent_card`, function `verify_agent_card_signature`. | AgentCard JWS signing utilities.
This module provides functions for signing and verifying AgentCards using
JSON Web Signatures (JWS) as per RFC 7515. Signed agent cards allow clients
to verify the authenticity and integrity of agent card information.
Example:
>>> from crewai.a2a.utils.agent_card_signing import sign_agent_card
>>> signature = sign_agent_card(agent_card, private_key_pem, key_id="key-1")
>>> card_with_sig = card.model_copy(update={"signatures": [signature]}) | documentation | 0 | {"doc_type": "module", "module_name": "agent_card_signing", "file_path": "lib/crewai/src/crewai/a2a/utils/agent_card_signing.py", "repo_id": "crewAIInc/crewAI", "char_length": 490} |
langchain-ai/langchain:libs/partners/perplexity/tests/unit_tests/test_output_parsers.py:TestStripThinkTags.test_strip_think_tags_no_closing_tag | # Context:
from langchain_perplexity.output_parsers import (
ReasoningJsonOutputParser,
ReasoningStructuredOutputParser,
strip_think_tags,
)
class TestReasoningJsonOutputParser: ...
class MockPerson(BaseModel): ...
class MockCompany(BaseModel): ...
class TestReasoningStructuredOutputParser: ...
class TestStripThinkTags:
def test_strip_simple_think_tags(self) -> None: ...
def test_strip_multiple_think_tags(self) -> None: ...
def test_strip_nested_like_think_tags(self) -> None: ...
def test_strip_think_tags_empty_content(self) -> None: ...
def test_strip_think_tags_no_tags(self) -> None: ...
def test_strip_think_tags_only_tags(self) -> None: ...
def test_strip_think_tags_multiline(self) -> None: ...
def test_strip_think_tags_with_special_chars(self) -> None: ...
# Task:
Write a Python test method `test_strip_think_tags_no_closing_tag` in test class `TestStripThinkTags` to test handling of think tags without closing tag.
Module under test: langchain_core.exceptions, langchain_core.outputs, pydantic | def test_strip_think_tags_no_closing_tag(self) -> None:
"""Test handling of think tags without closing tag."""
text = "Hello <think>unclosed reasoning world"
result = strip_think_tags(text)
# Treats unclosed tag as literal text
assert result == "Hello <think>unclosed reasoning world" | test | 1 | {"function_name": "test_strip_think_tags_no_closing_tag", "class_name": "TestStripThinkTags", "qualname": "TestStripThinkTags.test_strip_think_tags_no_closing_tag", "file_path": "libs/partners/perplexity/tests/unit_tests/test_output_parsers.py", "repo_id": "langchain-ai/langchain", "loc": 6, "tested_modules": ["langchain_core.exceptions", "langchain_core.outputs", "pydantic", "langchain_perplexity.output_parsers"], "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py:TestParserConfigBugFix.test_parser_config_with_both_fields | # Context:
import pytest
from ragflow_sdk import DataSet, RAGFlow
class TestAuthorization: ...
class TestCapability: ...
class TestDatasetCreate: ...
class TestParserConfigBugFix:
def test_parser_config_missing_raptor_and_graphrag(self, client): ...
def test_parser_config_with_only_raptor(self, client): ...
def test_parser_config_with_only_graphrag(self, client): ...
def test_parser_config_different_chunk_methods(self, client, chunk_method): ...
# Task:
Write a Python test method `test_parser_config_with_both_fields` in test class `TestParserConfigBugFix` to verify the behavior of `parser_config_with_both_fields`.
Module under test: concurrent.futures, operator, configs | def test_parser_config_with_both_fields(self, client):
parser_config = DataSet.ParserConfig(client, {"chunk_token_num": 1024, "raptor": {"use_raptor": True}, "graphrag": {"use_graphrag": True}})
payload = {"name": "test_parser_config_both_fields_sdk", "parser_config": parser_config}
dataset = client.create_dataset(**payload)
config = dataset.parser_config
assert config.raptor.use_raptor is True, "User-provided raptor.use_raptor should be preserved"
assert config.graphrag.use_graphrag is True, "User-provided graphrag.use_graphrag should be preserved" | test | 1 | {"function_name": "test_parser_config_with_both_fields", "class_name": "TestParserConfigBugFix", "qualname": "TestParserConfigBugFix.test_parser_config_with_both_fields", "file_path": "test/testcases/test_sdk_api/test_dataset_mangement/test_create_dataset.py", "repo_id": "infiniflow/ragflow", "loc": 8, "tested_modules": ["concurrent.futures", "operator", "configs", "hypothesis", "ragflow_sdk"], "has_docstring": false, "runnable_level": "project_runnable"} |
browser-use/browser-use:examples/sandbox/example.py:on_browser_ready | Write a Python function `on_browser_ready` to callback when browser session is created.
Parameters: data | def on_browser_ready(data):
"""Callback when browser session is created"""
print('\n🌐 Browser session created!')
print(f' Session ID: {data.session_id}')
print(f' Live view: {data.live_url}')
print(' Click the link above to watch the AI agent work!\n') | function_simple | 0 | {"cognitive_complexity": 0, "loc": 6, "code_loc": 4, "docstring_loc": 1, "function_name": "on_browser_ready", "class_name": null, "qualname": "on_browser_ready", "file_path": "examples/sandbox/example.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "self_contained"} |
ray-project/ray:python/ray/llm/tests/common/cloud/test_mirror_config.py:TestCloudMirrorConfig.test_valid_s3_uri | # Context:
from ray.llm._internal.common.utils.cloud_utils import (
CloudMirrorConfig,
LoraMirrorConfig,
)
class TestLoraMirrorConfig: ...
class TestCloudMirrorConfig:
def test_valid_gcs_uri(self): ...
def test_valid_abfss_uri(self): ...
def test_valid_azure_uri(self): ...
def test_none_uri(self): ...
def test_invalid_uri(self): ...
def test_extra_files(self): ...
# Task:
Write a Python test method `test_valid_s3_uri` in test class `TestCloudMirrorConfig` to test valid S3 URI.
Module under test: ray.llm._internal.common.utils.cloud_utils | def test_valid_s3_uri(self):
"""Test valid S3 URI."""
config = CloudMirrorConfig(bucket_uri="s3://my-bucket/path")
assert config.bucket_uri == "s3://my-bucket/path"
assert config.storage_type == "s3" | test | 0 | {"function_name": "test_valid_s3_uri", "class_name": "TestCloudMirrorConfig", "qualname": "TestCloudMirrorConfig.test_valid_s3_uri", "file_path": "python/ray/llm/tests/common/cloud/test_mirror_config.py", "repo_id": "ray-project/ray", "loc": 5, "tested_modules": ["ray.llm._internal.common.utils.cloud_utils"], "has_docstring": true, "runnable_level": "plib_runnable"} |
apache/airflow:helm-tests/tests/helm_tests/redis/test_labels_service.py:TestRedisService.test_component_specific_labels_should_override_global_labels | # Context:
import jmespath
from chart_utils.helm_template_generator import render_chart
class TestRedisService:
AIRFLOW_EXECUTOR = "CeleryExecutor"
TEMPLATE_FILE = "templates/redis/redis-service.yaml"
def test_should_add_global_labels(self): ...
def test_should_add_component_specific_labels(self): ...
def test_should_merge_global_and_component_specific_labels(self): ...
# Task:
Write a Python test method `test_component_specific_labels_should_override_global_labels` in test class `TestRedisService` to test that component-specific labels take precedence over global labels with the same key.
Module under test: __future__, chart_utils.helm_template_generator | def test_component_specific_labels_should_override_global_labels(self):
"""Test that component-specific labels take precedence over global labels with the same key."""
docs = render_chart(
values={
"executor": self.AIRFLOW_EXECUTOR,
"redis": {
"enabled": True,
"labels": {"common_label": "component_value"},
},
"labels": {"common_label": "global_value"},
},
show_only=[self.TEMPLATE_FILE],
)
assert "common_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["common_label"] == "component_value" | test | 1 | {"function_name": "test_component_specific_labels_should_override_global_labels", "class_name": "TestRedisService", "qualname": "TestRedisService.test_component_specific_labels_should_override_global_labels", "file_path": "helm-tests/tests/helm_tests/redis/test_labels_service.py", "repo_id": "apache/airflow", "loc": 16, "tested_modules": ["__future__", "chart_utils.helm_template_generator"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:shared/configuration/tests/configuration/test_parser.py:TestAirflowConfigParser.test_deprecated_options_precedence | # Context:
from configparser import ConfigParser
class AirflowConfigParser(_SharedAirflowConfigParser): ...
class TestAirflowConfigParser:
def test_getboolean(self): ...
def test_getint(self): ...
def test_getfloat(self): ...
def test_getlist(self): ...
def test_getjson(self, config_str, expected): ...
def test_getenum(self): ...
def test_getenumlist(self): ...
def test_getjson_empty_with_fallback(self): ...
def test_getjson_fallback(self, fallback): ...
def test_has_option(self): ...
def test_remove_option(self): ...
def test_get_with_defaults(self): ...
def test_get_mandatory_value(self): ...
def test_sensitive_config_values(self): ...
def test_deprecated_options(self): ...
def test_deprecated_options_same_section(self): ...
def test_deprecated_options_lookup_disabled(self): ...
def test_deprecated_options_with_lookup_from_deprecated(self, deprecated_options_dict, kwargs, new_section_expected_value, old_section_expected_value): ...
def test_deprecated_options_cmd(self): ...
def test_cmd_from_env_var(self): ...
def test_cmd_from_config_file(self): ...
def test_secret_from_config_file(self): ...
def test_secret_from_env_var(self): ...
def test_deprecated_sections(self): ...
def test_gettimedelta(self): ...
def test_getimport(self): ...
def test_get_mandatory_list_value(self): ...
def test_set_case_insensitive(self): ...
def test_configure_parser_from_configuration_description_with_deprecated_options(self): ...
def test_get_default_value_deprecated(self): ...
def test_team_env_var_takes_priority(self): ...
def test_team_config_file_section(self): ...
def test_team_does_not_fallback_to_global_config(self): ...
def test_team_does_not_fallback_to_global_env_var(self): ...
def test_team_skips_cmd_lookup(self): ...
def test_team_skips_secret_lookup(self): ...
def test_team_falls_through_to_defaults(self): ...
def test_team_env_var_format(self): ...
# Task:
Write a Python test method `test_deprecated_options_precedence` in test class `TestAirflowConfigParser` to test that new option takes precedence over deprecated option.
Module under test: __future__, configparser, enum | def test_deprecated_options_precedence(self):
"""Test that new option takes precedence over deprecated option"""
class TestParserWithDeprecated(AirflowConfigParser):
deprecated_options = {
("test", "new_key"): ("test", "old_key", "2.0.0"),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.configuration_description = {}
self._default_values = ConfigParser()
self._suppress_future_warnings = False
test_conf = TestParserWithDeprecated()
test_conf.add_section("test")
test_conf.set("test", "old_key", "old_value")
test_conf.set("test", "new_key", "new_value")
value = test_conf.get("test", "new_key")
assert value == "new_value" | test | 1 | {"function_name": "test_deprecated_options_precedence", "class_name": "TestAirflowConfigParser", "qualname": "TestAirflowConfigParser.test_deprecated_options_precedence", "file_path": "shared/configuration/tests/configuration/test_parser.py", "repo_id": "apache/airflow", "loc": 20, "tested_modules": ["__future__", "configparser", "enum", "airflow_shared.configuration.exceptions", "airflow_shared.configuration.parser"], "has_docstring": true, "runnable_level": "file_runnable"} |
vllm-project/vllm:vllm/lora/ops/triton_ops/fused_moe_lora_fp8_op.py:_get_expert_id | # Context:
from vllm.triton_utils import tl, triton
def _get_lora_id(lora_ids, token_lora_mapping_ptr, lora_idx, pid_m, top_k_num, naive_block_assignment: tl.constexpr): ...
def _get_token_offs(sorted_token_ids_ptr, lora_id, pid_m, offs, stride_tl, max_loras, num_valid_tokens, naive_block_assignment: tl.constexpr, BLOCK_SIZE_M: tl.constexpr): ...
def _get_ptr(lora_weights: list[torch.Tensor], device: torch.device): ...
def _adjust_kernel_inputs(num_active_loras: int, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor): ...
def _fused_moe_lora_kernel_fp8(a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, token_lora_mapping_ptr, N, K, EM, num_valid_tokens, num_experts, top_k_num, lora_ids, adapter_enabled, max_loras, stride_am, stride_ak, stride_bl, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, stride_tl, stride_el, stride_asm, stride_ask, stride_bsl, stride_bse, stride_bsk, stride_bsn, group_n: tl.constexpr, group_k: tl.constexpr, slice_a_size, slice_c_size, num_slice_a: tl.constexpr, num_slice_c: tl.constexpr, token_mapping_factor: tl.constexpr, naive_block_assignment: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, ADD_INPUTS: tl.constexpr, USE_B_L2_CACHE: tl.constexpr, BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, SPLIT_K: tl.constexpr, USE_GDC: tl.constexpr, launch_pdl: tl.constexpr, IS_PRIMARY: tl.constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr, per_channel_quant: tl.constexpr): ...
def _fused_moe_lora_shrink_fp8(a_intermediate_cache1: torch.Tensor, qcurr_hidden_states: torch.Tensor, lora_a_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, top_k_num: int, lora_ids: torch.Tensor, adapter_enabled: torch.Tensor, device: torch.device, N: int, M: int, EM: int, K: int, num_tokens: int, num_experts: int, num_slices: int, block_size_m: int, block_size_n: int, block_size_k: int, group_size_m: int, num_warps: int, num_stages: int, split_k: int, num_active_loras: int, lora_a_scale_stacked: list[torch.Tensor], mul_routed_weight: bool, use_gdc: bool, act_scale: torch.Tensor | None, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None) -> None: ...
def _fused_moe_lora_expand_fp8(output: torch.Tensor, a_intermediate_cache1: torch.Tensor, lora_b_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, top_k_num: int, lora_ids: torch.Tensor, adapter_enabled: torch.Tensor, device: torch.device, N: int, M: int, EM: int, K: int, num_tokens: int, num_experts: int, num_slices: int, max_lora_rank: int, w1_output_dim_size: int, block_size_m: int, block_size_n: int, block_size_k: int, group_size_m: int, num_warps: int, num_stages: int, split_k: int, num_active_loras: int, lora_b_scale_stacked: list[torch.Tensor], mul_routed_weight: bool, offset: int, use_gdc: bool, act_scale: torch.Tensor | None, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None) -> None: ...
def _fused_moe_lora_fp8(output: torch.Tensor, qcurr_hidden_states: torch.Tensor, lora_a_stacked: list[torch.Tensor], lora_b_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, max_lora_rank: int, top_k_num: int, lora_ids: torch.Tensor, num_active_loras: int, adapter_enabled: torch.Tensor, shrink_block_size_m: int, shrink_block_size_n: int, shrink_block_size_k: int, shrink_group_size_m: int, shrink_num_warps: int, shrink_num_stages: int, shrink_split_k: int, expand_block_size_m: int, expand_block_size_n: int, expand_block_size_k: int, expand_group_size_m: int, expand_num_warps: int, expand_num_stages: int, expand_split_k: int, lora_a_scale_stacked: list[torch.Tensor], lora_b_scale_stacked: list[torch.Tensor], shrink_act_scale: torch.Tensor | None, expand_act_scale: torch.Tensor | None, mul_routed_weight: bool, fully_sharded: bool, offset: int, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None) -> None: ...
def _fused_moe_lora_fp8_fake(output: torch.Tensor, qcurr_hidden_states: torch.Tensor, lora_a_stacked: list[torch.Tensor], lora_b_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, max_lora_rank: int, top_k_num: int, lora_ids: torch.Tensor, num_active_loras: int, adapter_enabled: torch.Tensor, shrink_block_size_m: int, shrink_block_size_n: int, shrink_block_size_k: int, shrink_group_size_m: int, shrink_num_warps: int, shrink_num_stages: int, shrink_split_k: int, expand_block_size_m: int, expand_block_size_n: int, expand_block_size_k: int, expand_group_size_m: int, expand_num_warps: int, expand_num_stages: int, expand_split_k: int, lora_a_scale_stacked: list[torch.Tensor], lora_b_scale_stacked: list[torch.Tensor], mul_routed_weight: bool, fully_sharded: bool, offset: int, shrink_act_scale: torch.Tensor | None, expand_act_scale: torch.Tensor | None, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None) -> None: ...
def _fused_moe_lora_shrink_fp8_fake(a_intermediate_cache1: torch.Tensor, qcurr_hidden_states: torch.Tensor, lora_a_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, top_k_num: int, lora_ids: torch.Tensor, adapter_enabled: torch.Tensor, device: torch.device, N: int, M: int, EM: int, K: int, num_tokens: int, num_experts: int, num_slices: int, block_size_m: int, block_size_n: int, block_size_k: int, group_size_m: int, num_warps: int, num_stages: int, split_k: int, num_active_loras: int, lora_a_scale_stacked: list[torch.Tensor], mul_routed_weight: bool, use_gdc: bool, act_scale: torch.Tensor | None, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None) -> None: ...
def _fused_moe_lora_expand_fp8_fake(output: torch.Tensor, a_intermediate_cache1: torch.Tensor, lora_b_stacked: list[torch.Tensor], topk_weights: torch.Tensor, sorted_token_ids: torch.Tensor | None, expert_ids: torch.Tensor, num_tokens_post_padded: torch.Tensor | None, token_lora_mapping: torch.Tensor, top_k_num: int, lora_ids: torch.Tensor, adapter_enabled: torch.Tensor, device: torch.device, N: int, M: int, EM: int, K: int, num_tokens: int, num_experts: int, num_slices: int, max_lora_rank: int, w1_output_dim_size: int, block_size_m: int, block_size_n: int, block_size_k: int, group_size_m: int, num_warps: int, num_stages: int, split_k: int, num_active_loras: int, act_scale: torch.Tensor, lora_b_scale_stacked: list[torch.Tensor], mul_routed_weight: bool, offset: int, use_fp8_w8a8: bool, use_int8_w8a8: bool, use_int8_w8a16: bool, per_channel_quant: bool, block_shape: List[int] | None, use_gdc: bool) -> None: ...
# Task:
Write a Python function `_get_expert_id` to returns expert_id.
Parameters: expert_ids_ptr, lora_id, pid_m, stride_el, max_loras, naive_block_assignment: tl.constexpr | def _get_expert_id(
expert_ids_ptr,
lora_id,
pid_m,
stride_el,
max_loras,
naive_block_assignment: tl.constexpr,
):
"""Returns expert_id"""
if naive_block_assignment:
return tl.load(expert_ids_ptr + pid_m)
else:
ind = lora_id * stride_el + pid_m
return tl.load(expert_ids_ptr + ind, ind < max_loras * stride_el, -1) | function_simple | 1 | {"cognitive_complexity": 2, "loc": 14, "code_loc": 5, "docstring_loc": 1, "function_name": "_get_expert_id", "class_name": null, "qualname": "_get_expert_id", "file_path": "vllm/lora/ops/triton_ops/fused_moe_lora_fp8_op.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/collate_fn.py:DefaultCollateFn.__init__ | # Context:
from concurrent.futures import ThreadPoolExecutor
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generic,
List,
Mapping,
Optional,
Tuple,
TypeVar,
Union,
)
import torch
def _is_tensor(batch: Any) -> bool: ...
def _is_tensor_sequence(batch: Any) -> bool: ...
def _is_nested_tensor_sequence(batch: Any) -> bool: ...
def _is_tensor_mapping(batch: Any) -> bool: ...
def _is_tensor_sequence_mapping(batch: Any) -> bool: ...
def is_tensor_batch_type(batch: Any) -> bool: ...
class CollateFn(Generic[DataBatchType]): ...
class ArrowBatchCollateFn(CollateFn['pyarrow.Table']): ...
class NumpyBatchCollateFn(CollateFn[Dict[str, np.ndarray]]): ...
class PandasBatchCollateFn(CollateFn['pandas.DataFrame']): ...
class DefaultCollateFn(ArrowBatchCollateFn):
_DEFAULT_NUM_WORKERS = env_integer(
def __del__(self): ...
def __call__(self, batch: 'pyarrow.Table') -> Union[Dict[str, 'torch.Tensor'], Dict[str, List['torch.Tensor']]]: ...
# Task:
Write a Python method `__init__` for the class `DefaultCollateFn` to initialize the collate function.
Parameters: dtypes: Optional[Union['torch.dtype', Dict[str, 'torch.dtype']]], device: Optional['TorchDeviceType'], pin_memory: bool, num_workers: int | def __init__(
self,
dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None,
device: Optional["TorchDeviceType"] = None,
pin_memory: bool = False,
num_workers: int = _DEFAULT_NUM_WORKERS,
):
"""Initialize the collate function.
Args:
dtypes: The torch dtype(s) for the created tensor(s); if None, the dtype
will be inferred from the tensor data.
device: The device on which the tensor should be placed. Can be a string
(e.g. "cpu", "cuda:0") or a torch.device object.
pin_memory: Whether to pin the memory of the created tensors.
num_workers: Number of worker threads for parallel tensor conversion.
Defaults to `RAY_DATA_DEFAULT_COLLATE_FN_THREADPOOL_MAX_WORKERS`.
"""
import torch
super().__init__()
self.dtypes = dtypes
if isinstance(device, (str, int)):
self.device = torch.device(device)
else:
self.device = device
self.pin_memory = pin_memory
self.num_workers = num_workers
self._threadpool: Optional[ThreadPoolExecutor] = None | function_simple | 0 | {"cognitive_complexity": 2, "loc": 29, "code_loc": 10, "docstring_loc": 11, "function_name": "__init__", "class_name": "DefaultCollateFn", "qualname": "DefaultCollateFn.__init__", "file_path": "python/ray/data/collate_fn.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
exo-explore/exo:src/exo/master/placement_utils.py:get_mlx_jaccl_devices_matrix | # Context:
from exo.shared.topology import Topology
from exo.shared.types.common import Host, NodeId
from exo.shared.types.topology import Cycle, RDMAConnection, SocketConnection
def filter_cycles_by_memory(cycles: list[Cycle], node_memory: Mapping[NodeId, MemoryUsage], required_memory: Memory) -> list[Cycle]: ...
def get_smallest_cycles(cycles: list[Cycle]) -> list[Cycle]: ...
def allocate_layers_proportionally(total_layers: int, memory_fractions: list[float]) -> list[int]: ...
def _validate_cycle(cycle: Cycle) -> None: ...
def _compute_total_memory(node_ids: list[NodeId], node_memory: Mapping[NodeId, MemoryUsage]) -> Memory: ...
def _allocate_and_validate_layers(node_ids: list[NodeId], node_memory: Mapping[NodeId, MemoryUsage], total_memory: Memory, model_card: ModelCard) -> list[int]: ...
def get_shard_assignments_for_pipeline_parallel(model_card: ModelCard, cycle: Cycle, node_memory: Mapping[NodeId, MemoryUsage]) -> ShardAssignments: ...
def _get_shard_assignments_for_cfg_parallel(model_card: ModelCard, cycle: Cycle, node_memory: Mapping[NodeId, MemoryUsage]) -> ShardAssignments: ...
def _get_shard_assignments_for_pure_pipeline(model_card: ModelCard, cycle: Cycle, node_memory: Mapping[NodeId, MemoryUsage]) -> ShardAssignments: ...
def get_shard_assignments_for_tensor_parallel(model_card: ModelCard, cycle: Cycle): ...
def get_shard_assignments(model_card: ModelCard, cycle: Cycle, sharding: Sharding, node_memory: Mapping[NodeId, MemoryUsage]) -> ShardAssignments: ...
def _find_connection_ip(node_i: NodeId, node_j: NodeId, cycle_digraph: Topology) -> Generator[str, None, None]: ...
def _find_ip_prioritised(node_id: NodeId, other_node_id: NodeId, cycle_digraph: Topology, node_network: Mapping[NodeId, NodeNetworkInfo], ring: bool) -> str | None: ...
def get_mlx_ring_hosts_by_node(selected_cycle: Cycle, cycle_digraph: Topology, ephemeral_port: int, node_network: Mapping[NodeId, NodeNetworkInfo]) -> dict[NodeId, list[Host]]: ...
def get_mlx_jaccl_coordinators(coordinator: NodeId, coordinator_port: int, cycle_digraph: Topology, node_network: Mapping[NodeId, NodeNetworkInfo]) -> dict[NodeId, str]: ...
# Task:
Write a Python function `get_mlx_jaccl_devices_matrix` to build connectivity matrix mapping device i to device j via RDMA interface names.
Parameters: selected_cycle: list[NodeId], cycle_digraph: Topology
Returns: list[list[str | None]] | def get_mlx_jaccl_devices_matrix(
selected_cycle: list[NodeId],
cycle_digraph: Topology,
) -> list[list[str | None]]:
"""Build connectivity matrix mapping device i to device j via RDMA interface names.
The matrix element [i][j] contains the interface name on device i that connects
to device j, or None if no connection exists or no interface name is found.
Diagonal elements are always None.
"""
num_nodes = len(selected_cycle)
matrix: list[list[str | None]] = [
[None for _ in range(num_nodes)] for _ in range(num_nodes)
]
for i, node_i in enumerate(selected_cycle):
for j, node_j in enumerate(selected_cycle):
if i == j:
continue
for conn in cycle_digraph.get_all_connections_between(node_i, node_j):
if isinstance(conn, RDMAConnection):
matrix[i][j] = conn.source_rdma_iface
break
else:
raise ValueError(
"Current jaccl backend requires all-to-all RDMA connections"
)
return matrix | function_complex | 0 | {"cognitive_complexity": 13, "loc": 30, "code_loc": 17, "docstring_loc": 6, "function_name": "get_mlx_jaccl_devices_matrix", "class_name": null, "qualname": "get_mlx_jaccl_devices_matrix", "file_path": "src/exo/master/placement_utils.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
github/spec-kit:src/specify_cli/extensions.py:ExtensionManifest.__init__ | # Context:
from pathlib import Path
class ExtensionError(Exception): ...
class ValidationError(ExtensionError): ...
class CompatibilityError(ExtensionError): ...
class ExtensionRegistry: ...
class ExtensionManager: ...
def version_satisfies(current: str, required: str) -> bool: ...
class CommandRegistrar: ...
class ExtensionCatalog: ...
class ConfigManager: ...
class HookExecutor: ...
class ExtensionManifest:
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "extension", "requires", "provides"]
def _load_yaml(self, path: Path) -> dict: ...
def _validate(self): ...
def id(self) -> str: ...
def name(self) -> str: ...
def version(self) -> str: ...
def description(self) -> str: ...
def requires_speckit_version(self) -> str: ...
def commands(self) -> List[Dict[str, Any]]: ...
def hooks(self) -> Dict[str, Any]: ...
def get_hash(self) -> str: ...
# Task:
Write a Python method `__init__` for the class `ExtensionManifest` to load and validate extension manifest.
Parameters: manifest_path: Path | def __init__(self, manifest_path: Path):
"""Load and validate extension manifest.
Args:
manifest_path: Path to extension.yml file
Raises:
ValidationError: If manifest is invalid
"""
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate() | function_simple | 0 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 3, "docstring_loc": 8, "function_name": "__init__", "class_name": "ExtensionManifest", "qualname": "ExtensionManifest.__init__", "file_path": "src/specify_cli/extensions.py", "repo_id": "github/spec-kit", "has_docstring": true, "runnable_level": "class_runnable"} |
infiniflow/ragflow:api/db/services/doc_metadata_service.py:DocMetadataService._drop_empty_metadata_table | # Context:
import logging
from common import settings
from common.doc_store.doc_store_base import OrderByExpr
class DocMetadataService:
def _get_doc_meta_index_name(tenant_id: str) -> str: ...
def _extract_metadata(flat_meta: Dict) -> Dict: ...
def _extract_doc_id(doc: Dict, hit: Dict) -> str: ...
def _iter_search_results(cls, results): ...
def _search_metadata(cls, kb_id: str, condition: Dict): ...
def _split_combined_values(cls, meta_fields: Dict) -> Dict: ...
def insert_document_metadata(cls, doc_id: str, meta_fields: Dict) -> bool: ...
def update_document_metadata(cls, doc_id: str, meta_fields: Dict) -> bool: ...
def delete_document_metadata(cls, doc_id: str, skip_empty_check: bool) -> bool: ...
def get_document_metadata(cls, doc_id: str) -> Dict: ...
def get_meta_by_kbs(cls, kb_ids: List[str]) -> Dict: ...
def get_flatted_meta_by_kbs(cls, kb_ids: List[str]) -> Dict: ...
def get_metadata_for_documents(cls, doc_ids: Optional[List[str]], kb_id: str) -> Dict[str, Dict]: ...
def get_metadata_summary(cls, kb_id: str, doc_ids) -> Dict: ...
def batch_update_metadata(cls, kb_id: str, doc_ids: List[str], updates, deletes) -> int: ...
# Task:
Write a Python method `_drop_empty_metadata_table` for the class `DocMetadataService` to check if metadata table is empty and drop it if so.
Parameters: index_name: str, tenant_id: str
Returns: None | def _drop_empty_metadata_table(cls, index_name: str, tenant_id: str) -> None:
"""
Check if metadata table is empty and drop it if so.
Uses optimized count query instead of full search.
This prevents accumulation of empty metadata tables.
Args:
index_name: Metadata table/index name
tenant_id: Tenant ID
"""
try:
logging.debug(f"[DROP EMPTY TABLE] Starting empty table check for: {index_name}")
# Check if table exists first (cheap operation)
if not settings.docStoreConn.index_exist(index_name, ""):
logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} does not exist, skipping")
return
logging.debug(f"[DROP EMPTY TABLE] Table {index_name} exists, checking if empty...")
# Use ES count API for accurate count
# Note: No need to refresh since delete operation already uses refresh=True
try:
count_response = settings.docStoreConn.es.count(index=index_name)
total_count = count_response['count']
logging.debug(f"[DROP EMPTY TABLE] ES count API result: {total_count} documents")
is_empty = (total_count == 0)
except Exception as e:
logging.warning(f"[DROP EMPTY TABLE] Count API failed, falling back to search: {e}")
# Fallback to search if count fails
results = settings.docStoreConn.search(
select_fields=["id"],
highlight_fields=[],
condition={},
match_expressions=[],
order_by=OrderByExpr(),
offset=0,
limit=1, # Only need 1 result to know if table is non-empty
index_names=index_name,
knowledgebase_ids=[""] # Metadata tables don't filter by KB
)
logging.debug(f"[DROP EMPTY TABLE] Search results type: {type(results)}, results: {results}")
# Check if empty based on return type (fallback search only)
if isinstance(results, tuple) and len(results) == 2:
# Infinity returns (DataFrame, int)
df, total = results
logging.debug(f"[DROP EMPTY TABLE] Infinity format - total: {total}, df length: {len(df) if hasattr(df, '__len__') else 'N/A'}")
is_empty = (total == 0 or (hasattr(df, '__len__') and len(df) == 0))
elif hasattr(results, 'get') and 'hits' in results:
# ES format - MUST check this before hasattr(results, '__len__')
# because ES response objects also have __len__
total = results.get('hits', {}).get('total', {})
hits = results.get('hits', {}).get('hits', [])
# ES 7.x+: total is a dict like {'value': 0, 'relation': 'eq'}
# ES 6.x: total is an int
if isinstance(total, dict):
total_count = total.get('value', 0)
else:
total_count = total
logging.debug(f"[DROP EMPTY TABLE] ES format - total: {total_count}, hits count: {len(hits)}")
is_empty = (total_count == 0 or len(hits) == 0)
elif hasattr(results, '__len__'):
# DataFrame or list (check this AFTER ES format)
result_len = len(results)
logging.debug(f"[DROP EMPTY TABLE] List/DataFrame format - length: {result_len}")
is_empty = result_len == 0
else:
logging.warning(f"[DROP EMPTY TABLE] Unknown result format: {type(results)}")
is_empty = False
if is_empty:
logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} is empty, dropping it")
drop_result = settings.docStoreConn.delete_idx(index_name, "")
logging.debug(f"[DROP EMPTY TABLE] Drop result: {drop_result}")
else:
logging.debug(f"[DROP EMPTY TABLE] Metadata table {index_name} still has documents, keeping it")
except Exception as e:
# Log but don't fail - metadata deletion was successful
logging.error(f"[DROP EMPTY TABLE] Failed to check/drop empty metadata table {index_name}: {e}") | function_complex | 1 | {"cognitive_complexity": 23, "loc": 84, "code_loc": 53, "docstring_loc": 9, "function_name": "_drop_empty_metadata_table", "class_name": "DocMetadataService", "qualname": "DocMetadataService._drop_empty_metadata_table", "file_path": "api/db/services/doc_metadata_service.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py:MMGroundingDinoModelIntegrationTests.test_inference_object_detection_head_equivalence_cpu_gpu | # Context:
from transformers.testing_utils import (
is_flaky,
require_timm,
require_torch,
require_torch_accelerator,
require_vision,
slow,
torch_device,
)
import torch
from transformers import MMGroundingDinoConfig, MMGroundingDinoForObjectDetection, MMGroundingDinoModel
def generate_fake_bounding_boxes(n_boxes): ...
class MMGroundingDinoModelTester: ...
class MMGroundingDinoModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ...
def prepare_img(): ...
def prepare_text(): ...
class MMGroundingDinoModelIntegrationTests(unittest.TestCase):
def default_processor(self): ...
def test_inference_object_detection_head(self): ...
def test_cross_attention_mask(self): ...
def test_mm_grounding_dino_loss(self): ...
# Task:
Write a Python test method `test_inference_object_detection_head_equivalence_cpu_gpu` in test class `MMGroundingDinoModelIntegrationTests` to verify the behavior of `inference_object_detection_head_equivalence_cpu_gpu`.
Module under test: functools, datasets, transformers | def test_inference_object_detection_head_equivalence_cpu_gpu(self):
processor = self.default_processor
image = prepare_img()
text = prepare_text()
encoding = processor(images=image, text=text, return_tensors="pt")
# 1. run model on CPU
model = MMGroundingDinoForObjectDetection.from_pretrained(
"openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det"
)
# HACK: the issue happens during top-k (k=900) after the encoder
# there are some flips between cpu and gpu query ordering (idxs 195<->196 and 267<->268 on my machine)
# which causes different query position embedding assignments
# which in turn significantly changes the decoder pass due to self attention
model.config.num_queries = 100
model.model.query_position_embeddings.weight.data = model.model.query_position_embeddings.weight.data[:100]
with torch.no_grad():
cpu_outputs = model(**encoding)
# 2. run model on GPU
model.to(torch_device)
encoding = encoding.to(torch_device)
with torch.no_grad():
gpu_outputs = model(**encoding)
# 3. assert equivalence
for key in cpu_outputs.keys():
torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), rtol=1e-3, atol=1e-3)
expected_logits = torch.tensor(
[[-5.0188, -1.0069, -1.0005], [-5.1177, -1.0537, -1.0444], [-5.3986, -2.4935, -2.4716]]
)
torch.testing.assert_close(cpu_outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3)
# assert postprocessing
results_cpu = processor.image_processor.post_process_object_detection(
cpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
)[0]
result_gpu = processor.image_processor.post_process_object_detection(
gpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)]
)[0]
torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-3, atol=1e-3)
torch.testing.assert_close(results_cpu["boxes"], result_gpu["boxes"].cpu(), rtol=1e-3, atol=1e-3) | test | 0 | {"function_name": "test_inference_object_detection_head_equivalence_cpu_gpu", "class_name": "MMGroundingDinoModelIntegrationTests", "qualname": "MMGroundingDinoModelIntegrationTests.test_inference_object_detection_head_equivalence_cpu_gpu", "file_path": "tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py", "repo_id": "huggingface/transformers", "loc": 46, "tested_modules": ["functools", "datasets", "transformers", "transformers.testing_utils", "test_configuration_common"], "has_docstring": false, "runnable_level": "file_runnable"} |
run-llama/llama_index:llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py:TestBaseAgentCoreMemoryMethods.test_create_event_no_event_id | # Context:
import pytest
from llama_index.core.base.llms.types import ChatMessage, MessageRole
def mock_client(): ...
def memory_context(): ...
def memory(mock_client, memory_context): ...
class TestAgentCoreMemoryContext: ...
class TestAgentCoreMemory: ...
class TestIntegration: ...
class TestErrorHandling: ...
async def test_aput(memory): ...
async def test_aput_messages(memory): ...
class TestBaseAgentCoreMemoryMethods:
def test_create_event_success(self, memory): ...
def test_create_event_no_client(self, memory_context): ...
def test_create_event_empty_messages(self, memory): ...
def test_list_events_simple(self, memory): ...
def test_list_events_with_pagination(self, memory): ...
def test_retrieve_memories(self, memory): ...
def test_list_raw_events_pagination(self, memory): ...
def test_list_memory_records_pagination(self, memory): ...
def test_list_sessions_pagination(self, memory): ...
def test_delete_events_skips_missing_event_id(self, memory): ...
def test_delete_memory_records(self, memory): ...
def test_batch_delete_memory_records_chunks(self, memory): ...
def test_delete_all_memory_for_session_events_only(self, memory): ...
def test_delete_all_memory_for_session_events_and_records(self, memory): ...
# Task:
Write a Python test method `test_create_event_no_event_id` in test class `TestBaseAgentCoreMemoryMethods` to test create_event raises error when no event ID is returned.
Module under test: llama_index.core.base.llms.types, llama_index.core.memory.memory, llama_index.memory.bedrock_agentcore.base | def test_create_event_no_event_id(self, memory):
"""Test create_event raises error when no event ID is returned."""
memory._client.create_event.return_value = {"event": {"eventId": None}}
messages = [ChatMessage(role=MessageRole.USER, content="Hello")]
with pytest.raises(
RuntimeError, match="Bedrock AgentCore did not return an event ID"
):
memory.create_event(
memory_id="test-memory",
actor_id="test-actor",
messages=messages,
session_id="test-session",
) | test | 1 | {"function_name": "test_create_event_no_event_id", "class_name": "TestBaseAgentCoreMemoryMethods", "qualname": "TestBaseAgentCoreMemoryMethods.test_create_event_no_event_id", "file_path": "llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py", "repo_id": "run-llama/llama_index", "loc": 14, "tested_modules": ["llama_index.core.base.llms.types", "llama_index.core.memory.memory", "llama_index.memory.bedrock_agentcore.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/lfx/tests/unit/cli/test_serve_simple.py:test_serve_command_missing_api_key | # Context:
import json
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
from typer.testing import CliRunner
from lfx.__main__ import app, main
from lfx.__main__ import app
def test_cli_imports(): ...
def test_serve_command_help(): ...
def test_serve_command_with_flow_json(): ...
def test_serve_command_invalid_json(): ...
def test_serve_command_nonexistent_file(): ...
def test_cli_utility_functions(): ...
# Task:
Write a Python test function `test_serve_command_missing_api_key` to test that serve command fails without API key.
Module under test: pathlib, typer.testing, lfx.__main__ | def test_serve_command_missing_api_key():
"""Test that serve command fails without API key."""
from lfx.__main__ import app
# Create a temporary JSON flow file
flow_data = {
"data": {
"nodes": [],
"edges": [],
}
}
with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
json.dump(flow_data, f)
temp_path = f.name
try:
# Clear API key from environment
with patch.dict(os.environ, {}, clear=True):
runner = CliRunner()
result = runner.invoke(app, ["serve", temp_path])
assert result.exit_code == 1
# Check both output and exception since typer may output to different streams
assert "LANGFLOW_API_KEY" in str(result.output or result.exception or "")
finally:
Path(temp_path).unlink() | test | 1 | {"function_name": "test_serve_command_missing_api_key", "class_name": null, "qualname": "test_serve_command_missing_api_key", "file_path": "src/lfx/tests/unit/cli/test_serve_simple.py", "repo_id": "langflow-ai/langflow", "loc": 27, "tested_modules": ["pathlib", "typer.testing", "lfx.__main__", "lfx.__main__", "lfx.__main__"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/vibevoice_acoustic_tokenizer/test_feature_extraction_vibevoice_acoustic_tokenizer.py:VibeVoiceAcousticTokenizerFeatureExtractionTest.test_call | # Context:
import numpy as np
import torch
def floats_list(shape, scale, rng, name): ...
class VibeVoiceAcousticTokenizerFeatureExtractionTester: ...
class VibeVoiceAcousticTokenizerFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = VibeVoiceAcousticTokenizerFeatureExtractor
def setUp(self): ...
def _load_datasamples(self, num_samples): ...
def test_normalize_audio(self): ...
def test_sampling_rate_validation(self): ...
def test_padding_mask_generation(self): ...
# Task:
Write a Python test method `test_call` in test class `VibeVoiceAcousticTokenizerFeatureExtractionTest` to verify the behavior of `call`.
Module under test: transformers, transformers.testing_utils, transformers.utils.import_utils | def test_call(self):
TOL = 1e-6
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
sampling_rate = feature_extractor.sampling_rate
audio_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_audio_inputs = [np.asarray(audio_input) for audio_input in audio_inputs]
torch_audio_inputs = [torch.tensor(audio_input) for audio_input in audio_inputs]
# Test non-batched input
encoded_sequences_1 = feature_extractor(torch_audio_inputs[0], sampling_rate=sampling_rate).input_values
encoded_sequences_2 = feature_extractor(np_audio_inputs[0], sampling_rate=sampling_rate).input_values
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=TOL))
# Test batched input
encoded_sequences_1 = feature_extractor(torch_audio_inputs, sampling_rate=sampling_rate).input_values
encoded_sequences_2 = feature_extractor(np_audio_inputs, sampling_rate=sampling_rate).input_values
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=TOL)) | test | 0 | {"function_name": "test_call", "class_name": "VibeVoiceAcousticTokenizerFeatureExtractionTest", "qualname": "VibeVoiceAcousticTokenizerFeatureExtractionTest.test_call", "file_path": "tests/models/vibevoice_acoustic_tokenizer/test_feature_extraction_vibevoice_acoustic_tokenizer.py", "repo_id": "huggingface/transformers", "loc": 19, "tested_modules": ["transformers", "transformers.testing_utils", "transformers.utils.import_utils", "test_sequence_feature_extraction_common", "datasets"], "has_docstring": false, "runnable_level": "file_runnable"} |
ocrmypdf/OCRmyPDF:tests/test_pdf_renderer.py:TestFpdf2PdfRendererTextangle.test_rotated_text | # Context:
from ocrmypdf.fpdf_renderer import DebugRenderOptions, Fpdf2PdfRenderer
from ocrmypdf.helpers import check_pdf
from ocrmypdf.hocrtransform import (
Baseline,
BoundingBox,
OcrClass,
OcrElement,
)
def text_from_pdf(filename: Path) -> str: ...
def font_dir(): ...
def multi_font_manager(font_dir): ...
def create_simple_page(width: float, height: float, words: list[tuple[str, tuple[float, float, float, float]]] | None) -> OcrElement: ...
class TestFpdf2PdfRendererBasic: ...
class TestFpdf2PdfRendererPageSize: ...
class TestFpdf2PdfRendererMultiLine: ...
class TestFpdf2PdfRendererTextDirection: ...
class TestFpdf2PdfRendererBaseline: ...
class TestFpdf2PdfRendererWordBreaks: ...
class TestFpdf2PdfRendererDebugOptions: ...
class TestFpdf2PdfRendererErrors: ...
class TestFpdf2PdfRendererLineTypes: ...
class TestFpdf2PdfRendererTextangle:
# Task:
Write a Python test method `test_rotated_text` in test class `TestFpdf2PdfRendererTextangle` to test rendering rotated text.
Module under test: __future__, io, pathlib | def test_rotated_text(self, tmp_path, multi_font_manager):
"""Test rendering rotated text."""
word = OcrElement(
ocr_class=OcrClass.WORD,
text="Rotated",
bbox=BoundingBox(left=100, top=100, right=200, bottom=150),
)
line = OcrElement(
ocr_class=OcrClass.LINE,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
baseline=Baseline(slope=0.0, intercept=0),
textangle=5.0,
children=[word],
)
paragraph = OcrElement(
ocr_class=OcrClass.PARAGRAPH,
bbox=BoundingBox(left=100, top=100, right=900, bottom=150),
direction="ltr",
language="eng",
children=[line],
)
page = OcrElement(
ocr_class=OcrClass.PAGE,
bbox=BoundingBox(left=0, top=0, right=1000, bottom=500),
children=[paragraph],
)
output_pdf = tmp_path / "rotated.pdf"
renderer = Fpdf2PdfRenderer(
page=page, dpi=72.0, multi_font_manager=multi_font_manager
)
renderer.render(output_pdf)
check_pdf(str(output_pdf))
extracted_text = text_from_pdf(output_pdf)
assert "Rotated" in extracted_text | test | 1 | {"function_name": "test_rotated_text", "class_name": "TestFpdf2PdfRendererTextangle", "qualname": "TestFpdf2PdfRendererTextangle.test_rotated_text", "file_path": "tests/test_pdf_renderer.py", "repo_id": "ocrmypdf/OCRmyPDF", "loc": 36, "tested_modules": ["__future__", "io", "pathlib", "pdfminer.converter", "pdfminer.layout"], "has_docstring": true, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/locust/langflow_locustfile.py:SustainedLoadUser:class_doc | Write a class-level docstring for `SustainedLoadUser` (inherits from BaseLangflowUser) which has methods: `steady_load`. | Maintains exactly 1 request/second for steady load testing. | documentation | 1 | {"doc_type": "class", "class_name": "SustainedLoadUser", "file_path": "src/backend/tests/locust/langflow_locustfile.py", "repo_id": "langflow-ai/langflow", "char_length": 59, "methods": ["steady_load"]} |
mem0ai/mem0:mem0/vector_stores/valkey.py:ValkeyDB.delete_col | # Context:
class OutputData(BaseModel): ...
class ValkeyDB(VectorStoreBase):
def __init__(
self,
valkey_url: str,
collection_name: str,
embedding_model_dims: int,
timezone: str = "UTC",
index_type: str = "hnsw",
hnsw_m: int = 16,
hnsw_ef_construction: int = 200,
hnsw_ef_runtime: int = 10,
):
"""
Initialize the Valkey vector store.
Args:
valkey_url (str): Valkey URL.
collection_name (str): Collection name.
embedding_model_dims (int): Embedding model dimensions.
timezone (str, optional): Timezone for timestamps. Defaults to "UTC".
index_type (str, optional): Index type ('hnsw' or 'flat'). Defaults to "hnsw".
hnsw_m (int, optional): HNSW M parameter (connections per node). Defaults to 16.
hnsw_ef_construction (int, optional): HNSW ef_construction parameter. Defaults to 200.
hnsw_ef_runtime (int, optional): HNSW ef_runtime parameter. Defaults to 10.
"""
self.embedding_model_dims = embedding_model_dims
self.collection_name = collection_name
self.prefix = f"mem0:{collection_name}"
self.timezone = timezone
self.index_type = index_type.lower()
self.hnsw_m = hnsw_m
self.hnsw_ef_construction = hnsw_ef_construction
self.hnsw_ef_runtime = hnsw_ef_runtime
# Validate index type
if self.index_type not in ["hnsw", "flat"]:
raise ValueError(f"Invalid index_type: {index_type}. Must be 'hnsw' or 'flat'")
# Connect to Valkey
try:
self.client = valkey.from_url(valkey_url)
logger.debug(f"Successfully connected to Valkey at {valkey_url}")
except Exception as e:
logger.exception(f"Failed to connect to Valkey at {valkey_url}: {e}")
raise
# Create the index schema
self._create_index(embedding_model_dims)
def _build_index_schema(self, collection_name, embedding_dims, distance_metric, prefix): ...
def _create_index(self, embedding_model_dims): ...
def create_col(self, name, vector_size, distance): ...
def insert(self, vectors: list, payloads: list, ids: list): ...
def _build_search_query(self, knn_part, filters): ...
def _execute_search(self, query, params): ...
def _process_search_results(self, results): ...
def search(self, query: str, vectors: list, limit: int, filters: dict, ef_runtime: int): ...
def delete(self, vector_id): ...
def update(self, vector_id, vector, payload): ...
def _format_timestamp(self, timestamp, timezone): ...
def _process_document_fields(self, result, vector_id): ...
def _convert_bytes(self, data): ...
def get(self, vector_id): ...
def list_cols(self): ...
def _drop_index(self, collection_name, log_level): ...
def col_info(self, name): ...
def reset(self): ...
def _build_list_query(self, filters): ...
def list(self, filters: dict, limit: int) -> list: ...
# Task:
Write a Python method `delete_col` for the class `ValkeyDB` to delete the current collection (index). | def delete_col(self):
"""
Delete the current collection (index).
"""
return self._drop_index(self.collection_name, log_level="info") | function_simple | 1 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 1, "docstring_loc": 3, "function_name": "delete_col", "class_name": "ValkeyDB", "qualname": "ValkeyDB.delete_col", "file_path": "mem0/vector_stores/valkey.py", "repo_id": "mem0ai/mem0", "has_docstring": true, "runnable_level": "class_runnable"} |
streamlit/streamlit:e2e_playwright/st_plotly_chart_dimensions_test.py:test_plotly_stretch_width_fullscreen | # Context:
from playwright.sync_api import Page, expect
from e2e_playwright.conftest import ImageCompareFunction
def test_check_top_level_class(app: Page): ...
def test_plotly_dimensions(app: Page, assert_snapshot: ImageCompareFunction): ...
def test_plotly_content_width_fullscreen(themed_app: Page, assert_snapshot: ImageCompareFunction): ...
# Task:
Write a Python test function `test_plotly_stretch_width_fullscreen` to test fullscreen behavior with width='stretch'.
Module under test: playwright.sync_api, e2e_playwright.conftest, e2e_playwright.shared.app_utils | def test_plotly_stretch_width_fullscreen(
themed_app: Page, assert_snapshot: ImageCompareFunction
):
"""Test fullscreen behavior with width='stretch'."""
index = 1 # Second chart with width='stretch'
themed_app.get_by_test_id("stPlotlyChart").nth(index).hover()
fullscreen_button = themed_app.locator('[data-title="Fullscreen"]').nth(index)
fullscreen_button.hover()
fullscreen_button.click()
# Wait for fullscreen mode to activate
expect(themed_app.get_by_test_id("stFullScreenFrame").nth(index)).to_have_css(
"position", "fixed"
)
assert_snapshot(
themed_app.get_by_test_id("stPlotlyChart").nth(index),
name="st_plotly_chart-stretch_width_fullscreen",
)
fullscreen_button = themed_app.locator('[data-title="Close fullscreen"]').nth(0)
fullscreen_button.hover()
fullscreen_button.click()
# Wait for fullscreen mode to deactivate
expect(themed_app.get_by_test_id("stFullScreenFrame").nth(index)).not_to_have_css(
"position", "fixed"
)
assert_snapshot(
themed_app.get_by_test_id("stPlotlyChart").nth(index),
name="st_plotly_chart-stretch_width_exited_fullscreen",
) | test | 1 | {"function_name": "test_plotly_stretch_width_fullscreen", "class_name": null, "qualname": "test_plotly_stretch_width_fullscreen", "file_path": "e2e_playwright/st_plotly_chart_dimensions_test.py", "repo_id": "streamlit/streamlit", "loc": 33, "tested_modules": ["playwright.sync_api", "e2e_playwright.conftest", "e2e_playwright.shared.app_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/_internal/batch/stages/serve_deployment_stage.py:ServeDeploymentStageUDF.__init__ | # Context:
from typing import Any, AsyncIterator, Dict, List, Optional, Tuple, Type
from ray import serve
class ServeDeploymentStage(StatefulStage): ...
class ServeDeploymentStageUDF(StatefulStageUDF):
def _prepare_request(self, row: Dict[str, Any]) -> Tuple[Dict[str, Any], Optional[Type[Any]], str]: ...
async def generate_async(self, row: Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, Any], float]: ...
def _is_recoverable_error(self, exc: Exception) -> bool: ...
async def _generate_with_error_handling(self, row: Dict[str, Any], batch_uuid: uuid.UUID) -> Dict[str, Any]: ...
async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]: ...
# Task:
Write a Python method `__init__` for the class `ServeDeploymentStageUDF` to initialize the ServeDeploymentStageUDF.
Parameters: data_column: str, expected_input_keys: List[str] | def __init__(
self,
data_column: str,
expected_input_keys: List[str],
*,
deployment_name: str,
app_name: str,
dtype_mapping: Dict[str, Type[Any]],
should_continue_on_error: bool = False,
):
"""
Initialize the ServeDeploymentStageUDF.
Args:
data_column: The data column name.
expected_input_keys: The expected input keys of the stage.
deployment_name: The name of the deployment.
app_name: The name of the deployment app.
dtype_mapping: The mapping of the request class name to the request class.
should_continue_on_error: If True, continue processing when inference
fails for a row instead of raising. Failed rows will have
'__inference_error__' set to the error message.
"""
super().__init__(data_column, expected_input_keys)
self._dtype_mapping = dtype_mapping
self.should_continue_on_error = should_continue_on_error
# Using stream=True as LLM serve deployments return async generators.
# TODO (Kourosh): Generalize this to support non-streaming deployments.
self._dh = serve.get_deployment_handle(deployment_name, app_name).options(
stream=True
)
self.request_id = 0 | function_simple | 0 | {"cognitive_complexity": 0, "loc": 33, "code_loc": 7, "docstring_loc": 13, "function_name": "__init__", "class_name": "ServeDeploymentStageUDF", "qualname": "ServeDeploymentStageUDF.__init__", "file_path": "python/ray/llm/_internal/batch/stages/serve_deployment_stage.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
huggingface/transformers:tests/models/exaone_moe/test_modeling_exaone_moe.py:ExaoneMoeIntegrationTest.test_model_generation_beyond_sliding_window_flash | # Context:
from pytest import mark
import torch
class ExaoneMoeModelTester(CausalLMModelTester): ...
class ExaoneMoeModelTest(CausalLMModelTest, unittest.TestCase): ...
class ExaoneMoeIntegrationTest(unittest.TestCase):
TEST_MODEL_ID = "hf-internal-testing/EXAONE-MoE-Dummy-7B-A1B"
def setUpClass(cls): ...
def tearDownClass(cls): ...
def setup(self): ...
def tearDown(self): ...
def get_model(cls): ...
def test_model_logits(self): ...
def test_model_generation_sdpa(self): ...
# Task:
Write a Python test method `test_model_generation_beyond_sliding_window_flash` in test class `ExaoneMoeIntegrationTest` to verify the behavior of `model_generation_beyond_sliding_window_flash`.
Module under test: transformers, transformers.testing_utils, causal_lm_tester | def test_model_generation_beyond_sliding_window_flash(self):
EXPECTED_OUTPUT_TOKEN_IDS = [373, 686, 373, 115708, 373, 885]
input_ids = [72861, 2711] + [21605, 2711] * 2048
model = self.get_model()
model.set_attn_implementation("flash_attention_2")
input_ids = torch.tensor([input_ids]).to(model.device)
with torch.no_grad():
generated_ids = model.generate(input_ids, max_new_tokens=6, do_sample=False)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-6:].tolist()) | test | 0 | {"function_name": "test_model_generation_beyond_sliding_window_flash", "class_name": "ExaoneMoeIntegrationTest", "qualname": "ExaoneMoeIntegrationTest.test_model_generation_beyond_sliding_window_flash", "file_path": "tests/models/exaone_moe/test_modeling_exaone_moe.py", "repo_id": "huggingface/transformers", "loc": 10, "tested_modules": ["transformers", "transformers.testing_utils", "causal_lm_tester", "transformers"], "has_docstring": false, "runnable_level": "class_runnable"} |
ray-project/ray:python/ray/autoscaler/v2/instance_manager/subscribers/cloud_resource_monitor.py:CloudResourceMonitor.get_resource_availabilities | # Context:
from typing import Dict, List
from ray.autoscaler.v2.schema import NodeType
class CloudResourceMonitor(InstanceUpdatedSubscriber):
def __init__(
self,
) -> None:
self._last_unavailable_timestamp: Dict[NodeType, float] = {}
def allocation_timeout(self, failed_event: InstanceUpdateEvent): ...
def allocation_succeeded(self, succeeded_event: InstanceUpdateEvent): ...
def notify(self, events: List[InstanceUpdateEvent]) -> None: ...
# Task:
Write a Python method `get_resource_availabilities` for the class `CloudResourceMonitor` to calculate the availability scores of node types.
Returns: Dict[NodeType, float] | def get_resource_availabilities(self) -> Dict[NodeType, float]:
"""Calculate the availability scores of node types.
Higher values indicate a higher likelihood of resource allocation.
"""
resource_availability_scores: Dict[NodeType, float] = {}
if self._last_unavailable_timestamp:
max_ts = max(self._last_unavailable_timestamp.values())
for node_type in self._last_unavailable_timestamp:
resource_availability_scores[node_type] = (
1 - self._last_unavailable_timestamp[node_type] / max_ts
)
return resource_availability_scores | function_simple | 0 | {"cognitive_complexity": 3, "loc": 12, "code_loc": 8, "docstring_loc": 3, "function_name": "get_resource_availabilities", "class_name": "CloudResourceMonitor", "qualname": "CloudResourceMonitor.get_resource_availabilities", "file_path": "python/ray/autoscaler/v2/instance_manager/subscribers/cloud_resource_monitor.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
browser-use/browser-use:browser_use/skill_cli/commands/cloud_session.py:delete_session | # Context:
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
def create_session(**kwargs) -> SessionItemView: ...
def list_sessions(limit: int, status: str | None) -> list[SessionItemView]: ...
def get_session(session_id: str) -> SessionView: ...
def stop_session(session_id: str) -> SessionView: ...
def create_public_share(session_id: str) -> ShareView: ...
def delete_public_share(session_id: str) -> None: ...
def stop_sessions_parallel(session_ids: list[str]) -> tuple[list[str], list[dict[str, Any]]]: ...
def handle_session_command(args: argparse.Namespace) -> int: ...
def _session_to_dict(session: Any) -> dict[str, Any]: ...
def _handle_list(args: argparse.Namespace) -> int: ...
def _handle_get(args: argparse.Namespace) -> int: ...
def _handle_stop(args: argparse.Namespace) -> int: ...
def _handle_stop_all(args: argparse.Namespace) -> int: ...
def _handle_create(args: argparse.Namespace) -> int: ...
def _handle_share(args: argparse.Namespace) -> int: ...
# Task:
Write a Python function `delete_session` to delete a cloud session and all its tasks.
Parameters: session_id: str
Returns: None | def delete_session(session_id: str) -> None:
"""Delete a cloud session and all its tasks."""
get_sdk_client().sessions.delete_session(session_id) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "delete_session", "class_name": null, "qualname": "delete_session", "file_path": "browser_use/skill_cli/commands/cloud_session.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
PaddlePaddle/PaddleOCR:paddleocr/_pipelines/ocr.py:license_header | Add a Apache-2.0 license header comment for the project 'PaddleOCR', authored by PaddlePaddle Authors, year 2025. | # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Should we use a third-party CLI library to auto-generate command-line
# arguments from the pipeline class, to reduce boilerplate and improve
# maintainability? | license | 0 | {"license_type": "Apache-2.0", "author": "PaddlePaddle Authors", "year": "2025", "source": "header", "repo_id": "PaddlePaddle/PaddleOCR"} |
apache/airflow:task-sdk/tests/task_sdk/execution_time/test_context_cache.py:TestCacheDisabled.test_get_connection_no_cache_when_disabled | # Context:
from unittest.mock import AsyncMock, MagicMock, call, patch
from airflow.sdk.execution_time.comms import ConnectionResult, VariableResult
from airflow.sdk.execution_time.context import (
_delete_variable,
_get_connection,
_get_variable,
_set_variable,
)
from airflow.sdk.execution_time.secrets import ExecutionAPISecretsBackend
class TestConnectionCacheIntegration: ...
class TestVariableCacheIntegration: ...
class TestAsyncConnectionCache: ...
class TestCacheDisabled:
def setup_method(): ...
def teardown_method(): ...
# Task:
Write a Python test method `test_get_connection_no_cache_when_disabled` in test class `TestCacheDisabled` to test that cache is not used when disabled.
Module under test: __future__, airflow.sdk.definitions.connection, airflow.sdk.execution_time.cache | def test_get_connection_no_cache_when_disabled(self, mock_ensure_backends, mock_supervisor_comms):
"""Test that cache is not used when disabled."""
conn_id = "test_conn"
conn_result = ConnectionResult(conn_id=conn_id, conn_type="mysql", host="host")
mock_ensure_backends.return_value = [ExecutionAPISecretsBackend()]
mock_supervisor_comms.send.return_value = conn_result
result = _get_connection(conn_id)
assert result.conn_id == conn_id
# Called for GetConnection (and possibly MaskSecret)
assert mock_supervisor_comms.send.call_count >= 1
_get_connection(conn_id)
# Called twice since cache is disabled
assert mock_supervisor_comms.send.call_count >= 2 | test | 1 | {"function_name": "test_get_connection_no_cache_when_disabled", "class_name": "TestCacheDisabled", "qualname": "TestCacheDisabled.test_get_connection_no_cache_when_disabled", "file_path": "task-sdk/tests/task_sdk/execution_time/test_context_cache.py", "repo_id": "apache/airflow", "loc": 19, "tested_modules": ["__future__", "airflow.sdk.definitions.connection", "airflow.sdk.execution_time.cache", "airflow.sdk.execution_time.comms", "airflow.sdk.execution_time.context"], "has_docstring": true, "runnable_level": "project_runnable"} |
google/langextract:tests/prompting_test.py:ContextAwarePromptBuilderTest.test_first_chunk_has_no_previous_context | # Context:
from langextract import prompting
class QAPromptGeneratorTest(parameterized.TestCase): ...
class PromptBuilderTest(absltest.TestCase): ...
class ContextAwarePromptBuilderTest(absltest.TestCase):
def _create_generator(self): ...
def test_context_window_chars_property(self): ...
def test_second_chunk_includes_previous_context(self): ...
def test_context_disabled_when_none(self): ...
def test_context_isolated_per_document(self): ...
def test_combines_previous_context_with_additional_context(self): ...
# Task:
Write a Python test method `test_first_chunk_has_no_previous_context` in test class `ContextAwarePromptBuilderTest` to verify that the first chunk does not include previous context.
Module under test: absl.testing, absl.testing, langextract | def test_first_chunk_has_no_previous_context(self):
"""Verifies the first chunk does not include previous context."""
generator = self._create_generator()
builder = prompting.ContextAwarePromptBuilder(
generator, context_window_chars=50
)
context_prefix = prompting.ContextAwarePromptBuilder._CONTEXT_PREFIX
prompt = builder.build_prompt(
chunk_text="First chunk text.",
document_id="doc1",
)
self.assertNotIn(context_prefix, prompt)
self.assertIn("First chunk text.", prompt) | test | 1 | {"function_name": "test_first_chunk_has_no_previous_context", "class_name": "ContextAwarePromptBuilderTest", "qualname": "ContextAwarePromptBuilderTest.test_first_chunk_has_no_previous_context", "file_path": "tests/prompting_test.py", "repo_id": "google/langextract", "loc": 15, "tested_modules": ["absl.testing", "absl.testing", "langextract", "langextract.core", "langextract.core"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/dashboard/modules/aggregator/tests/test_aggregator_agent.py:test_aggregator_agent_receive_empty_events | # Context:
import pytest
from ray.core.generated.events_event_aggregator_service_pb2 import (
AddEventsRequest,
RayEventsData,
TaskEventsMetadata,
)
def httpserver_listen_address(): ...
def fake_timestamp(): ...
def generate_event_export_env_vars(preserve_proto_field_name: Optional[bool], additional_env_vars: dict) -> dict: ...
def build_export_env_vars_param_list(additional_env_vars: dict) -> list: ...
def get_event_aggregator_grpc_stub(gcs_address, head_node_id): ...
def test_aggregator_agent_http_target_not_enabled(export_addr, expected_http_target_enabled, expected_event_processing_enabled): ...
def test_aggregator_agent_event_processing_disabled(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp): ...
def test_aggregator_agent_receive_publish_events_normally(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_receive_event_full(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_receive_multiple_events(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_receive_multiple_events_failures(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_profile_events_not_exposed(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_all_event_types_exposed(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp): ...
def _create_task_definition_event_proto(timestamp): ...
def _verify_task_definition_event_json(req_json, expected_timestamp, preserve_proto_field_name): ...
def _create_task_lifecycle_event_proto(timestamp): ...
def _verify_task_lifecycle_event_json(req_json, expected_timestamp, preserve_proto_field_name): ...
def _create_profile_event_request(timestamp): ...
def _verify_profile_event_json(req_json, expected_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_receive_events(create_event, verify_event, ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp, preserve_proto_field_name): ...
def test_aggregator_agent_receive_driver_job_definition_event(ray_start_cluster_head_with_env_vars, httpserver, preserve_proto_field_name): ...
def test_aggregator_agent_receive_driver_job_lifecycle_event(ray_start_cluster_head_with_env_vars, httpserver, preserve_proto_field_name): ...
def test_aggregator_agent_http_svc_publish_disabled(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp): ...
def _get_task_from_gcs(unique_task_name: str): ...
def _create_task_definition_event_for_gcs(timestamp, unique_task_name: str): ...
def _wait_for_and_verify_task_definition_event_in_gcs(unique_task_name: str, sent_event): ...
def test_aggregator_agent_publish_to_both_gcs_and_http(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp): ...
def test_aggregator_agent_gcs_filtering_driver_job_events(ray_start_cluster_head_with_env_vars, httpserver, fake_timestamp): ...
# Task:
Write a Python test function `test_aggregator_agent_receive_empty_events` to verify the behavior of `aggregator_agent_receive_empty_events`.
Module under test: typing, google.protobuf.timestamp_pb2, ray._common.network_utils | def test_aggregator_agent_receive_empty_events(
ray_start_cluster_head_with_env_vars,
httpserver,
):
cluster = ray_start_cluster_head_with_env_vars
stub = get_event_aggregator_grpc_stub(
cluster.gcs_address, cluster.head_node.node_id
)
httpserver.expect_request("/", method="POST").respond_with_data("", status=200)
request = AddEventsRequest(
events_data=RayEventsData(
events=[],
task_events_metadata=TaskEventsMetadata(
dropped_task_attempts=[],
),
)
)
stub.AddEvents(request) | test | 0 | {"function_name": "test_aggregator_agent_receive_empty_events", "class_name": null, "qualname": "test_aggregator_agent_receive_empty_events", "file_path": "python/ray/dashboard/modules/aggregator/tests/test_aggregator_agent.py", "repo_id": "ray-project/ray", "loc": 18, "tested_modules": ["typing", "google.protobuf.timestamp_pb2", "ray._common.network_utils", "ray._common.test_utils", "ray._private"], "has_docstring": false, "runnable_level": "project_runnable"} |
apache/airflow:shared/module_loading/tests/module_loading/test_file_discovery.py:TestFindPathFromDirectory.test_find_path_from_directory_fails_on_recursive_link | # Context:
import os
from pathlib import Path
import pytest
from airflow_shared.module_loading import find_path_from_directory
class TestFindPathFromDirectory:
def test_dir(self, tmp_path): ...
def test_find_path_from_directory_respects_symlinks_regexp_ignore(self, test_dir): ...
def test_find_path_from_directory_respects_symlinks_glob_ignore(self, test_dir): ...
def test_airflowignore_negation_unignore_subfolder_file_glob(self, tmp_path): ...
def test_airflowignore_negation_nested_with_globstar(self, tmp_path): ...
# Task:
Write a Python test method `test_find_path_from_directory_fails_on_recursive_link` in test class `TestFindPathFromDirectory` to verify the behavior of `find_path_from_directory_fails_on_recursive_link`.
Module under test: __future__, pathlib, airflow_shared.module_loading | def test_find_path_from_directory_fails_on_recursive_link(self, test_dir):
# add a recursive link
recursing_src = os.path.join(test_dir, "folder2", "recursor")
recursing_tgt = os.path.join(test_dir, "folder2")
os.mkdir(recursing_tgt)
os.symlink(recursing_tgt, recursing_src)
ignore_list_file = ".airflowignore"
error_message = (
f"Detected recursive loop when walking DAG directory {test_dir}: "
f"{Path(recursing_tgt).resolve()} has appeared more than once."
)
with pytest.raises(RuntimeError, match=error_message):
list(find_path_from_directory(test_dir, ignore_list_file, ignore_file_syntax="glob")) | test | 1 | {"function_name": "test_find_path_from_directory_fails_on_recursive_link", "class_name": "TestFindPathFromDirectory", "qualname": "TestFindPathFromDirectory.test_find_path_from_directory_fails_on_recursive_link", "file_path": "shared/module_loading/tests/module_loading/test_file_discovery.py", "repo_id": "apache/airflow", "loc": 15, "tested_modules": ["__future__", "pathlib", "airflow_shared.module_loading"], "has_docstring": false, "runnable_level": "project_runnable"} |
exo-explore/exo:src/exo/master/tests/test_event_log.py:test_empty_log | # Context:
from pathlib import Path
from exo.master.event_log import DiskEventLog
def log_dir(tmp_path: Path) -> Path: ...
def test_append_and_read_back(log_dir: Path): ...
def test_read_range(log_dir: Path): ...
def test_read_range_bounds(log_dir: Path): ...
def _archives(log_dir: Path) -> list[Path]: ...
def test_rotation_on_close(log_dir: Path): ...
def test_rotation_on_construction_with_stale_file(log_dir: Path): ...
def test_empty_log_no_archive(log_dir: Path): ...
def test_close_is_idempotent(log_dir: Path): ...
def test_successive_sessions(log_dir: Path): ...
def test_rotation_keeps_at_most_5_archives(log_dir: Path): ...
# Task:
Write a Python test function `test_empty_log` to verify the behavior of `empty_log`.
Module under test: pathlib, exo.master.event_log, exo.shared.types.events | def test_empty_log(log_dir: Path):
log = DiskEventLog(log_dir)
assert len(log) == 0
assert list(log.read_all()) == []
assert list(log.read_range(0, 10)) == []
log.close() | test | 0 | {"function_name": "test_empty_log", "class_name": null, "qualname": "test_empty_log", "file_path": "src/exo/master/tests/test_event_log.py", "repo_id": "exo-explore/exo", "loc": 6, "tested_modules": ["pathlib", "exo.master.event_log", "exo.shared.types.events"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_llm_hooks.py:TestAfterLLMCallHooks.test_after_hook_returns_none_keeps_original | # Context:
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
get_after_llm_call_hooks,
get_before_llm_call_hooks,
register_after_llm_call_hook,
register_before_llm_call_hook,
)
def mock_executor(): ...
def clear_hooks(): ...
class TestLLMCallHookContext: ...
class TestBeforeLLMCallHooks: ...
class TestLLMHooksIntegration: ...
class TestAfterLLMCallHooks:
def test_register_after_hook(self): ...
def test_multiple_after_hooks(self): ...
def test_after_hook_can_modify_response(self, mock_executor): ...
def test_get_after_hooks_returns_copy(self): ...
# Task:
Write a Python test method `test_after_hook_returns_none_keeps_original` in test class `TestAfterLLMCallHooks` to test that returning None keeps the original response.
Module under test: __future__, crewai.hooks, crewai.hooks.llm_hooks | def test_after_hook_returns_none_keeps_original(self, mock_executor):
"""Test that returning None keeps the original response."""
original_response = "Original response"
def no_change_hook(context):
return None
context = LLMCallHookContext(executor=mock_executor, response=original_response)
result = no_change_hook(context)
assert result is None
assert context.response == original_response | test | 0 | {"function_name": "test_after_hook_returns_none_keeps_original", "class_name": "TestAfterLLMCallHooks", "qualname": "TestAfterLLMCallHooks.test_after_hook_returns_none_keeps_original", "file_path": "lib/crewai/tests/hooks/test_llm_hooks.py", "repo_id": "crewAIInc/crewAI", "loc": 12, "tested_modules": ["__future__", "crewai.hooks", "crewai.hooks.llm_hooks", "crewai.hooks", "crewai.lite_agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/v1/core/sched/request_queue.py:PriorityRequestQueue.peek_request | # Context:
from vllm.v1.request import Request
class SchedulingPolicy(Enum): ...
class RequestQueue(ABC): ...
class FCFSRequestQueue(deque[Request], RequestQueue): ...
def create_request_queue(policy: SchedulingPolicy) -> RequestQueue: ...
class PriorityRequestQueue(RequestQueue):
def __init__(self) -> None:
self._heap: list[Request] = []
def add_request(self, request: Request) -> None: ...
def pop_request(self) -> Request: ...
def prepend_request(self, request: Request) -> None: ...
def prepend_requests(self, requests: RequestQueue) -> None: ...
def remove_request(self, request: Request) -> None: ...
def remove_requests(self, requests: Iterable[Request]) -> None: ...
def __bool__(self) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[Request]: ...
# Task:
Write a Python method `peek_request` for the class `PriorityRequestQueue` to peek at the next request in the queue without removing it.
Returns: Request | def peek_request(self) -> Request:
"""Peek at the next request in the queue without removing it."""
if not self._heap:
raise IndexError("peek from empty heap")
return self._heap[0] | function_simple | 1 | {"cognitive_complexity": 1, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "peek_request", "class_name": "PriorityRequestQueue", "qualname": "PriorityRequestQueue.peek_request", "file_path": "vllm/v1/core/sched/request_queue.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
unclecode/crawl4ai:tests/cache_validation/test_real_domains.py:TestRealDomainsNoConditionalSupport.test_news_site_changes_frequently | # Context:
import pytest
from crawl4ai.cache_validator import CacheValidator, CacheValidationResult
class TestRealDomainsConditionalSupport: ...
class TestRealDomainsEdgeCases: ...
class TestRealDomainsHeadFingerprint: ...
class TestRealDomainsFetchHead: ...
class TestRealDomainsValidationCombinations: ...
class TestRealDomainsNoConditionalSupport:
async def test_dynamic_site_fingerprint_fallback(self): ...
# Task:
Write a Python test method `test_news_site_changes_frequently` in test class `TestRealDomainsNoConditionalSupport` to verify that news sites change frequently and that we can detect those changes.
Module under test: crawl4ai.cache_validator, crawl4ai.utils | async def test_news_site_changes_frequently(self):
"""News sites change frequently - test that we can detect changes."""
url = "https://www.bbc.com/news"
async with CacheValidator(timeout=15.0) as validator:
head_html, etag, last_modified = await validator._fetch_head(url)
# BBC News has ETag but it changes with content
assert head_html is not None
# Using a fake old ETag should return STALE (200 with different content)
result = await validator.validate(
url=url,
stored_etag='"fake-old-etag-12345"',
)
# Should be STALE because the ETag doesn't match
assert result.status == CacheValidationResult.STALE, f"Expected STALE, got {result.status}: {result.reason}" | test | 1 | {"function_name": "test_news_site_changes_frequently", "class_name": "TestRealDomainsNoConditionalSupport", "qualname": "TestRealDomainsNoConditionalSupport.test_news_site_changes_frequently", "file_path": "tests/cache_validation/test_real_domains.py", "repo_id": "unclecode/crawl4ai", "loc": 18, "tested_modules": ["crawl4ai.cache_validator", "crawl4ai.utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
jax-ml/jax:jax/_src/test_multiprocess.py:GracefulKiller:class_doc | Write a class-level docstring for `GracefulKiller` which has methods: `__init__`, `exit_gracefully`. | Add a signal handler that sets a flag if SIGINT or SIGTERM are caught. | documentation | 1 | {"doc_type": "class", "class_name": "GracefulKiller", "file_path": "jax/_src/test_multiprocess.py", "repo_id": "jax-ml/jax", "char_length": 70, "methods": ["__init__", "exit_gracefully"]} |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_txt_search_tool_config.py:test_txt_search_tool_with_azure_config_without_env_vars | # Context:
from unittest.mock import MagicMock, Mock, patch
from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter
from crewai_tools.tools.txt_search_tool.txt_search_tool import TXTSearchTool
def test_txt_search_tool_with_openai_config_without_env_vars(mock_create_client: Mock) -> None: ...
def test_txt_search_tool_with_cohere_config(mock_create_client: Mock) -> None: ...
# Task:
Write a Python test function `test_txt_search_tool_with_azure_config_without_env_vars` to test TXTSearchTool accepts Azure config without requiring env vars.
Module under test: crewai_tools.adapters.crewai_rag_adapter, crewai_tools.tools.txt_search_tool.txt_search_tool | def test_txt_search_tool_with_azure_config_without_env_vars(
mock_create_client: Mock,
) -> None:
"""Test TXTSearchTool accepts Azure config without requiring env vars."""
mock_embedding_func = MagicMock()
mock_embedding_func.return_value = [[0.1] * 1536]
mock_client = MagicMock()
mock_client.get_or_create_collection = MagicMock(return_value=None)
mock_create_client.return_value = mock_client
with patch(
"crewai_tools.tools.rag.rag_tool.build_embedder",
return_value=mock_embedding_func,
):
config = {
"embedding_model": {
"provider": "azure",
"config": {
"model": "text-embedding-3-small",
"api_key": "test-api-key",
"api_base": "https://test.openai.azure.com/",
"api_version": "2024-02-01",
"api_type": "azure",
"deployment_id": "test-deployment",
},
}
}
# This should not raise a validation error about missing env vars
tool = TXTSearchTool(config=config)
assert tool.adapter is not None
assert isinstance(tool.adapter, CrewAIRagAdapter)
assert tool.name == "Search a txt's content" | test | 0 | {"function_name": "test_txt_search_tool_with_azure_config_without_env_vars", "class_name": null, "qualname": "test_txt_search_tool_with_azure_config_without_env_vars", "file_path": "lib/crewai-tools/tests/tools/test_txt_search_tool_config.py", "repo_id": "crewAIInc/crewAI", "loc": 35, "tested_modules": ["crewai_tools.adapters.crewai_rag_adapter", "crewai_tools.tools.txt_search_tool.txt_search_tool"], "has_docstring": true, "runnable_level": "project_runnable"} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.