sample_id
string
instruction
string
completion
string
category
string
label
int64
metadata
string
666ghj/BettaFish:ReportEngine/utils/chart_review_service.py:ChartReviewService._save_ir_to_file
# Context: import json from pathlib import Path from typing import Any, Dict, List, Optional from loguru import logger class ReviewStats: ... def get_chart_review_service() -> ChartReviewService: ... def review_document_charts(document_ir: Dict[str, Any], ir_file_path: Optional[str | Path], reset_stats: bool, save_on_repair: bool) -> ReviewStats: ... class ChartReviewService: _lock = threading.Lock() def __new__(cls) -> 'ChartReviewService': ... def __init__(self): """初始化服务(仅首次调用时执行)""" if self._initialized: return self._initialized = True # 初始化验证器和修复器(无状态,可安全共享) self.validator = create_chart_validator() self.llm_repair_fns = create_llm_repair_functions() self.repairer = create_chart_repairer( validator=self.validator, llm_repair_fns=self.llm_repair_fns ) # 打印 LLM 修复函数状态 if not self.llm_repair_fns: logger.warning("ChartReviewService: 未配置任何 LLM API,图表 API 修复功能不可用") else: logger.info(f"ChartReviewService: 已配置 {len(self.llm_repair_fns)} 个 LLM 修复函数") # 最后一次审查的统计信息(仅用于向后兼容,不推荐在并发场景使用) # 新代码应使用 review_document 返回的 ReviewStats self._last_stats: Optional[ReviewStats] = None self._last_stats_lock = threading.Lock() logger.info("ChartReviewService 初始化完成") def reset_stats(self) -> None: ... def stats(self) -> Dict[str, int]: ... def review_document(self, document_ir: Dict[str, Any], ir_file_path: Optional[str | Path], reset_stats: bool, save_on_repair: bool) -> ReviewStats: ... def _walk_and_review_blocks(self, blocks: List[Any], chapter_context: Dict[str, Any] | None, session_stats: ReviewStats) -> bool: ... def _review_chart_block(self, block: Dict[str, Any], chapter_context: Dict[str, Any] | None, session_stats: ReviewStats) -> bool: ... def _normalize_chart_block(self, block: Dict[str, Any], chapter_context: Dict[str, Any] | None) -> None: ... def _is_chart_data_empty(data: Dict[str, Any] | None) -> bool: ... def _merge_dicts(base: Dict[str, Any] | None, override: Dict[str, Any] | None) -> Dict[str, Any]: ... 
def _format_error_reason(self, validation_result: ValidationResult | None) -> str: ... def _log_stats(self, stats: ReviewStats) -> None: ... _INTERNAL_METADATA_KEYS = frozenset([ def _strip_internal_metadata(self, document_ir: Dict[str, Any]) -> Dict[str, Any]: ... # Task: Write a Python method `_save_ir_to_file` for the class `ChartReviewService` to 保存 IR 到文件(移除内部元数据后). Parameters: document_ir: Dict[str, Any], file_path: str | Path Returns: None
def _save_ir_to_file(self, document_ir: Dict[str, Any], file_path: str | Path) -> None: """保存 IR 到文件(移除内部元数据后)""" try: path = Path(file_path) path.parent.mkdir(parents=True, exist_ok=True) # 移除内部元数据键,保持 IR 文件干净 cleaned_ir = self._strip_internal_metadata(document_ir) path.write_text( json.dumps(cleaned_ir, ensure_ascii=False, indent=2), encoding="utf-8" ) logger.info(f"ChartReviewService: 修复后的 IR 已保存到 {path}") except Exception as e: logger.exception(f"ChartReviewService: 保存 IR 文件失败: {e}")
function_simple
1
{"cognitive_complexity": 1, "loc": 16, "code_loc": 11, "docstring_loc": 1, "function_name": "_save_ir_to_file", "class_name": "ChartReviewService", "qualname": "ChartReviewService._save_ir_to_file", "file_path": "ReportEngine/utils/chart_review_service.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"}
docling-project/docling:tests/test_backend_image_native.py:test_num_pages_multipage
# Context: def _make_png_stream(width: int, height: int, color) -> DocumentStream: ... def _make_multipage_tiff_stream(num_pages: int, size) -> DocumentStream: ... def test_docs_builder_uses_image_backend_for_image_stream(): ... def test_docs_builder_multipage_tiff_counts_frames(): ... def test_converter_default_maps_image_to_image_backend(): ... def test_extractor_default_maps_image_to_image_backend(): ... def _get_backend_from_stream(stream: DocumentStream): ... def test_num_pages_single(): ... def test_get_size(): ... def test_get_page_image_full(): ... def test_get_page_image_scaled(): ... def test_crop_page_image(): ... def test_crop_page_image_scaled(): ... def test_get_bitmap_rects(): ... def test_get_bitmap_rects_scaled(): ... def test_get_text_in_rect(): ... def test_multipage_access(): ... # Task: Write a Python test function `test_num_pages_multipage` to test page count for multi-page TIFF. Module under test: io, pathlib, docling_core.types.doc
def test_num_pages_multipage(): """Test page count for multi-page TIFF.""" stream = _make_multipage_tiff_stream(num_pages=5, size=(64, 64)) doc_backend = _get_backend_from_stream(stream) assert doc_backend.page_count() == 5
test
1
{"function_name": "test_num_pages_multipage", "class_name": null, "qualname": "test_num_pages_multipage", "file_path": "tests/test_backend_image_native.py", "repo_id": "docling-project/docling", "loc": 5, "tested_modules": ["io", "pathlib", "docling_core.types.doc", "PIL", "docling.backend.image_backend"], "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:tests/models/jais2/test_modeling_jais2.py:Jais2IntegrationTest.test_model_generation
# Context: from transformers import AutoTokenizer, is_torch_available import torch from transformers import ( Jais2Config, Jais2ForCausalLM, Jais2Model, ) class Jais2ModelTester(CausalLMModelTester): ... class Jais2ModelTest(CausalLMModelTest, unittest.TestCase): ... class Jais2IntegrationTest(unittest.TestCase): def setUp(self): ... def tearDown(self): ... def test_model_logits(self): ... # Task: Write a Python test method `test_model_generation` in test class `Jais2IntegrationTest` to verify the behavior of `model_generation`. Module under test: transformers, transformers.testing_utils, causal_lm_tester
def test_model_generation(self): tokenizer = AutoTokenizer.from_pretrained("inceptionai/Jais-2-8B-Chat") model = Jais2ForCausalLM.from_pretrained( "inceptionai/Jais-2-8B-Chat", torch_dtype=torch.float16, device_map="auto" ) input_text = "Simply put, the theory of relativity states that" model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device) model_inputs.pop("token_type_ids", None) generated_ids = model.generate(**model_inputs, max_new_tokens=32, do_sample=False) generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) EXPECTED_TEXT = "Simply put, the theory of relativity states that the laws of physics are the same for all non-accelerating observers, and that the speed of light in a vacuum is the same for all observers," # fmt: skip self.assertEqual(generated_text, EXPECTED_TEXT)
test
0
{"function_name": "test_model_generation", "class_name": "Jais2IntegrationTest", "qualname": "Jais2IntegrationTest.test_model_generation", "file_path": "tests/models/jais2/test_modeling_jais2.py", "repo_id": "huggingface/transformers", "loc": 14, "tested_modules": ["transformers", "transformers.testing_utils", "causal_lm_tester", "transformers"], "has_docstring": false, "runnable_level": "class_runnable"}
langflow-ai/langflow:src/backend/tests/unit/agentic/utils/test_template_search.py:TestEdgeCases.test_very_long_query
# Context: from langflow.agentic.utils import ( get_all_tags, get_template_by_id, get_templates_count, list_templates, ) class TestListTemplates: ... class TestGetTemplateById: ... class TestGetAllTags: ... class TestGetTemplatesCount: ... class TestTemplateStructure: ... class TestSearchFunctionality: ... class TestPerformance: ... class TestIntegrationScenarios: ... class TestEdgeCases: def test_empty_query_string(self): ... def test_empty_tags_list(self): ... def test_whitespace_only_query(self): ... def test_special_characters_in_query(self): ... def test_field_selection_with_nonexistent_fields(self): ... def test_none_query_treated_as_no_filter(self): ... def test_none_tags_treated_as_no_filter(self): ... # Task: Write a Python test method `test_very_long_query` in test class `TestEdgeCases` to test handling of very long query strings. Module under test: langflow.agentic.utils
def test_very_long_query(self): """Test handling of very long query strings.""" long_query = "a" * 10000 results = list_templates(query=long_query) # Very long query unlikely to match assert len(results) == 0
test
1
{"function_name": "test_very_long_query", "class_name": "TestEdgeCases", "qualname": "TestEdgeCases.test_very_long_query", "file_path": "src/backend/tests/unit/agentic/utils/test_template_search.py", "repo_id": "langflow-ai/langflow", "loc": 6, "tested_modules": ["langflow.agentic.utils"], "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_base.py:BaseAzurePGVectorStore._dedup_results
# Context: from typing import Any class BaseAzurePGVectorStore(BaseModel): model_config = ConfigDict( def verify_and_init_store(self) -> Self: ... def _delete_rows_from_table(self, ids: list[str] | None, **kwargs) -> bool | None: ... def _similarity_search_by_vector_with_distance(self, embedding: list[float], k: int, **kwargs) -> list[tuple[dict, float, np.ndarray | None]]: ... def _get_by_ids() -> list[dict[str, Any]]: ... def _full_text_search(self, query_str: str, k: int, language: str, **kwargs) -> list[tuple[dict, float, None]]: ... # Task: Write a Python method `_dedup_results` for the class `BaseAzurePGVectorStore` to deduplicate search results by document id, preserving order. Parameters: results: list[tuple[dict, float, Any]] Returns: list[tuple[dict, float, Any]]
def _dedup_results( self, results: list[tuple[dict, float, Any]] ) -> list[tuple[dict, float, Any]]: """Deduplicate search results by document id, preserving order. Accepts a list of tuples (document_dict, score, optional_embedding) where document_dict contains at least the id column (self.id_column) or 'id'. Returns a filtered list keeping the first occurrence of each id. """ seen_ids: set = set() deduped: list[tuple[dict, float, Any]] = [] for doc, score, emb in results: # robustly get id value using configured id_column or fallback to 'id' doc_id = doc.get(self.id_column) if isinstance(doc, dict) else None if doc_id is None: doc_id = doc.get("id") if isinstance(doc, dict) else None # If there's no id, treat the row as unique and keep it if doc_id is None: deduped.append((doc, score, emb)) continue if doc_id not in seen_ids: deduped.append((doc, score, emb)) seen_ids.add(doc_id) return deduped
function_complex
1
{"cognitive_complexity": 12, "loc": 27, "code_loc": 13, "docstring_loc": 6, "function_name": "_dedup_results", "class_name": "BaseAzurePGVectorStore", "qualname": "BaseAzurePGVectorStore._dedup_results", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_base.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "class_runnable"}
ray-project/ray:python/ray/serve/tests/unit/test_deployment_rank_manager.py:TestDeploymentRankManagerMultiNode.test_recover_rank_multiple_nodes
# Context: from ray.serve._private.deployment_state import DeploymentRankManager from ray.serve.schema import ReplicaRank def rank_manager() -> DeploymentRankManager: ... class MockDeploymentReplica: ... class TestDeploymentRankManager: ... class TestDeploymentRankManagerEdgeCases: ... class TestDeploymentRankManagerErrorHandling: ... class TestDeploymentRankManagerMultiNode: def test_assign_rank_multiple_nodes(self): ... def test_local_rank_independence_across_nodes(self): ... def test_release_rank_removes_node_when_last_replica(self): ... def test_release_rank_keeps_node_when_replicas_remain(self): ... def test_node_rank_reuse_after_complete_release(self): ... def test_local_rank_reuse_within_node(self): ... def test_recover_rank_preserves_node_rank_when_node_exists(self): ... def test_check_rank_consistency_across_multiple_nodes(self): ... def test_check_rank_consistency_local_ranks_per_node(self): ... def test_check_rank_consistency_node_ranks(self): ... def test_clear_with_multiple_nodes(self): ... def test_get_replica_ranks_mapping_multiple_nodes(self): ... def test_complex_multi_node_lifecycle(self): ... def test_scaling_up_and_down_across_nodes(self): ... def test_minimal_reassignment_preserves_node_assignments(self): ... # Task: Write a Python test method `test_recover_rank_multiple_nodes` in test class `TestDeploymentRankManagerMultiNode` to test recovering ranks for replicas on different nodes. Module under test: ray.serve._private.common, ray.serve._private.deployment_state, ray.serve.schema
def test_recover_rank_multiple_nodes(self): """Test recovering ranks for replicas on different nodes.""" rank_manager = DeploymentRankManager() # Recover replicas on different nodes rank_manager.recover_rank( "r1", "node_1", ReplicaRank(rank=0, node_rank=0, local_rank=0) ) rank_manager.recover_rank( "r2", "node_2", ReplicaRank(rank=1, node_rank=1, local_rank=0) ) rank_manager.recover_rank( "r3", "node_1", ReplicaRank(rank=2, node_rank=0, local_rank=1) ) # Verify all ranks are recovered correctly assert rank_manager.get_replica_rank("r1") == ReplicaRank( rank=0, node_rank=0, local_rank=0 ) assert rank_manager.get_replica_rank("r2") == ReplicaRank( rank=1, node_rank=1, local_rank=0 ) assert rank_manager.get_replica_rank("r3") == ReplicaRank( rank=2, node_rank=0, local_rank=1 )
test
0
{"function_name": "test_recover_rank_multiple_nodes", "class_name": "TestDeploymentRankManagerMultiNode", "qualname": "TestDeploymentRankManagerMultiNode.test_recover_rank_multiple_nodes", "file_path": "python/ray/serve/tests/unit/test_deployment_rank_manager.py", "repo_id": "ray-project/ray", "loc": 25, "tested_modules": ["ray.serve._private.common", "ray.serve._private.deployment_state", "ray.serve.schema"], "has_docstring": true, "runnable_level": "plib_runnable"}
browser-use/browser-use:tests/ci/evaluate_tasks.py:module_doc
Write a module-level docstring for the Python module `evaluate_tasks` which contains class `JudgeResponse`.
Runs all agent tasks in parallel (up to 10 at a time) using separate subprocesses. Each task gets its own Python process, preventing browser session interference. Fails with exit code 1 if 0% of tasks pass.
documentation
0
{"doc_type": "module", "module_name": "evaluate_tasks", "file_path": "tests/ci/evaluate_tasks.py", "repo_id": "browser-use/browser-use", "char_length": 206}
ray-project/ray:ci/raydepsets/cli.py:DependencySetManager.execute_pre_hook
# Context: import shlex import subprocess import click def cli(): ... def build(config_path: str, workspace_dir: Optional[str], name: Optional[str], uv_cache_dir: Optional[str], check: Optional[bool], all_configs: Optional[bool]): ... def _get_bytes(packages: List[str]) -> bytes: ... def _get_depset(depsets: List[Depset], name: str) -> Depset: ... def _flatten_flags(flags: List[str]) -> List[str]: ... def _override_uv_flags(flags: List[str], args: List[str]) -> List[str]: ... def parse_lock_file(lock_file_path: str) -> RequirementsFile: ... def write_lock_file(requirements_file: RequirementsFile, lock_file_path: str): ... def _uv_binary(): ... class DependencySetManager: def __init__( self, config_path: str = None, workspace_dir: Optional[str] = None, uv_cache_dir: Optional[str] = None, check: Optional[bool] = False, build_all_configs: Optional[bool] = False, ): """Initialize the dependency set manager. Args: config_path: Path to the depsets config file. workspace_dir: Path to the workspace directory. uv_cache_dir: Directory to cache uv dependencies. check: Whether to check if lock files are up to date. build_all_configs: Whether to build all configs or just the specified one. """ self.workspace = Workspace(workspace_dir) self.config = self.workspace.load_configs(config_path) self.config_name = os.path.basename(config_path) self.build_graph = DiGraph() self._build(build_all_configs) self._uv_binary = _uv_binary() self._uv_cache_dir = uv_cache_dir if check: self.temp_dir = tempfile.mkdtemp() self.output_paths = self.get_output_paths() self.copy_to_temp_dir() def get_output_paths(self) -> List[Path]: ... def copy_to_temp_dir(self): ... def get_diffs(self) -> List[str]: ... def diff_lock_files(self): ... def get_source_and_dest(self, output_path: str) -> tuple[Path, Path]: ... def _build(self, build_all_configs: Optional[bool]): ... def subgraph_dependency_nodes(self, depset_name: str): ... def subgraph_config_nodes(self): ... 
def execute(self, single_depset_name: Optional[str]): ... def exec_uv_cmd(self, cmd: str, args: List[str], stdin: Optional[bytes]) -> str: ... def execute_depset(self, depset: Depset): ... def compile(self, constraints: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], packages: Optional[List[str]], requirements: Optional[List[str]], include_setuptools: Optional[bool]): ... def subset(self, source_depset: str, requirements: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], include_setuptools: Optional[bool]): ... def expand(self, depsets: List[str], requirements: List[str], constraints: List[str], name: str, output: str, append_flags: Optional[List[str]], override_flags: Optional[List[str]], include_setuptools: Optional[bool]): ... def relax(self, source_depset: str, packages: List[str], name: str, output: str): ... def read_lock_file(self, file_path: Path) -> List[str]: ... def get_path(self, path: str) -> Path: ... def check_subset_exists(self, source_depset: Depset, requirements: List[str]): ... def get_expanded_depset_requirements(self, depset_name: str, requirements_list: List[str]) -> List[str]: ... def cleanup(self): ... # Task: Write a Python method `execute_pre_hook` for the class `DependencySetManager` to execute a pre-hook shell command. Parameters: pre_hook: str
def execute_pre_hook(self, pre_hook: str): """Execute a pre-hook shell command.""" status = subprocess.run( shlex.split(pre_hook), cwd=self.workspace.dir, capture_output=True, ) if status.returncode != 0: raise RuntimeError( f"Failed to execute pre_hook {pre_hook} with error: {status.stderr.decode('utf-8')}", ) click.echo(f"{status.stdout.decode('utf-8')}") click.echo(f"Executed pre_hook {pre_hook} successfully")
function_simple
0
{"cognitive_complexity": 1, "loc": 13, "code_loc": 11, "docstring_loc": 1, "function_name": "execute_pre_hook", "class_name": "DependencySetManager", "qualname": "DependencySetManager.execute_pre_hook", "file_path": "ci/raydepsets/cli.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai-tools/tests/adapters/mcp_adapter_test.py:test_filter_with_nonexistent_tool
# Context: from crewai_tools import MCPServerAdapter from mcp import StdioServerParameters def echo_server_script(): ... def echo_server_sse_script(): ... def echo_sse_server(echo_server_sse_script): ... def test_context_manager_syntax(echo_server_script): ... def test_context_manager_syntax_sse(echo_sse_server): ... def test_try_finally_syntax(echo_server_script): ... def test_try_finally_syntax_sse(echo_sse_server): ... def test_context_manager_with_filtered_tools(echo_server_script): ... def test_context_manager_sse_with_filtered_tools(echo_sse_server): ... def test_try_finally_with_filtered_tools(echo_server_script): ... def test_filter_with_only_nonexistent_tools(echo_server_script): ... def test_connect_timeout_parameter(echo_server_script): ... def test_connect_timeout_with_filtered_tools(echo_server_script): ... def test_connect_timeout_passed_to_mcpadapt(mock_mcpadapt): ... # Task: Write a Python test function `test_filter_with_nonexistent_tool` to verify the behavior of `filter_with_nonexistent_tool`. Module under test: textwrap, crewai_tools, crewai_tools.adapters.tool_collection
def test_filter_with_nonexistent_tool(echo_server_script): serverparams = StdioServerParameters( command="uv", args=["run", "python", "-c", echo_server_script] ) # Include a tool that doesn't exist with MCPServerAdapter(serverparams, "echo_tool", "nonexistent_tool") as tools: # Only echo_tool should be in the result assert len(tools) == 1 assert tools[0].name == "echo_tool"
test
0
{"function_name": "test_filter_with_nonexistent_tool", "class_name": null, "qualname": "test_filter_with_nonexistent_tool", "file_path": "lib/crewai-tools/tests/adapters/mcp_adapter_test.py", "repo_id": "crewAIInc/crewAI", "loc": 9, "tested_modules": ["textwrap", "crewai_tools", "crewai_tools.adapters.tool_collection", "mcp"], "has_docstring": false, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/dom/enhanced_snapshot.py:module_doc
Write a module-level docstring for the Python module `enhanced_snapshot` which contains function `_parse_rare_boolean_data`, function `_parse_computed_styles`, function `build_snapshot_lookup`.
Enhanced snapshot processing for browser-use DOM tree extraction. This module provides stateless functions for parsing Chrome DevTools Protocol (CDP) DOMSnapshot data to extract visibility, clickability, cursor styles, and other layout information.
documentation
0
{"doc_type": "module", "module_name": "enhanced_snapshot", "file_path": "browser_use/dom/enhanced_snapshot.py", "repo_id": "browser-use/browser-use", "char_length": 249}
browser-use/browser-use:browser_use/code_use/notebook_export.py:export_to_ipynb
# Context: import json import re from pathlib import Path from browser_use.code_use.service import CodeAgent from .views import CellType, NotebookExport def session_to_python_script(agent: CodeAgent) -> str: ... # Task: Write a Python function `export_to_ipynb` to export a NotebookSession to a Jupyter notebook (.ipynb) file. Parameters: agent: CodeAgent, output_path: str | Path Returns: Path
def export_to_ipynb(agent: CodeAgent, output_path: str | Path) -> Path: """ Export a NotebookSession to a Jupyter notebook (.ipynb) file. Now includes JavaScript code blocks that were stored in the namespace. Args: session: The NotebookSession to export output_path: Path where to save the notebook file agent: Optional CodeAgent instance to access namespace for JavaScript blocks Returns: Path to the saved notebook file Example: ```python session = await agent.run() notebook_path = export_to_ipynb(agent, 'my_automation.ipynb') print(f'Notebook saved to {notebook_path}') ``` """ output_path = Path(output_path) # Create notebook structure notebook = NotebookExport( metadata={ 'kernelspec': {'display_name': 'Python 3', 'language': 'python', 'name': 'python3'}, 'language_info': { 'name': 'python', 'version': '3.11.0', 'mimetype': 'text/x-python', 'codemirror_mode': {'name': 'ipython', 'version': 3}, 'pygments_lexer': 'ipython3', 'nbconvert_exporter': 'python', 'file_extension': '.py', }, } ) # Add setup cell at the beginning with proper type hints setup_code = """import asyncio import json from typing import Any from browser_use import BrowserSession from browser_use.code_use import create_namespace # Initialize browser and namespace browser = BrowserSession() await browser.start() # Create namespace with all browser control functions namespace: dict[str, Any] = create_namespace(browser) # Import all functions into the current namespace globals().update(namespace) # Type hints for better IDE support (these are now available globally) # navigate, click, input, evaluate, search, extract, scroll, done, etc. 
print("Browser-use environment initialized!") print("Available functions: navigate, click, input, evaluate, search, extract, done, etc.")""" setup_cell = { 'cell_type': 'code', 'metadata': {}, 'source': setup_code.split('\n'), 'execution_count': None, 'outputs': [], } notebook.cells.append(setup_cell) # Add JavaScript code blocks as variables FIRST if hasattr(agent, 'namespace') and agent.namespace: # Look for JavaScript variables in the namespace code_block_vars = agent.namespace.get('_code_block_vars', set()) for var_name in sorted(code_block_vars): var_value = agent.namespace.get(var_name) if isinstance(var_value, str) and var_value.strip(): # Check if this looks like JavaScript code # Look for common JS patterns js_patterns = [ r'function\s+\w+\s*\(', r'\(\s*function\s*\(\)', r'=>\s*{', r'document\.', r'Array\.from\(', r'\.querySelector', r'\.textContent', r'\.innerHTML', r'return\s+', r'console\.log', r'window\.', r'\.map\(', r'\.filter\(', r'\.forEach\(', ] is_js = any(re.search(pattern, var_value, re.IGNORECASE) for pattern in js_patterns) if is_js: # Create a code cell with the JavaScript variable js_cell = { 'cell_type': 'code', 'metadata': {}, 'source': [f'# JavaScript Code Block: {var_name}\n', f'{var_name} = """{var_value}"""'], 'execution_count': None, 'outputs': [], } notebook.cells.append(js_cell) # Convert cells python_cell_count = 0 for cell in agent.session.cells: notebook_cell: dict = { 'cell_type': cell.cell_type.value, 'metadata': {}, 'source': cell.source.splitlines(keepends=True), } if cell.cell_type == CellType.CODE: python_cell_count += 1 notebook_cell['execution_count'] = cell.execution_count notebook_cell['outputs'] = [] # Add output if available if cell.output: notebook_cell['outputs'].append( { 'output_type': 'stream', 'name': 'stdout', 'text': cell.output.split('\n'), } ) # Add error if available if cell.error: notebook_cell['outputs'].append( { 'output_type': 'error', 'ename': 'Error', 'evalue': cell.error.split('\n')[0] if cell.error 
else '', 'traceback': cell.error.split('\n') if cell.error else [], } ) # Add browser state as a separate output if cell.browser_state: notebook_cell['outputs'].append( { 'output_type': 'stream', 'name': 'stdout', 'text': [f'Browser State:\n{cell.browser_state}'], } ) notebook.cells.append(notebook_cell) # Write to file output_path.parent.mkdir(parents=True, exist_ok=True) with open(output_path, 'w', encoding='utf-8') as f: json.dump(notebook.model_dump(), f, indent=2, ensure_ascii=False) return output_path
function_complex
0
{"cognitive_complexity": 24, "loc": 163, "code_loc": 106, "docstring_loc": 19, "function_name": "export_to_ipynb", "class_name": null, "qualname": "export_to_ipynb", "file_path": "browser_use/code_use/notebook_export.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:test/testcases/test_web_api/test_dialog_app/test_update_dialog.py:TestDialogUpdate.test_update_icon
# Context: import pytest from common import update_dialog class TestAuthorization: ... class TestDialogUpdate: def test_update_name(self, WebApiAuth, add_dialog_func): ... def test_update_description(self, WebApiAuth, add_dialog_func): ... def test_update_prompt_config(self, WebApiAuth, add_dialog_func): ... def test_update_kb_ids(self, WebApiAuth, add_dialog_func, add_dataset_func): ... def test_update_llm_settings(self, WebApiAuth, add_dialog_func): ... def test_update_retrieval_settings(self, WebApiAuth, add_dialog_func): ... def test_update_nonexistent_dialog(self, WebApiAuth): ... def test_update_with_invalid_prompt_config(self, WebApiAuth, add_dialog_func): ... def test_update_with_knowledge_but_no_kb(self, WebApiAuth, add_dialog_func): ... def test_update_rerank_id(self, WebApiAuth, add_dialog_func): ... def test_update_multiple_fields(self, WebApiAuth, add_dialog_func): ... # Task: Write a Python test method `test_update_icon` in test class `TestDialogUpdate` to verify the behavior of `update_icon`. Module under test: common, configs, libs.auth
def test_update_icon(self, WebApiAuth, add_dialog_func): _, dialog_id = add_dialog_func new_icon = "🚀" payload = {"dialog_id": dialog_id, "icon": new_icon, "prompt_config": {"system": "You are a helpful assistant.", "parameters": []}} res = update_dialog(WebApiAuth, payload) assert res["code"] == 0, res assert res["data"]["icon"] == new_icon, res
test
1
{"function_name": "test_update_icon", "class_name": "TestDialogUpdate", "qualname": "TestDialogUpdate.test_update_icon", "file_path": "test/testcases/test_web_api/test_dialog_app/test_update_dialog.py", "repo_id": "infiniflow/ragflow", "loc": 7, "tested_modules": ["common", "configs", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_routes_test.py:TestWithBase.test_explicit_none_base_url_uses_config
# Context: from streamlit.web.server.starlette.starlette_routes import ( _ensure_xsrf_cookie, _set_cors_headers, _set_unquoted_cookie, _with_base, ) from tests.testutil import patch_config_options class TestSetCorsHeaders: ... class TestEnsureXsrfCookie: ... class TestSetUnquotedCookie: ... class TestWithBase: def test_no_base_url(self) -> None: ... def test_no_base_url_with_leading_slash(self) -> None: ... def test_with_base_url(self) -> None: ... def test_strips_slashes_from_base(self) -> None: ... def test_explicit_base_url_overrides_config(self) -> None: ... def test_explicit_empty_base_url(self) -> None: ... # Task: Write a Python test method `test_explicit_none_base_url_uses_config` in test class `TestWithBase` to test that explicit None uses config. Module under test: __future__, starlette.responses, streamlit.web.server.starlette.starlette_routes
def test_explicit_none_base_url_uses_config(self) -> None: """Test that explicit None uses config.""" result = _with_base("_stcore/health", base_url=None) assert result == "/fromconfig/_stcore/health"
test
1
{"function_name": "test_explicit_none_base_url_uses_config", "class_name": "TestWithBase", "qualname": "TestWithBase.test_explicit_none_base_url_uses_config", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_routes_test.py", "repo_id": "streamlit/streamlit", "loc": 5, "tested_modules": ["__future__", "starlette.responses", "streamlit.web.server.starlette.starlette_routes", "streamlit.web.server.starlette.starlette_server_config", "tests.testutil"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/entrypoints/llm/test_mm_embeds_only.py:test_generate_with_embedding
# Context: import pytest from vllm import LLM, SamplingParams from vllm.assets.image import ImageAsset def llm(): ... def test_raw_image_rejected(llm: LLM): ... def test_text_only_prompt(llm: LLM): ... # Task: Write a Python test function `test_generate_with_embedding` to pre-computed embedding produces tokens without hanging. Module under test: vllm, vllm.assets.image, vllm.distributed
def test_generate_with_embedding(llm: LLM): """Pre-computed embedding produces tokens without hanging.""" embedding = ImageAsset("stop_sign").image_embeds outputs = llm.generate( {"prompt": PROMPT, "multi_modal_data": {"image": embedding}}, sampling_params=SamplingParams(max_tokens=32, temperature=0.0), ) assert len(outputs) == 1 assert len(outputs[0].outputs[0].text) > 0
test
1
{"function_name": "test_generate_with_embedding", "class_name": null, "qualname": "test_generate_with_embedding", "file_path": "tests/entrypoints/llm/test_mm_embeds_only.py", "repo_id": "vllm-project/vllm", "loc": 9, "tested_modules": ["vllm", "vllm.assets.image", "vllm.distributed"], "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:test/testcases/test_web_api/test_user_app/test_user_app_unit.py:test_tenant_info_and_set_tenant_info_exception_matrix_unit
# Context: import pytest class _DummyManager: ... class _AwaitableValue: ... class _Args(dict): ... class _DummyResponse: ... class _DummyHTTPResponse: ... class _DummyRedis: ... class _DummyUser: ... class _Field: ... def _run(coro): ... def _set_request_json(monkeypatch, module, payload): ... def _set_request_args(monkeypatch, module, args): ... def auth(): ... def set_tenant_info(): ... def _load_user_app(monkeypatch): ... def test_login_route_branch_matrix_unit(monkeypatch): ... def test_login_channels_and_oauth_login_matrix_unit(monkeypatch): ... def test_oauth_callback_matrix_unit(monkeypatch): ... def test_github_callback_matrix_unit(monkeypatch): ... def test_feishu_callback_matrix_unit(monkeypatch): ... def test_oauth_user_info_helpers_unit(monkeypatch): ... def test_logout_setting_profile_matrix_unit(monkeypatch): ... def test_registration_helpers_and_register_route_matrix_unit(monkeypatch): ... def test_forget_captcha_and_send_otp_matrix_unit(monkeypatch): ... def test_forget_verify_otp_matrix_unit(monkeypatch): ... def test_forget_reset_password_matrix_unit(monkeypatch): ... # Task: Write a Python test function `test_tenant_info_and_set_tenant_info_exception_matrix_unit` to verify the behavior of `tenant_info_and_set_tenant_info_exception_matrix_unit`. Module under test: pathlib, types
def test_tenant_info_and_set_tenant_info_exception_matrix_unit(monkeypatch): module = _load_user_app(monkeypatch) monkeypatch.setattr(module.TenantService, "get_info_by", lambda _uid: []) res = _run(module.tenant_info()) assert res["code"] == module.RetCode.DATA_ERROR, res assert "Tenant not found" in res["message"], res def _raise_tenant_info(_uid): raise RuntimeError("tenant info boom") monkeypatch.setattr(module.TenantService, "get_info_by", _raise_tenant_info) res = _run(module.tenant_info()) assert res["code"] == module.RetCode.EXCEPTION_ERROR, res assert "tenant info boom" in res["message"], res _set_request_json( monkeypatch, module, {"tenant_id": "tenant-1", "llm_id": "l", "embd_id": "e", "asr_id": "a", "img2txt_id": "i"}, ) def _raise_update(_tenant_id, _payload): raise RuntimeError("tenant update boom") monkeypatch.setattr(module.TenantService, "update_by_id", _raise_update) res = _run(module.set_tenant_info()) assert res["code"] == module.RetCode.EXCEPTION_ERROR, res assert "tenant update boom" in res["message"], res
test
1
{"function_name": "test_tenant_info_and_set_tenant_info_exception_matrix_unit", "class_name": null, "qualname": "test_tenant_info_and_set_tenant_info_exception_matrix_unit", "file_path": "test/testcases/test_web_api/test_user_app/test_user_app_unit.py", "repo_id": "infiniflow/ragflow", "loc": 29, "tested_modules": ["pathlib", "types"], "has_docstring": false, "runnable_level": "file_runnable"}
hiyouga/LlamaFactory:tests_v1/core/utils/test_rendering.py:test_chatml_parse
# Context: from transformers import AutoTokenizer from llamafactory.v1.core.utils.rendering import Renderer from llamafactory.v1.utils.types import Processor def _get_input_ids(inputs: list | dict) -> list: ... def test_chatml_rendering(): ... def test_chatml_rendering_remote(num_samples: int): ... def test_qwen3_nothink_rendering(): ... def test_qwen3_nothink_parse(): ... def test_qwen3_nothink_rendering_remote(num_samples: int): ... def test_process_sft_samples(): ... def test_process_dpo_samples(): ... # Task: Write a Python test function `test_chatml_parse` to verify the behavior of `chatml_parse`. Module under test: transformers, llamafactory.v1.config, llamafactory.v1.core.data_engine
def test_chatml_parse(): tokenizer: Processor = AutoTokenizer.from_pretrained("llamafactory/tiny-random-qwen3") renderer = Renderer(template="chatml", processor=tokenizer) generated_text = "LLM stands for Large Language Model." parsed_message = renderer.parse_message(generated_text) assert parsed_message == V1_MESSAGES[-1]
test
1
{"function_name": "test_chatml_parse", "class_name": null, "qualname": "test_chatml_parse", "file_path": "tests_v1/core/utils/test_rendering.py", "repo_id": "hiyouga/LlamaFactory", "loc": 6, "tested_modules": ["transformers", "llamafactory.v1.config", "llamafactory.v1.core.data_engine", "llamafactory.v1.core.utils.rendering", "llamafactory.v1.utils.types"], "has_docstring": false, "runnable_level": "project_runnable"}
apache/airflow:dev/breeze/tests/test_ui_commands.py:TestCompareKeys.test_compare_keys_with_extra
# Context: import json from airflow_breeze.commands.ui_commands import ( LocaleFiles, LocaleKeySet, LocaleSummary, compare_keys, expand_plural_keys, flatten_keys, get_plural_base, ) import airflow_breeze.commands.ui_commands as ui_commands class TestPluralHandling: ... class TestFlattenKeys: ... class TestLocaleSummary: ... class TestLocaleFiles: ... class TestLocaleKeySet: ... class TestCountTodos: ... class TestAddMissingTranslations: ... class TestRemoveExtraTranslations: ... class TestNaturalSorting: ... class TestCompareKeys: def test_compare_keys_identical(self, tmp_path): ... def test_compare_keys_with_missing(self, tmp_path): ... # Task: Write a Python test method `test_compare_keys_with_extra` in test class `TestCompareKeys` to verify the behavior of `compare_keys_with_extra`. Module under test: __future__, airflow_breeze.commands.ui_commands, airflow_breeze.commands.ui_commands
def test_compare_keys_with_extra(self, tmp_path): en_dir = tmp_path / "en" en_dir.mkdir() de_dir = tmp_path / "de" de_dir.mkdir() en_data = {"greeting": "Hello"} de_data = {"greeting": "Hallo", "extra": "Extra"} (en_dir / "test.json").write_text(json.dumps(en_data)) (de_dir / "test.json").write_text(json.dumps(de_data)) import airflow_breeze.commands.ui_commands as ui_commands original_locales_dir = ui_commands.LOCALES_DIR ui_commands.LOCALES_DIR = tmp_path try: locale_files = [ LocaleFiles(locale="en", files=["test.json"]), LocaleFiles(locale="de", files=["test.json"]), ] summary, missing_counts = compare_keys(locale_files) assert "test.json" in summary assert "extra" in summary["test.json"].extra_keys.get("de", []) finally: ui_commands.LOCALES_DIR = original_locales_dir
test
1
{"function_name": "test_compare_keys_with_extra", "class_name": "TestCompareKeys", "qualname": "TestCompareKeys.test_compare_keys_with_extra", "file_path": "dev/breeze/tests/test_ui_commands.py", "repo_id": "apache/airflow", "loc": 28, "tested_modules": ["__future__", "airflow_breeze.commands.ui_commands", "airflow_breeze.commands.ui_commands", "airflow_breeze.commands.ui_commands", "airflow_breeze.commands.ui_commands"], "has_docstring": false, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/ernie4_5_vl_moe/test_video_processing_ernie4_5_vl_moe.py:Ernie4_5_VLMoeVideoProcessingTest.test_call_numpy
# Context: class Ernie4_5_VLMoeVideoProcessingTester: ... class Ernie4_5_VLMoeVideoProcessingTest(VideoProcessingTestMixin, unittest.TestCase): fast_video_processing_class = Ernie4_5_VLMoeVideoProcessor if is_torchvision_available() else None input_name = "pixel_values_videos" def setUp(self): ... def video_processor_dict(self): ... def test_video_processor_from_dict_with_kwargs(self): ... def test_call_pil(self): ... def test_call_pytorch(self): ... def test_call_numpy_4_channels(self): ... def test_nested_input(self): ... def test_call_sample_frames(self): ... # Task: Write a Python test method `test_call_numpy` in test class `Ernie4_5_VLMoeVideoProcessingTest` to verify the behavior of `call_numpy`. Module under test: transformers.image_utils, transformers.testing_utils, transformers.utils
def test_call_numpy(self): for video_processing_class in self.video_processor_list: video_processing = video_processing_class(**self.video_processor_dict) video_inputs = self.video_processor_tester.prepare_video_inputs( equal_resolution=False, return_tensors="np" ) video_metadata = self.video_processor_tester.prepare_video_metadata(video_inputs) encoded_videos = video_processing( video_inputs[0], video_metadata=[video_metadata[0]], return_tensors="pt" )[self.input_name] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape([video_inputs[0]]) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape) encoded_videos = video_processing(video_inputs, video_metadata=video_metadata, return_tensors="pt")[ self.input_name ] expected_output_video_shape = self.video_processor_tester.expected_output_video_shape(video_inputs) self.assertEqual(list(encoded_videos.shape), expected_output_video_shape)
test
0
{"function_name": "test_call_numpy", "class_name": "Ernie4_5_VLMoeVideoProcessingTest", "qualname": "Ernie4_5_VLMoeVideoProcessingTest.test_call_numpy", "file_path": "tests/models/ernie4_5_vl_moe/test_video_processing_ernie4_5_vl_moe.py", "repo_id": "huggingface/transformers", "loc": 19, "tested_modules": ["transformers.image_utils", "transformers.testing_utils", "transformers.utils", "test_video_processing_common", "PIL"], "has_docstring": false, "runnable_level": "class_runnable"}
Zie619/n8n-workflows:workflow_db.py:WorkflowDatabase.get_service_categories
# Context: from typing import Dict, List, Any, Optional, Tuple def main(): ... class WorkflowDatabase: def __init__(self, db_path: str = None): # Use environment variable if no path provided if db_path is None: db_path = os.environ.get("WORKFLOW_DB_PATH", "workflows.db") self.db_path = db_path self.workflows_dir = "workflows" self.init_database() def init_database(self): ... def get_file_hash(self, file_path: str) -> str: ... def format_workflow_name(self, filename: str) -> str: ... def analyze_workflow_file(self, file_path: str) -> Optional[Dict[str, Any]]: ... def analyze_nodes(self, nodes: List[Dict]) -> Tuple[str, set]: ... def generate_description(self, workflow: Dict, trigger_type: str, integrations: set) -> str: ... def index_all_workflows(self, force_reindex: bool) -> Dict[str, int]: ... def search_workflows(self, query: str, trigger_filter: str, complexity_filter: str, active_only: bool, limit: int, offset: int) -> Tuple[List[Dict], int]: ... def get_stats(self) -> Dict[str, Any]: ... def search_by_category(self, category: str, limit: int, offset: int) -> Tuple[List[Dict], int]: ... # Task: Write a Python method `get_service_categories` for the class `WorkflowDatabase` to get service categories for enhanced filtering. Returns: Dict[str, List[str]]
def get_service_categories(self) -> Dict[str, List[str]]: """Get service categories for enhanced filtering.""" return { "messaging": [ "Telegram", "Discord", "Slack", "WhatsApp", "Mattermost", "Microsoft Teams", "Rocket.Chat", ], "email": ["Gmail", "Mailjet", "Email (IMAP)", "Email (SMTP)", "Outlook"], "cloud_storage": [ "Google Drive", "Google Docs", "Google Sheets", "Dropbox", "OneDrive", "Box", ], "database": [ "PostgreSQL", "MySQL", "MongoDB", "Redis", "Airtable", "Notion", ], "project_management": [ "Jira", "GitHub", "GitLab", "Trello", "Asana", "Monday.com", ], "ai_ml": ["OpenAI", "Anthropic", "Hugging Face", "CalcsLive"], "social_media": ["LinkedIn", "Twitter/X", "Facebook", "Instagram"], "ecommerce": ["Shopify", "Stripe", "PayPal"], "analytics": ["Google Analytics", "Mixpanel"], "calendar_tasks": [ "Google Calendar", "Google Tasks", "Cal.com", "Calendly", ], "forms": ["Typeform", "Google Forms", "Form Trigger"], "development": [ "Webhook", "HTTP Request", "GraphQL", "Server-Sent Events", "YouTube", ], }
function_simple
0
{"cognitive_complexity": 0, "loc": 56, "code_loc": 54, "docstring_loc": 1, "function_name": "get_service_categories", "class_name": "WorkflowDatabase", "qualname": "WorkflowDatabase.get_service_categories", "file_path": "workflow_db.py", "repo_id": "Zie619/n8n-workflows", "has_docstring": true, "runnable_level": "slib_runnable"}
docling-project/docling:docling/datamodel/stage_model_specs.py:StageModelPreset.supported_engines
# Context: from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Set from docling.models.inference_engines.vlm.base import VlmEngineType class EngineModelConfig(BaseModel): ... class ApiModelConfig(BaseModel): ... class VlmModelSpec(BaseModel): ... class ObjectDetectionModelSpec(BaseModel): ... class ImageClassificationModelSpec(BaseModel): ... class StagePresetMixin: ... class ObjectDetectionStagePreset(BaseModel): ... class ObjectDetectionStagePresetMixin: ... class ImageClassificationStagePreset(BaseModel): ... class ImageClassificationStagePresetMixin: ... class StageModelPreset(BaseModel): # Task: Write a Python method `supported_engines` for the class `StageModelPreset` to get supported engines from model spec. Returns: Set[VlmEngineType]
def supported_engines(self) -> Set[VlmEngineType]: """Get supported engines from model spec.""" if self.model_spec.supported_engines is None: return set(VlmEngineType) return self.model_spec.supported_engines
function_simple
1
{"cognitive_complexity": 1, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "supported_engines", "class_name": "StageModelPreset", "qualname": "StageModelPreset.supported_engines", "file_path": "docling/datamodel/stage_model_specs.py", "repo_id": "docling-project/docling", "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:tests/ci/security/test_ip_blocking.py:TestIsIPAddressHelper.test_invalid_ip_detection
# Context: from bubus import EventBus from browser_use.browser import BrowserProfile, BrowserSession from browser_use.browser.watchdogs.security_watchdog import SecurityWatchdog class TestIPv4Blocking: ... class TestIPv6Blocking: ... class TestDomainNamesStillAllowed: ... class TestIPBlockingWithAllowedDomains: ... class TestIPBlockingWithProhibitedDomains: ... class TestEdgeCases: ... class TestDefaultBehavior: ... class TestComplexScenarios: ... class TestIsIPAddressHelper: def test_valid_ipv4_detection(self): ... def test_valid_ipv6_detection(self): ... # Task: Write a Python test method `test_invalid_ip_detection` in test class `TestIsIPAddressHelper` to test that non-IP strings are correctly identified as not IPs. Module under test: bubus, browser_use.browser, browser_use.browser.watchdogs.security_watchdog
def test_invalid_ip_detection(self): """Test that non-IP strings are correctly identified as not IPs.""" browser_profile = BrowserProfile(block_ip_addresses=True, headless=True, user_data_dir=None) browser_session = BrowserSession(browser_profile=browser_profile) event_bus = EventBus() watchdog = SecurityWatchdog(browser_session=browser_session, event_bus=event_bus) # Domain names (not IPs) assert watchdog._is_ip_address('example.com') is False assert watchdog._is_ip_address('www.google.com') is False assert watchdog._is_ip_address('localhost') is False # Invalid IPs assert watchdog._is_ip_address('999.999.999.999') is False assert watchdog._is_ip_address('1.2.3') is False assert watchdog._is_ip_address('1.2.3.4.5') is False assert watchdog._is_ip_address('not-an-ip') is False assert watchdog._is_ip_address('') is False # IPs with ports or paths (not valid for the helper - it only checks hostnames) assert watchdog._is_ip_address('192.168.1.1:8080') is False assert watchdog._is_ip_address('192.168.1.1/path') is False
test
0
{"function_name": "test_invalid_ip_detection", "class_name": "TestIsIPAddressHelper", "qualname": "TestIsIPAddressHelper.test_invalid_ip_detection", "file_path": "tests/ci/security/test_ip_blocking.py", "repo_id": "browser-use/browser-use", "loc": 22, "tested_modules": ["bubus", "browser_use.browser", "browser_use.browser.watchdogs.security_watchdog"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/audioflamingo3/test_modeling_audioflamingo3.py:module_doc
Write a module-level docstring for the Python module `test_modeling_audioflamingo3` which contains class `AudioFlamingo3ModelTester`, class `AudioFlamingo3ForConditionalGenerationModelTest`, class `AudioFlamingo3ForConditionalGenerationIntegrationTest`.
Testing suite for the PyTorch AudioFlamingo3 model.
documentation
0
{"doc_type": "module", "module_name": "test_modeling_audioflamingo3", "file_path": "tests/models/audioflamingo3/test_modeling_audioflamingo3.py", "repo_id": "huggingface/transformers", "char_length": 51}
ray-project/ray:python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py:AsyncHttpPublisherClient:class_doc
Write a class-level docstring for `AsyncHttpPublisherClient` (inherits from PublisherClientInterface) which has methods: `__init__`, `publish`, `_send_http_request`, `close`, `set_session`.
Client for publishing ray event batches to an external HTTP service.
documentation
0
{"doc_type": "class", "class_name": "AsyncHttpPublisherClient", "file_path": "python/ray/dashboard/modules/aggregator/publisher/async_publisher_client.py", "repo_id": "ray-project/ray", "char_length": 68, "methods": ["__init__", "publish", "_send_http_request", "close", "set_session"]}
ray-project/ray:doc/source/serve/tutorials/video-analysis/app.py:parse_s3_uri
# Context: from urllib.parse import urlparse class AnalyzeRequest(BaseModel): ... class TagResult(BaseModel): ... class CaptionResult(BaseModel): ... class TimingResult(BaseModel): ... class SceneChange(BaseModel): ... class ChunkResult(BaseModel): ... class AnalyzeResponse(BaseModel): ... class VideoAnalyzer: ... # Task: Write a Python function `parse_s3_uri` to parse s3://bucket/key into (bucket, key). Parameters: s3_uri: str Returns: tuple[str, str]
def parse_s3_uri(s3_uri: str) -> tuple[str, str]: """Parse s3://bucket/key into (bucket, key).""" parsed = urlparse(s3_uri) if parsed.scheme != "s3": raise ValueError(f"Invalid S3 URI: {s3_uri}") bucket = parsed.netloc key = parsed.path.lstrip("/") return bucket, key
function_simple
0
{"cognitive_complexity": 1, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "parse_s3_uri", "class_name": null, "qualname": "parse_s3_uri", "file_path": "doc/source/serve/tutorials/video-analysis/app.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "slib_runnable"}
ray-project/ray:python/ray/train/v2/tests/test_torch_gpu.py:test_torch_trainer_cuda_initialization
# Context: from typing import List import torch import ray from ray.train import RunConfig, ScalingConfig from ray.train.torch import TorchTrainer from ray.train.v2._internal.execution.callback import WorkerGroupCallback from ray.train.v2._internal.execution.worker_group import Worker def test_torch_get_devices(ray_start_2x2_gpu_cluster, num_gpus_per_worker): ... def test_torch_prepare_model(ray_start_4_cpus_2_gpus): ... class LinearDatasetDict(LinearDataset): ... class NonTensorDataset(LinearDataset): ... def test_torch_prepare_dataloader(ray_start_4_cpus_2_gpus, dataset): ... def test_torch_fail_on_nccl_timeout(ray_start_4_cpus_2_gpus): ... # Task: Write a Python test function `test_torch_trainer_cuda_initialization` to test that Torch CUDA initialization works with TorchTrainer. Module under test: typing, torch.nn.parallel, torch.utils.data
def test_torch_trainer_cuda_initialization(ray_start_4_cpus_2_gpus): """Test that Torch CUDA initialization works with TorchTrainer. This test verifies that PyTorch can properly initialize CUDA on multiple workers before the training context is set up, ensuring that GPU resources are available and accessible across all training workers. See https://github.com/ray-project/ray/pull/56509 for more details. """ def train_func(): """Empty training function for this initialization test. Since we're only testing CUDA initialization, the actual training logic is not needed for this test case. """ pass def init_torch(): """Trigger (lazy) initialization of CUDA.""" torch.cuda.is_available() class InitTorchCallback(WorkerGroupCallback): """Callback to initialize PyTorch CUDA before training begins. Implements before_init_train_context because this is where torch is typically imported, ensuring that the CUDA environment is properly initialized. """ def before_init_train_context(self, workers: List[Worker]): """Execute CUDA initialization on all workers.""" futures = [] for worker in workers: futures.append(worker.execute_async(init_torch)) ray.get(futures) return {} callback = InitTorchCallback() trainer = TorchTrainer( train_func, scaling_config=ScalingConfig(num_workers=2, use_gpu=True), run_config=RunConfig(callbacks=[callback]), ) trainer.fit()
test
0
{"function_name": "test_torch_trainer_cuda_initialization", "class_name": null, "qualname": "test_torch_trainer_cuda_initialization", "file_path": "python/ray/train/v2/tests/test_torch_gpu.py", "repo_id": "ray-project/ray", "loc": 46, "tested_modules": ["typing", "torch.nn.parallel", "torch.utils.data", "ray.train", "ray.train.examples.pytorch.torch_linear_example"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:src/transformers/models/vjepa2/modeling_vjepa2.py:drop_path
# Context: import torch class VJEPA2WithMaskedInputPredictorOutput(ModelOutput): ... class VJEPA2WithMaskedInputModelOutput(ModelOutput): ... class VJEPA2PatchEmbeddings3D(nn.Module): ... class VJEPA2Embeddings(nn.Module): ... def eager_attention_forward(module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: torch.Tensor | None, scaling: float, dropout: float, **kwargs): ... def rotate_queries_or_keys(x, pos): ... class VJEPA2RopeAttention(nn.Module): ... class VJEPA2DropPath(nn.Module): ... class VJEPA2MLP(nn.Module): ... class VJEPA2Layer(GradientCheckpointingLayer): ... class VJEPA2Encoder(nn.Module): ... def apply_masks(tensor: torch.Tensor, masks: list[torch.Tensor]) -> torch.Tensor: ... class VJEPA2PredictorEmbeddings(nn.Module): ... class VJEPA2Predictor(nn.Module): ... class VJEPA2PoolerSelfAttention(nn.Module): ... class VJEPA2PoolerCrossAttention(nn.Module): ... class VJEPA2PoolerSelfAttentionLayer(GradientCheckpointingLayer): ... class VJEPA2PoolerCrossAttentionLayer(GradientCheckpointingLayer): ... class VJEPA2AttentivePooler(nn.Module): ... class VJEPA2PreTrainedModel(PreTrainedModel): ... class VJEPA2Model(VJEPA2PreTrainedModel): ... class VJEPA2ForVideoClassification(VJEPA2PreTrainedModel): ... # Task: Write a Python function `drop_path` to drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Parameters: input: torch.Tensor, drop_prob: float, training: bool Returns: torch.Tensor
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor: """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output
function_simple
0
{"cognitive_complexity": 2, "loc": 13, "code_loc": 8, "docstring_loc": 4, "function_name": "drop_path", "class_name": null, "qualname": "drop_path", "file_path": "src/transformers/models/vjepa2/modeling_vjepa2.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "plib_runnable"}
langflow-ai/langflow:src/backend/tests/unit/base/tools/test_run_flow.py:TestRunFlowBaseComponentFlowRetrieval.test_get_graph_uses_cache_when_available_and_up_to_date
# Context: from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch from uuid import uuid4 import pytest from lfx.base.tools.run_flow import RunFlowBaseComponent from lfx.graph.graph.base import Graph def mock_shared_cache(): ... class TestRunFlowBaseComponentInitialization: ... class TestRunFlowBaseComponentFlowCaching: ... class TestRunFlowBaseComponentInputOutputHandling: ... class TestRunFlowBaseComponentOutputMethods: ... class TestRunFlowBaseComponentToolGeneration: ... class TestRunFlowBaseComponentTweakData: ... class TestRunFlowBaseComponentUpdateOutputs: ... class TestRunFlowBaseComponentTweaks: ... class TestRunFlowBaseComponentFlowRetrieval: async def test_get_flow_with_id(self): ... async def test_get_flow_with_name(self): ... async def test_get_flow_returns_empty_data_when_none(self): ... async def test_get_graph_raises_error_without_id_or_name(self): ... async def test_get_graph_fetches_and_caches_when_not_cached(self): ... async def test_get_graph_deletes_stale_cache_and_refetches(self): ... # Task: Write a Python test method `test_get_graph_uses_cache_when_available_and_up_to_date` in test class `TestRunFlowBaseComponentFlowRetrieval` to test that get_graph returns cached graph when available and up-to-date. Module under test: uuid, lfx.base.tools.run_flow, lfx.graph.graph.base
async def test_get_graph_uses_cache_when_available_and_up_to_date(self): """Test that get_graph returns cached graph when available and up-to-date.""" component = RunFlowBaseComponent() component._user_id = str(uuid4()) component.cache_flow = True flow_id = str(uuid4()) updated_at = "2024-01-01T00:00:00Z" mock_graph = MagicMock(spec=Graph) mock_graph.updated_at = updated_at with ( patch.object(component, "_flow_cache_call") as mock_cache_call, patch.object(component, "_is_cached_flow_up_to_date") as mock_is_up_to_date, ): mock_cache_call.return_value = mock_graph mock_is_up_to_date.return_value = True result = await component.get_graph(flow_id_selected=flow_id, updated_at=updated_at) assert result == mock_graph mock_cache_call.assert_called_once_with("get", flow_id=flow_id) mock_is_up_to_date.assert_called_once_with(mock_graph, updated_at)
test
1
{"function_name": "test_get_graph_uses_cache_when_available_and_up_to_date", "class_name": "TestRunFlowBaseComponentFlowRetrieval", "qualname": "TestRunFlowBaseComponentFlowRetrieval.test_get_graph_uses_cache_when_available_and_up_to_date", "file_path": "src/backend/tests/unit/base/tools/test_run_flow.py", "repo_id": "langflow-ai/langflow", "loc": 23, "tested_modules": ["uuid", "lfx.base.tools.run_flow", "lfx.graph.graph.base", "lfx.graph.vertex.base", "lfx.schema.data"], "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/memory/encoding_flow.py:EncodingFlow._apply_defaults
# Context: class ItemState(BaseModel): ... class EncodingState(BaseModel): ... class EncodingFlow(Flow[EncodingState]): initial_state = EncodingState def __init__( self, storage: Any, llm: Any, embedder: Any, config: MemoryConfig | None = None, ) -> None: super().__init__(suppress_flow_events=True) self._storage = storage self._llm = llm self._embedder = embedder self._config = config or MemoryConfig() def batch_embed(self) -> None: ... def intra_batch_dedup(self) -> None: ... def _cosine_similarity(a: list[float], b: list[float]) -> float: ... def parallel_find_similar(self) -> None: ... def parallel_analyze(self) -> None: ... def execute_plans(self) -> None: ... # Task: Write a Python method `_apply_defaults` for the class `EncodingFlow` to apply caller values with config defaults (fast path). Parameters: item: ItemState Returns: None
def _apply_defaults(self, item: ItemState) -> None: """Apply caller values with config defaults (fast path).""" item.resolved_scope = item.scope or "/" item.resolved_categories = item.categories or [] item.resolved_metadata = item.metadata or {} item.resolved_importance = ( item.importance if item.importance is not None else self._config.default_importance ) item.resolved_source = item.source item.resolved_private = item.private
function_simple
0
{"cognitive_complexity": 4, "loc": 12, "code_loc": 10, "docstring_loc": 1, "function_name": "_apply_defaults", "class_name": "EncodingFlow", "qualname": "EncodingFlow._apply_defaults", "file_path": "lib/crewai/src/crewai/memory/encoding_flow.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "file_runnable"}
huggingface/transformers:src/transformers/models/glm4v/image_processing_glm4v.py:Glm4vImageProcessorKwargs:class_doc
Write a class-level docstring for `Glm4vImageProcessorKwargs` (inherits from ImagesKwargs) which has methods: various methods.
patch_size (`int`, *optional*, defaults to 14): The spatial patch size of the vision encoder. temporal_patch_size (`int`, *optional*, defaults to 2): The temporal patch size of the vision encoder. merge_size (`int`, *optional*, defaults to 2): The merge size of the vision encoder to llm encoder.
documentation
0
{"doc_type": "class", "class_name": "Glm4vImageProcessorKwargs", "file_path": "src/transformers/models/glm4v/image_processing_glm4v.py", "repo_id": "huggingface/transformers", "char_length": 308, "methods": []}
ray-project/ray:release/train_tests/benchmark/image_classification/s3_url/imagenet.py:_list_files_for_label
# Context: from typing import Callable, Dict, List, Optional, Tuple import boto3 def _get_class_labels(bucket: str, prefix: str) -> List[str]: ... def _list_s3_image_files_cached(data_dir: str) -> Tuple[Tuple[str, str], ...]: ... def list_s3_image_files(data_dir: str) -> List[Dict[str, str]]: ... def get_process_batch_fn(random_transforms: bool, label_to_id_map: Optional[Dict[str, int]]) -> Callable[[Dict[str, np.ndarray]], Dict[str, np.ndarray]]: ... def create_s3_url_dataset(data_dir: str, random_transforms: bool, limit_rows: Optional[int]) -> ray.data.Dataset: ... # Task: Write a Python function `_list_files_for_label` to ray task to list all image files for a specific label. Parameters: bucket: str, prefix: str, label: str Returns: List[Tuple[str, str]]
def _list_files_for_label( bucket: str, prefix: str, label: str ) -> List[Tuple[str, str]]: """Ray task to list all image files for a specific label. Args: bucket: S3 bucket name prefix: S3 prefix (parent directory) label: Class label (subdirectory name) Returns: List of tuples with (file_path, class_name) """ s3_client = boto3.client("s3", region_name=AWS_REGION) paginator = s3_client.get_paginator("list_objects_v2") # Construct the full prefix for this label label_prefix = f"{prefix}/{label}/" if prefix else f"{label}/" file_records = [] for page in paginator.paginate(Bucket=bucket, Prefix=label_prefix): for obj in page.get("Contents", []): key = obj["Key"] if key.lower().endswith((".jpg", ".jpeg")): file_path = f"s3://{bucket}/{key}" file_records.append((file_path, label)) return file_records
function_complex
0
{"cognitive_complexity": 7, "loc": 28, "code_loc": 11, "docstring_loc": 10, "function_name": "_list_files_for_label", "class_name": null, "qualname": "_list_files_for_label", "file_path": "release/train_tests/benchmark/image_classification/s3_url/imagenet.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai/tests/llms/openai/test_openai.py:test_openai_responses_api_with_structured_output
# Context: import pytest from crewai.llms.providers.openai.completion import OpenAICompletion, ResponsesAPIResult from crewai.llms.providers.openai.completion import OpenAICompletion from pydantic import BaseModel from pydantic import BaseModel, Field def test_openai_completion_is_used_when_openai_provider(): ... def test_openai_completion_is_used_when_no_provider_prefix(): ... def test_openai_is_default_provider_without_explicit_llm_set_on_agent(): ... def test_openai_completion_module_is_imported(): ... def test_native_openai_raises_error_when_initialization_fails(): ... def test_openai_completion_initialization_parameters(): ... def test_openai_completion_call(): ... def test_openai_completion_called_during_crew_execution(): ... def test_openai_completion_call_arguments(): ... def test_multiple_openai_calls_in_crew(): ... def test_openai_completion_with_tools(): ... def test_openai_completion_call_returns_usage_metrics(): ... def test_openai_raises_error_when_model_not_supported(): ... def test_openai_client_setup_with_extra_arguments(): ... def test_extra_arguments_are_passed_to_openai_completion(): ... def test_openai_get_client_params_with_api_base(): ... def test_openai_get_client_params_with_base_url_priority(): ... def test_openai_get_client_params_with_env_var(): ... def test_openai_get_client_params_priority_order(): ... def test_openai_get_client_params_no_base_url(monkeypatch): ... def test_openai_streaming_with_response_model(): ... def test_openai_response_format_with_pydantic_model(): ... def test_openai_response_format_with_dict(): ... def test_openai_response_format_none(): ... def test_openai_streaming_returns_usage_metrics(): ... def test_openai_responses_api_initialization(): ... def test_openai_responses_api_default_is_completions(): ... def test_openai_responses_api_prepare_params(): ... def test_openai_responses_api_tool_format(): ... def test_openai_completions_api_tool_format(): ... 
def test_openai_responses_api_structured_output_format(): ... def test_openai_responses_api_with_previous_response_id(): ... def test_openai_responses_api_call_routing(): ... def test_openai_responses_api_basic_call(): ... def test_openai_responses_api_with_system_message_extraction(): ... def test_openai_responses_api_streaming(): ... def test_openai_responses_api_returns_usage_metrics(): ... def test_openai_responses_api_builtin_tools_param(): ... def test_openai_responses_api_builtin_tools_with_custom_tools(): ... def test_openai_responses_api_with_web_search(): ... def test_responses_api_result_dataclass(): ... def test_responses_api_result_has_tool_outputs(): ... def test_responses_api_result_has_reasoning(): ... def test_openai_responses_api_parse_tool_outputs_param(): ... def test_openai_responses_api_parse_tool_outputs_default_false(): ... def test_openai_responses_api_with_parse_tool_outputs(): ... def test_openai_responses_api_parse_tool_outputs_basic_call(): ... def test_openai_responses_api_auto_chain_param(): ... def test_openai_responses_api_auto_chain_default_false(): ... def test_openai_responses_api_last_response_id_property(): ... def test_openai_responses_api_reset_chain(): ... def test_openai_responses_api_auto_chain_prepare_params(): ... def test_openai_responses_api_explicit_previous_response_id_takes_precedence(): ... def test_openai_responses_api_auto_chain_disabled_no_tracking(): ... def test_openai_responses_api_auto_chain_integration(): ... def test_openai_responses_api_auto_chain_with_reset(): ... def test_openai_responses_api_auto_chain_reasoning_param(): ... def test_openai_responses_api_auto_chain_reasoning_default_false(): ... def test_openai_responses_api_last_reasoning_items_property(): ... def test_openai_responses_api_reset_reasoning_chain(): ... def test_openai_responses_api_auto_chain_reasoning_adds_include(): ... def test_openai_responses_api_auto_chain_reasoning_preserves_existing_include(): ... 
def test_openai_responses_api_auto_chain_reasoning_no_duplicate_include(): ... def test_openai_responses_api_auto_chain_reasoning_prepends_to_input(): ... def test_openai_responses_api_auto_chain_reasoning_disabled_no_include(): ... def test_openai_responses_api_auto_chain_reasoning_disabled_no_prepend(): ... def test_openai_responses_api_both_auto_chains_work_together(): ... def test_openai_agent_kickoff_structured_output_without_tools(): ... def test_openai_agent_kickoff_structured_output_with_tools(): ... def test_openai_stop_words_not_applied_to_structured_output(): ... def test_openai_stop_words_still_applied_to_regular_responses(): ... def test_openai_structured_output_preserves_json_with_stop_word_patterns(): ... def test_openai_completions_cached_prompt_tokens(): ... def test_openai_responses_api_cached_prompt_tokens(): ... def test_openai_streaming_cached_prompt_tokens(): ... def test_openai_completions_cached_prompt_tokens_with_tools(): ... def test_openai_responses_api_cached_prompt_tokens_with_tools(): ... def test_openai_streaming_returns_tool_calls_without_available_functions(): ... async def test_openai_async_streaming_returns_tool_calls_without_available_functions(): ... # Task: Write a Python test function `test_openai_responses_api_with_structured_output` to test Responses API with structured output using Pydantic model. Module under test: typing, crewai.llm, crewai.llms.providers.openai.completion
def test_openai_responses_api_with_structured_output():
    """Responses API should return a parsed Pydantic instance for structured output."""
    from pydantic import BaseModel, Field

    class MathAnswer(BaseModel):
        """Structured math answer."""

        result: int = Field(description="The numerical result")
        explanation: str = Field(description="Brief explanation")

    completion = OpenAICompletion(
        model="gpt-4o-mini",
        api="responses",
    )

    answer = completion.call("What is 5 * 7?", response_model=MathAnswer)

    # The call must hand back the typed model, already parsed and correct.
    assert isinstance(answer, MathAnswer)
    assert answer.result == 35
test
0
{"function_name": "test_openai_responses_api_with_structured_output", "class_name": null, "qualname": "test_openai_responses_api_with_structured_output", "file_path": "lib/crewai/tests/llms/openai/test_openai.py", "repo_id": "crewAIInc/crewAI", "loc": 19, "tested_modules": ["typing", "crewai.llm", "crewai.llms.providers.openai.completion", "crewai.crew", "crewai.agent"], "has_docstring": true, "runnable_level": "project_runnable"}
docling-project/docling:tests/test_asr_pipeline.py:test_mlx_run_success_and_failure
# Context: from unittest.mock import Mock, patch from docling.datamodel.base_models import ConversionStatus, InputFormat from docling.datamodel.document import ConversionResult, InputDocument from docling.backend.noop_backend import NoOpBackend from docling.datamodel.base_models import InputFormat from docling.datamodel.document import InputDocument from docling.datamodel.accelerator_options import ( AcceleratorDevice, AcceleratorOptions, ) from docling.datamodel.pipeline_options_asr_model import ( InferenceAsrFramework, InlineAsrMlxWhisperOptions, InlineAsrNativeWhisperOptions, ) from docling.pipeline.asr_pipeline import _MlxWhisperModel, _NativeWhisperModel from docling.datamodel.pipeline_options_asr_model import ( InferenceAsrFramework, InlineAsrNativeWhisperOptions, ) from docling.datamodel.pipeline_options_asr_model import ( InferenceAsrFramework, InlineAsrMlxWhisperOptions, ) from docling.pipeline.asr_pipeline import _MlxWhisperModel def test_audio_path(): ... def get_asr_converter(): ... def test_asr_pipeline_conversion(test_audio_path): ... def silent_audio_path(): ... def test_asr_pipeline_with_silent_audio(silent_audio_path): ... def test_has_text_and_determine_status_helpers(): ... def test_is_backend_supported_noop_backend(): ... def test_native_and_mlx_transcribe_language_handling(monkeypatch, tmp_path): ... def test_native_init_with_artifacts_path_and_device_logging(tmp_path): ... def test_native_run_success_with_bytesio_builds_document(tmp_path): ... def test_native_run_failure_sets_status(tmp_path): ... def test_native_whisper_handles_zero_duration_timestamps(tmp_path): ... def test_mlx_whisper_handles_zero_duration_timestamps(tmp_path): ... def test_native_whisper_skips_empty_zero_duration(tmp_path): ... # Task: Write a Python test function `test_mlx_run_success_and_failure` to cover _MlxWhisperModel.run success and failure paths. Module under test: pathlib, docling.datamodel, docling.datamodel.base_models
def test_mlx_run_success_and_failure(tmp_path):
    """Cover _MlxWhisperModel.run success and failure paths."""
    from docling.backend.noop_backend import NoOpBackend
    from docling.datamodel.accelerator_options import (
        AcceleratorDevice,
        AcceleratorOptions,
    )
    from docling.datamodel.document import ConversionResult, InputDocument
    from docling.datamodel.pipeline_options_asr_model import (
        InferenceAsrFramework,
        InlineAsrMlxWhisperOptions,
    )
    from docling.pipeline.asr_pipeline import _MlxWhisperModel

    def make_conv_result(wav_name):
        # Real on-disk file so the backend initializes and hashes compute.
        wav_path = tmp_path / wav_name
        wav_path.write_bytes(b"RIFF....WAVE")
        doc = InputDocument(
            path_or_stream=wav_path, format=InputFormat.AUDIO, backend=NoOpBackend
        )
        return ConversionResult(input=doc)

    def make_model():
        options = InlineAsrMlxWhisperOptions(
            repo_id="mlx-community/whisper-tiny-mlx",
            inference_framework=InferenceAsrFramework.MLX,
            language="en",
        )
        return _MlxWhisperModel(
            True, None, AcceleratorOptions(device=AcceleratorDevice.MPS), options
        )

    # Success path: a transcribed segment yields SUCCESS status.
    conv_ok = make_conv_result("b.wav")
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        model_ok = make_model()
        model_ok.mlx_whisper = Mock()
        model_ok.mlx_whisper.transcribe.return_value = {
            "segments": [{"start": 0.0, "end": 1.0, "text": "ok"}]
        }
        result_ok = model_ok.run(conv_ok)
        assert result_ok.status.name == "SUCCESS"

    # Failure path: a raising transcriber yields FAILURE status.
    conv_fail = make_conv_result("c.wav")
    with patch.dict("sys.modules", {"mlx_whisper": Mock()}):
        model_fail = make_model()
        model_fail.mlx_whisper = Mock()
        model_fail.mlx_whisper.transcribe.side_effect = RuntimeError("fail")
        result_fail = model_fail.run(conv_fail)
        assert result_fail.status.name == "FAILURE"
test
1
{"function_name": "test_mlx_run_success_and_failure", "class_name": null, "qualname": "test_mlx_run_success_and_failure", "file_path": "tests/test_asr_pipeline.py", "repo_id": "docling-project/docling", "loc": 58, "tested_modules": ["pathlib", "docling.datamodel", "docling.datamodel.base_models", "docling.datamodel.document", "docling.datamodel.pipeline_options"], "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:test/testcases/test_web_api/test_kb_app/test_create_kb.py:TestCapability.test_create_kb_concurrent
# Context: from concurrent.futures import ThreadPoolExecutor, as_completed import pytest from common import create_kb class TestAuthorization: ... class TestDatasetCreate: ... class TestCapability: def test_create_kb_1k(self, WebApiAuth): ... # Task: Write a Python test method `test_create_kb_concurrent` in test class `TestCapability` to verify the behavior of `create_kb_concurrent`. Module under test: concurrent.futures, common, configs
def test_create_kb_concurrent(self, WebApiAuth):
    """Creating many datasets concurrently should succeed for every request."""
    total = 100
    with ThreadPoolExecutor(max_workers=5) as pool:
        pending = [
            pool.submit(create_kb, WebApiAuth, {"name": f"dataset_{idx}"})
            for idx in range(total)
        ]
        completed = list(as_completed(pending))
    # Every submitted request finished, and each reported a zero error code.
    assert len(completed) == total, completed
    assert all(f.result()["code"] == 0 for f in pending)
test
1
{"function_name": "test_create_kb_concurrent", "class_name": "TestCapability", "qualname": "TestCapability.test_create_kb_concurrent", "file_path": "test/testcases/test_web_api/test_kb_app/test_create_kb.py", "repo_id": "infiniflow/ragflow", "loc": 7, "tested_modules": ["concurrent.futures", "common", "configs", "hypothesis", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"}
apache/airflow:airflow-core/tests/unit/dag_processing/test_dagbag.py:TestDagBag.test_task_cluster_policy_violation
# Context: import os from unittest.mock import patch from airflow.dag_processing.dagbag import ( BundleDagBag, DagBag, _capture_with_reraise, _validate_executor_fields, ) from unit import cluster_policies from unit.models import TEST_DAGS_FOLDER def db_clean_up(): ... class TestValidateExecutorFields: ... def test_validate_executor_field_executor_not_configured(): ... def test_validate_executor_field(): ... class TestCaptureWithReraise: ... class TestBundlePathSysPath: ... class TestDagBag: def setup_class(self): ... def teardown_class(self): ... def test_dagbag_with_bundle_name(self, tmp_path): ... def test_get_existing_dag(self, tmp_path): ... def test_get_non_existing_dag(self, tmp_path): ... def test_serialized_dag_not_existing_doesnt_raise(self, tmp_path, session): ... def test_dont_load_example(self, tmp_path): ... def test_safe_mode_heuristic_match(self, tmp_path): ... def test_safe_mode_heuristic_mismatch(self, tmp_path): ... def test_safe_mode_disabled(self, tmp_path): ... def test_dagbag_stats_file_is_relative_path_with_mixed_separators(self, tmp_path): ... def test_dagbag_stats_includes_bundle_info(self, tmp_path): ... def test_dagbag_stats_bundle_info_none_when_not_provided(self, tmp_path): ... def test_process_file_that_contains_multi_bytes_char(self, tmp_path): ... def test_process_file_duplicated_dag_id(self, tmp_path): ... def test_import_errors_use_relative_path_with_bundle(self, tmp_path): ... def test_import_errors_use_relative_path_for_bagging_errors(self, tmp_path): ... def test_zip_skip_log(self, caplog, test_zip_path): ... def test_zip(self, tmp_path, test_zip_path): ... def test_process_dag_file_without_timeout(self, mocked_get_dagbag_import_timeout, mocked_timeout, tmp_path): ... def test_process_dag_file_with_non_default_timeout(self, mocked_get_dagbag_import_timeout, mocked_timeout, tmp_path): ... def test_check_value_type_from_get_dagbag_import_timeout(self, mocked_get_dagbag_import_timeout, tmp_path): ... 
def invalid_cron_dag(self) -> str: ... def invalid_cron_zipped_dag(self, invalid_cron_dag: str, tmp_path: pathlib.Path) -> str: ... def test_process_file_cron_validity_check(self, request: pytest.FixtureRequest, invalid_dag_name: str, tmp_path): ... def test_process_file_invalid_param_check(self, tmp_path): ... def test_process_file_valid_param_check(self, tmp_path): ... def test_get_dag_without_refresh(self, mock_dagmodel): ... def test_get_dag_registration(self, file_to_load, expected): ... def test_get_zip_dag_registration(self, test_zip_path, expected): ... def test_dag_registration_with_failure(self): ... def zip_with_valid_dag_and_dup_tasks(self, tmp_path: pathlib.Path) -> str: ... def test_dag_registration_with_failure_zipped(self, zip_with_valid_dag_and_dup_tasks): ... def test_refresh_py_dag(self, mock_dagmodel, tmp_path): ... def test_refresh_packaged_dag(self, mock_dagmodel, test_zip_path): ... def process_dag(self, create_dag, tmp_path): ... def validate_dags(self, expected_dag, actual_found_dags, actual_dagbag, should_be_found): ... def test_skip_cycle_dags(self, tmp_path): ... def test_process_file_with_none(self, tmp_path): ... def test_timeout_dag_errors_are_import_errors(self, tmp_path, caplog): ... def _make_test_traceback(unparseable_filename: str, depth) -> str: ... def test_import_error_tracebacks(self, tmp_path, depth): ... def test_import_error_tracebacks_zip(self, tmp_path, depth): ... def test_task_cluster_policy_nonstring_owner(self): ... def test_task_cluster_policy_obeyed(self): ... def test_dag_cluster_policy_obeyed(self): ... def test_dagbag_dag_collection(self): ... def test_dabgag_captured_warnings(self): ... def warning_zipped_dag_path(self, tmp_path: pathlib.Path) -> str: ... def test_dabgag_captured_warnings_zip(self, warning_zipped_dag_path: str): ... def test_dag_warnings_invalid_pool(self, known_pools, expected): ... def test_sigsegv_handling(self, tmp_path, caplog): ... 
def test_failed_signal_registration_does_not_crash_the_process(self, tmp_path, caplog): ... # Task: Write a Python test method `test_task_cluster_policy_violation` in test class `TestDagBag` to test that file processing results in import error when task does not. Module under test: __future__, copy, datetime
def test_task_cluster_policy_violation(self):
    """
    test that file processing results in import error when task does not
    obey cluster policy.
    """
    dag_file = os.path.join(TEST_DAGS_FOLDER, "test_missing_owner.py")
    dag_id = "test_missing_owner"
    err_cls_name = "AirflowClusterPolicyViolation"

    dagbag = DagBag(dag_folder=dag_file, include_examples=False)

    # The violating DAG must not be registered at all.
    assert set(dagbag.dag_ids) == set()

    # ...and the policy failure must surface as an import error for the file.
    expected_message = (
        f"{err_cls_name}: DAG policy violation (DAG ID: {dag_id}, Path: {dag_file}):\n"
        "Notices:\n"
        " * Task must have non-None non-default owner. Current value: airflow"
    )
    assert dagbag.import_errors == {dag_file: expected_message}
test
1
{"function_name": "test_task_cluster_policy_violation", "class_name": "TestDagBag", "qualname": "TestDagBag.test_task_cluster_policy_violation", "file_path": "airflow-core/tests/unit/dag_processing/test_dagbag.py", "repo_id": "apache/airflow", "loc": 19, "tested_modules": ["__future__", "copy", "datetime", "sqlalchemy", "airflow"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py:LMCacheMPConnector.start_load_kv
# Context: from typing import TYPE_CHECKING, Any, Literal import torch def reformat_block_ids(block_ids: tuple[list[int], ...] | None) -> list[int]: ... def extract_world_size_and_kv_rank(world_size: int, rank: int, vllm_config: VllmConfig) -> tuple[int, int]: ... def create_scheduler_adapter(server_url: str, zmq_context: zmq.Context, vllm_config: VllmConfig) -> LMCacheMPSchedulerAdapter: ... def create_worker_adapter(server_url: str, zmq_context: zmq.Context, vllm_config: VllmConfig) -> LMCacheMPWorkerAdapter: ... class LMCacheMPRequestState(enum.Enum): ... class LMCacheMPRequestTracker: ... class LMCacheMPRequestMetadata: ... class LMCacheMPConnectorMetadata(KVConnectorMetadata): ... class LMCacheMPConnector(KVConnectorBase_V1): def __init__( self, vllm_config: "VllmConfig", role: KVConnectorRole, kv_cache_config: "KVCacheConfig | None" = None, ): super().__init__(vllm_config, role, kv_cache_config) assert vllm_config.kv_transfer_config is not None server_host = vllm_config.kv_transfer_config.get_from_extra_config( "lmcache.mp.host", "tcp://localhost" ) server_port = vllm_config.kv_transfer_config.get_from_extra_config( "lmcache.mp.port", 5555 ) server_url = f"{server_host}:{server_port}" zmq_context = zmq.Context.instance() if self.role == KVConnectorRole.SCHEDULER: self.scheduler_adapter = create_scheduler_adapter( server_url, zmq_context, vllm_config ) self.request_trackers: dict[str, LMCacheMPRequestTracker] = {} elif self.role == KVConnectorRole.WORKER: self.worker_adapter = create_worker_adapter( server_url, zmq_context, vllm_config ) else: raise ValueError(f"Unknown KVConnectorRole: {self.role}") self.vllm_block_size = vllm_config.cache_config.block_size def role(self) -> KVConnectorRole: ... def _get_connector_metadata(self) -> KVConnectorMetadata: ... def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): ... def wait_for_layer_load(self, layer_name: str) -> None: ... 
def save_kv_layer(self, layer_name: str, kv_layer: torch.Tensor, attn_metadata: AttentionMetadata, **kwargs) -> None: ... def wait_for_save(self): ... def get_finished(self, finished_req_ids: set[str]) -> tuple[set[str] | None, set[str] | None]: ... def get_block_ids_with_load_errors(self) -> set[int]: ... def shutdown(self): ... def get_kv_connector_stats(self) -> 'KVConnectorStats | None': ... def get_num_new_matched_tokens(self, request: 'Request', num_computed_tokens: int) -> tuple[int | None, bool]: ... def update_state_after_alloc(self, request: 'Request', blocks: 'KVCacheBlocks', num_external_tokens: int): ... def build_connector_meta(self, scheduler_output: SchedulerOutput) -> KVConnectorMetadata: ... def update_connector_output(self, connector_output: KVConnectorOutput): ... def request_finished(self, request: 'Request', block_ids: list[int]) -> tuple[bool, dict[str, Any] | None]: ... def take_events(self) -> Iterable['KVCacheEvent']: ... def get_required_kvcache_layout(cls, vllm_config: 'VllmConfig') -> str | None: ... def get_finished_count(self) -> int | None: ... def build_kv_connector_stats(cls, data: dict[str, Any] | None) -> 'KVConnectorStats | None': ... def build_prom_metrics(cls, vllm_config: 'VllmConfig', metric_types: dict[type['PromMetric'], type['PromMetricT']], labelnames: list[str], per_engine_labelvalues: dict[int, list[object]]) -> 'KVConnectorPromMetrics | None': ... def _process_retrieve_requests(self, metadata: LMCacheMPConnectorMetadata) -> None: ... def _process_new_requests(self, scheduler_output: SchedulerOutput, metadata: LMCacheMPConnectorMetadata) -> None: ... def _process_cached_requests(self, scheduler_output: SchedulerOutput, metadata: LMCacheMPConnectorMetadata) -> None: ... def _get_request_tracker(self, request_id: str) -> LMCacheMPRequestTracker: ... def _get_or_create_request_tracker(self, request: 'Request') -> LMCacheMPRequestTracker: ... def _cleanup_request_tracker(self, request_id: str) -> None: ... 
# Task: Write a Python method `start_load_kv` for the class `LMCacheMPConnector` to start loading the KV cache from the connector to vLLM's paged. Parameters: forward_context: 'ForwardContext' Returns: None
def start_load_kv(self, forward_context: "ForwardContext", **kwargs: Any) -> None:
    """
    Start loading the KV cache from the connector to vLLM's paged
    KV buffer. This is called from the forward context before the
    forward pass to enable async loading during model execution.

    Args:
        forward_context (ForwardContext): the forward context.
        **kwargs: additional arguments for the load operation

    Note:
        The number of elements in kv_caches and layer_names should be
        the same.
    """
    metadata = self._get_connector_metadata()
    assert isinstance(metadata, LMCacheMPConnectorMetadata)

    # Collect only the retrieve-direction requests; everything else is
    # handled elsewhere (e.g. store paths).
    retrieve_ids: list = []
    retrieve_ops: list = []
    for request_meta in metadata.requests:
        if request_meta.direction == "RETRIEVE":
            retrieve_ids.append(request_meta.request_id)
            retrieve_ops.append(request_meta.op)

    if not retrieve_ids:
        return

    with torch.cuda.stream(torch.cuda.current_stream()):
        # Interprocess event lets the worker adapter synchronize the
        # async retrieval against the current CUDA stream.
        sync_event = torch.cuda.Event(interprocess=True)
        sync_event.record()
        self.worker_adapter.batched_submit_retrieve_requests(
            retrieve_ids, retrieve_ops, sync_event
        )
function_simple
1
{"cognitive_complexity": 4, "loc": 35, "code_loc": 15, "docstring_loc": 14, "function_name": "start_load_kv", "class_name": "LMCacheMPConnector", "qualname": "LMCacheMPConnector.start_load_kv", "file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
apache/airflow:providers/google/tests/unit/google/cloud/triggers/test_gen_ai.py:TestGenAIGeminiCreateEmbeddingsBatchJobTrigger.test_run_loop_return_success_event
# Context: from unittest import mock import pytest from airflow.providers.google.cloud.hooks.gen_ai import BatchJobStatus from airflow.triggers.base import TriggerEvent def create_batch_job_trigger(mock_conn): ... def create_embeddings_batch_job_trigger(mock_conn): ... class TestGenAIGeminiCreateBatchJobTrigger: ... class TestGenAIGeminiCreateEmbeddingsBatchJobTrigger: def test_serialize(self, create_embeddings_batch_job_trigger): ... async def test_run_loop_return_failed_event(self, mock_job_status, mock_create_embeddings_batch_job, create_embeddings_batch_job_trigger): ... async def test_run_loop_is_still_running(self, mock_job_status, mock_create_embeddings_batch_job, create_embeddings_batch_job_trigger, caplog): ... # Task: Write a Python test method `test_run_loop_return_success_event` in test class `TestGenAIGeminiCreateEmbeddingsBatchJobTrigger` to verify the behavior of `run_loop_return_success_event`. Module under test: __future__, airflow.models, airflow.providers.google.cloud.hooks.gen_ai
async def test_run_loop_return_success_event(
    self, mock_job_status, mock_create_embeddings_batch_job, create_embeddings_batch_job_trigger
):
    """A succeeded batch job should produce a success TriggerEvent with the job dump."""
    job_dump = {"id": "test_job_id", "status": "succeeded"}
    mock_job_status.return_value.state.name = BatchJobStatus.SUCCEEDED.value
    mock_job_status.return_value.model_dump = mock.Mock(return_value=job_dump)

    emitted_event = await create_embeddings_batch_job_trigger.run().asend(None)

    mock_create_embeddings_batch_job.assert_called_once()
    assert emitted_event == TriggerEvent(
        {
            "status": "success",
            "message": "Job completed",
            "job": job_dump,
        }
    )
test
1
{"function_name": "test_run_loop_return_success_event", "class_name": "TestGenAIGeminiCreateEmbeddingsBatchJobTrigger", "qualname": "TestGenAIGeminiCreateEmbeddingsBatchJobTrigger.test_run_loop_return_success_event", "file_path": "providers/google/tests/unit/google/cloud/triggers/test_gen_ai.py", "repo_id": "apache/airflow", "loc": 17, "tested_modules": ["__future__", "airflow.models", "airflow.providers.google.cloud.hooks.gen_ai", "airflow.providers.google.cloud.triggers.gen_ai", "airflow.triggers.base"], "has_docstring": false, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/_common/tests/test_ray_option_utils.py:TestOptionValidation.test_validate_resource_quantity
# Context: from unittest.mock import patch from ray._common.ray_option_utils import ( Option, _check_deprecate_placement_group, _counting_option, _resource_option, _validate_resource_quantity, _validate_resources, update_options, validate_actor_options, validate_task_options, ) class TestTaskActorOptionValidation: ... class TestUpdateOptions: ... class TestOptionValidation: def test_option_validate(self): ... def test_counting_option(self): ... def test_resource_option(self): ... def test_validate_resources(self): ... # Task: Write a Python test method `test_validate_resource_quantity` in test class `TestOptionValidation` to verify the behavior of `validate_resource_quantity`. Module under test: ray._common.ray_option_utils, ray.util.placement_group
def test_validate_resource_quantity(self, mock_get_manager, mock_get_all_names):
    """Exercise valid, invalid, and accelerator-delegated quantity checks."""
    # Valid quantities: no error string returned.
    for resource, quantity in (("CPU", 1), ("memory", 0), ("custom", 0.5)):
        assert _validate_resource_quantity(resource, quantity) is None

    # Negative quantities are rejected with an explanatory message.
    negative_err = _validate_resource_quantity("CPU", -1)
    assert isinstance(negative_err, str)
    assert "cannot be negative" in negative_err

    # Fractions below the minimum granularity are rejected too.
    tiny_err = _validate_resource_quantity("CPU", 0.00001)
    assert isinstance(tiny_err, str)
    assert "cannot go beyond 0.0001" in tiny_err

    # Accelerator validation is delegated to the resource manager.
    manager = mock_get_manager.return_value
    manager.validate_resource_request_quantity.return_value = (False, "mock error")
    gpu_err = _validate_resource_quantity("GPU", 1.5)
    assert isinstance(gpu_err, str)
    assert "mock error" in gpu_err
    mock_get_manager.assert_called_with("GPU")
    manager.validate_resource_request_quantity.assert_called_with(1.5)

    manager.validate_resource_request_quantity.return_value = (True, "")
    assert _validate_resource_quantity("TPU", 1) is None
test
0
{"function_name": "test_validate_resource_quantity", "class_name": "TestOptionValidation", "qualname": "TestOptionValidation.test_validate_resource_quantity", "file_path": "python/ray/_common/tests/test_ray_option_utils.py", "repo_id": "ray-project/ray", "loc": 31, "tested_modules": ["ray._common.ray_option_utils", "ray.util.placement_group"], "has_docstring": false, "runnable_level": "project_runnable"}
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_type_errors.py:test_backwards_compat_with_context_schema
# Context: from langchain.agents import create_agent from tests.unit_tests.agents.model import FakeToolCallingModel class UserContext(TypedDict): ... class SessionContext(TypedDict): ... class AnalysisResult(BaseModel): ... class SummaryResult(BaseModel): ... class WrongContextFieldsMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]): ... class MismatchedRequestMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]): ... class SessionContextMiddleware(AgentMiddleware[AgentState[Any], SessionContext, Any]): ... def test_mismatched_context_schema() -> None: ... class BackwardsCompatibleMiddleware(AgentMiddleware): ... class WrongResponseFieldsMiddleware(AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]): ... class MismatchedResponseMiddleware(AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]): ... class AnalysisMiddleware(AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]): ... def test_mismatched_response_format() -> None: ... class WrongReturnTypeMiddleware(AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]): ... # Task: Write a Python test function `test_backwards_compat_with_context_schema` to verify the behavior of `backwards_compat_with_context_schema`. Module under test: __future__, typing, pydantic
def test_backwards_compat_with_context_schema() -> None:
    """Type-checker fixture: old-style middleware with an explicit context schema."""
    model = FakeToolCallingModel()
    # TYPE ERROR: BackwardsCompatibleMiddleware is AgentMiddleware[..., None]
    # but context_schema=UserContext expects AgentMiddleware[..., UserContext]
    _ = create_agent(  # type: ignore[misc]
        model=model,
        middleware=[BackwardsCompatibleMiddleware()],
        context_schema=UserContext,
    )
test
1
{"function_name": "test_backwards_compat_with_context_schema", "class_name": null, "qualname": "test_backwards_compat_with_context_schema", "file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_type_errors.py", "repo_id": "langchain-ai/langchain", "loc": 9, "tested_modules": ["__future__", "typing", "pydantic", "typing_extensions", "langchain.agents"], "has_docstring": false, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/tests/streamlit/components/v2/bidi_component/test_serialization.py:test_serde_deserialize_with_defaults_partial_state
# Context: import json from streamlit.components.v2.bidi_component.serialization import ( BidiComponentSerde, _extract_dataframes_from_dict, deserialize_trigger_list, handle_deserialize, serialize_mixed_data, ) def test_handle_deserialize(): ... def test_deserialize_trigger_list(): ... def test_serde_deserialize_with_dict(): ... def test_serde_deserialize_with_json_string(): ... def test_serde_deserialize_with_defaults(): ... def test_serde_deserialize_with_none(): ... def test_serde_serialize(): ... def test_serde_deserialize_with_defaults_empty_state(): ... def test_serde_deserialize_with_defaults_complete_state(): ... def test_serde_deserialize_without_defaults(): ... def test_serde_deserialize_with_invalid_json(): ... def test_serde_deserialize_with_dict_input_and_defaults(): ... def test_serde_deserialize_with_none_defaults(): ... def test_extract_dataframes_from_dict(): ... def test_serialize_mixed_data_with_dataframe(): ... def test_serialize_mixed_data_without_dataframe(): ... def test_serialize_mixed_data_with_non_dict(): ... def test_serialization_fallback_to_string(): ... def test_extract_dataframes_from_dict_fallback_on_arrow_failure(monkeypatch): ... # Task: Write a Python test function `test_serde_deserialize_with_defaults_partial_state` to test that defaults are applied only for missing keys. Module under test: __future__, typing, streamlit.components.v2.bidi_component.serialization
def test_serde_deserialize_with_defaults_partial_state():
    """Test that defaults are applied only for missing keys."""
    serde = BidiComponentSerde(
        default={"count": 0, "message": "hello", "enabled": True}
    )

    # Deserialize a state that only sets two of the three keys.
    state = serde.deserialize(json.dumps({"count": 5, "message": "custom"}))

    # Explicit values win; only the absent key falls back to its default.
    assert state["count"] == 5
    assert state["message"] == "custom"
    assert state["enabled"] is True
test
1
{"function_name": "test_serde_deserialize_with_defaults_partial_state", "class_name": null, "qualname": "test_serde_deserialize_with_defaults_partial_state", "file_path": "lib/tests/streamlit/components/v2/bidi_component/test_serialization.py", "repo_id": "streamlit/streamlit", "loc": 13, "tested_modules": ["__future__", "typing", "streamlit.components.v2.bidi_component.serialization", "streamlit.proto.BidiComponent_pb2", "streamlit.components.v2.bidi_component"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/utils/test_data_structure.py:TestAnalyzeValue.test_error_handling
# Context: from langflow.utils.data_structure import ( analyze_value, get_data_structure, get_sample_values, get_type_str, infer_list_type, ) class TestInferListType: ... class TestGetTypeStr: ... class TestGetDataStructure: ... class TestGetSampleValues: ... class TestIntegrationScenarios: ... class TestAnalyzeValue: def test_simple_values(self): ... def test_simple_list(self): ... def test_empty_list(self): ... def test_nested_dict(self): ... def test_max_depth_limit(self): ... def test_size_hints_disabled(self): ... def test_include_samples_for_complex_list(self): ... def test_include_samples_disabled(self): ... def test_tuple_and_set_handling(self): ... # Task: Write a Python test method `test_error_handling` in test class `TestAnalyzeValue` to test error handling in analysis. Module under test: langflow.schema.data, langflow.utils.data_structure
def test_error_handling(self):
    """Test error handling in analysis."""

    class ProblematicClass:
        # Any subscript access blows up, forcing the analyzer's error path.
        def __getitem__(self, key):
            msg = "Access error"
            raise RuntimeError(msg)

    summary = analyze_value(ProblematicClass())

    # The analyzer should either describe the type or report the error.
    assert "ProblematicClass" in summary or "error(" in summary
test
1
{"function_name": "test_error_handling", "class_name": "TestAnalyzeValue", "qualname": "TestAnalyzeValue.test_error_handling", "file_path": "src/backend/tests/unit/utils/test_data_structure.py", "repo_id": "langflow-ai/langflow", "loc": 13, "tested_modules": ["langflow.schema.data", "langflow.utils.data_structure"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/fab/tests/unit/fab/auth_manager/api_fastapi/routes/test_roles.py:TestRoles.test_patch_role_validation_404_empty_name
# Context: from contextlib import nullcontext as _noop_cm from unittest.mock import ANY, MagicMock, patch from fastapi import HTTPException, status class TestRoles: def test_create_role(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_create_role_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_create_role_validation_422_empty_name(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_create_role_validation_422_missing_name(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_roles_success_defaults(self, conf_mock, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_roles_passes_params_and_clamps_limit(self, conf_mock, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_roles_uses_fallback_when_limit_zero(self, conf_mock, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_roles_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_roles_validation_422_negative_offset(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_delete_role(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_delete_role_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_delete_role_validation_404_not_found(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_delete_role_validation_404_empty_name(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... 
def test_get_role(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_role_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_role_validation_404_not_found(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_role_validation_404_empty_name(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_patch_role(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_patch_role_with_update_mask(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_path_role_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_patch_role_validation_404_not_found(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_path_role_unknown_update_mask(self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user): ... def test_get_permissions_success(self, mock_get_application_builder, mock_get_auth_manager, mock_permissions, test_client, as_user): ... def test_get_permissions_forbidden(self, mock_get_application_builder, mock_get_auth_manager, mock_permissions, test_client, as_user): ... # Task: Write a Python test method `test_patch_role_validation_404_empty_name` in test class `TestRoles` to verify the behavior of `patch_role_validation_404_empty_name`. Module under test: __future__, contextlib, fastapi
def test_patch_role_validation_404_empty_name( self, mock_get_application_builder, mock_get_auth_manager, mock_roles, test_client, as_user ): mgr = MagicMock() mgr.is_authorized_custom_view.return_value = True mock_get_auth_manager.return_value = mgr mock_roles.patch_role.side_effect = HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Role with name 'non_existent_role' does not exist.", ) with as_user(): resp = test_client.patch("/fab/v1/roles/", json={"name": "non_existent_role", "actions": []}) assert resp.status_code == 404
test
1
{"function_name": "test_patch_role_validation_404_empty_name", "class_name": "TestRoles", "qualname": "TestRoles.test_patch_role_validation_404_empty_name", "file_path": "providers/fab/tests/unit/fab/auth_manager/api_fastapi/routes/test_roles.py", "repo_id": "apache/airflow", "loc": 14, "tested_modules": ["__future__", "contextlib", "fastapi", "airflow.providers.fab.auth_manager.api_fastapi.datamodels.roles"], "has_docstring": false, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/tests/unit/services/test_edge_cases.py:TestServiceLifecycle.test_service_teardown_called
# Context: import pytest from lfx.services.base import Service from lfx.services.schema import ServiceType class MockSessionService(Service): ... def clean_manager(): ... class TestCircularDependencyDetection: ... class TestConfigParsingEdgeCases: ... class TestServiceRegistrationEdgeCases: ... class TestDependencyInjectionEdgeCases: ... class TestConcurrentAccess: ... class TestSettingsServiceProtection: ... class TestServiceLifecycle: def clean_manager(self): ... def test_service_ready_state(self, clean_manager): ... async def test_multiple_teardowns_safe(self, clean_manager): ... # Task: Write a Python test method `test_service_teardown_called` in test class `TestServiceLifecycle` to test that teardown is called on services. Module under test: lfx.services.base, lfx.services.manager, lfx.services.schema
async def test_service_teardown_called(self, clean_manager): """Test that teardown is called on services.""" teardown_called = [] class TeardownTrackingService(Service): name = "tracking_service" def __init__(self): super().__init__() self.set_ready() async def teardown(self) -> None: teardown_called.append(True) clean_manager.register_service_class(ServiceType.STORAGE_SERVICE, TeardownTrackingService) # Create service clean_manager.get(ServiceType.STORAGE_SERVICE) # Teardown await clean_manager.teardown() # Should have been called assert len(teardown_called) == 1
test
1
{"function_name": "test_service_teardown_called", "class_name": "TestServiceLifecycle", "qualname": "TestServiceLifecycle.test_service_teardown_called", "file_path": "src/lfx/tests/unit/services/test_edge_cases.py", "repo_id": "langflow-ai/langflow", "loc": 24, "tested_modules": ["lfx.services.base", "lfx.services.manager", "lfx.services.schema", "abc", "lfx.services.settings.service"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/base/langflow/agentic/flows/translation_flow.py:get_graph
# Context: from lfx.components.input_output import ChatInput, ChatOutput from lfx.components.models import LanguageModelComponent from lfx.graph import Graph def _build_model_config(provider: str, model_name: str) -> list[dict]: ... # Task: Write a Python function `get_graph` to create and return the TranslationFlow graph. Parameters: provider: str | None, model_name: str | None, api_key_var: str | None Returns: Graph
def get_graph( provider: str | None = None, model_name: str | None = None, api_key_var: str | None = None, ) -> Graph: """Create and return the TranslationFlow graph. Args: provider: Model provider (e.g., "OpenAI", "Anthropic"). Defaults to OpenAI. model_name: Model name (e.g., "gpt-4o-mini"). Defaults to gpt-4o-mini. api_key_var: Optional API key variable name (e.g., "OPENAI_API_KEY"). Returns: Graph: The configured translation flow graph. """ # Use defaults if not provided provider = provider or "OpenAI" model_name = model_name or "gpt-4o-mini" # Create chat input component chat_input = ChatInput() chat_input.set( sender="User", sender_name="User", should_store_message=True, ) # Create language model component llm = LanguageModelComponent() # Set model configuration llm.set_input_value("model", _build_model_config(provider, model_name)) # Configure LLM llm_config = { "input_value": chat_input.message_response, "system_message": TRANSLATION_PROMPT, "temperature": 0.1, # Low temperature for consistent JSON output } if api_key_var: llm_config["api_key"] = api_key_var llm.set(**llm_config) # Create chat output component chat_output = ChatOutput() chat_output.set( input_value=llm.text_response, sender="Machine", sender_name="AI", should_store_message=True, clean_data=True, data_template="{text}", ) return Graph(start=chat_input, end=chat_output)
function_simple
1
{"cognitive_complexity": 3, "loc": 57, "code_loc": 28, "docstring_loc": 10, "function_name": "get_graph", "class_name": null, "qualname": "get_graph", "file_path": "src/backend/base/langflow/agentic/flows/translation_flow.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_flow_ask.py:TestConsoleProviderInput.test_console_provider_non_verbose
# Context: from unittest.mock import MagicMock, patch from crewai.flow.async_feedback.providers import ConsoleProvider from crewai.events.event_listener import event_listener class MockInputProvider: ... class SlowMockProvider: ... class TestAskBasic: ... class TestAskTimeout: ... class TestProviderResolution: ... class TestAskEvents: ... class TestAskCheckpoint: ... class TestInputHistory: ... class TestAskIntegration: ... class TestInputProviderProtocol: ... class TestAskErrorHandling: ... class TestAskMetadata: ... class TestConsoleProviderInput: def test_console_provider_pauses_live_updates(self) -> None: ... def test_console_provider_displays_message(self) -> None: ... def test_console_provider_strips_response(self) -> None: ... def test_console_provider_implements_protocol(self) -> None: ... # Task: Write a Python test method `test_console_provider_non_verbose` in test class `TestConsoleProviderInput` to verify that ConsoleProvider in non-verbose mode uses plain input. Module under test: __future__, datetime, typing
def test_console_provider_non_verbose(self) -> None: """ConsoleProvider in non-verbose mode uses plain input.""" from crewai.events.event_listener import event_listener mock_formatter = MagicMock() mock_formatter.console = MagicMock() provider = ConsoleProvider(verbose=False) with ( patch.object(event_listener, "formatter", mock_formatter), patch("builtins.input", return_value="plain answer") as mock_input, ): result = provider.request_input("Q?", MagicMock()) assert result == "plain answer" mock_input.assert_called_once_with("Q? ")
test
0
{"function_name": "test_console_provider_non_verbose", "class_name": "TestConsoleProviderInput", "qualname": "TestConsoleProviderInput.test_console_provider_non_verbose", "file_path": "lib/crewai/tests/test_flow_ask.py", "repo_id": "crewAIInc/crewAI", "loc": 17, "tested_modules": ["__future__", "datetime", "typing", "crewai.flow", "crewai.flow.async_feedback.providers"], "has_docstring": true, "runnable_level": "project_runnable"}
unclecode/crawl4ai:docs/examples/url_seeder/url_seeder_quick_demo.py:module_doc
Write a module-level docstring for the Python module `url_seeder_quick_demo` which contains various utilities.
🚀 URL Seeder + AsyncWebCrawler = Magic! Quick demo showing discovery → filter → crawl pipeline Note: Uses context manager for automatic cleanup of resources.
documentation
1
{"doc_type": "module", "module_name": "url_seeder_quick_demo", "file_path": "docs/examples/url_seeder/url_seeder_quick_demo.py", "repo_id": "unclecode/crawl4ai", "char_length": 158}
ray-project/ray:rllib/offline/tests/test_offline_rl_stateful.py:OfflineRLStatefulTest.test_training_with_recorded_states_on_single_episode_and_evaluate
# Context: import numpy as np from ray.rllib.core.learner.training_data import TrainingData from ray.rllib.env import INPUT_ENV_SPACES from ray.rllib.env.single_agent_episode import SingleAgentEpisode from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch import msgpack import msgpack_numpy as mnp class OfflineRLStatefulTest(unittest.TestCase): def setUpClass(cls): ... def tearDownClass(cls): ... def setUp(self): ... def tearDown(self): ... def test_training_on_single_episode_and_evaluate(self): ... def test_training_with_recorded_states_on_single_batch_and_evaluate(self): ... # Task: Write a Python test method `test_training_with_recorded_states_on_single_episode_and_evaluate` in test class `OfflineRLStatefulTest` that trains on a single episode from the recorded dataset and evaluates. Module under test: pathlib, ray.rllib.algorithms.bc, ray.rllib.core.columns
def test_training_with_recorded_states_on_single_episode_and_evaluate(self): """Trains on a single episode from the recorded dataset and evaluates. Uses recorded states for training. """ # Load these packages inline. import msgpack import msgpack_numpy as mnp # Load the dataset. ds = self.algo.offline_data.data # Take a single-row batch (one episode). batch = ds.take_batch(1) # Read the episodes and decode them. episodes = [ SingleAgentEpisode.from_state( msgpack.unpackb(state, object_hook=mnp.decode) ) for state in batch["item"] ][:1] # Get the episode return. episode_return = episodes[0].get_return() print(f"Found episode with return {episode_return}") # Assert the episode has a decent return. assert episodes[0].get_return() > 350.0, "Return must be >350.0" # Build the learner connector. obs_space, action_space = self.algo.offline_data.spaces[INPUT_ENV_SPACES] learner_connector = self.algo.config.build_learner_connector( input_observation_space=obs_space, input_action_space=action_space, ) # Run the learner connector on the episode. processed_batch = learner_connector( rl_module=self.algo.learner_group._learner.module, batch={}, episodes=episodes, shared_data={}, # TODO (simon): Add MetricsLogger to non-Learner components that have a # LearnerConnector pipeline. metrics=None, ) # Create a MA batch from the processed batch and a TrainingData object. ma_batch = MultiAgentBatch( policy_batches={ "default_policy": SampleBatch(processed_batch["default_policy"]) }, env_steps=np.prod(processed_batch["default_policy"]["obs"].shape[:-1]), ) training_data = TrainingData(batch=ma_batch) # Overfit on this single episode. 
i = 0 while True: i += 1 learner_results = self.algo.learner_group.update( training_data=training_data, minibatch_size=ma_batch["default_policy"].count, num_iters=self.algo.config.dataset_num_iters_per_learner, **self.algo.offline_data.iter_batches_kwargs, ) if i % 10 == 0: loss = learner_results[0]["default_policy"]["policy_loss"].peek() print(f"Iteration {i}: policy_loss: {loss}") if np.isclose(loss, 1e-4, atol=1e-5) or i >= 100: break # Evaluation # Get the latest RLModule state from the learner and synchronize # the eval env runners. rl_module_state = self.algo.learner_group.get_state()["learner"]["rl_module"] self.algo.eval_env_runner_group.foreach_env_runner( func="set_state", local_env_runner=False, kwargs={"state": {"rl_module": rl_module_state}}, ) # Evaluate the updated policy for 5 episodes. eval_episodes = self.algo.eval_env_runner_group.foreach_env_runner( func=lambda er, duration=self.config.evaluation_duration: er.sample( num_episodes=duration, explore=False ), # self._remote_eval_episode_fn, local_env_runner=False, ) # Assert the eval return is decent. episode_return_mean = np.mean([ep.get_return() for ep in eval_episodes[0]]) self.assertGreaterEqual( episode_return_mean, 100.0, f"Eval return must be >100.0 but is {episode_return_mean}", ) print(f"Eval episodes returns: {episode_return_mean}")
test
0
{"function_name": "test_training_with_recorded_states_on_single_episode_and_evaluate", "class_name": "OfflineRLStatefulTest", "qualname": "OfflineRLStatefulTest.test_training_with_recorded_states_on_single_episode_and_evaluate", "file_path": "rllib/offline/tests/test_offline_rl_stateful.py", "repo_id": "ray-project/ray", "loc": 97, "tested_modules": ["pathlib", "ray.rllib.algorithms.bc", "ray.rllib.core.columns", "ray.rllib.core.learner.training_data", "ray.rllib.core.rl_module.default_model_config"], "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/diffusers:src/diffusers/models/autoencoders/autoencoder_kl_hunyuanimage.py:AutoencoderKLHunyuanImage.tiled_decode
# Context: import torch from .vae import AutoencoderMixin, DecoderOutput, DiagonalGaussianDistribution class HunyuanImageResnetBlock(nn.Module): ... class HunyuanImageAttentionBlock(nn.Module): ... class HunyuanImageDownsample(nn.Module): ... class HunyuanImageUpsample(nn.Module): ... class HunyuanImageMidBlock(nn.Module): ... class HunyuanImageEncoder2D(nn.Module): ... class HunyuanImageDecoder2D(nn.Module): ... class AutoencoderKLHunyuanImage(ModelMixin, AutoencoderMixin, ConfigMixin, FromOriginalModelMixin): _supports_gradient_checkpointing = False def __init__( self, in_channels: int, out_channels: int, latent_channels: int, block_out_channels: tuple[int, ...], layers_per_block: int, spatial_compression_ratio: int, sample_size: int, scaling_factor: float = None, downsample_match_channel: bool = True, upsample_match_channel: bool = True, ) -> None: # fmt: on super().__init__() self.encoder = HunyuanImageEncoder2D( in_channels=in_channels, z_channels=latent_channels, block_out_channels=block_out_channels, num_res_blocks=layers_per_block, spatial_compression_ratio=spatial_compression_ratio, downsample_match_channel=downsample_match_channel, ) self.decoder = HunyuanImageDecoder2D( z_channels=latent_channels, out_channels=out_channels, block_out_channels=list(reversed(block_out_channels)), num_res_blocks=layers_per_block, spatial_compression_ratio=spatial_compression_ratio, upsample_match_channel=upsample_match_channel, ) # Tiling and slicing configuration self.use_slicing = False self.use_tiling = False # Tiling parameters self.tile_sample_min_size = sample_size self.tile_latent_min_size = sample_size // spatial_compression_ratio self.tile_overlap_factor = 0.25 def enable_tiling(self, tile_sample_min_size: int | None, tile_overlap_factor: float | None) -> None: ... def _encode(self, x: torch.Tensor): ... def encode(self, x: torch.Tensor, return_dict: bool) -> AutoencoderKLOutput | tuple[DiagonalGaussianDistribution]: ... 
def _decode(self, z: torch.Tensor, return_dict: bool): ... def decode(self, z: torch.Tensor, return_dict: bool) -> DecoderOutput | torch.Tensor: ... def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: ... def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: ... def tiled_encode(self, x: torch.Tensor) -> torch.Tensor: ... def forward(self, sample: torch.Tensor, sample_posterior: bool, return_dict: bool, generator: torch.Generator | None) -> DecoderOutput | torch.Tensor: ... # Task: Write a Python method `tiled_decode` for the class `AutoencoderKLHunyuanImage` to decode latent using spatial tiling strategy. Parameters: z: torch.Tensor, return_dict: bool Returns: DecoderOutput | torch.Tensor
def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> DecoderOutput | torch.Tensor: """ Decode latent using spatial tiling strategy. Args: z (`torch.Tensor`): Latent tensor of shape (B, C, H, W). return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. Returns: [`~models.vae.DecoderOutput`] or `tuple`: If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is returned. """ _, _, height, width = z.shape overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) row_limit = self.tile_sample_min_size - blend_extent rows = [] for i in range(0, height, overlap_size): row = [] for j in range(0, width, overlap_size): tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] decoded = self.decoder(tile) row.append(decoded) rows.append(row) result_rows = [] for i, row in enumerate(rows): result_row = [] for j, tile in enumerate(row): if i > 0: tile = self.blend_v(rows[i - 1][j], tile, blend_extent) if j > 0: tile = self.blend_h(row[j - 1], tile, blend_extent) result_row.append(tile[:, :, :row_limit, :row_limit]) result_rows.append(torch.cat(result_row, dim=-1)) dec = torch.cat(result_rows, dim=-2) if not return_dict: return (dec,) return DecoderOutput(sample=dec)
function_complex
1
{"cognitive_complexity": 13, "loc": 43, "code_loc": 26, "docstring_loc": 13, "function_name": "tiled_decode", "class_name": "AutoencoderKLHunyuanImage", "qualname": "AutoencoderKLHunyuanImage.tiled_decode", "file_path": "src/diffusers/models/autoencoders/autoencoder_kl_hunyuanimage.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/utils/test_schemas.py:TestChatOutputResponse.test_chat_response_with_list_message
# Context: from langflow.utils.schemas import ChatOutputResponse, ContainsEnumMeta, DataOutputResponse, File class TestFile: ... class TestDataOutputResponse: ... class TestContainsEnumMeta: ... class TestChatOutputResponse: def test_basic_chat_response_creation(self): ... def test_chat_response_with_all_fields(self): ... def test_validate_files_valid_files(self): ... def test_validate_files_missing_name_and_type(self): ... def test_validate_files_missing_path_raises_error(self): ... def test_validate_files_non_dict_raises_error(self): ... def test_validate_files_unknown_type_raises_error(self): ... def test_validate_files_empty_list(self): ... def test_validate_files_none(self): ... def test_validate_files_type_detection_in_path(self): ... def test_from_message_class_method(self, mock_init): ... def test_from_message_with_custom_sender(self, mock_init): ... def test_validate_message_ai_sender_newline_formatting(self): ... def test_validate_message_ai_sender_existing_double_newlines(self): ... def test_validate_message_non_ai_sender_unchanged(self): ... def test_validate_message_complex_newline_patterns(self): ... def test_message_validation_with_list_message(self): ... # Task: Write a Python test method `test_chat_response_with_list_message` in test class `TestChatOutputResponse` to test chat response with list message. Module under test: langflow.utils.schemas, pydantic
def test_chat_response_with_list_message(self): """Test chat response with list message.""" message_list = ["Hello", {"type": "text", "content": "world"}] response = ChatOutputResponse( message=message_list, sender="Human", # Use non-AI sender to avoid message validation type="mixed", ) assert response.message == message_list
test
1
{"function_name": "test_chat_response_with_list_message", "class_name": "TestChatOutputResponse", "qualname": "TestChatOutputResponse.test_chat_response_with_list_message", "file_path": "src/backend/tests/unit/utils/test_schemas.py", "repo_id": "langflow-ai/langflow", "loc": 11, "tested_modules": ["langflow.utils.schemas", "pydantic"], "has_docstring": true, "runnable_level": "project_runnable"}
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/services/social_media_service.py:SocialMediaService.get_trending_topics
# Context: from typing import List, Optional, Dict, Any from fastapi import HTTPException from services.db_service import social_media_db class SocialMediaService: async def get_posts(self, page: int, per_page: int, platform: Optional[str], user_handle: Optional[str], sentiment: Optional[str], category: Optional[str], date_from: Optional[str], date_to: Optional[str], search: Optional[str]) -> PaginatedPosts: ... async def get_post(self, post_id: str) -> Dict[str, Any]: ... async def get_platforms(self) -> List[str]: ... async def get_sentiments(self, date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_top_users(self, platform: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_categories(self, date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_user_sentiment(self, limit: int, platform: Optional[str], date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_category_sentiment(self, date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_sentiment_over_time(self, date_from: Optional[str], date_to: Optional[str], platform: Optional[str]) -> List[Dict[str, Any]]: ... async def get_influential_posts(self, sentiment: Optional[str], limit: int, date_from: Optional[str], date_to: Optional[str]) -> List[Dict[str, Any]]: ... async def get_engagement_stats(self, date_from: Optional[str], date_to: Optional[str]) -> Dict[str, Any]: ... # Task: Write a Python async method `get_trending_topics` for the class `SocialMediaService` to get trending topics with sentiment breakdown. Parameters: date_from: Optional[str], date_to: Optional[str], limit: int Returns: List[Dict[str, Any]]
async def get_trending_topics( self, date_from: Optional[str] = None, date_to: Optional[str] = None, limit: int = 10 ) -> List[Dict[str, Any]]: """Get trending topics with sentiment breakdown.""" try: query_parts = [ """ WITH topic_data AS ( SELECT json_each.value as topic, sentiment, COUNT(*) as count FROM posts, json_each(posts.tags) WHERE tags IS NOT NULL """ ] params = [] if date_from: query_parts.append("AND datetime(post_timestamp) >= datetime(?)") params.append(date_from) if date_to: query_parts.append("AND datetime(post_timestamp) <= datetime(?)") params.append(date_to) query_parts.append( """ GROUP BY json_each.value, sentiment ) SELECT topic, SUM(count) as total_count, SUM(CASE WHEN sentiment = 'positive' THEN count ELSE 0 END) as positive_count, SUM(CASE WHEN sentiment = 'negative' THEN count ELSE 0 END) as negative_count, SUM(CASE WHEN sentiment = 'neutral' THEN count ELSE 0 END) as neutral_count, SUM(CASE WHEN sentiment = 'critical' THEN count ELSE 0 END) as critical_count FROM topic_data GROUP BY topic ORDER BY total_count DESC LIMIT ? """ ) params.append(limit) query = " ".join(query_parts) result = await social_media_db.execute_query(query, tuple(params), fetch=True) for topic in result: total = topic["total_count"] topic["positive_percent"] = (topic["positive_count"] / total) * 100 if total > 0 else 0 topic["negative_percent"] = (topic["negative_count"] / total) * 100 if total > 0 else 0 topic["neutral_percent"] = (topic["neutral_count"] / total) * 100 if total > 0 else 0 topic["critical_percent"] = (topic["critical_count"] / total) * 100 if total > 0 else 0 return result except Exception as e: if isinstance(e, HTTPException): raise e raise HTTPException(status_code=500, detail=f"Error fetching trending topics: {str(e)}")
function_complex
0
{"cognitive_complexity": 21, "loc": 63, "code_loc": 56, "docstring_loc": 1, "function_name": "get_trending_topics", "class_name": "SocialMediaService", "qualname": "SocialMediaService.get_trending_topics", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/services/social_media_service.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"}
google/langextract:langextract/prompt_validation.py:handle_alignment_report
# Context: from absl import logging class PromptValidationLevel(enum.Enum): ... class _IssueKind(enum.Enum): ... class ValidationIssue: ... class ValidationReport: ... class PromptAlignmentError(RuntimeError): ... class AlignmentPolicy: ... def _preview(s: str, n: int) -> str: ... def validate_prompt_alignment(examples: Sequence[data.ExampleData], aligner: resolver.WordAligner | None, policy: AlignmentPolicy | None, tokenizer: tokenizer_lib.Tokenizer | None) -> ValidationReport: ... # Task: Write a Python function `handle_alignment_report` to log or raise based on validation level. Parameters: report: ValidationReport, level: PromptValidationLevel Returns: None
def handle_alignment_report( report: ValidationReport, level: PromptValidationLevel, *, strict_non_exact: bool = False, ) -> None: """Log or raise based on validation level. Args: report: The validation report to handle. level: The validation level determining behavior. strict_non_exact: If True, treat non-exact matches as errors in ERROR mode. Raises: PromptAlignmentError: If validation fails in ERROR mode. """ if level is PromptValidationLevel.OFF: return for issue in report.issues: if issue.issue_kind is _IssueKind.NON_EXACT: logging.warning( "Prompt alignment: non-exact match: %s", issue.short_msg() ) else: logging.warning( "Prompt alignment: FAILED to align: %s", issue.short_msg() ) if level is PromptValidationLevel.ERROR: failed = [i for i in report.issues if i.issue_kind is _IssueKind.FAILED] non_exact = [ i for i in report.issues if i.issue_kind is _IssueKind.NON_EXACT ] if failed: sample = failed[0].short_msg() raise PromptAlignmentError( f"Prompt alignment validation failed: {len(failed)} extraction(s) " f"could not be aligned (e.g., {sample})" ) if strict_non_exact and non_exact: sample = non_exact[0].short_msg() raise PromptAlignmentError( "Prompt alignment validation failed under strict mode: " f"{len(non_exact)} non-exact match(es) found (e.g., {sample})" )
function_complex
1
{"cognitive_complexity": 11, "loc": 47, "code_loc": 28, "docstring_loc": 10, "function_name": "handle_alignment_report", "class_name": null, "qualname": "handle_alignment_report", "file_path": "langextract/prompt_validation.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:common/data_source/google_util/util_threadpool_concurrency.py:ThreadSafeDict.setdefault
# Context: class ThreadSafeDict(MutableMapping[KT, VT]): def __init__(self, input_dict: dict[KT, VT] | None = None) -> None: self._dict: dict[KT, VT] = input_dict or {} self.lock = threading.Lock() def __getitem__(self, key: KT) -> VT: ... def __setitem__(self, key: KT, value: VT) -> None: ... def __delitem__(self, key: KT) -> None: ... def __iter__(self) -> Iterator[KT]: ... def __len__(self) -> int: ... def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: ... def validate(cls, v: Any) -> 'ThreadSafeDict[KT, VT]': ... def __deepcopy__(self, memo: Any) -> 'ThreadSafeDict[KT, VT]': ... def clear(self) -> None: ... def copy(self) -> dict[KT, VT]: ... def get(self, key: KT) -> VT | None: ... def get(self, key: KT, default: VT | _T) -> VT | _T: ... def get(self, key: KT, default: Any) -> Any: ... def pop(self, key: KT, default: Any) -> Any: ... def update(self, *args, **kwargs) -> None: ... def items(self) -> collections.abc.ItemsView[KT, VT]: ... def keys(self) -> collections.abc.KeysView[KT]: ... def values(self) -> collections.abc.ValuesView[VT]: ... def atomic_get_set(self, key: KT, value_callback: Callable[[VT], VT], default: VT) -> tuple[VT, VT]: ... def atomic_get_set(self, key: KT, value_callback: Callable[[VT | _T], VT], default: VT | _T) -> tuple[VT | _T, VT]: ... def atomic_get_set(self, key: KT, value_callback: Callable[[Any], VT], default: Any) -> tuple[Any, VT]: ... # Task: Write a Python method `setdefault` for the class `ThreadSafeDict` to set a default value if key is missing, atomically. Parameters: key: KT, default: VT Returns: VT
def setdefault(self, key: KT, default: VT) -> VT: """Set a default value if key is missing, atomically.""" with self.lock: return self._dict.setdefault(key, default)
function_simple
1
{"cognitive_complexity": 0, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "setdefault", "class_name": "ThreadSafeDict", "qualname": "ThreadSafeDict.setdefault", "file_path": "common/data_source/google_util/util_threadpool_concurrency.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "file_runnable"}
vllm-project/vllm:tests/v1/worker/test_gpu_profiler.py:test_mixed_delay_and_stop
# Context: class ConcreteWorkerProfiler(WorkerProfiler): ... def default_profiler_config(): ... def test_immediate_start_stop(default_profiler_config): ... def test_delayed_start(default_profiler_config): ... def test_max_iterations(default_profiler_config): ... def test_delayed_start_and_max_iters(default_profiler_config): ... def test_idempotency(default_profiler_config): ... def test_step_inactive(default_profiler_config): ... def test_start_failure(default_profiler_config): ... def test_shutdown(default_profiler_config): ... class TestIsUriPath: ... # Task: Write a Python test function `test_mixed_delay_and_stop` to test manual stop during the delay period. Module under test: vllm.config, vllm.config.profiler, vllm.profiler.wrapper
def test_mixed_delay_and_stop(default_profiler_config): """Test manual stop during the delay period.""" default_profiler_config.delay_iterations = 5 profiler = ConcreteWorkerProfiler(default_profiler_config) profiler.start() profiler.step() profiler.step() # User cancels before delay finishes profiler.stop() assert profiler._active is False # Further steps should not trigger start profiler.step() profiler.step() profiler.step() assert profiler.start_call_count == 0
test
1
{"function_name": "test_mixed_delay_and_stop", "class_name": null, "qualname": "test_mixed_delay_and_stop", "file_path": "tests/v1/worker/test_gpu_profiler.py", "repo_id": "vllm-project/vllm", "loc": 19, "tested_modules": ["vllm.config", "vllm.config.profiler", "vllm.profiler.wrapper"], "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:examples/integrations/gmail_2fa_integration.py:GmailGrantManager.check_credentials_exist
# Context: async def main(): ... class GmailGrantManager: def __init__(self): self.config_dir = CONFIG.BROWSER_USE_CONFIG_DIR self.credentials_file = self.config_dir / 'gmail_credentials.json' self.token_file = self.config_dir / 'gmail_token.json' print(f'GmailGrantManager initialized with config_dir: {self.config_dir}') print(f'GmailGrantManager initialized with credentials_file: {self.credentials_file}') print(f'GmailGrantManager initialized with token_file: {self.token_file}') def check_token_exists(self) -> bool: ... def validate_credentials_format(self) -> tuple[bool, str]: ... async def setup_oauth_credentials(self) -> bool: ... async def test_authentication(self, gmail_service: GmailService) -> tuple[bool, str]: ... async def handle_authentication_failure(self, gmail_service: GmailService, error_msg: str) -> bool: ... # Task: Write a Python method `check_credentials_exist` for the class `GmailGrantManager` to check if OAuth credentials file exists. Returns: bool
def check_credentials_exist(self) -> bool: """Check if OAuth credentials file exists.""" return self.credentials_file.exists()
function_simple
0
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "check_credentials_exist", "class_name": "GmailGrantManager", "qualname": "GmailGrantManager.check_credentials_exist", "file_path": "examples/integrations/gmail_2fa_integration.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
crewAIInc/crewAI:lib/crewai/tests/test_async_human_feedback.py:TestSQLitePendingFeedback.test_load_nonexistent_pending_feedback
# Context: import os import tempfile from crewai.flow.persistence import SQLiteFlowPersistence class TestPendingFeedbackContext: ... class TestHumanFeedbackPending: ... class TestHumanFeedbackProvider: ... class TestConsoleProvider: ... class TestCustomAsyncProvider: ... class TestFlowResumeWithFeedback: ... class TestAsyncHumanFeedbackIntegration: ... class TestAutoPersistence: ... class TestCollapseToOutcomeJsonParsing: ... class TestAsyncHumanFeedbackEdgeCases: ... class TestSQLitePendingFeedback: def test_save_and_load_pending_feedback(self) -> None: ... def test_clear_pending_feedback(self) -> None: ... def test_replace_existing_pending_feedback(self) -> None: ... # Task: Write a Python test method `test_load_nonexistent_pending_feedback` in test class `TestSQLitePendingFeedback` to test loading pending feedback that doesn't exist. Module under test: __future__, datetime, typing
def test_load_nonexistent_pending_feedback(self) -> None: """Test loading pending feedback that doesn't exist.""" with tempfile.TemporaryDirectory() as tmpdir: db_path = os.path.join(tmpdir, "test_flows.db") persistence = SQLiteFlowPersistence(db_path) result = persistence.load_pending_feedback("nonexistent-id") assert result is None
test
0
{"function_name": "test_load_nonexistent_pending_feedback", "class_name": "TestSQLitePendingFeedback", "qualname": "TestSQLitePendingFeedback.test_load_nonexistent_pending_feedback", "file_path": "lib/crewai/tests/test_async_human_feedback.py", "repo_id": "crewAIInc/crewAI", "loc": 8, "tested_modules": ["__future__", "datetime", "typing", "pydantic", "crewai.flow"], "has_docstring": true, "runnable_level": "project_runnable"}
apache/airflow:providers/google/tests/unit/google/cloud/operators/test_gen_ai.py:TestGenAIGeminiCreateEmbeddingsBatchJobOperator.test_execute_exception_error_raises_airflow_exception
# Context: from unittest import mock import pytest from airflow.exceptions import AirflowException from airflow.providers.google.cloud.operators.gen_ai import ( GenAICountTokensOperator, GenAICreateCachedContentOperator, GenAIGeminiCancelBatchJobOperator, GenAIGeminiCreateBatchJobOperator, GenAIGeminiCreateEmbeddingsBatchJobOperator, GenAIGeminiDeleteBatchJobOperator, GenAIGeminiDeleteFileOperator, GenAIGeminiGetBatchJobOperator, GenAIGeminiGetFileOperator, GenAIGeminiListBatchJobsOperator, GenAIGeminiListFilesOperator, GenAIGeminiUploadFileOperator, GenAIGenerateContentOperator, GenAIGenerateEmbeddingsOperator, GenAISupervisedFineTuningTrainOperator, ) def assert_warning(msg: str, warnings): ... class TestGenAIGenerateEmbeddingsOperator: ... class TestGenAIGenerateContentOperator: ... class TestGenAISupervisedFineTuningTrainOperator: ... class TestGenAICountTokensOperator: ... class TestGenAICreateCachedContentOperator: ... class TestGenAIGenerateFromCachedContentOperator: ... class TestGenAIGeminiCreateBatchJobOperator: ... class TestGenAIGeminiGetBatchJobOperator: ... class TestGenAIGeminiListBatchJobsOperator: ... class TestGenAIGeminiDeleteBatchJobOperator: ... class TestGenAIGeminiCancelBatchJobOperator: ... class TestGenAIGeminiGetFileOperator: ... class TestGenAIGeminiUploadFileOperator: ... class TestGenAIGeminiListFilesOperator: ... class TestGenAIGeminiDeleteFileOperator: ... class TestGenAIGeminiCreateEmbeddingsBatchJobOperator: def test_execute(self, mock_hook): ... def test_init_retrieve_result_and_not_wait_until_complete_raises_airflow_exception(self): ... def test_init_input_source_not_string_raises_airflow_exception(self): ... def test_init_results_folder_not_exists_raises_airflow_exception(self): ... def test__wait_until_complete_exception_raises_airflow_exception(self, mock_hook): ... def test_execute_complete_error_status_raises_airflow_exception(self): ... 
# Task: Write a Python test method `test_execute_exception_error_raises_airflow_exception` in test class `TestGenAIGeminiCreateEmbeddingsBatchJobOperator` to verify the behavior of `execute_exception_error_raises_airflow_exception`. Module under test: __future__, google.genai.errors, google.genai.types
def test_execute_exception_error_raises_airflow_exception(self, mock_hook): op = GenAIGeminiCreateEmbeddingsBatchJobOperator( task_id=TASK_ID, project_id=GCP_PROJECT, location=GCP_LOCATION, input_source=TEST_EMBEDDINGS_JOB_INLINED_REQUESTS, model=EMBEDDING_MODEL, gemini_api_key=TEST_GEMINI_API_KEY, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, wait_until_complete=False, ) mock_hook.return_value.create_embeddings.side_effect = Exception() with pytest.raises(AirflowException): op.execute(context={"ti": mock.MagicMock()}) mock_hook.assert_called_once_with( gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, gemini_api_key=TEST_GEMINI_API_KEY, ) mock_hook.return_value.create_embeddings.assert_called_once_with( source=TEST_EMBEDDINGS_JOB_INLINED_REQUESTS, model=EMBEDDING_MODEL, create_embeddings_config=None, )
test
1
{"function_name": "test_execute_exception_error_raises_airflow_exception", "class_name": "TestGenAIGeminiCreateEmbeddingsBatchJobOperator", "qualname": "TestGenAIGeminiCreateEmbeddingsBatchJobOperator.test_execute_exception_error_raises_airflow_exception", "file_path": "providers/google/tests/unit/google/cloud/operators/test_gen_ai.py", "repo_id": "apache/airflow", "loc": 29, "tested_modules": ["__future__", "google.genai.errors", "google.genai.types", "airflow.exceptions", "airflow.providers.google.cloud.operators.gen_ai"], "has_docstring": false, "runnable_level": "project_runnable"}
google/langextract:langextract/annotation.py:_emit_docs_iter
# Context: from collections.abc import Iterable, Iterator from langextract.core import data def _merge_non_overlapping_extractions(all_extractions: list[Iterable[data.Extraction]]) -> list[data.Extraction]: ... def _extractions_overlap(extraction1: data.Extraction, extraction2: data.Extraction) -> bool: ... def _document_chunk_iterator(documents: Iterable[data.Document], max_char_buffer: int, restrict_repeats: bool, tokenizer: tokenizer_lib.Tokenizer | None) -> Iterator[chunking.TextChunk]: ... class Annotator: ... # Task: Write a Python function `_emit_docs_iter` to yields documents that are guaranteed complete. Parameters: keep_last_doc: bool Returns: Iterator[data.AnnotatedDocument]
def _emit_docs_iter( keep_last_doc: bool, ) -> Iterator[data.AnnotatedDocument]: """Yields documents that are guaranteed complete. Args: keep_last_doc: If True, retains the most recently started document for additional extractions. If False, emits all remaining documents. """ nonlocal next_emit_idx limit = max(0, len(doc_order) - 1) if keep_last_doc else len(doc_order) while next_emit_idx < limit: document_id = doc_order[next_emit_idx] yield data.AnnotatedDocument( document_id=document_id, extractions=per_doc.get(document_id, []), text=doc_text_by_id.get(document_id, ""), ) per_doc.pop(document_id, None) doc_text_by_id.pop(document_id, None) next_emit_idx += 1
function_simple
1
{"cognitive_complexity": 2, "loc": 21, "code_loc": 12, "docstring_loc": 6, "function_name": "_emit_docs_iter", "class_name": null, "qualname": "_emit_docs_iter", "file_path": "langextract/annotation.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/llms/test_tool_call_streaming.py:TestAnthropicToolCallStreaming:class_doc
Write a class-level docstring for `TestAnthropicToolCallStreaming` which has methods: `test_anthropic_streaming_emits_tool_call_events`.
Tests for Anthropic provider tool call streaming events.
documentation
0
{"doc_type": "class", "class_name": "TestAnthropicToolCallStreaming", "file_path": "lib/crewai/tests/llms/test_tool_call_streaming.py", "repo_id": "crewAIInc/crewAI", "char_length": 56, "methods": ["test_anthropic_streaming_emits_tool_call_events"]}
ray-project/ray:python/ray/data/tests/datasource/test_databricks_credentials.py:TestCredentialProviderSerialization.test_provider_is_picklable
# Context: import os from unittest import mock import pytest from ray.data._internal.datasource.databricks_credentials import ( DatabricksCredentialProvider, EnvironmentCredentialProvider, StaticCredentialProvider, resolve_credential_provider, ) import pickle class TestDatabricksCredentialProvider: ... class TestStaticCredentialProvider: ... class TestEnvironmentCredentialProvider: ... class TestResolveCredentialProvider: ... class TestCredentialProviderSerialization: # Task: Write a Python test method `test_provider_is_picklable` in test class `TestCredentialProviderSerialization` to verify credential providers can be pickled and unpickled. Module under test: ray.data._internal.datasource.databricks_credentials
def test_provider_is_picklable(self, provider_type, expected_token, expected_host): """Verify credential providers can be pickled and unpickled.""" import pickle with mock.patch.dict( os.environ, {"DATABRICKS_TOKEN": expected_token, "DATABRICKS_HOST": expected_host}, ): if provider_type == "static": provider = StaticCredentialProvider( token=expected_token, host=expected_host ) else: provider = EnvironmentCredentialProvider() pickled = pickle.dumps(provider) unpickled = pickle.loads(pickled) assert unpickled.get_token() == expected_token assert unpickled.get_host() == expected_host
test
0
{"function_name": "test_provider_is_picklable", "class_name": "TestCredentialProviderSerialization", "qualname": "TestCredentialProviderSerialization.test_provider_is_picklable", "file_path": "python/ray/data/tests/datasource/test_databricks_credentials.py", "repo_id": "ray-project/ray", "loc": 19, "tested_modules": ["ray.data._internal.datasource.databricks_credentials"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/integration/test_openai_responses_extended.py:test_openai_responses_concurrent_requests
# Context: import asyncio import pytest from httpx import AsyncClient def load_env_vars(): ... async def create_global_variable(client: AsyncClient, headers, name, value, variable_type): ... async def load_and_prepare_flow(client: AsyncClient, created_api_key): ... async def load_and_prepare_agent_flow(client: AsyncClient, created_api_key): ... async def test_openai_responses_invalid_flow_id(client: AsyncClient, created_api_key): ... async def test_openai_responses_with_tools(client: AsyncClient, created_api_key): ... async def test_openai_responses_empty_input(client: AsyncClient, created_api_key): ... async def test_openai_responses_long_input(client: AsyncClient, created_api_key): ... async def test_openai_responses_streaming_error_handling(client: AsyncClient, created_api_key): ... async def test_openai_responses_unauthorized(client: AsyncClient): ... async def test_openai_responses_invalid_api_key(client: AsyncClient): ... async def test_openai_responses_malformed_request(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_interruption(client: AsyncClient, created_api_key): ... async def test_openai_responses_background_processing(client: AsyncClient, created_api_key): ... async def test_openai_responses_previous_response_id(client: AsyncClient, created_api_key): ... async def test_openai_responses_response_format(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_chunk_format(client: AsyncClient, created_api_key): ... async def test_openai_responses_stream_has_non_empty_content(client: AsyncClient, created_api_key): ... async def test_openai_responses_rate_limiting_simulation(client: AsyncClient, created_api_key): ... # Task: Write a Python test function `test_openai_responses_concurrent_requests` to test handling of concurrent requests to the same flow. Module under test: dotenv, httpx, lfx.log.logger
async def test_openai_responses_concurrent_requests(client: AsyncClient, created_api_key): """Test handling of concurrent requests to the same flow.""" flow, headers = await load_and_prepare_flow(client, created_api_key) # Create multiple concurrent requests payloads = [{"model": flow["id"], "input": f"Request {i}", "stream": False} for i in range(5)] # Send all requests concurrently tasks = [client.post("/api/v1/responses", json=payload, headers=headers) for payload in payloads] responses = await asyncio.gather(*tasks) # All requests should succeed for i, response in enumerate(responses): assert response.status_code == 200 data = response.json() if "error" not in data: assert "id" in data assert "output" in data # Each response should have a unique ID assert all( data["id"] != other.json()["id"] for j, other in enumerate(responses) if i != j and "error" not in other.json() )
test
1
{"function_name": "test_openai_responses_concurrent_requests", "class_name": null, "qualname": "test_openai_responses_concurrent_requests", "file_path": "src/backend/tests/integration/test_openai_responses_extended.py", "repo_id": "langflow-ai/langflow", "loc": 26, "tested_modules": ["dotenv", "httpx", "lfx.log.logger", "tests.api_keys", "tests.api_keys"], "has_docstring": true, "runnable_level": "project_runnable"}
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py:show_streaming_features
Write a Python function `show_streaming_features` to display information about streaming voice features.
def show_streaming_features(): """Display information about streaming voice features.""" print("🌊 Streaming Voice Features:") print("=" * 40) print() print("✨ Real-time Features:") print(" • Continuous audio input processing") print(" • Automatic speech activity detection") print(" • Real-time agent response streaming") print(" • Turn-based conversation management") print() print("🔧 Advanced Capabilities:") print(" • Multi-language support with agent handoffs") print(" • Tool calling during voice conversation") print(" • Streaming callbacks for monitoring") print(" • Interruption handling (via lifecycle events)") print() print("🎯 Try These Commands:") print(" • 'What's the weather in Paris?'") print(" • 'What time is it?'") print(" • 'Set a reminder to call mom in 10 minutes'") print(" • 'Give me a news summary'") print(" • 'Hola, ¿cómo estás?' (Spanish)") print(" • 'Bonjour, comment ça va?' (French)") print()
function_simple
0
{"cognitive_complexity": 0, "loc": 25, "code_loc": 23, "docstring_loc": 1, "function_name": "show_streaming_features", "class_name": null, "qualname": "show_streaming_features", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/streamed/agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "self_contained"}
ray-project/ray:ci/raydepsets/tests/test_cli.py:TestCli.test_execute_single_depset
# Context: import tempfile from ci.raydepsets.tests.utils import ( append_to_file, copy_data_to_tmpdir, replace_in_file, save_file_as, save_packages_to_file, write_to_config_file, ) def _create_test_manager(tmpdir: str, config_path: Optional[str], check: bool, build_all_configs: Optional[bool]) -> DependencySetManager: ... def _invoke_build(tmpdir: str, config_path: str, name: Optional[str]): ... class TestCli(unittest.TestCase): def test_cli_load_fail_no_config(self): ... def test_dependency_set_manager_init(self): ... def test_uv_binary_exists(self): ... def test_uv_version(self): ... def test_compile(self): ... def test_compile_update_package(self): ... def test_compile_with_append_and_override_flags(self, mock_stdout): ... def test_compile_by_depset_name(self): ... def test_subset(self): ... def test_subset_does_not_exist(self): ... def test_subset_with_expanded_depsettest_subset_with_expanded_depset(self): ... def test_check_if_subset_exists(self): ... def test_compile_bad_requirements(self): ... def test_get_path(self): ... def test_append_uv_flags_exist_in_output(self, mock_stdout): ... def test_append_uv_flags_with_space_in_flag(self, mock_stdout): ... def test_include_setuptools(self): ... def test_ignore_setuptools(self, mock_stdout): ... def test_override_uv_flag_single_flag(self): ... def test_override_uv_flag_multiple_flags(self): ... def test_flatten_flags(self): ... def test_build_graph(self): ... def test_build_graph_predecessors(self): ... def test_build_graph_bad_operation(self): ... def test_execute(self): ... def test_execute_single_depset_that_does_not_exist(self): ... def test_expand(self): ... def test_expand_with_requirements(self): ... def test_get_depset_with_build_arg_set(self): ... def test_get_depset_without_build_arg_set(self): ... def test_execute_single_pre_hook(self): ... def test_execute_single_invalid_pre_hook(self): ... def test_copy_lock_files_to_temp_dir(self): ... def test_diff_lock_files_out_of_date(self): ... 
def test_diff_lock_files_up_to_date(self): ... def test_compile_with_packages(self): ... def test_compile_with_packages_and_requirements(self): ... def test_requirements_ordering(self, mock_stdout): ... def test_constraints_ordering(self, mock_stdout): ... def test_execute_pre_hook(self, mock_stdout): ... def test_get_expanded_depset_requirements(self): ... def test_build_all_configs(self): ... def test_parse_lock_file(self): ... def test_parse_lock_file_with_index_url(self): ... def test_parse_lock_file_empty(self): ... def test_parse_lock_file_comments_only(self): ... def test_write_lock_file(self): ... def test_write_lock_file_empty(self): ... def test_roundtrip_preserves_packages(self): ... def test_parse_large_lock_file(self): ... def test_relax(self): ... def test_relax_multiple_packages(self): ... def test_relax_package_not_found(self): ... def test_relax_preserves_options(self): ... def test_relax_large_lock_file(self): ... # Task: Write a Python test method `test_execute_single_depset` in test class `TestCli` to verify the behavior of `execute_single_depset`. Module under test: pathlib, typing, click.testing
def test_execute_single_depset(self): with tempfile.TemporaryDirectory() as tmpdir: copy_data_to_tmpdir(tmpdir) manager = _create_test_manager(tmpdir) manager.execute(single_depset_name="general_depset__py311_cpu") assert ( manager.build_graph.nodes["general_depset__py311_cpu"]["operation"] == "compile" ) assert len(manager.build_graph.nodes()) == 1
test
0
{"function_name": "test_execute_single_depset", "class_name": "TestCli", "qualname": "TestCli.test_execute_single_depset", "file_path": "ci/raydepsets/tests/test_cli.py", "repo_id": "ray-project/ray", "loc": 10, "tested_modules": ["pathlib", "typing", "click.testing", "networkx", "ci.raydepsets.cli"], "has_docstring": false, "runnable_level": "project_runnable"}
zhayujie/chatgpt-on-wechat:common/cloud_client.py:CloudClient.on_skill
# Context: from common.log import logger def start(channel, channel_mgr): ... def _build_config(): ... class CloudClient(LinkAIClient): def __init__(self, api_key: str, channel, host: str = ""): super().__init__(api_key, host) self.channel = channel self.client_type = channel.channel_type self.channel_mgr = None self._skill_service = None self._memory_service = None self._chat_service = None def skill_service(self): ... def memory_service(self): ... def chat_service(self): ... def on_message(self, push_msg: PushMsg): ... def on_config(self, config: dict): ... def on_memory(self, data: dict) -> dict: ... def on_chat(self, data: dict, send_chunk_fn): ... def _restart_channel(self, new_channel_type: str): ... def _do_restart_channel(self, mgr, new_channel_type: str): ... def _save_config_to_file(self, local_config: dict): ... # Task: Write a Python method `on_skill` for the class `CloudClient` to handle SKILL messages from the cloud console. Parameters: data: dict Returns: dict
def on_skill(self, data: dict) -> dict: """ Handle SKILL messages from the cloud console. Delegates to SkillService.dispatch for the actual operations. :param data: message data with 'action', 'clientId', 'payload' :return: response dict """ action = data.get("action", "") payload = data.get("payload") logger.info(f"[CloudClient] on_skill: action={action}") svc = self.skill_service if svc is None: return {"action": action, "code": 500, "message": "SkillService not available", "payload": None} return svc.dispatch(action, payload)
function_simple
1
{"cognitive_complexity": 1, "loc": 17, "code_loc": 7, "docstring_loc": 7, "function_name": "on_skill", "class_name": "CloudClient", "qualname": "CloudClient.on_skill", "file_path": "common/cloud_client.py", "repo_id": "zhayujie/chatgpt-on-wechat", "has_docstring": true, "runnable_level": "project_runnable"}
huggingface/transformers:tests/models/flex_olmo/test_modeling_flex_olmo.py:FlexOlmoIntegrationTest.test_model_7b_greedy_generation
# Context: from transformers.models.auto.tokenization_auto import AutoTokenizer from transformers import ( FlexOlmoForCausalLM, FlexOlmoModel, ) class FlexOlmoModelTester(CausalLMModelTester): ... class FlexOlmoModelTest(CausalLMModelTest, unittest.TestCase): ... class FlexOlmoIntegrationTest(unittest.TestCase): def setUp(self): ... def tearDown(self): ... def test_model_7b_logits(self): ... # Task: Write a Python test method `test_model_7b_greedy_generation` in test class `FlexOlmoIntegrationTest` to verify the behavior of `model_7b_greedy_generation`. Module under test: transformers, transformers.models.auto.tokenization_auto, transformers.testing_utils
def test_model_7b_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same in all inertial frames of reference, and 2) the speed of light is constant in all inertial frames of reference. The first statement is called the principle of relativity, and the second is called the constancy of the speed of light. The first statement is""" prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("allenai/dolma2-tokenizer", device_map="auto") model = FlexOlmoForCausalLM.from_pretrained("shanearora/Flex-reddit-2x7B-1T", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
test
0
{"function_name": "test_model_7b_greedy_generation", "class_name": "FlexOlmoIntegrationTest", "qualname": "FlexOlmoIntegrationTest.test_model_7b_greedy_generation", "file_path": "tests/models/flex_olmo/test_modeling_flex_olmo.py", "repo_id": "huggingface/transformers", "loc": 11, "tested_modules": ["transformers", "transformers.models.auto.tokenization_auto", "transformers.testing_utils", "causal_lm_tester", "transformers"], "has_docstring": false, "runnable_level": "class_runnable"}
huggingface/pytorch-image-models:timm/optim/muon.py:_single_tensor_muon
# Context: from typing import List, Mapping, Optional, Sequence, Tuple, Union import torch def scale_eps_for_ns(eps: float, shape: Tuple[int, ...]) -> float: ... def zeropower_via_newtonschulz(G: torch.Tensor, steps: int, coefficients: List[Tuple[float, float, float]], eps: float, safety_factor: float, dtype: torch.dtype, scale_eps: bool) -> torch.Tensor: ... def get_lr_scale(param_shape: torch.Size, adjust_lr_fn: str) -> float: ... def get_adamuon_lr_scale(param_shape: torch.Size, adjust_lr_fn: str) -> Tuple[float, bool]: ... def _is_suitable_for_muon(param: torch.Tensor, min_dim_size: int, max_aspect_ratio: float, return_reason: bool) -> Union[bool, Tuple[bool, str]]: ... def reshape_for_muon(tensor: torch.Tensor, mode: str) -> Tuple[torch.Tensor, torch.Size]: ... def muon(params: List[torch.Tensor], grads: List[torch.Tensor], momentum_bufs: List[torch.Tensor], lr: float, weight_decay: float, momentum: float, nesterov: bool, ns_steps: int, ns_coefficients: NSCoeff, eps: float, safety_factor: float, adjust_lr_fn: Optional[str], conv_mode: str, normalize_spatial: bool, scale_eps: bool) -> None: ... def adamuon(params: List[torch.Tensor], grads: List[torch.Tensor], momentum_bufs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], state_steps: List[torch.Tensor], lr: float, weight_decay: float, momentum: float, nesterov: bool, beta2: float, ns_steps: int, ns_coefficients: NSCoeff, eps: float, safety_factor: float, adjust_lr_fn: Optional[str], conv_mode: str, normalize_spatial: bool, scale_eps: bool) -> None: ... def _single_tensor_adamuon(params: List[torch.Tensor], grads: List[torch.Tensor], momentum_bufs: List[torch.Tensor], exp_avg_sqs: List[torch.Tensor], state_steps: List[torch.Tensor], lr: float, weight_decay: float, momentum: float, nesterov: bool, beta2: float, ns_steps: int, ns_coefficients: NSCoeff, eps: float, safety_factor: float, adjust_lr_fn: Optional[str], conv_mode: str, normalize_spatial: bool, scale_eps: bool) -> None: ... 
class Muon(torch.optim.Optimizer): ... def resolve_ns_coefficients(value: Union[str, Sequence[float], Sequence[Sequence[float]]], presets: Mapping[str, Sequence[Sequence[float]]]) -> List[Tuple[float, float, float]]: ... # Task: Write a Python function `_single_tensor_muon` to single tensor Muon update. Parameters: params: List[torch.Tensor], grads: List[torch.Tensor], momentum_bufs: List[torch.Tensor] Returns: None
def _single_tensor_muon( params: List[torch.Tensor], grads: List[torch.Tensor], momentum_bufs: List[torch.Tensor], *, lr: float, weight_decay: float, momentum: float, nesterov: bool, ns_steps: int, ns_coefficients: NSCoeff, eps: float, safety_factor: float, adjust_lr_fn: Optional[str], conv_mode: str, normalize_spatial: bool, scale_eps: bool, ) -> None: """Single tensor Muon update.""" ns_coefficients = resolve_ns_coefficients(ns_coefficients, _COEFFICIENTS) for i, param in enumerate(params): grad = grads[i] momentum_buf = momentum_bufs[i] # Apply weight decay param.mul_(1 - lr * weight_decay) # Update momentum buffer momentum_buf.lerp_(grad, 1. - momentum) update = grad.lerp_(momentum_buf, momentum) if nesterov else momentum_buf.clone() # Reshape for processing (handle 3D+ tensors like conv weights) if update.ndim >= 3: update_reshaped, original_shape = reshape_for_muon(update, mode=conv_mode) else: update_reshaped = update original_shape = update.shape # Apply Newton-Schulz orthogonalization update_ortho = zeropower_via_newtonschulz( update_reshaped, ns_steps, ns_coefficients, eps=eps, safety_factor=safety_factor, scale_eps=scale_eps, ) # Adjust learning rate based on parameter shape if adjust_lr_fn: scale = get_lr_scale(update_ortho.shape, adjust_lr_fn) else: scale = 1.0 # Apply spatial normalization and permute back if in batched mode if conv_mode == "batched" and update_ortho.ndim >= 3: if normalize_spatial: scale *= update_ortho.shape[0] ** -0.5 # Permute back: (spatial_prod, out, in) -> (out, in, spatial_prod) update_ortho = update_ortho.permute(1, 2, 0) # Reshape back to original shape update_ortho = update_ortho.reshape(original_shape) # Apply update param.add_(update_ortho, alpha=-lr * scale)
function_complex
1
{"cognitive_complexity": 15, "loc": 67, "code_loc": 30, "docstring_loc": 1, "function_name": "_single_tensor_muon", "class_name": null, "qualname": "_single_tensor_muon", "file_path": "timm/optim/muon.py", "repo_id": "huggingface/pytorch-image-models", "has_docstring": true, "runnable_level": "file_runnable"}
fastapi/fastapi:tests/test_tutorial/test_stream_data/test_tutorial002.py:test_stream_image
# Context: import pytest from fastapi.testclient import TestClient def get_mod(request: pytest.FixtureRequest): ... def get_client(mod): ... def test_openapi_schema(client: TestClient): ... # Task: Write a Python test function `test_stream_image` to verify the behavior of `stream_image`. Module under test: fastapi.testclient, inline_snapshot
def test_stream_image(mod, client: TestClient, path: str): response = client.get(path) assert response.status_code == 200 assert response.headers["content-type"] == "image/png" assert response.content == mod.binary_image
test
1
{"function_name": "test_stream_image", "class_name": null, "qualname": "test_stream_image", "file_path": "tests/test_tutorial/test_stream_data/test_tutorial002.py", "repo_id": "fastapi/fastapi", "loc": 5, "tested_modules": ["fastapi.testclient", "inline_snapshot"], "has_docstring": false, "runnable_level": "project_runnable"}
apache/airflow:providers/informatica/tests/unit/informatica/hooks/test_edc.py:test_config_property_and_build_connection_config
# Context: from unittest.mock import MagicMock, patch def hook(): ... def test_get_conn_headers_and_verify(mock_get_conn, mock_get_connection, hook): ... def test_build_url(hook): ... def test_request_success_and_error(mock_get_conn, hook): ... def test_encode_id(hook): ... def test_get_object(mock_request, hook): ... def test_create_lineage_link(mock_request, hook): ... def test_close_session(hook): ... # Task: Write a Python test function `test_config_property_and_build_connection_config` to test config property and _build_connection_config method. Module under test: __future__, airflow.providers.informatica.hooks.edc
def test_config_property_and_build_connection_config(mock_get_connection, hook): """Test config property and _build_connection_config method.""" mock_conn = MagicMock() mock_conn.host = "testhost" mock_conn.schema = "https" mock_conn.port = 443 mock_conn.login = "user" mock_conn.password = "pass" mock_conn.extra_dejson = { "verify_ssl": True, "provider_id": "test_provider", "modified_by": "tester", "security_domain": "domain", } mock_get_connection.return_value = mock_conn config = hook.config assert config.base_url == "https://testhost:443" assert config.username == "user" assert config.password == "pass" assert config.security_domain == "domain" assert config.provider_id == "test_provider" assert config.modified_by == "tester" assert config.verify_ssl is True assert isinstance(config.request_timeout, int) assert config.auth_header.startswith("Basic ")
test
1
{"function_name": "test_config_property_and_build_connection_config", "class_name": null, "qualname": "test_config_property_and_build_connection_config", "file_path": "providers/informatica/tests/unit/informatica/hooks/test_edc.py", "repo_id": "apache/airflow", "loc": 25, "tested_modules": ["__future__", "airflow.providers.informatica.hooks.edc"], "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/mcp/server.py:BrowserUseServer._close_session
# Context: def _configure_mcp_server_logging(): ... def _ensure_all_loggers_use_stderr(): ... def get_parent_process_cmdline() -> str | None: ... async def main(session_timeout_minutes: int): ... class BrowserUseServer: def __init__(self, session_timeout_minutes: int = 10): # Ensure all logging goes to stderr (in case new loggers were created) _ensure_all_loggers_use_stderr() self.server = Server('browser-use') self.config = load_browser_use_config() self.agent: Agent | None = None self.browser_session: BrowserSession | None = None self.tools: Tools | None = None self.llm: ChatOpenAI | None = None self.file_system: FileSystem | None = None self._telemetry = ProductTelemetry() self._start_time = time.time() # Session management self.active_sessions: dict[str, dict[str, Any]] = {} # session_id -> session info self.session_timeout_minutes = session_timeout_minutes self._cleanup_task: Any = None # Setup handlers self._setup_handlers() def _setup_handlers(self): ... async def _execute_tool(self, tool_name: str, arguments: dict[str, Any]) -> str | list[types.TextContent | types.ImageContent]: ... async def _init_browser_session(self, allowed_domains: list[str] | None, **kwargs): ... async def _retry_with_browser_use_agent(self, task: str, max_steps: int, model: str | None, allowed_domains: list[str] | None, use_vision: bool) -> str: ... async def _navigate(self, url: str, new_tab: bool) -> str: ... async def _click(self, index: int | None, coordinate_x: int | None, coordinate_y: int | None, new_tab: bool) -> str: ... async def _type_text(self, index: int, text: str) -> str: ... async def _get_browser_state(self, include_screenshot: bool) -> tuple[str, str | None]: ... async def _get_html(self, selector: str | None) -> str: ... async def _screenshot(self, full_page: bool) -> tuple[str, str | None]: ... async def _extract_content(self, query: str, extract_links: bool) -> str: ... async def _scroll(self, direction: str) -> str: ... async def _go_back(self) -> str: ... 
async def _close_browser(self) -> str: ... async def _list_tabs(self) -> str: ... async def _switch_tab(self, tab_id: str) -> str: ... async def _close_tab(self, tab_id: str) -> str: ... def _track_session(self, session: BrowserSession) -> None: ... def _update_session_activity(self, session_id: str) -> None: ... async def _list_sessions(self) -> str: ... async def _close_all_sessions(self) -> str: ... async def _cleanup_expired_sessions(self) -> None: ... async def _start_cleanup_task(self) -> None: ... async def run(self): ... # Task: Write a Python async method `_close_session` for the class `BrowserUseServer` to close a specific browser session. Parameters: session_id: str Returns: str
async def _close_session(self, session_id: str) -> str: """Close a specific browser session.""" if session_id not in self.active_sessions: return f'Session {session_id} not found' session_data = self.active_sessions[session_id] session = session_data['session'] try: # Close the session if hasattr(session, 'kill'): await session.kill() elif hasattr(session, 'close'): await session.close() # Remove from tracking del self.active_sessions[session_id] # If this was the current session, clear it if self.browser_session and self.browser_session.id == session_id: self.browser_session = None self.tools = None return f'Successfully closed session {session_id}' except Exception as e: return f'Error closing session {session_id}: {str(e)}'
function_complex
0
{"cognitive_complexity": 8, "loc": 26, "code_loc": 16, "docstring_loc": 1, "function_name": "_close_session", "class_name": "BrowserUseServer", "qualname": "BrowserUseServer._close_session", "file_path": "browser_use/mcp/server.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
huggingface/transformers:src/transformers/models/idefics3/image_processing_idefics3_fast.py:_resize_output_size_rescale_to_max_len
Write a Python function `_resize_output_size_rescale_to_max_len` to get the output size of the image after resizing given a dictionary specifying the max and min sizes. Parameters: height: int, width: int, min_len: int | None, max_len: int | None Returns: tuple[int, int]
def _resize_output_size_rescale_to_max_len( height: int, width: int, min_len: int | None = 1, max_len: int | None = None ) -> tuple[int, int]: """ Get the output size of the image after resizing given a dictionary specifying the max and min sizes. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. min_len (`int`, *optional*, defaults to 1): Minimum size of the output image. max_len (`int`, *optional*, defaults to the maximum size of the image): Maximum size of the output image. Returns: The output size of the image after resizing. """ max_len = max(height, width) if max_len is None else max_len aspect_ratio = width / height if width >= height: width = max_len height = int(width / aspect_ratio) if height % 2 != 0: height += 1 elif height > width: height = max_len width = int(height * aspect_ratio) if width % 2 != 0: width += 1 # Avoid resizing to a size smaller than min_len height = max(height, min_len) width = max(width, min_len) return height, width
function_complex
0
{"cognitive_complexity": 7, "loc": 35, "code_loc": 15, "docstring_loc": 14, "function_name": "_resize_output_size_rescale_to_max_len", "class_name": null, "qualname": "_resize_output_size_rescale_to_max_len", "file_path": "src/transformers/models/idefics3/image_processing_idefics3_fast.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "self_contained"}
huggingface/pytorch-image-models:timm/models/csatv2.py:LearnableDct2d.init_non_persistent_buffers
# Context: def _zigzag_permutation(rows: int, cols: int) -> List[int]: ... def _dct_kernel_type_2(kernel_size: int, orthonormal: bool, device, dtype) -> torch.Tensor: ... def _dct_kernel_type_3(kernel_size: int, orthonormal: bool, device, dtype) -> torch.Tensor: ... class Dct1d(nn.Module): ... class Dct2d(nn.Module): ... def _split_out_chs(out_chs: int, ratio): ... class Dct2dStats(nn.Module): ... class Block(nn.Module): ... class SpatialTransformerBlock(nn.Module): ... class SpatialAttention(nn.Module): ... class TransformerBlock(nn.Module): ... class PosConv(nn.Module): ... class CSATv2(nn.Module): ... def _cfg(url, **kwargs): ... def checkpoint_filter_fn(state_dict: dict, model: nn.Module) -> dict: ... def _create_csatv2(variant: str, pretrained: bool, **kwargs) -> CSATv2: ... def csatv2(pretrained: bool, **kwargs) -> CSATv2: ... def csatv2_21m(pretrained: bool, **kwargs) -> CSATv2: ... class LearnableDct2d(nn.Module): def __init__( self, kernel_size: int, kernel_type: int = 2, orthonormal: bool = True, out_chs: int = 32, device=None, dtype=None, ) -> None: dd = dict(device=device, dtype=dtype) super().__init__() self.k = kernel_size self.transform = Dct2d(kernel_size, kernel_type, orthonormal, **dd) self.permutation = _zigzag_permutation(kernel_size, kernel_size) y_ch, cb_ch, cr_ch = _split_out_chs(out_chs, ratio=(24, 4, 4)) self.conv_y = nn.Conv2d(kernel_size ** 2, y_ch, kernel_size=1, padding=0, **dd) self.conv_cb = nn.Conv2d(kernel_size ** 2, cb_ch, kernel_size=1, padding=0, **dd) self.conv_cr = nn.Conv2d(kernel_size ** 2, cr_ch, kernel_size=1, padding=0, **dd) # Register empty buffers for DCT normalization statistics self.register_buffer('mean', torch.empty(3, 64, device=device, dtype=dtype), persistent=False) self.register_buffer('var', torch.empty(3, 64, device=device, dtype=dtype), persistent=False) # Shape (3, 1, 1) for BCHW broadcasting self.register_buffer('imagenet_mean', torch.empty(3, 1, 1, device=device, dtype=dtype), persistent=False) 
self.register_buffer('imagenet_std', torch.empty(3, 1, 1, device=device, dtype=dtype), persistent=False) # TODO: skip init when on meta device when safe to do so self.reset_parameters() def reset_parameters(self) -> None: ... def _init_buffers(self) -> None: ... def _denormalize(self, x: torch.Tensor) -> torch.Tensor: ... def _rgb_to_ycbcr(self, x: torch.Tensor) -> torch.Tensor: ... def _frequency_normalize(self, x: torch.Tensor) -> torch.Tensor: ... def forward(self, x: torch.Tensor) -> torch.Tensor: ... # Task: Write a Python method `init_non_persistent_buffers` for the class `LearnableDct2d` to initialize non-persistent buffers. Returns: None
def init_non_persistent_buffers(self) -> None: """Initialize non-persistent buffers.""" self._init_buffers()
function_simple
1
{"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "init_non_persistent_buffers", "class_name": "LearnableDct2d", "qualname": "LearnableDct2d.init_non_persistent_buffers", "file_path": "timm/models/csatv2.py", "repo_id": "huggingface/pytorch-image-models", "has_docstring": true, "runnable_level": "class_runnable"}
infiniflow/ragflow:test/testcases/test_web_api/test_search_app/test_search_crud.py:TestSearchCrud.test_create_and_rm
# Context: import pytest from common import search_create, search_detail, search_list, search_rm, search_update def _search_name(prefix): ... def _find_tenant_id(WebApiAuth, search_id): ... def search_app(WebApiAuth): ... class TestAuthorization: ... class TestSearchCrud: def test_list(self, WebApiAuth, search_app): ... def test_detail(self, WebApiAuth, search_app): ... def test_update(self, WebApiAuth, search_app): ... def test_create_invalid_name(self, WebApiAuth): ... def test_update_invalid_search_id(self, WebApiAuth): ... # Task: Write a Python test method `test_create_and_rm` in test class `TestSearchCrud` to verify the behavior of `create_and_rm`. Module under test: common, configs, libs.auth
def test_create_and_rm(self, WebApiAuth): name = _search_name("create") create_res = search_create(WebApiAuth, {"name": name, "description": "test search"}) assert create_res["code"] == 0, create_res search_id = create_res["data"]["search_id"] rm_res = search_rm(WebApiAuth, {"search_id": search_id}) assert rm_res["code"] == 0, rm_res assert rm_res["data"] is True, rm_res
test
1
{"function_name": "test_create_and_rm", "class_name": "TestSearchCrud", "qualname": "TestSearchCrud.test_create_and_rm", "file_path": "test/testcases/test_web_api/test_search_app/test_search_crud.py", "repo_id": "infiniflow/ragflow", "loc": 9, "tested_modules": ["common", "configs", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"}
unclecode/crawl4ai:crawl4ai/async_crawler_strategy.back.py:AsyncPlaywrightCrawlerStrategy.export_storage_state
# Context: class AsyncCrawlerStrategy(ABC): ... class HTTPCrawlerError(Exception): ... class ConnectionTimeoutError(HTTPCrawlerError): ... class HTTPStatusError(HTTPCrawlerError): ... class AsyncHTTPCrawlerStrategy(AsyncCrawlerStrategy): ... class AsyncPlaywrightCrawlerStrategy(AsyncCrawlerStrategy): def __init__( self, browser_config: BrowserConfig = None, logger: AsyncLogger = None, **kwargs ): """ Initialize the AsyncPlaywrightCrawlerStrategy with a browser configuration. Args: browser_config (BrowserConfig): Configuration object containing browser settings. If None, will be created from kwargs for backwards compatibility. logger: Logger instance for recording events and errors. **kwargs: Additional arguments for backwards compatibility and extending functionality. """ # Initialize browser config, either from provided object or kwargs self.browser_config = browser_config or BrowserConfig.from_kwargs(kwargs) self.logger = logger # Initialize session management self._downloaded_files = [] # Initialize hooks system self.hooks = { "on_browser_created": None, "on_page_context_created": None, "on_user_agent_updated": None, "on_execution_started": None, "on_execution_ended": None, "before_goto": None, "after_goto": None, "before_return_html": None, "before_retrieve_html": None, } # Initialize browser manager with config self.browser_manager = BrowserManager( browser_config=self.browser_config, logger=self.logger ) async def __aenter__(self): ... async def __aexit__(self, exc_type, exc_val, exc_tb): ... async def start(self): ... async def close(self): ... async def kill_session(self, session_id: str): ... def set_hook(self, hook_type: str, hook: Callable): ... async def execute_hook(self, hook_type: str, *args, **kwargs): ... def update_user_agent(self, user_agent: str): ... def set_custom_headers(self, headers: Dict[str, str]): ... async def smart_wait(self, page: Page, wait_for: str, timeout: float): ... 
async def csp_compliant_wait(self, page: Page, user_wait_function: str, timeout: float): ... async def process_iframes(self, page): ... async def create_session(self, **kwargs) -> str: ... async def crawl(self, url: str, config: CrawlerRunConfig, **kwargs) -> AsyncCrawlResponse: ... async def _crawl_web(self, url: str, config: CrawlerRunConfig) -> AsyncCrawlResponse: ... async def _handle_full_page_scan(self, page: Page, scroll_delay: float, max_scroll_steps: Optional[int]): ... async def _handle_virtual_scroll(self, page: Page, config: 'VirtualScrollConfig'): ... async def _handle_download(self, download): ... async def remove_overlay_elements(self, page: Page) -> None: ... async def export_pdf(self, page: Page) -> bytes: ... async def capture_mhtml(self, page: Page) -> Optional[str]: ... async def _capture_console_messages(self, page: Page, file_path: str) -> List[Dict[str, Union[str, float]]]: ... async def take_screenshot(self, page, **kwargs) -> str: ... async def take_screenshot_from_pdf(self, pdf_data: bytes) -> str: ... async def take_screenshot_scroller(self, page: Page, **kwargs) -> str: ... async def take_screenshot_naive(self, page: Page) -> str: ... async def robust_execute_user_script(self, page: Page, js_code: Union[str, List[str]]) -> Dict[str, Any]: ... async def execute_user_script(self, page: Page, js_code: Union[str, List[str]]) -> Dict[str, Any]: ... async def check_visibility(self, page): ... async def safe_scroll(self, page: Page, x: int, y: int, delay: float): ... async def csp_scroll_to(self, page: Page, x: int, y: int) -> Dict[str, Any]: ... async def get_page_dimensions(self, page: Page): ... async def page_need_scroll(self, page: Page) -> bool: ... # Task: Write a Python async method `export_storage_state` for the class `AsyncPlaywrightCrawlerStrategy` to export the current storage state (cookies, localStorage, sessionStorage). Parameters: path: str Returns: dict
async def export_storage_state(self, path: str = None) -> dict: """ Exports the current storage state (cookies, localStorage, sessionStorage) to a JSON file at the specified path. Args: path (str): The path to save the storage state JSON file Returns: dict: The exported storage state """ if self.default_context: state = await self.default_context.storage_state(path=path) self.logger.info( message="Exported storage state to {path}", tag="INFO", params={"path": path}, ) return state else: self.logger.warning( message="No default_context available to export storage state.", tag="WARNING", )
function_simple
1
{"cognitive_complexity": 2, "loc": 24, "code_loc": 13, "docstring_loc": 10, "function_name": "export_storage_state", "class_name": "AsyncPlaywrightCrawlerStrategy", "qualname": "AsyncPlaywrightCrawlerStrategy.export_storage_state", "file_path": "crawl4ai/async_crawler_strategy.back.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "class_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/components/processing/text_operations.py:TextOperations._text_to_dataframe
# Context: import pandas as pd from lfx.schema.dataframe import DataFrame class TextOperations(Component): display_name = "Text Operations" description = "Perform various text processing operations including text-to-DataFrame conversion." icon = "type" name = "TextOperations" inputs = [ outputs = [] def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None) -> dict: ... def update_outputs(self, frontend_node: dict, field_name: str, field_value: Any) -> dict: ... def _extract_operation_name(self, field_value: Any) -> str: ... def get_operation_name(self) -> str: ... def process_text(self) -> Any: ... def _parse_table_rows(self, lines: list[str], separator: str) -> list[list[str]]: ... def _create_dataframe(self, rows: list[list[str]], has_header: bool) -> pd.DataFrame: ... def _convert_numeric_columns(self, df: pd.DataFrame) -> None: ... def _word_count(self, text: str) -> dict[str, Any]: ... def _case_conversion(self, text: str) -> str: ... def _text_replace(self, text: str) -> str: ... def _text_extract(self, text: str) -> list[str]: ... def _text_head(self, text: str) -> str: ... def _text_tail(self, text: str) -> str: ... def _text_strip(self, text: str) -> str: ... def _text_join(self, text: str) -> str: ... def _text_clean(self, text: str) -> str: ... def _format_result_as_text(self, result: Any) -> str: ... def get_dataframe(self) -> DataFrame: ... def get_text(self) -> Message: ... def get_data(self) -> Data: ... def get_message(self) -> Message: ... # Task: Write a Python method `_text_to_dataframe` for the class `TextOperations` to convert markdown-style table text to DataFrame. Parameters: text: str Returns: DataFrame
def _text_to_dataframe(self, text: str) -> DataFrame: """Convert markdown-style table text to DataFrame.""" lines = [line.strip() for line in text.strip().split("\n") if line.strip()] if not lines: return DataFrame(pd.DataFrame()) separator = getattr(self, "table_separator", "|") has_header = getattr(self, "has_header", True) rows = self._parse_table_rows(lines, separator) if not rows: return DataFrame(pd.DataFrame()) df = self._create_dataframe(rows, has_header=has_header) self._convert_numeric_columns(df) self.log(f"Converted text to DataFrame: {len(df)} rows, {len(df.columns)} columns") return DataFrame(df)
function_simple
1
{"cognitive_complexity": 2, "loc": 18, "code_loc": 12, "docstring_loc": 1, "function_name": "_text_to_dataframe", "class_name": "TextOperations", "qualname": "TextOperations._text_to_dataframe", "file_path": "src/lfx/src/lfx/components/processing/text_operations.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"}
google/langextract:langextract/prompting.py:read_prompt_template_structured_from_file
# Context: import json import pathlib import pydantic import yaml from langextract.core import data class PromptBuilderError(exceptions.LangExtractError): ... class ParseError(PromptBuilderError): ... class PromptTemplateStructured: ... class QAPromptGenerator: ... class PromptBuilder: ... class ContextAwarePromptBuilder(PromptBuilder): ... # Task: Write a Python function `read_prompt_template_structured_from_file` to read a structured prompt template from a file. Parameters: prompt_path: str, format_type: data.FormatType Returns: PromptTemplateStructured
def read_prompt_template_structured_from_file( prompt_path: str, format_type: data.FormatType = data.FormatType.YAML, ) -> PromptTemplateStructured: """Reads a structured prompt template from a file. Args: prompt_path: Path to a file containing PromptTemplateStructured data. format_type: The format of the file; YAML or JSON. Returns: A PromptTemplateStructured object loaded from the file. Raises: ParseError: If the file cannot be parsed successfully. """ adapter = pydantic.TypeAdapter(PromptTemplateStructured) try: with pathlib.Path(prompt_path).open("rt") as f: data_dict = {} prompt_content = f.read() if format_type == data.FormatType.YAML: data_dict = yaml.safe_load(prompt_content) elif format_type == data.FormatType.JSON: data_dict = json.loads(prompt_content) return adapter.validate_python(data_dict) except Exception as e: raise ParseError( f"Failed to parse prompt template from file: {prompt_path}" ) from e
function_simple
1
{"cognitive_complexity": 5, "loc": 30, "code_loc": 14, "docstring_loc": 12, "function_name": "read_prompt_template_structured_from_file", "class_name": null, "qualname": "read_prompt_template_structured_from_file", "file_path": "langextract/prompting.py", "repo_id": "google/langextract", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/utilities/test_pydantic_schema_utils.py:TestBuildRichFieldDescription.test_combined_constraints
# Context: from crewai.utilities.pydantic_schema_utils import ( build_rich_field_description, convert_oneof_to_anyof, create_model_from_schema, ensure_all_properties_required, ensure_type_in_schemas, force_additional_properties_false, resolve_refs, strip_null_from_types, strip_unsupported_formats, ) class TestSimpleTypes: ... class TestRequiredOptional: ... class TestEnumLiteral: ... class TestFormatMapping: ... class TestNestedObjects: ... class TestTypedArrays: ... class TestUnionTypes: ... class TestAllOfMerging: ... class TestRefResolution: ... class TestModelName: ... class TestEnrichDescriptions: ... class TestEdgeCases: ... class TestResolveRefs: ... class TestForceAdditionalPropertiesFalse: ... class TestStripUnsupportedFormats: ... class TestEnsureTypeInSchemas: ... class TestConvertOneofToAnyof: ... class TestEnsureAllPropertiesRequired: ... class TestStripNullFromTypes: ... class TestEndToEndMCPSchema: ... class TestBuildRichFieldDescription: def test_description_only(self) -> None: ... def test_empty_schema(self) -> None: ... def test_format(self) -> None: ... def test_enum(self) -> None: ... def test_pattern(self) -> None: ... def test_min_max(self) -> None: ... def test_min_max_length(self) -> None: ... def test_examples(self) -> None: ... # Task: Write a Python test method `test_combined_constraints` in test class `TestBuildRichFieldDescription` to verify the behavior of `combined_constraints`. Module under test: __future__, copy, typing
def test_combined_constraints(self) -> None: desc = build_rich_field_description({ "description": "A score", "minimum": 0, "maximum": 10, "format": "int32", }) assert desc.startswith("A score") assert "Minimum: 0" in desc assert "Maximum: 10" in desc assert "Format: int32" in desc
test
0
{"function_name": "test_combined_constraints", "class_name": "TestBuildRichFieldDescription", "qualname": "TestBuildRichFieldDescription.test_combined_constraints", "file_path": "lib/crewai/tests/utilities/test_pydantic_schema_utils.py", "repo_id": "crewAIInc/crewAI", "loc": 11, "tested_modules": ["__future__", "copy", "typing", "pydantic", "crewai.utilities.pydantic_schema_utils"], "has_docstring": false, "runnable_level": "project_runnable"}
unclecode/crawl4ai:tests/test_raw_html_browser.py:test_screenshot_on_raw_html
# Context: import pytest from crawl4ai import AsyncWebCrawler, CrawlerRunConfig async def test_raw_html_fast_path(): ... async def test_js_code_on_raw_html(): ... async def test_js_code_adds_element_to_raw_html(): ... async def test_process_in_browser_flag(): ... async def test_raw_prefix_variations(): ... async def test_wait_for_on_raw_html(): ... async def test_multiple_js_code_scripts(): ... # Task: Write a Python test function `test_screenshot_on_raw_html` to test that screenshots work on raw: HTML. Module under test: crawl4ai
async def test_screenshot_on_raw_html(): """Test that screenshots work on raw: HTML.""" html = "<html><body><h1 style='color:red;font-size:48px;'>Screenshot Test</h1></body></html>" async with AsyncWebCrawler() as crawler: config = CrawlerRunConfig(screenshot=True) result = await crawler.arun(f"raw:{html}", config=config) assert result.success assert result.screenshot is not None assert len(result.screenshot) > 100 # Should have substantial screenshot data
test
1
{"function_name": "test_screenshot_on_raw_html", "class_name": null, "qualname": "test_screenshot_on_raw_html", "file_path": "tests/test_raw_html_browser.py", "repo_id": "unclecode/crawl4ai", "loc": 11, "tested_modules": ["crawl4ai"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/api/v2/test_converters.py:TestParseFlatInputs.test_parse_flat_inputs_with_session_id
# Context: from langflow.api.v2.converters import ( _build_metadata_for_non_output, _extract_file_path, _extract_model_source, _extract_nested_value, _extract_text_from_message, _get_raw_content, _simplify_output_content, create_error_response, create_job_response, parse_flat_inputs, run_response_to_workflow_response, ) def _setup_graph_get_vertex(graph: Mock, vertices: list[Mock]) -> None: ... class TestExtractNestedValue: ... class TestExtractTextFromMessage: ... class TestExtractModelSource: ... class TestExtractFilePath: ... class TestGetRawContent: ... class TestSimplifyOutputContent: ... class TestBuildMetadataForNonOutput: ... class TestCreateJobResponse: ... class TestCreateErrorResponse: ... class TestRunResponseToWorkflowResponse: ... class TestParseFlatInputs: def test_parse_flat_inputs_basic(self): ... def test_parse_flat_inputs_multiple_session_ids(self): ... def test_parse_flat_inputs_dict_values(self): ... def test_parse_flat_inputs_mixed_formats(self): ... def test_parse_flat_inputs_empty(self): ... def test_parse_flat_inputs_multiple_params_same_component(self): ... def test_parse_flat_inputs_malformed_key_no_dot(self): ... def test_parse_flat_inputs_null_values(self): ... def test_parse_flat_inputs_deeply_nested_dict(self): ... def test_parse_flat_inputs_empty_collections(self): ... def test_parse_flat_inputs_explicit_none_values(self): ... def test_parse_flat_inputs_special_characters_in_keys(self): ... # Task: Write a Python test method `test_parse_flat_inputs_with_session_id` in test class `TestParseFlatInputs` to test parsing with session_id extraction. Module under test: __future__, typing, uuid
def test_parse_flat_inputs_with_session_id(self): """Test parsing with session_id extraction.""" inputs = { "ChatInput-abc.input_value": "hello", "ChatInput-abc.session_id": "session-123", "LLM-xyz.temperature": 0.7, } tweaks, session_id = parse_flat_inputs(inputs) assert tweaks == { "ChatInput-abc": {"input_value": "hello", "session_id": "session-123"}, "LLM-xyz": {"temperature": 0.7}, } assert session_id == "session-123"
test
1
{"function_name": "test_parse_flat_inputs_with_session_id", "class_name": "TestParseFlatInputs", "qualname": "TestParseFlatInputs.test_parse_flat_inputs_with_session_id", "file_path": "src/backend/tests/unit/api/v2/test_converters.py", "repo_id": "langflow-ai/langflow", "loc": 14, "tested_modules": ["__future__", "typing", "uuid", "langflow.api.v2.converters", "lfx.schema.workflow"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/dashboard/tests/test_dashboard_auth.py:test_authentication_mode_endpoint_with_token_auth
# Context: import requests def test_dashboard_request_requires_auth_with_valid_token(setup_cluster_with_token_auth): ... def test_dashboard_request_requires_auth_missing_token(setup_cluster_with_token_auth): ... def test_dashboard_request_requires_auth_invalid_token(setup_cluster_with_token_auth): ... def test_dashboard_request_with_ray_auth_header(setup_cluster_with_token_auth): ... def test_authorization_header_takes_precedence(setup_cluster_with_token_auth): ... def test_dashboard_auth_disabled(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_without_auth(setup_cluster_without_token_auth): ... def test_authentication_mode_endpoint_is_public(setup_cluster_with_token_auth): ... # Task: Write a Python test function `test_authentication_mode_endpoint_with_token_auth` to test authentication_mode endpoint returns 'token' when auth is enabled.
def test_authentication_mode_endpoint_with_token_auth(setup_cluster_with_token_auth): """Test authentication_mode endpoint returns 'token' when auth is enabled.""" cluster_info = setup_cluster_with_token_auth # This endpoint should be accessible WITHOUT authentication response = requests.get(f"{cluster_info['dashboard_url']}/api/authentication_mode") assert response.status_code == 200 assert response.json() == {"authentication_mode": "token"}
test
0
{"function_name": "test_authentication_mode_endpoint_with_token_auth", "class_name": null, "qualname": "test_authentication_mode_endpoint_with_token_auth", "file_path": "python/ray/dashboard/tests/test_dashboard_auth.py", "repo_id": "ray-project/ray", "loc": 10, "tested_modules": [], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/lfx/src/lfx/services/manager.py:ServiceManager.register_factories
# Context: from lfx.log.logger import logger from lfx.services.factory import ServiceFactory class NoFactoryRegisteredError(Exception): ... class NoServiceRegisteredError(Exception): ... def get_service_manager() -> ServiceManager: ... class ServiceManager: def __init__(self) -> None: """Initialize the service manager with empty service and factory registries.""" self.services: dict[str, Service] = {} self.factories: dict[str, ServiceFactory] = {} self.service_classes: dict[ServiceType, type[Service]] = {} # New: direct service class registry self._lock = threading.RLock() self.keyed_lock = KeyedMemoryLockManager() self.factory_registered = False self._plugins_discovered = False # Always register settings service from lfx.services.settings.factory import SettingsServiceFactory self.register_factory(SettingsServiceFactory()) def are_factories_registered(self) -> bool: ... def set_factory_registered(self) -> None: ... def register_service_class(self, service_type: ServiceType, service_class: type[Service], override: bool) -> None: ... def register_factory(self, service_factory: ServiceFactory) -> None: ... def get(self, service_name: ServiceType, default: ServiceFactory | None) -> Service: ... def _create_service(self, service_name: ServiceType, default: ServiceFactory | None) -> None: ... def _create_service_from_class(self, service_name: ServiceType) -> None: ... def _resolve_service_type_from_annotation(self, annotation) -> ServiceType | None: ... def _create_service_from_factory(self, service_name: ServiceType, default: ServiceFactory | None) -> None: ... def _validate_service_creation(self, service_name: ServiceType, default: ServiceFactory | None) -> None: ... def update(self, service_name: ServiceType) -> None: ... async def teardown(self) -> None: ... def get_factories(cls) -> list[ServiceFactory]: ... def discover_plugins(self, config_dir: Path | None) -> None: ... def _discover_from_entry_points(self) -> None: ... 
def _discover_from_config(self, config_dir: Path | None) -> None: ... def _load_config_file(self, config_path: Path) -> None: ... def _load_pyproject_config(self, config_path: Path) -> None: ... def _register_service_from_path(self, service_key: str, service_path: str) -> None: ... # Task: Write a Python method `register_factories` for the class `ServiceManager` to register all available service factories. Parameters: factories: list[ServiceFactory] | None Returns: None
def register_factories(self, factories: list[ServiceFactory] | None = None) -> None: """Register all available service factories.""" if factories is None: return for factory in factories: try: self.register_factory(factory) except Exception: # noqa: BLE001 logger.exception(f"Error initializing {factory}") self.set_factory_registered()
function_simple
1
{"cognitive_complexity": 3, "loc": 10, "code_loc": 8, "docstring_loc": 1, "function_name": "register_factories", "class_name": "ServiceManager", "qualname": "ServiceManager.register_factories", "file_path": "src/lfx/src/lfx/services/manager.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "project_runnable"}
browser-use/browser-use:browser_use/browser/watchdogs/dom_watchdog.py:DOMWatchdog._get_page_info
# Context: import asyncio from browser_use.browser.views import BrowserStateSummary, NetworkRequest, PageInfo, PaginationButton from browser_use.browser.views import BrowserStateSummary, PageInfo from browser_use.browser.views import PageInfo class DOMWatchdog(BaseWatchdog): LISTENS_TO = [TabCreatedEvent, BrowserStateRequestEvent] EMITS = [BrowserErrorEvent] async def on_TabCreatedEvent(self, event: TabCreatedEvent) -> None: ... def _get_recent_events_str(self, limit: int) -> str | None: ... async def _get_pending_network_requests(self) -> list['NetworkRequest']: ... async def on_BrowserStateRequestEvent(self, event: BrowserStateRequestEvent) -> 'BrowserStateSummary': ... async def _build_dom_tree_without_highlights(self, previous_state: SerializedDOMState | None) -> SerializedDOMState: ... async def _capture_clean_screenshot(self) -> str: ... def _detect_pagination_buttons(self, selector_map: dict[int, EnhancedDOMTreeNode]) -> list['PaginationButton']: ... async def get_element_by_index(self, index: int) -> EnhancedDOMTreeNode | None: ... def clear_cache(self) -> None: ... def is_file_input(self, element: EnhancedDOMTreeNode) -> bool: ... def is_element_visible_according_to_all_parents(node: EnhancedDOMTreeNode, html_frames: list[EnhancedDOMTreeNode]) -> bool: ... async def __aexit__(self, exc_type, exc_value, traceback): ... def __del__(self): ... # Task: Write a Python async method `_get_page_info` for the class `DOMWatchdog` to get comprehensive page information using a single CDP call. Returns: 'PageInfo'
async def _get_page_info(self) -> 'PageInfo': """Get comprehensive page information using a single CDP call. TODO: should we make this an event as well? Returns: PageInfo with all viewport, page dimensions, and scroll information """ from browser_use.browser.views import PageInfo # get_or_create_cdp_session() handles focus validation automatically cdp_session = await self.browser_session.get_or_create_cdp_session( target_id=self.browser_session.agent_focus_target_id, focus=True ) # Get layout metrics which includes all the information we need metrics = await asyncio.wait_for( cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id), timeout=10.0 ) # Extract different viewport types layout_viewport = metrics.get('layoutViewport', {}) visual_viewport = metrics.get('visualViewport', {}) css_visual_viewport = metrics.get('cssVisualViewport', {}) css_layout_viewport = metrics.get('cssLayoutViewport', {}) content_size = metrics.get('contentSize', {}) # Calculate device pixel ratio to convert between device pixels and CSS pixels # This matches the approach in dom/service.py _get_viewport_ratio method css_width = css_visual_viewport.get('clientWidth', css_layout_viewport.get('clientWidth', 1280.0)) device_width = visual_viewport.get('clientWidth', css_width) device_pixel_ratio = device_width / css_width if css_width > 0 else 1.0 # For viewport dimensions, use CSS pixels (what JavaScript sees) # Prioritize CSS layout viewport, then fall back to layout viewport viewport_width = int(css_layout_viewport.get('clientWidth') or layout_viewport.get('clientWidth', 1280)) viewport_height = int(css_layout_viewport.get('clientHeight') or layout_viewport.get('clientHeight', 720)) # For total page dimensions, content size is typically in device pixels, so convert to CSS pixels # by dividing by device pixel ratio raw_page_width = content_size.get('width', viewport_width * device_pixel_ratio) raw_page_height = content_size.get('height', viewport_height * 
device_pixel_ratio) page_width = int(raw_page_width / device_pixel_ratio) page_height = int(raw_page_height / device_pixel_ratio) # For scroll position, use CSS visual viewport if available, otherwise CSS layout viewport # These should already be in CSS pixels scroll_x = int(css_visual_viewport.get('pageX') or css_layout_viewport.get('pageX', 0)) scroll_y = int(css_visual_viewport.get('pageY') or css_layout_viewport.get('pageY', 0)) # Calculate scroll information - pixels that are above/below/left/right of current viewport pixels_above = scroll_y pixels_below = max(0, page_height - viewport_height - scroll_y) pixels_left = scroll_x pixels_right = max(0, page_width - viewport_width - scroll_x) page_info = PageInfo( viewport_width=viewport_width, viewport_height=viewport_height, page_width=page_width, page_height=page_height, scroll_x=scroll_x, scroll_y=scroll_y, pixels_above=pixels_above, pixels_below=pixels_below, pixels_left=pixels_left, pixels_right=pixels_right, ) return page_info
function_simple
0
{"cognitive_complexity": 5, "loc": 71, "code_loc": 40, "docstring_loc": 7, "function_name": "_get_page_info", "class_name": "DOMWatchdog", "qualname": "DOMWatchdog._get_page_info", "file_path": "browser_use/browser/watchdogs/dom_watchdog.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"}
crewAIInc/crewAI:lib/crewai/tests/project/test_callback_with_taskoutput.py:test_callback_decorator_with_taskoutput_integration
# Context: from unittest.mock import MagicMock, patch from crewai import Agent, Crew, Task from crewai.project import CrewBase, callback, task from crewai.tasks.task_output import TaskOutput def test_callback_decorator_with_taskoutput() -> None: ... # Task: Write a Python test function `test_callback_decorator_with_taskoutput_integration` to integration test for callback with actual task execution. Module under test: crewai, crewai.project, crewai.tasks.output_format
def test_callback_decorator_with_taskoutput_integration() -> None: """Integration test for callback with actual task execution.""" @CrewBase class TestCrew: """Test crew with callback integration.""" callback_called = False received_output: TaskOutput | None = None @callback def task_callback(self, output: TaskOutput) -> None: """Callback executed after task completion.""" self.callback_called = True self.received_output = output @task def test_task(self) -> Task: """Test task.""" return Task( description="Test task", expected_output="Test output", callback=self.task_callback, ) test_crew = TestCrew() agent = Agent( role="Test Agent", goal="Test goal", backstory="Test backstory", ) task_instance = test_crew.test_task() task_instance.agent = agent with patch.object(Agent, "execute_task") as mock_execute: mock_execute.return_value = "test result" task_instance.execute_sync() assert test_crew.callback_called assert test_crew.received_output is not None assert test_crew.received_output.raw == "test result"
test
0
{"function_name": "test_callback_decorator_with_taskoutput_integration", "class_name": null, "qualname": "test_callback_decorator_with_taskoutput_integration", "file_path": "lib/crewai/tests/project/test_callback_with_taskoutput.py", "repo_id": "crewAIInc/crewAI", "loc": 43, "tested_modules": ["crewai", "crewai.project", "crewai.tasks.output_format", "crewai.tasks.task_output"], "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:tests/distributed/test_shm_buffer.py:TestSingleWriterShmRingBuffer.test_buffer_opening
# Context: from vllm.distributed.device_communicators.shm_object_storage import ( SingleWriterShmRingBuffer, ) def main(): ... class TestSingleWriterShmRingBuffer(unittest.TestCase): def setUp(self): ... def tearDown(self): ... def test_buffer_access(self): ... def test_memory_error_on_full_buffer(self): ... def test_allocation_and_free(self): ... def test_clear_buffer(self): ... def test_allocation_cycles(self): ... # Task: Write a Python test method `test_buffer_opening` in test class `TestSingleWriterShmRingBuffer` to test opening an existing buffer. Module under test: vllm.distributed.device_communicators.shm_object_storage
def test_buffer_opening(self): """Test opening an existing buffer""" # First create a buffer self.ring_buffer = SingleWriterShmRingBuffer( data_buffer_size=self.buffer_size, create=True ) # Then open it with another instance reader_buffer = SingleWriterShmRingBuffer(*self.ring_buffer.handle()) self.assertFalse(reader_buffer.is_writer) self.assertEqual( reader_buffer.shared_memory.name, self.ring_buffer.shared_memory.name )
test
1
{"function_name": "test_buffer_opening", "class_name": "TestSingleWriterShmRingBuffer", "qualname": "TestSingleWriterShmRingBuffer.test_buffer_opening", "file_path": "tests/distributed/test_shm_buffer.py", "repo_id": "vllm-project/vllm", "loc": 13, "tested_modules": ["vllm.distributed.device_communicators.shm_object_storage"], "has_docstring": true, "runnable_level": "project_runnable"}
infiniflow/ragflow:common/data_source/connector_runner.py:ConnectorRunner:class_doc
Write a class-level docstring for `ConnectorRunner` (inherits from Generic[CT]) which has methods: `__init__`, `run`.
Handles: - Batching - Additional exception logging - Combining different connector types to a single interface
documentation
1
{"doc_type": "class", "class_name": "ConnectorRunner", "file_path": "common/data_source/connector_runner.py", "repo_id": "infiniflow/ragflow", "char_length": 122, "methods": ["__init__", "run"]}
langflow-ai/langflow:src/lfx/src/lfx/services/mcp_composer/service.py:MCPComposerService._is_port_available
# Context: import socket import errno class MCPComposerError(Exception): ... class MCPComposerPortError(MCPComposerError): ... class MCPComposerConfigError(MCPComposerError): ... class MCPComposerDisabledError(MCPComposerError): ... class MCPComposerStartupError(MCPComposerError): ... def require_composer_enabled(func: Callable) -> Callable: ... class MCPComposerService(Service): name = "mcp_composer_service" def __init__(self): super().__init__() self.project_composers: dict[ str, dict ] = {} # project_id -> {process, host, port, streamable_http_url, auth_config} self._start_locks: dict[ str, asyncio.Lock ] = {} # Lock to prevent concurrent start operations for the same project self._active_start_tasks: dict[ str, asyncio.Task ] = {} # Track active start tasks to cancel them when new request arrives self._port_to_project: dict[int, str] = {} # Track which project is using which port self._pid_to_project: dict[int, str] = {} # Track which PID belongs to which project self._last_errors: dict[str, str] = {} # Track last error message per project for UI display def get_last_error(self, project_id: str) -> str | None: ... def set_last_error(self, project_id: str, error_message: str) -> None: ... def clear_last_error(self, project_id: str) -> None: ... async def _kill_process_on_port(self, port: int) -> bool: ... async def _kill_zombie_mcp_processes(self, port: int) -> bool: ... def _is_port_used_by_another_project(self, port: int, current_project_id: str) -> tuple[bool, str | None]: ... async def start(self): ... async def stop(self): ... async def stop_project_composer(self, project_id: str): ... async def _do_stop_project_composer(self, project_id: str): ... async def _wait_for_process_exit(self, process): ... async def _read_process_output_and_extract_error(self, process: subprocess.Popen, oauth_server_url: str | None, timeout: float, stdout_file, stderr_file) -> tuple[str, str, str]: ... 
async def _read_stream_non_blocking(self, stream, stream_name: str) -> str: ... async def _ensure_port_available(self, port: int, current_project_id: str) -> None: ... async def _log_startup_error_details(self, project_id: str, cmd: list[str], host: str, port: int, stdout: str, stderr: str, error_msg: str, exit_code: int | None, pid: int | None) -> None: ... def _validate_oauth_settings(self, auth_config: dict[str, Any]) -> None: ... def _normalize_config_value(value: Any) -> Any: ... def _has_auth_config_changed(self, existing_auth: dict[str, Any] | None, new_auth: dict[str, Any] | None) -> bool: ... def _obfuscate_command_secrets(self, cmd: list[str]) -> list[str]: ... def _extract_error_message(self, stdout_content: str, stderr_content: str, oauth_server_url: str | None) -> str: ... async def start_project_composer(self, project_id: str, streamable_http_url: str, auth_config: dict[str, Any] | None, max_retries: int, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> None: ... async def _do_start_project_composer(self, project_id: str, streamable_http_url: str, auth_config: dict[str, Any] | None, max_retries: int, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> None: ... async def _start_project_composer_process(self, project_id: str, host: str, port: int, streamable_http_url: str, auth_config: dict[str, Any] | None, max_startup_checks: int, startup_delay: float, legacy_sse_url: str | None) -> subprocess.Popen: ... def get_project_composer_port(self, project_id: str) -> int | None: ... async def teardown(self) -> None: ... # Task: Write a Python method `_is_port_available` for the class `MCPComposerService` to check if a port is available by trying to bind to it. Parameters: port: int, host: str Returns: bool
def _is_port_available(self, port: int, host: str = "localhost") -> bool: """Check if a port is available by trying to bind to it. Args: port: Port number to check host: Host to check (default: localhost) Returns: True if port is available (not in use), False if in use Raises: ValueError: If port is not in valid range (0-65535) """ import errno # Validate port range before attempting bind max_port = 65535 if not isinstance(port, int) or port < 0 or port > max_port: msg = f"Invalid port number: {port}. Port must be between 0 and {max_port}." raise ValueError(msg) # Check both IPv4 and IPv6 to ensure port is truly available # MCP Composer tries to bind on both, so we need to check both # Check IPv4 try: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: # Don't use SO_REUSEADDR here as it can give false positives sock.bind((host, port)) except OSError: return False # Port is in use on IPv4 # Check IPv6 (if supported on this system) try: with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as sock: # Don't use SO_REUSEADDR here as it can give false positives # Use ::1 for localhost on IPv6 ipv6_host = "::1" if host in ("localhost", "127.0.0.1") else host sock.bind((ipv6_host, port)) except OSError as e: # Check if it's "address already in use" error # errno.EADDRINUSE is 48 on macOS, 98 on Linux, 10048 on Windows (WSAEADDRINUSE) # We check both the standard errno and Windows-specific error code if e.errno in (errno.EADDRINUSE, 10048): return False # Port is in use on IPv6 # For other errors (e.g., IPv6 not supported, EADDRNOTAVAIL), continue # IPv6 might not be supported on this system, which is okay return True # Port is available on both IPv4 and IPv6 (or IPv6 not supported)
function_complex
1
{"cognitive_complexity": 9, "loc": 49, "code_loc": 18, "docstring_loc": 12, "function_name": "_is_port_available", "class_name": "MCPComposerService", "qualname": "MCPComposerService._is_port_available", "file_path": "src/lfx/src/lfx/services/mcp_composer/service.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "slib_runnable"}
huggingface/diffusers:tests/hooks/test_mag_cache.py:MagCacheTests.test_mag_cache_reset
# Context: import numpy as np import torch from diffusers import MagCacheConfig, apply_mag_cache class DummyBlock(torch.nn.Module): ... class DummyTransformer(ModelMixin): ... class TupleOutputBlock(torch.nn.Module): ... class TupleTransformer(ModelMixin): ... class MagCacheTests(unittest.TestCase): def setUp(self): ... def _set_context(self, model, context_name): ... def _get_calibration_data(self, model): ... def test_mag_cache_validation(self): ... def test_mag_cache_skipping_logic(self): ... def test_mag_cache_retention(self): ... def test_mag_cache_tuple_outputs(self): ... def test_mag_cache_calibration(self): ... # Task: Write a Python test method `test_mag_cache_reset` in test class `MagCacheTests` to test that state resets correctly after num_inference_steps. Module under test: diffusers, diffusers.hooks._helpers, diffusers.models
def test_mag_cache_reset(self): """Test that state resets correctly after num_inference_steps.""" model = DummyTransformer() config = MagCacheConfig( threshold=100.0, num_inference_steps=2, retention_ratio=0.0, mag_ratios=np.array([1.0, 1.0]) ) apply_mag_cache(model, config) self._set_context(model, "test_context") input_t = torch.ones(1, 1, 1) model(input_t) # Step 0 model(input_t) # Step 1 (Skipped) # Step 2 (Reset -> Step 0) -> Should Compute # Input 2.0 -> Output 8.0 input_t2 = torch.tensor([[[2.0]]]) output_t2 = model(input_t2) self.assertTrue(torch.allclose(output_t2, torch.tensor([[[8.0]]])), "State did not reset correctly")
test
1
{"function_name": "test_mag_cache_reset", "class_name": "MagCacheTests", "qualname": "MagCacheTests.test_mag_cache_reset", "file_path": "tests/hooks/test_mag_cache.py", "repo_id": "huggingface/diffusers", "loc": 20, "tested_modules": ["diffusers", "diffusers.hooks._helpers", "diffusers.models", "diffusers.utils"], "has_docstring": true, "runnable_level": "project_runnable"}
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py:TestAgentCoreBrowserToolSpec.test_get_elements
# Context: from unittest.mock import patch, MagicMock from llama_index.tools.aws_bedrock_agentcore import AgentCoreBrowserToolSpec class TestGetAwsRegion: ... class TestBrowserUtils: ... class TestAgentCoreBrowserToolSpec: def test_init(self, mock_browser_session_manager): ... def test_init_default_region(self, mock_browser_session_manager, mock_get_aws_region): ... def test_get_or_create_browser_client_new(self, mock_browser_client): ... def test_get_or_create_browser_client_existing(self, mock_browser_client): ... def test_navigate_browser_invalid_url(self): ... def test_navigate_browser(self, mock_get_current_page): ... def test_navigate_browser_exception(self): ... def test_click_element(self, mock_get_current_page): ... def test_click_element_not_found(self, mock_get_current_page): ... def test_extract_text_whole_page(self, mock_get_current_page): ... def test_extract_text_with_selector(self, mock_get_current_page): ... def test_extract_text_selector_not_found(self, mock_get_current_page): ... def test_extract_hyperlinks(self, mock_get_current_page): ... def test_extract_hyperlinks_no_links(self, mock_get_current_page): ... def test_get_elements_not_found(self, mock_get_current_page): ... def test_navigate_back(self, mock_get_current_page): ... def test_navigate_back_no_history(self, mock_get_current_page): ... def test_current_webpage(self, mock_get_current_page): ... def test_cleanup_thread(self): ... # Task: Write a Python test method `test_get_elements` in test class `TestAgentCoreBrowserToolSpec` to verify the behavior of `get_elements`. Module under test: llama_index.tools.aws_bedrock_agentcore, llama_index.tools.aws_bedrock_agentcore.browser.base, llama_index.tools.aws_bedrock_agentcore.browser.utils
def test_get_elements(self, mock_get_current_page): mock_session_manager = MagicMock() mock_browser = MagicMock() mock_page = MagicMock() mock_element1 = MagicMock() mock_element2 = MagicMock() mock_session_manager.get_sync_browser.return_value = mock_browser mock_get_current_page.return_value = mock_page mock_page.query_selector_all.return_value = [mock_element1, mock_element2] mock_element1.evaluate.side_effect = [ "div", {"id": "div1", "class": "container"}, ] mock_element1.text_content.return_value = "Content 1" mock_element2.evaluate.side_effect = [ "div", {"id": "div2", "class": "container"}, ] mock_element2.text_content.return_value = "Content 2" tool_spec = AgentCoreBrowserToolSpec() tool_spec._session_manager = mock_session_manager result = tool_spec.get_elements( selector="div.container", thread_id="test-thread" ) mock_session_manager.get_sync_browser.assert_called_once_with("test-thread") mock_get_current_page.assert_called_once_with(mock_browser) mock_page.query_selector_all.assert_called_once_with("div.container") mock_session_manager.release_sync_browser.assert_called_once_with("test-thread") assert "Found 2 element(s) matching selector 'div.container'" in result assert '1. <div id="div1", class="container">Content 1</div>' in result assert '2. <div id="div2", class="container">Content 2</div>' in result
test
1
{"function_name": "test_get_elements", "class_name": "TestAgentCoreBrowserToolSpec", "qualname": "TestAgentCoreBrowserToolSpec.test_get_elements", "file_path": "llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py", "repo_id": "run-llama/llama_index", "loc": 37, "tested_modules": ["llama_index.tools.aws_bedrock_agentcore", "llama_index.tools.aws_bedrock_agentcore.browser.base", "llama_index.tools.aws_bedrock_agentcore.browser.utils"], "has_docstring": false, "runnable_level": "project_runnable"}
infiniflow/ragflow:tools/es-to-oceanbase-migration/tests/test_schema.py:TestVectorFieldPattern.test_invalid_patterns
# Context: from es_ob_migration.schema import ( RAGFlowSchemaConverter, RAGFlowDataConverter, RAGFLOW_COLUMNS, ARRAY_COLUMNS, JSON_COLUMNS, VECTOR_FIELD_PATTERN, FTS_COLUMNS_ORIGIN, FTS_COLUMNS_TKS, ) class TestRAGFlowSchemaConverter: ... class TestRAGFlowDataConverter: ... class TestConstants: ... class TestRAGFlowSchemaConverterEdgeCases: ... class TestRAGFlowDataConverterEdgeCases: ... class TestVectorFieldPattern: def test_valid_patterns(self): ... def test_extract_dimension(self): ... # Task: Write a Python test method `test_invalid_patterns` in test class `TestVectorFieldPattern` to test invalid vector field patterns. Module under test: es_ob_migration.schema
def test_invalid_patterns(self): """Test invalid vector field patterns.""" invalid_names = [ "q_vec", "768_vec", "q_768", "vector_768", "content_with_weight", ] for name in invalid_names: match = VECTOR_FIELD_PATTERN.match(name) assert match is None, f"Should not match: {name}"
test
1
{"function_name": "test_invalid_patterns", "class_name": "TestVectorFieldPattern", "qualname": "TestVectorFieldPattern.test_invalid_patterns", "file_path": "tools/es-to-oceanbase-migration/tests/test_schema.py", "repo_id": "infiniflow/ragflow", "loc": 13, "tested_modules": ["es_ob_migration.schema"], "has_docstring": true, "runnable_level": "project_runnable"}
unclecode/crawl4ai:docs/examples/deep_crawl_crash_recovery.py:example_export_state
# Context: from crawl4ai import AsyncWebCrawler, CrawlerRunConfig from crawl4ai.deep_crawling import BFSDeepCrawlStrategy async def save_state_to_file(state: Dict[str, Any]) -> None: ... def load_state_from_file() -> Dict[str, Any] | None: ... async def example_basic_state_persistence(): ... async def example_crash_and_resume(): ... async def example_state_structure(): ... async def main(): ... # Task: Write a Python async function `example_export_state` to example 3: Manual state export using export_state().
async def example_export_state(): """ Example 3: Manual state export using export_state(). If you don't need real-time persistence, you can export the state manually after the crawl completes. """ print("\n" + "=" * 60) print("Example 3: Manual State Export") print("=" * 60) strategy = BFSDeepCrawlStrategy( max_depth=1, max_pages=3, # No callback - state is still tracked internally ) config = CrawlerRunConfig( deep_crawl_strategy=strategy, verbose=False, ) print("\nCrawling without callback...") async with AsyncWebCrawler(verbose=False) as crawler: results = await crawler.arun("https://books.toscrape.com", config=config) # Export state after crawl completes # Note: This only works if on_state_change was set during crawl # For this example, we'd need to set on_state_change to get state print(f"\nCrawled {len(results)} pages") print("(For manual export, set on_state_change to capture state)")
function_simple
1
{"cognitive_complexity": 0, "loc": 31, "code_loc": 16, "docstring_loc": 6, "function_name": "example_export_state", "class_name": null, "qualname": "example_export_state", "file_path": "docs/examples/deep_crawl_crash_recovery.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "project_runnable"}
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/flashinfer_utils.py:register_scales_for_trtllm_fp8_per_tensor_moe
# Context: import torch class FlashinferMoeBackend(Enum): ... def activation_to_flashinfer_int(activation: MoEActivation) -> int: ... def swap_w13_to_w31(x: torch.Tensor) -> torch.Tensor: ... def rotate_weights_for_fi_trtllm_fp8_per_tensor_moe(gemm1_weights: torch.Tensor, gemm2_weights: torch.Tensor, is_gated_activation: bool): ... def apply_fi_trtllm_fp8_per_tensor_moe(layer: torch.nn.Module, hidden_states: torch.Tensor, router_logits: torch.Tensor, routing_bias: torch.Tensor | None, top_k: int, num_expert_group: int | None, topk_group: int | None, global_num_experts: int, apply_router_weight_on_input: bool) -> torch.Tensor: ... def make_fp8_moe_alpha_scales_for_fi(w13_scale: torch.Tensor, w13_input_scale: torch.Tensor, w2_scale: torch.Tensor, w2_input_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: ... def get_flashinfer_moe_backend() -> FlashinferMoeBackend: ... def is_flashinfer_supporting_global_sf(backend: FlashinferMoeBackend | None) -> bool: ... def convert_moe_weights_to_flashinfer_trtllm_block_layout(cache_permute_indices: dict[torch.Size, torch.Tensor], w13_weight: torch.Tensor, w2_weight: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: ... def align_fp4_moe_weights_for_fi(w13: torch.Tensor, w13_scale: torch.Tensor, w2: torch.Tensor, w2_scale: torch.Tensor, is_act_and_mul: bool, min_alignment: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, int]: ... def align_fp8_moe_weights_for_fi(w13: torch.Tensor, w2: torch.Tensor, is_act_and_mul: bool, min_alignment: int) -> tuple[torch.Tensor, torch.Tensor, int]: ... def prepare_fp8_moe_layer_for_fi(layer: torch.nn.Module, w13: torch.Tensor, w2: torch.Tensor, w13_scale: torch.Tensor, w13_input_scale: torch.Tensor | None, w2_scale: torch.Tensor, w2_input_scale: torch.Tensor | None, is_trtllm: bool) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: ... 
# Task: Write a Python function `register_scales_for_trtllm_fp8_per_tensor_moe` to register necessary scales for FlashInfer TRTLLM FP8 MoE kernel. Parameters: layer: torch.nn.Module, w13_scale: torch.Tensor, w13_input_scale: torch.Tensor, w2_scale: torch.Tensor, w2_input_scale: torch.Tensor Returns: None
def register_scales_for_trtllm_fp8_per_tensor_moe( layer: torch.nn.Module, w13_scale: torch.Tensor, w13_input_scale: torch.Tensor, w2_scale: torch.Tensor, w2_input_scale: torch.Tensor, ) -> None: """Register necessary scales for FlashInfer TRTLLM FP8 MoE kernel""" g1_alphas, g2_alphas = make_fp8_moe_alpha_scales_for_fi( w13_scale=w13_scale, w13_input_scale=w13_input_scale, w2_scale=w2_scale, w2_input_scale=w2_input_scale, ) layer.w2_input_scale_inv = 1.0 / w2_input_scale layer.output1_scales_gate_scalar = g1_alphas if layer.activation.is_gated: layer.output1_scales_scalar = g1_alphas * layer.w2_input_scale_inv else: layer.output1_scales_scalar = ( torch.ones_like(g1_alphas) * layer.w2_input_scale_inv ) layer.output2_scales_scalar = g2_alphas
function_simple
1
{"cognitive_complexity": 2, "loc": 24, "code_loc": 15, "docstring_loc": 1, "function_name": "register_scales_for_trtllm_fp8_per_tensor_moe", "class_name": null, "qualname": "register_scales_for_trtllm_fp8_per_tensor_moe", "file_path": "vllm/model_executor/layers/quantization/utils/flashinfer_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"}
crewAIInc/crewAI:lib/crewai/src/crewai/llms/providers/gemini/completion.py:GeminiCompletion._format_messages_for_gemini
# Context: import base64 import json from typing import TYPE_CHECKING, Any, Literal, cast from crewai.utilities.types import LLMMessage from google.genai import types class GeminiCompletion(BaseLLM): def __init__( self, model: str = "gemini-2.0-flash-001", api_key: str | None = None, project: str | None = None, location: str | None = None, temperature: float | None = None, top_p: float | None = None, top_k: int | None = None, max_output_tokens: int | None = None, stop_sequences: list[str] | None = None, stream: bool = False, safety_settings: dict[str, Any] | None = None, client_params: dict[str, Any] | None = None, interceptor: BaseInterceptor[Any, Any] | None = None, use_vertexai: bool | None = None, response_format: type[BaseModel] | None = None, **kwargs: Any, ): """Initialize Google Gemini chat completion client. Args: model: Gemini model name (e.g., 'gemini-2.0-flash-001', 'gemini-1.5-pro') api_key: Google API key for Gemini API authentication. Defaults to GOOGLE_API_KEY or GEMINI_API_KEY env var. NOTE: Cannot be used with Vertex AI (project parameter). Use Gemini API instead. project: Google Cloud project ID for Vertex AI with ADC authentication. Requires Application Default Credentials (gcloud auth application-default login). NOTE: Vertex AI does NOT support API keys, only OAuth2/ADC. If both api_key and project are set, api_key takes precedence. location: Google Cloud location (for Vertex AI with ADC, defaults to 'us-central1') temperature: Sampling temperature (0-2) top_p: Nucleus sampling parameter top_k: Top-k sampling parameter max_output_tokens: Maximum tokens in response stop_sequences: Stop sequences stream: Enable streaming responses safety_settings: Safety filter settings client_params: Additional parameters to pass to the Google Gen AI Client constructor. Supports parameters like http_options, credentials, debug_config, etc. interceptor: HTTP interceptor (not yet supported for Gemini). use_vertexai: Whether to use Vertex AI instead of Gemini API. 
- True: Use Vertex AI (with ADC or Express mode with API key) - False: Use Gemini API (explicitly override env var) - None (default): Check GOOGLE_GENAI_USE_VERTEXAI env var When using Vertex AI with API key (Express mode), http_options with api_version="v1" is automatically configured. response_format: Pydantic model for structured output. Used as default when response_model is not passed to call()/acall() methods. **kwargs: Additional parameters """ if interceptor is not None: raise NotImplementedError( "HTTP interceptors are not yet supported for Google Gemini provider. " "Interceptors are currently supported for OpenAI and Anthropic providers only." ) super().__init__( model=model, temperature=temperature, stop=stop_sequences or [], **kwargs ) # Store client params for later use self.client_params = client_params or {} # Get API configuration with environment variable fallbacks self.api_key = ( api_key or os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY") ) self.project = project or os.getenv("GOOGLE_CLOUD_PROJECT") self.location = location or os.getenv("GOOGLE_CLOUD_LOCATION") or "us-central1" if use_vertexai is None: use_vertexai = os.getenv("GOOGLE_GENAI_USE_VERTEXAI", "").lower() == "true" self.client = self._initialize_client(use_vertexai) # Store completion parameters self.top_p = top_p self.top_k = top_k self.max_output_tokens = max_output_tokens self.stream = stream self.safety_settings = safety_settings or {} self.stop_sequences = stop_sequences or [] self.tools: list[dict[str, Any]] | None = None self.response_format = response_format # Model-specific settings version_match = re.search(r"gemini-(\d+(?:\.\d+)?)", model.lower()) self.supports_tools = bool( version_match and float(version_match.group(1)) >= 1.5 ) self.is_gemini_2_0 = bool( version_match and float(version_match.group(1)) >= 2.0 ) def stop(self) -> list[str]: ... def stop(self, value: list[str] | str | None) -> None: ... 
def _initialize_client(self, use_vertexai: bool) -> genai.Client: ... def _get_client_params(self) -> dict[str, Any]: ... def call(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def acall(self, messages: str | list[LLMMessage], tools: list[dict[str, Any]] | None, callbacks: list[Any] | None, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _prepare_generation_config(self, system_instruction: str | None, tools: list[dict[str, Any]] | None, response_model: type[BaseModel] | None) -> types.GenerateContentConfig: ... def _convert_tools_for_interference(self, tools: list[dict[str, Any]]) -> list[types.Tool]: ... def _validate_and_emit_structured_output(self, content: str, response_model: type[BaseModel], messages_for_event: list[LLMMessage], from_task: Any | None, from_agent: Any | None) -> BaseModel: ... def _finalize_completion_response(self, content: str, contents: list[types.Content], response_model: type[BaseModel] | None, from_task: Any | None, from_agent: Any | None) -> str | BaseModel: ... def _handle_structured_output_tool_call(self, structured_data: dict[str, Any], response_model: type[BaseModel], contents: list[types.Content], from_task: Any | None, from_agent: Any | None) -> BaseModel: ... def _process_response_with_tools(self, response: GenerateContentResponse, contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... 
def _process_stream_chunk(self, chunk: GenerateContentResponse, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], from_task: Any | None, from_agent: Any | None) -> tuple[str, dict[int, dict[str, Any]], dict[str, int]]: ... def _finalize_streaming_response(self, full_response: str, function_calls: dict[int, dict[str, Any]], usage_data: dict[str, int], contents: list[types.Content], available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]]: ... def _handle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def _handle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | BaseModel | list[dict[str, Any]] | Any: ... async def _ahandle_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... async def _ahandle_streaming_completion(self, contents: list[types.Content], config: types.GenerateContentConfig, available_functions: dict[str, Any] | None, from_task: Any | None, from_agent: Any | None, response_model: type[BaseModel] | None) -> str | Any: ... def supports_function_calling(self) -> bool: ... def supports_stop_words(self) -> bool: ... def get_context_window_size(self) -> int: ... def _extract_token_usage(response: GenerateContentResponse) -> dict[str, Any]: ... def _extract_text_from_response(response: GenerateContentResponse) -> str: ... 
def _add_property_ordering(schema: dict[str, Any]) -> dict[str, Any]: ... def _convert_contents_to_dict(contents: list[types.Content]) -> list[LLMMessage]: ... def supports_multimodal(self) -> bool: ... def format_text_content(self, text: str) -> dict[str, Any]: ... def get_file_uploader(self) -> Any: ... # Task: Write a Python method `_format_messages_for_gemini` for the class `GeminiCompletion` to format messages for Gemini API. Parameters: messages: str | list[LLMMessage] Returns: tuple[list[types.Content], str | None]
def _format_messages_for_gemini( self, messages: str | list[LLMMessage] ) -> tuple[list[types.Content], str | None]: """Format messages for Gemini API. Gemini has specific requirements: - System messages are separate system_instruction - Content is organized as Content objects with Parts - Roles are 'user' and 'model' (not 'assistant') Args: messages: Input messages Returns: Tuple of (formatted_contents, system_instruction) """ # Use base class formatting first base_formatted = super()._format_messages(messages) contents: list[types.Content] = [] system_instruction: str | None = None for message in base_formatted: role = message["role"] content = message["content"] # Build parts list from content parts: list[types.Part] = [] if isinstance(content, list): for item in content: if isinstance(item, dict): if "text" in item: parts.append(types.Part.from_text(text=str(item["text"]))) elif "inlineData" in item: inline = item["inlineData"] parts.append( types.Part.from_bytes( data=base64.b64decode(inline["data"]), mime_type=inline["mimeType"], ) ) else: parts.append(types.Part.from_text(text=str(item))) else: parts.append(types.Part.from_text(text=str(content) if content else "")) text_content: str = " ".join(p.text for p in parts if p.text is not None) if role == "system": # Extract system instruction - Gemini handles it separately if system_instruction: system_instruction += f"\n\n{text_content}" else: system_instruction = text_content elif role == "tool": tool_call_id = message.get("tool_call_id") if not tool_call_id: raise ValueError("Tool message missing required tool_call_id") tool_name = message.get("name", "") response_data: dict[str, Any] try: parsed = json.loads(text_content) if text_content else {} if isinstance(parsed, dict): response_data = parsed else: response_data = {"result": parsed} except (json.JSONDecodeError, TypeError): response_data = {"result": text_content} function_response_part = types.Part.from_function_response( name=tool_name, 
response=response_data ) contents.append( types.Content(role="user", parts=[function_response_part]) ) elif role == "assistant" and message.get("tool_calls"): raw_parts: list[Any] | None = message.get("raw_tool_call_parts") if raw_parts and all(isinstance(p, types.Part) for p in raw_parts): tool_parts: list[types.Part] = list(raw_parts) if text_content: tool_parts.insert(0, types.Part.from_text(text=text_content)) else: tool_parts = [] if text_content: tool_parts.append(types.Part.from_text(text=text_content)) tool_calls: list[dict[str, Any]] = message.get("tool_calls") or [] for tool_call in tool_calls: func: dict[str, Any] = tool_call.get("function") or {} func_name: str = str(func.get("name") or "") func_args_raw: str | dict[str, Any] = ( func.get("arguments") or {} ) func_args: dict[str, Any] if isinstance(func_args_raw, str): try: func_args = ( json.loads(func_args_raw) if func_args_raw else {} ) except (json.JSONDecodeError, TypeError): func_args = {} else: func_args = func_args_raw tool_parts.append( types.Part.from_function_call( name=func_name, args=func_args ) ) contents.append(types.Content(role="model", parts=tool_parts)) else: # Convert role for Gemini (assistant -> model) gemini_role = "model" if role == "assistant" else "user" # Create Content object gemini_content = types.Content(role=gemini_role, parts=parts) contents.append(gemini_content) return contents, system_instruction
function_complex
0
{"cognitive_complexity": 79, "loc": 123, "code_loc": 88, "docstring_loc": 13, "function_name": "_format_messages_for_gemini", "class_name": "GeminiCompletion", "qualname": "GeminiCompletion._format_messages_for_gemini", "file_path": "lib/crewai/src/crewai/llms/providers/gemini/completion.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"}
geekcomputers/Python:bank_managment_system/QTFrontend.py:create_page_with_header
# Context: from PyQt5 import QtCore, QtGui, QtWidgets def create_styled_frame(parent, min_size, style): ... def create_styled_label(parent, text, font_size, bold, style): ... def create_styled_button(parent, text, min_size): ... def create_input_field(parent, label_text, min_label_size): ... def create_input_field_V(parent, label_text, min_label_size): ... def show_popup_message(parent, message: str, page: int, show_cancel: bool, cancel_page: int): ... def search_result(parent, title, label_text): ... def get_employee_name(parent, name_field_text): ... def create_login_page(parent, title, name_field_text, password_field_text, submit_text): ... def on_login_button_clicked(parent, name_field, password_field): ... def create_home_page(parent, on_admin_clicked, on_employee_clicked, on_exit_clicked): ... def create_admin_menu_page(parent): ... def create_add_employee_page(parent, title, submit_text, update_btn: bool): ... def show_employee_list_page(parent, title): ... def show_total_money(parent, title): ... def create_employee_menu_page(parent, title): ... def create_account_page(parent, title, update_btn): ... def create_show_details_page1(parent, title): ... def create_show_details_page2(parent, title): ... def update_user(parent, title, input_fields_label, input_fielf: bool): ... def setup_main_window(main_window: QtWidgets.QMainWindow): ... def main(): ... # Task: Write a Python function `create_page_with_header` to create a page with a styled header and return the page + main layout. Parameters: parent, title_text
def create_page_with_header(parent, title_text): """Create a page with a styled header and return the page + main layout.""" page = QtWidgets.QWidget(parent) main_layout = QtWidgets.QVBoxLayout(page) main_layout.setContentsMargins(20, 20, 20, 20) main_layout.setSpacing(20) header_frame = create_styled_frame( page, style="background-color: #ffffff; border-radius: 10px; padding: 10px;" ) header_layout = QtWidgets.QVBoxLayout(header_frame) title_label = create_styled_label(header_frame, title_text, font_size=30) header_layout.addWidget(title_label, 0, QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop) main_layout.addWidget(header_frame, 0, QtCore.Qt.AlignTop) return page, main_layout
function_simple
1
{"cognitive_complexity": 0, "loc": 16, "code_loc": 12, "docstring_loc": 1, "function_name": "create_page_with_header", "class_name": null, "qualname": "create_page_with_header", "file_path": "bank_managment_system/QTFrontend.py", "repo_id": "geekcomputers/Python", "has_docstring": true, "runnable_level": "project_runnable"}
Zie619/n8n-workflows:api_server.py:module_doc
Write a module-level docstring for the Python module `api_server` which contains function `check_rate_limit`, function `validate_filename`, class `WorkflowSummary`, class `SearchResponse`, class `StatsResponse`.
FastAPI Server for N8N Workflow Documentation High-performance API with sub-100ms response times.
documentation
0
{"doc_type": "module", "module_name": "api_server", "file_path": "api_server.py", "repo_id": "Zie619/n8n-workflows", "char_length": 97}
browser-use/browser-use:browser_use/browser/watchdogs/local_browser_watchdog.py:LocalBrowserWatchdog.browser_pid
# Context: class LocalBrowserWatchdog(BaseWatchdog): async def on_BrowserLaunchEvent(self, event: BrowserLaunchEvent) -> BrowserLaunchResult: ... async def on_BrowserKillEvent(self, event: BrowserKillEvent) -> None: ... async def on_BrowserStopEvent(self, event: BrowserStopEvent) -> None: ... async def _launch_browser(self, max_retries: int) -> tuple[psutil.Process, str]: ... def _find_installed_browser_path(channel: BrowserChannel | None) -> str | None: ... async def _install_browser_with_playwright(self) -> str: ... def _find_free_port() -> int: ... async def _wait_for_cdp_url(port: int, timeout: float) -> str: ... async def _cleanup_process(process: psutil.Process) -> None: ... def _cleanup_temp_dir(self, temp_dir: Path | str) -> None: ... async def get_browser_pid_via_cdp(browser) -> int | None: ... # Task: Write a Python method `browser_pid` for the class `LocalBrowserWatchdog` to get the browser process ID. Returns: int | None
def browser_pid(self) -> int | None: """Get the browser process ID.""" if self._subprocess: return self._subprocess.pid return None
function_simple
0
{"cognitive_complexity": 1, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "browser_pid", "class_name": "LocalBrowserWatchdog", "qualname": "LocalBrowserWatchdog.browser_pid", "file_path": "browser_use/browser/watchdogs/local_browser_watchdog.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
apache/airflow:airflow-core/src/airflow/utils/dag_version_inflation_checker.py:DagTaskDetector.is_dag_constructor
# Context: import ast class DagVersionInflationCheckLevel(Enum): ... class DagVersionInflationCheckResult: ... class RuntimeVaryingValueWarning: ... class WarningContext(str, Enum): ... class RuntimeVaryingValueAnalyzer: ... class AirflowRuntimeVaryingValueChecker(ast.NodeVisitor): ... def check_dag_file_stability(file_path) -> DagVersionInflationCheckResult: ... class DagTaskDetector: def __init__(self, from_imports: dict[str, tuple[str, str]]): self.from_imports: dict[str, tuple[str, str]] = from_imports self.dag_instances: set[str] = set() self.is_in_dag_context: bool = False def is_task_constructor(self, node: ast.Call) -> bool: ... def register_dag_instance(self, var_name: str): ... def enter_dag_context(self): ... def exit_dag_context(self): ... # Task: Write a Python method `is_dag_constructor` for the class `DagTaskDetector` to check if a call is a Dag constructor. Parameters: node: ast.Call Returns: bool
def is_dag_constructor(self, node: ast.Call) -> bool: """Check if a call is a Dag constructor.""" if not isinstance(node.func, ast.Name): return False func_name = node.func.id # "from airflow import DAG" form or "from airflow.decorator import dag" if func_name in self.from_imports: module, original = self.from_imports[func_name] if (module == "airflow" or module.startswith("airflow.")) and original in ("DAG", "dag"): return True return False
function_complex
1
{"cognitive_complexity": 6, "loc": 14, "code_loc": 8, "docstring_loc": 1, "function_name": "is_dag_constructor", "class_name": "DagTaskDetector", "qualname": "DagTaskDetector.is_dag_constructor", "file_path": "airflow-core/src/airflow/utils/dag_version_inflation_checker.py", "repo_id": "apache/airflow", "has_docstring": true, "runnable_level": "class_runnable"}
browser-use/browser-use:tests/ci/test_coordinate_clicking.py:TestCoordinateClickingTools.test_default_uses_index_only_action
# Context: from browser_use.tools.service import Tools from browser_use.tools.views import ClickElementAction, ClickElementActionIndexOnly class TestCoordinateClickingModelDetection: ... class TestCoordinateClickingWithPassedTools: ... class TestCoordinateClickingTools: def test_default_coordinate_clicking_disabled(self): ... def test_default_click_schema_has_only_index(self): ... def test_enable_coordinate_clicking(self): ... def test_enabled_click_schema_has_coordinates(self): ... def test_disable_coordinate_clicking(self): ... def test_set_coordinate_clicking_idempotent(self): ... def test_schema_title_consistent(self): ... # Task: Write a Python test method `test_default_uses_index_only_action` in test class `TestCoordinateClickingTools` to default Tools should use ClickElementActionIndexOnly. Module under test: browser_use.tools.service, browser_use.tools.views
def test_default_uses_index_only_action(self): """Default Tools should use ClickElementActionIndexOnly.""" tools = Tools() click_action = tools.registry.registry.actions.get('click') assert click_action is not None assert click_action.param_model == ClickElementActionIndexOnly
test
0
{"function_name": "test_default_uses_index_only_action", "class_name": "TestCoordinateClickingTools", "qualname": "TestCoordinateClickingTools.test_default_uses_index_only_action", "file_path": "tests/ci/test_coordinate_clicking.py", "repo_id": "browser-use/browser-use", "loc": 7, "tested_modules": ["browser_use.tools.service", "browser_use.tools.views"], "has_docstring": true, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_routes_test.py:TestSetCorsHeaders.test_no_header_when_origin_not_allowed
# Context: import asyncio from unittest.mock import MagicMock, patch from streamlit.web.server.starlette.starlette_routes import ( _ensure_xsrf_cookie, _set_cors_headers, _set_unquoted_cookie, _with_base, ) from tests.testutil import patch_config_options class TestWithBase: ... class TestEnsureXsrfCookie: ... class TestSetUnquotedCookie: ... class TestSetCorsHeaders: def test_allows_all_when_cors_disabled(self) -> None: ... def test_allows_all_in_dev_mode(self) -> None: ... def test_no_header_when_no_origin(self) -> None: ... def test_allows_configured_origin(self) -> None: ... # Task: Write a Python test method `test_no_header_when_origin_not_allowed` in test class `TestSetCorsHeaders` to test that no header is set when origin is not in allowed list. Module under test: __future__, starlette.responses, streamlit.web.server.starlette.starlette_routes
def test_no_header_when_origin_not_allowed(self) -> None: """Test that no header is set when origin is not in allowed list.""" request = MagicMock() request.headers = MagicMock() # This origin won't be in any allowed list by default request.headers.get.return_value = "http://random-untrusted-origin.com" response = MagicMock() response.headers = {} asyncio.run(_set_cors_headers(request, response)) assert "Access-Control-Allow-Origin" not in response.headers
test
1
{"function_name": "test_no_header_when_origin_not_allowed", "class_name": "TestSetCorsHeaders", "qualname": "TestSetCorsHeaders.test_no_header_when_origin_not_allowed", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_routes_test.py", "repo_id": "streamlit/streamlit", "loc": 12, "tested_modules": ["__future__", "starlette.responses", "streamlit.web.server.starlette.starlette_routes", "streamlit.web.server.starlette.starlette_server_config", "tests.testutil"], "has_docstring": true, "runnable_level": "project_runnable"}
langflow-ai/langflow:src/backend/tests/unit/utils/test_validate.py:TestCreateClass.test_handles_class_with_imports
# Context: from lfx.custom.validate import ( _create_langflow_execution_context, add_type_ignores, build_class_constructor, compile_class_code, create_class, create_function, create_type_ignore_class, eval_function, execute_function, extract_class_code, extract_class_name, extract_function_name, find_names_in_code, get_default_imports, prepare_global_scope, validate_code, ) class TestAddTypeIgnores: ... class TestValidateCode: ... class TestCreateLangflowExecutionContext: ... class TestEvalFunction: ... class TestExecuteFunction: ... class TestCreateFunction: ... class TestHelperFunctions: ... class TestPrepareGlobalScope: ... class TestClassCodeOperations: ... class TestGetDefaultImports: ... class TestCreateClass: def test_creates_simple_class(self): ... def test_replaces_legacy_imports(self): ... def test_handles_syntax_error(self): ... def test_handles_validation_error(self): ... # Task: Write a Python test method `test_handles_class_with_imports` in test class `TestCreateClass` to test creation of class that uses imports. Module under test: lfx.custom.validate, pydantic_core
def test_handles_class_with_imports(self): """Test creation of class that uses imports.""" code = """ import json class JsonHandler: def __init__(self): self.data = {} def to_json(self): return json.dumps(self.data) """ cls = create_class(code, "JsonHandler") instance = cls() assert hasattr(instance, "to_json")
test
1
{"function_name": "test_handles_class_with_imports", "class_name": "TestCreateClass", "qualname": "TestCreateClass.test_handles_class_with_imports", "file_path": "src/backend/tests/unit/utils/test_validate.py", "repo_id": "langflow-ai/langflow", "loc": 15, "tested_modules": ["lfx.custom.validate", "pydantic_core"], "has_docstring": true, "runnable_level": "project_runnable"}
streamlit/streamlit:lib/tests/streamlit/web/server/starlette/starlette_app_test.py:test_static_files_apply_cache_headers
# Context: from http import HTTPStatus from pathlib import Path import pytest from starlette.testclient import TestClient from streamlit import file_util from streamlit.web.server.routes import STATIC_ASSET_CACHE_MAX_AGE_SECONDS from streamlit.web.server.starlette.starlette_app import ( _RESERVED_ROUTE_PREFIXES, App, create_starlette_app, ) from tests.testutil import patch_config_options class _DummyStatsManager: ... class _DummyComponentRegistry: ... class _DummyBidiComponentRegistry: ... class _DummyRuntime: ... def starlette_client(tmp_path: Path) -> Iterator[tuple[TestClient, _DummyRuntime]]: ... def test_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_metrics_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_metrics_endpoint_filters_single_family(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_metrics_endpoint_filters_multiple_families(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_metrics_endpoint_unknown_family_returns_eof(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_metrics_endpoint_protobuf(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_serves_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_download_headers(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_supports_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_rejects_invalid_ranges(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_supports_head_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_no_content_encoding_for_video(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... 
def test_media_endpoint_no_content_encoding_for_audio(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_media_endpoint_no_content_encoding_for_range_requests(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_upload_put_adds_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_upload_put_enforces_max_size(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_component_endpoint_sets_content_type(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_bidi_component_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_script_health_endpoint(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_websocket_rejects_text_frames(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_upload_delete_removes_file(starlette_client: tuple[TestClient, _DummyRuntime]) -> None: ... def test_upload_rejects_without_xsrf_token(tmp_path: Path) -> None: ... def test_upload_accepts_with_valid_xsrf_token(tmp_path: Path) -> None: ... def test_host_config_excludes_localhost_when_not_dev(tmp_path: Path) -> None: ... def test_host_config_includes_localhost_in_dev(tmp_path: Path) -> None: ... def test_host_config_custom_allowed_origins(tmp_path: Path) -> None: ... def test_host_config_empty_allowed_origins(tmp_path: Path) -> None: ... def test_host_config_custom_origins_with_dev_mode(tmp_path: Path) -> None: ... def test_static_files_skipped_in_dev_mode(tmp_path: Path) -> None: ... def test_websocket_auth_cookie_yields_user_info(tmp_path: Path) -> None: ... def test_websocket_accepts_existing_session(tmp_path: Path) -> None: ... def test_static_files_fall_back_to_index(tmp_path: Path) -> None: ... def test_websocket_rejects_auth_cookie_without_valid_xsrf(tmp_path: Path) -> None: ... 
def test_websocket_ignores_debug_disconnect_in_production(tmp_path: Path) -> None: ... def test_websocket_ignores_debug_shutdown_in_production(tmp_path: Path) -> None: ... def test_websocket_allows_debug_shutdown_in_dev_mode(tmp_path: Path) -> None: ... class TestAppInit: ... class TestAppRouteValidation: ... class TestAppLifespan: ... class TestAppServerModeTracking: ... class TestAppScriptPathResolution: ... class TestAppExports: ... def simple_script(tmp_path: Path) -> Path: ... def reset_runtime() -> Iterator[None]: ... class TestAppAsgi: ... # Task: Write a Python test function `test_static_files_apply_cache_headers` to ensure hashed static assets receive long-lived cache headers. Module under test: __future__, contextlib, http
def test_static_files_apply_cache_headers(tmp_path: Path) -> None: """Ensure hashed static assets receive long-lived cache headers.""" component_dir = tmp_path / "component" component_dir.mkdir() (component_dir / "index.html").write_text("component") static_dir = tmp_path / "static" static_dir.mkdir() (static_dir / "index.html").write_text("<html>home</html>") (static_dir / "app.123456.js").write_text("console.log('test')") monkeypatch = pytest.MonkeyPatch() monkeypatch.setattr(file_util, "get_static_dir", lambda: str(static_dir)) runtime = _DummyRuntime(component_dir) app = create_starlette_app(runtime) with TestClient(app) as client: response = client.get("/app.123456.js") assert response.status_code == HTTPStatus.OK assert ( response.headers["cache-control"] == f"public, immutable, max-age={STATIC_ASSET_CACHE_MAX_AGE_SECONDS}" ) monkeypatch.undo()
test
1
{"function_name": "test_static_files_apply_cache_headers", "class_name": null, "qualname": "test_static_files_apply_cache_headers", "file_path": "lib/tests/streamlit/web/server/starlette/starlette_app_test.py", "repo_id": "streamlit/streamlit", "loc": 26, "tested_modules": ["__future__", "contextlib", "http", "pathlib", "typing"], "has_docstring": true, "runnable_level": "project_runnable"}
ray-project/ray:python/ray/data/_internal/datasource/databricks_credentials.py:request_with_401_retry
# Context: from typing import Callable, Optional import requests class DatabricksCredentialProvider(ABC): ... class StaticCredentialProvider(DatabricksCredentialProvider): ... class EnvironmentCredentialProvider(DatabricksCredentialProvider): ... def resolve_credential_provider(credential_provider: Optional[DatabricksCredentialProvider]) -> DatabricksCredentialProvider: ... def build_headers(credential_provider: DatabricksCredentialProvider) -> dict[str, str]: ... # Task: Write a Python function `request_with_401_retry` to make an HTTP request with one retry on 401 after invalidating credentials. Parameters: request_fn: Callable[..., requests.Response], url: str, credential_provider: DatabricksCredentialProvider Returns: requests.Response
def request_with_401_retry( request_fn: Callable[..., requests.Response], url: str, credential_provider: DatabricksCredentialProvider, **kwargs, ) -> requests.Response: """Make an HTTP request with one retry on 401 after invalidating credentials. Args: request_fn: Request function (e.g., requests.get or requests.post) url: Request URL credential_provider: Credential provider for authentication **kwargs: Additional arguments passed to requests Returns: Response object (after calling raise_for_status) """ response = request_fn(url, headers=build_headers(credential_provider), **kwargs) if response.status_code == 401: logger.info("Received 401 response, invalidating credentials and retrying.") credential_provider.invalidate() response = request_fn(url, headers=build_headers(credential_provider), **kwargs) response.raise_for_status() return response
function_simple
0
{"cognitive_complexity": 1, "loc": 26, "code_loc": 7, "docstring_loc": 11, "function_name": "request_with_401_retry", "class_name": null, "qualname": "request_with_401_retry", "file_path": "python/ray/data/_internal/datasource/databricks_credentials.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "file_runnable"}
browser-use/browser-use:browser_use/code_use/views.py:CodeAgentHistoryList.screenshot_paths
# Context: class CellType(str, Enum): ... class ExecutionStatus(str, Enum): ... class CodeCell(BaseModel): ... class NotebookSession(BaseModel): ... class NotebookExport(BaseModel): ... class CodeAgentModelOutput(BaseModel): ... class CodeAgentResult(BaseModel): ... class CodeAgentState(BaseModel): ... class CodeAgentStepMetadata(BaseModel): ... class CodeAgentHistory(BaseModel): ... class CodeAgentHistoryList: def __init__(self, complete_history: list[CodeAgentHistory], usage_summary: UsageSummary | None) -> None: """Initialize with CodeAgent history data.""" self._complete_history = complete_history self._usage_summary = usage_summary def history(self) -> list[CodeAgentHistory]: ... def usage(self) -> UsageSummary | None: ... def __len__(self) -> int: ... def __str__(self) -> str: ... def __repr__(self) -> str: ... def final_result(self) -> None | str: ... def is_done(self) -> bool: ... def is_successful(self) -> bool | None: ... def errors(self) -> list[str | None]: ... def has_errors(self) -> bool: ... def urls(self) -> list[str | None]: ... def screenshots(self, n_last: int | None, return_none_if_not_screenshot: bool) -> list[str | None]: ... def action_results(self) -> list[CodeAgentResult]: ... def extracted_content(self) -> list[str]: ... def number_of_steps(self) -> int: ... def total_duration_seconds(self) -> float: ... def last_action(self) -> None | dict: ... def action_names(self) -> list[str]: ... def model_thoughts(self) -> list[Any]: ... def model_outputs(self) -> list[CodeAgentModelOutput]: ... def model_actions(self) -> list[dict]: ... def action_history(self) -> list[list[dict]]: ... def model_actions_filtered(self, include: list[str] | None) -> list[dict]: ... def add_item(self, history_item: CodeAgentHistory) -> None: ... def model_dump(self, **kwargs) -> dict[str, Any]: ... def save_to_file(self, filepath: str | Path, sensitive_data: dict[str, str | dict[str, str]] | None) -> None: ... 
# Task: Write a Python method `screenshot_paths` for the class `CodeAgentHistoryList` to get all screenshot paths from history. Parameters: n_last: int | None, return_none_if_not_screenshot: bool Returns: list[str | None]
def screenshot_paths(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]: """Get all screenshot paths from history.""" if n_last == 0: return [] if n_last is None: if return_none_if_not_screenshot: return [h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self._complete_history] else: return [h.state.screenshot_path for h in self._complete_history if h.state.screenshot_path is not None] else: if return_none_if_not_screenshot: return [ h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self._complete_history[-n_last:] ] else: return [h.state.screenshot_path for h in self._complete_history[-n_last:] if h.state.screenshot_path is not None]
function_complex
0
{"cognitive_complexity": 9, "loc": 17, "code_loc": 15, "docstring_loc": 1, "function_name": "screenshot_paths", "class_name": "CodeAgentHistoryList", "qualname": "CodeAgentHistoryList.screenshot_paths", "file_path": "browser_use/code_use/views.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"}
huggingface/transformers:tests/models/edgetam_video/test_modeling_edgetam_video.py:EdgeTamVideoModelIntegrationTest.test_inference_mask_generation_video_multi_points
# Context: from transformers.testing_utils import ( backend_empty_cache, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, slow, torch_device, ) import torch def prepare_image(): ... def prepare_groceries_image(): ... def prepare_dog_img(): ... def prepare_video(): ... class EdgeTamVideoModelIntegrationTest(unittest.TestCase): def setUp(self): ... def tearDown(self): ... def test_inference_mask_generation_video_one_point(self): ... def test_inference_mask_generation_video_one_point_propagate_in_video_directly(self): ... def test_inference_mask_generation_video_one_bb(self): ... def test_inference_mask_generation_video_one_point_one_bb(self): ... def test_inference_mask_generation_video_multi_objects_multi_points(self): ... def test_inference_propagate_video_from_mask_input(self): ... def test_inference_propagate_on_streamed_video(self): ... def test_inference_with_different_dtypes(self): ... # Task: Write a Python test method `test_inference_mask_generation_video_multi_points` in test class `EdgeTamVideoModelIntegrationTest` to verify the behavior of `inference_mask_generation_video_multi_points`. Module under test: transformers.testing_utils, transformers.utils, transformers.video_utils
def test_inference_mask_generation_video_multi_points(self): raw_video = prepare_video() inference_session = self.processor.init_video_session(video=raw_video, inference_device=torch_device) ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) self.processor.add_inputs_to_inference_session( inference_session=inference_session, frame_idx=ann_frame_idx, obj_ids=ann_obj_id, input_points=[[[[210, 350], [250, 220]]]], input_labels=[[[1, 1]]], ) outputs = self.video_model(inference_session=inference_session, frame_idx=ann_frame_idx) low_res_masks = outputs.pred_masks video_res_masks = self.processor.post_process_masks( [outputs.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] self.assertEqual(low_res_masks.shape, (1, 1, 256, 256)) self.assertEqual(video_res_masks.shape, (1, 1, raw_video.shape[-3], raw_video.shape[-2])) torch.testing.assert_close( video_res_masks[0, 0, :3, :3], torch.tensor( [[-17.3081, -17.3081, -16.9805], [-16.8430, -16.8430, -16.6766], [-15.7986, -15.7986, -15.9941]] ).to(torch_device), atol=1e-4, rtol=1e-4, ) # test propagate in video frames frames = [] for sam2_video_output in self.video_model.propagate_in_video_iterator( inference_session=inference_session, start_frame_idx=ann_frame_idx, max_frame_num_to_track=2, ): video_res_masks = self.processor.post_process_masks( [sam2_video_output.pred_masks], [raw_video.shape[-3:-1]], binarize=False )[0] frames.append(video_res_masks) frames = torch.stack(frames, dim=0) self.assertEqual(frames.shape, (3, 1, 1, raw_video.shape[-3], raw_video.shape[-2])) # higher tolerance due to errors propagating from frame to frame torch.testing.assert_close( frames[:3, :, :, :2, :2], torch.tensor( [ [[[[-17.3081, -17.3081], [-16.8430, -16.8430]]]], [[[[-14.9302, -14.9302], [-14.8802, -14.8802]]]], [[[[-14.4372, -14.4372], [-14.3697, -14.3697]]]], ] ).to(torch_device), atol=1e-2, rtol=1e-2, )
test
0
{"function_name": "test_inference_mask_generation_video_multi_points", "class_name": "EdgeTamVideoModelIntegrationTest", "qualname": "EdgeTamVideoModelIntegrationTest.test_inference_mask_generation_video_multi_points", "file_path": "tests/models/edgetam_video/test_modeling_edgetam_video.py", "repo_id": "huggingface/transformers", "loc": 55, "tested_modules": ["transformers.testing_utils", "transformers.utils", "transformers.video_utils", "transformers", "PIL"], "has_docstring": false, "runnable_level": "file_runnable"}