sample_id string | instruction string | completion string | category string | label int64 | metadata string |
|---|---|---|---|---|---|
exo-explore/exo:src/exo/download/tests/test_download_verification.py:TestFileVerification.test_redownload_when_file_size_changes_upstream | # Context:
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
import aiofiles
import aiofiles.os as aios
from exo.shared.types.common import ModelId
from exo.download.download_utils import (
_download_file, # pyright: ignore[reportPrivateUsage]
)
def model_id() -> ModelId: ...
async def temp_models_dir(tmp_path: Path) -> AsyncIterator[Path]: ...
class TestFileListCache: ...
class TestModelDeletion: ...
class TestProgressResetOnRedownload: ...
class TestFileVerification:
async def test_skip_download_when_file_size_matches(self, model_id: ModelId, tmp_path: Path) -> None: ...
async def test_offline_fallback_uses_local_file(self, model_id: ModelId, tmp_path: Path) -> None: ...
# Task:
Write a Python test method `test_redownload_when_file_size_changes_upstream` in test class `TestFileVerification` to test that files with mismatched sizes are re-downloaded.
Module under test: collections.abc, datetime, pathlib | async def test_redownload_when_file_size_changes_upstream(
self, model_id: ModelId, tmp_path: Path
) -> None:
"""Test that files with mismatched sizes are re-downloaded."""
# Import inside test to allow patching
from exo.download.download_utils import (
_download_file, # pyright: ignore[reportPrivateUsage]
)
target_dir = tmp_path / "downloads"
await aios.makedirs(target_dir, exist_ok=True)
# Create a local file with wrong size
local_file = target_dir / "test.safetensors"
async with aiofiles.open(local_file, "wb") as f:
await f.write(b"local content") # 13 bytes
remote_size = 1000 # Different from local
remote_hash = "abc123"
with (
patch(
"exo.download.download_utils.file_meta",
new_callable=AsyncMock,
return_value=(remote_size, remote_hash),
) as mock_file_meta,
patch(
"exo.download.download_utils.create_http_session"
) as mock_session_factory,
):
# Set up mock HTTP response for re-download
mock_response = MagicMock()
mock_response.status = 200
mock_response.content.read = AsyncMock( # pyright: ignore[reportAny]
side_effect=[b"x" * remote_size, b""]
)
mock_session = MagicMock()
mock_session.get.return_value.__aenter__ = AsyncMock( # pyright: ignore[reportAny]
return_value=mock_response
)
mock_session.get.return_value.__aexit__ = AsyncMock( # pyright: ignore[reportAny]
return_value=None
)
mock_session_factory.return_value.__aenter__ = AsyncMock( # pyright: ignore[reportAny]
return_value=mock_session
)
mock_session_factory.return_value.__aexit__ = AsyncMock( # pyright: ignore[reportAny]
return_value=None
)
# Mock calc_hash to return the expected hash
with patch(
"exo.download.download_utils.calc_hash",
new_callable=AsyncMock,
return_value=remote_hash,
):
await _download_file(model_id, "main", "test.safetensors", target_dir)
# file_meta should be called twice: once for verification, once for download
assert mock_file_meta.call_count == 2 | test | 0 | {"function_name": "test_redownload_when_file_size_changes_upstream", "class_name": "TestFileVerification", "qualname": "TestFileVerification.test_redownload_when_file_size_changes_upstream", "file_path": "src/exo/download/tests/test_download_verification.py", "repo_id": "exo-explore/exo", "loc": 61, "tested_modules": ["collections.abc", "datetime", "pathlib", "pydantic", "exo.download.download_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/base_router.py:BaseRouter.select_experts | # Context:
import torch
class BaseRouter(FusedMoERouter):
def __init__(
self,
top_k: int,
global_num_experts: int,
eplb_state: EplbLayerState,
enable_eplb: bool = False,
# TODO(bnell): Once the MK is constructed at layer init time, we
# can make this a plain value instead of a callback.
indices_type_getter: Callable[[], torch.dtype | None] | None = None,
):
"""
Note: the indices dtype might not be available at router construction
time, so we need to supply a callback to get it at runtime. This is
because the indices type is supplied by modular kernels which are
created after MoE layer/router construction.
"""
super().__init__()
self.top_k = top_k
self.global_num_experts = global_num_experts
self.eplb_state = eplb_state
self.enable_eplb = enable_eplb
self.indices_type_getter = indices_type_getter
self.capture_fn: Callable[[torch.Tensor], None] | None = None
def set_capture_fn(self, capture_fn: Callable[[torch.Tensor], None] | None) -> None: ...
def _validate_eplb_state(self) -> None: ...
def _get_indices_type(self) -> torch.dtype | None: ...
def _apply_eplb_mapping(self, topk_ids: torch.Tensor) -> torch.Tensor: ...
def _convert_indices_dtype(self, topk_ids: torch.Tensor, indices_type: torch.dtype | None) -> torch.Tensor: ...
def _compute_routing(self, hidden_states: torch.Tensor, router_logits: torch.Tensor, indices_type: torch.dtype | None) -> tuple[torch.Tensor, torch.Tensor]: ...
# Task:
Write a Python method `select_experts` for the class `BaseRouter` to route the input hidden states to the top-k experts based on the.
Parameters: hidden_states: torch.Tensor, router_logits: torch.Tensor
Returns: tuple[torch.Tensor, torch.Tensor] | def select_experts(
self,
hidden_states: torch.Tensor,
router_logits: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Route the input hidden states to the top-k experts based on the
router logits.
This method implements the template method pattern:
1. Validates EPLB state
2. Gets indices type
3. Calls _compute_routing() to get topk_weights and topk_ids
4. Applies EPLB mapping if enabled
5. Converts indices dtype if needed
Returns:
(topk_weights, topk_ids)
(tuple[torch.Tensor, torch.Tensor]):
The weights and expert ids computation result.
**Compatibility**: When EPLB is not enabled, the returned ids are
equivalent to global logical ids, so should be compatible with
plain MoE implementations without redundant experts.
"""
# Step 1: Validate EPLB state
self._validate_eplb_state()
# Step 2: Get indices type.
indices_type = self._get_indices_type()
# Step 3: Compute routing (delegated to subclass)
topk_weights, topk_ids = self._compute_routing(
hidden_states, router_logits, indices_type
)
# Capture logical ids before EPLB mapping.
if self.capture_fn is not None:
self.capture_fn(topk_ids)
# Step 4: Apply EPLB mapping
topk_ids = self._apply_eplb_mapping(topk_ids)
# Step 5: Convert indices dtype
topk_ids = self._convert_indices_dtype(topk_ids, indices_type)
return topk_weights, topk_ids | function_simple | 1 | {"cognitive_complexity": 1, "loc": 47, "code_loc": 10, "docstring_loc": 20, "function_name": "select_experts", "class_name": "BaseRouter", "qualname": "BaseRouter.select_experts", "file_path": "vllm/model_executor/layers/fused_moe/router/base_router.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "class_runnable"} |
666ghj/BettaFish:MediaEngine/nodes/report_structure_node.py:ReportStructureNode.mutate_state | # Context:
from typing import Dict, Any, List
from loguru import logger
from ..state.state import State
class ReportStructureNode(StateMutationNode):
def __init__(self, llm_client, query: str):
"""
初始化报告结构节点
Args:
llm_client: LLM客户端
query: 用户查询
"""
super().__init__(llm_client, "ReportStructureNode")
self.query = query
def validate_input(self, input_data: Any) -> bool: ...
def run(self, input_data: Any, **kwargs) -> List[Dict[str, str]]: ...
def process_output(self, output: str) -> List[Dict[str, str]]: ...
def _generate_default_structure(self) -> List[Dict[str, str]]: ...
# Task:
Write a Python method `mutate_state` for the class `ReportStructureNode` to 将报告结构写入状态.
Parameters: input_data: Any, state: State
Returns: State | def mutate_state(self, input_data: Any = None, state: State = None, **kwargs) -> State:
"""
将报告结构写入状态
Args:
input_data: 输入数据
state: 当前状态,如果为None则创建新状态
**kwargs: 额外参数
Returns:
更新后的状态
"""
if state is None:
state = State()
try:
# 生成报告结构
report_structure = self.run(input_data, **kwargs)
# 设置查询和报告标题
state.query = self.query
if not state.report_title:
state.report_title = f"关于'{self.query}'的深度研究报告"
# 添加段落到状态
for paragraph_data in report_structure:
state.add_paragraph(
title=paragraph_data["title"],
content=paragraph_data["content"]
)
logger.info(f"已将 {len(report_structure)} 个段落添加到状态中")
return state
except Exception as e:
logger.exception(f"状态更新失败: {str(e)}")
raise e | function_complex | 1 | {"cognitive_complexity": 6, "loc": 37, "code_loc": 17, "docstring_loc": 11, "function_name": "mutate_state", "class_name": "ReportStructureNode", "qualname": "ReportStructureNode.mutate_state", "file_path": "MediaEngine/nodes/report_structure_node.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py:Cosmos2_5_PredictBasePipeline.encode_prompt | # Context:
import torch
def retrieve_latents(encoder_output: torch.Tensor, generator: torch.Generator | None, sample_mode: str): ...
class Cosmos2_5_PredictBasePipeline(DiffusionPipeline):
model_cpu_offload_seq = "text_encoder->transformer->vae"
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
_optional_components = ["safety_checker"]
_exclude_from_cpu_offload = ["safety_checker"]
def __init__(
self,
text_encoder: Qwen2_5_VLForConditionalGeneration,
tokenizer: AutoTokenizer,
transformer: CosmosTransformer3DModel,
vae: AutoencoderKLWan,
scheduler: UniPCMultistepScheduler,
safety_checker: CosmosSafetyChecker = None,
):
super().__init__()
if safety_checker is None:
safety_checker = CosmosSafetyChecker()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
transformer=transformer,
scheduler=scheduler,
safety_checker=safety_checker,
)
self.vae_scale_factor_temporal = 2 ** sum(self.vae.temperal_downsample) if getattr(self, "vae", None) else 4
self.vae_scale_factor_spatial = 2 ** len(self.vae.temperal_downsample) if getattr(self, "vae", None) else 8
self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
latents_mean = (
torch.tensor(self.vae.config.latents_mean).view(1, self.vae.config.z_dim, 1, 1, 1).float()
if getattr(self.vae.config, "latents_mean", None) is not None
else None
)
latents_std = (
torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).float()
if getattr(self.vae.config, "latents_std", None) is not None
else None
)
self.latents_mean = latents_mean
self.latents_std = latents_std
if self.latents_mean is None or self.latents_std is None:
raise ValueError("VAE configuration must define both `latents_mean` and `latents_std`.")
def _get_prompt_embeds(self, prompt: str | list[str], max_sequence_length: int, device: torch.device | None, dtype: torch.dtype | None): ...
def prepare_latents(self, video: torch.Tensor | None, batch_size: int, num_channels_latents: int, height: int, width: int, num_frames_in: int, num_frames_out: int, do_classifier_free_guidance: bool, dtype: torch.dtype | None, device: torch.device | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None) -> torch.Tensor: ...
def check_inputs(self, prompt, height, width, prompt_embeds, callback_on_step_end_tensor_inputs): ...
def guidance_scale(self): ...
def do_classifier_free_guidance(self): ...
def num_timesteps(self): ...
def current_timestep(self): ...
def interrupt(self): ...
def __call__(self, image: PipelineImageInput | None, video: list[PipelineImageInput] | None, prompt: str | list[str] | None, negative_prompt: str | list[str] | None, height: int, width: int, num_frames: int, num_inference_steps: int, guidance_scale: float, num_videos_per_prompt: int | None, generator: torch.Generator | list[torch.Generator] | None, latents: torch.Tensor | None, prompt_embeds: torch.Tensor | None, negative_prompt_embeds: torch.Tensor | None, output_type: str | None, return_dict: bool, callback_on_step_end: Callable[[int, int, None], PipelineCallback | MultiPipelineCallbacks] | None, callback_on_step_end_tensor_inputs: list[str], max_sequence_length: int, conditional_frame_timestep: float, num_latent_conditional_frames: int): ...
def _match_num_frames(self, video: torch.Tensor, target_num_frames: int) -> torch.Tensor: ...
# Task:
Write a Python method `encode_prompt` for the class `Cosmos2_5_PredictBasePipeline` to encodes the prompt into text encoder hidden states.
Parameters: prompt: str | list[str], negative_prompt: str | list[str] | None, do_classifier_free_guidance: bool, num_videos_per_prompt: int, prompt_embeds: torch.Tensor | None, negative_prompt_embeds: torch.Tensor | None, max_sequence_length: int, device: torch.device | None, dtype: torch.dtype | None | def encode_prompt(
self,
prompt: str | list[str],
negative_prompt: str | list[str] | None = None,
do_classifier_free_guidance: bool = True,
num_videos_per_prompt: int = 1,
prompt_embeds: torch.Tensor | None = None,
negative_prompt_embeds: torch.Tensor | None = None,
max_sequence_length: int = 512,
device: torch.device | None = None,
dtype: torch.dtype | None = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `list[str]`, *optional*):
prompt to be encoded
negative_prompt (`str` or `list[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
Whether to use classifier free guidance or not.
num_videos_per_prompt (`int`, *optional*, defaults to 1):
Number of videos that should be generated per prompt. torch device to place the resulting embeddings on
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
device: (`torch.device`, *optional*):
torch device
dtype: (`torch.dtype`, *optional*):
torch dtype
"""
device = device or self._execution_device
prompt = [prompt] if isinstance(prompt, str) else prompt
if prompt is not None:
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
prompt_embeds = self._get_prompt_embeds(
prompt=prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype
)
# duplicate text embeddings for each generation per prompt, using mps friendly method
_, seq_len, _ = prompt_embeds.shape
prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
if do_classifier_free_guidance and negative_prompt_embeds is None:
negative_prompt = negative_prompt if negative_prompt is not None else DEFAULT_NEGATIVE_PROMPT
negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
if prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
negative_prompt_embeds = self._get_prompt_embeds(
prompt=negative_prompt, max_sequence_length=max_sequence_length, device=device, dtype=dtype
)
# duplicate text embeddings for each generation per prompt, using mps friendly method
_, seq_len, _ = negative_prompt_embeds.shape
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_videos_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
return prompt_embeds, negative_prompt_embeds | function_complex | 1 | {"cognitive_complexity": 15, "loc": 82, "code_loc": 34, "docstring_loc": 26, "function_name": "encode_prompt", "class_name": "Cosmos2_5_PredictBasePipeline", "qualname": "Cosmos2_5_PredictBasePipeline.encode_prompt", "file_path": "src/diffusers/pipelines/cosmos/pipeline_cosmos2_5_predict.py", "repo_id": "huggingface/diffusers", "has_docstring": true, "runnable_level": "file_runnable"} |
unclecode/crawl4ai:crawl4ai/script/c4ai_script.py:C4AScriptError.from_exception | # Context:
import pathlib, re, sys, textwrap
from typing import Any, Dict, List, Union
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
class Cmd: ...
class Proc: ...
class ASTBuilder(Transformer): ...
class Compiler: ...
def compile_string(script: Union[str, List[str]], root: Union[pathlib.Path, None]) -> List[str]: ...
def compile_file(path: pathlib.Path) -> List[str]: ...
def compile_lines(lines: List[str], root: Union[pathlib.Path, None]) -> List[str]: ...
class C4AScriptError(Exception):
def __init__(self, message: str, line: int = None, column: int = None,
error_type: str = "Syntax Error", details: str = None):
self.message = message
self.line = line
self.column = column
self.error_type = error_type
self.details = details
super().__init__(self._format_message())
def _format_message(self) -> str: ...
# Task:
Write a Python method `from_exception` for the class `C4AScriptError` to create C4AScriptError from another exception.
Parameters: exc: Exception, script: Union[str, List[str]]
Returns: 'C4AScriptError' | def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError':
"""Create C4AScriptError from another exception"""
script_text = script if isinstance(script, str) else '\n'.join(script)
script_lines = script_text.split('\n')
if isinstance(exc, UnexpectedToken):
# Extract line and column from UnexpectedToken
line = exc.line
column = exc.column
# Get the problematic line
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
# Improve error message based on context
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
message = "Missing 'THEN' keyword after IF condition"
elif exc.token.type == '$END':
message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands"
elif 'RPAR' in str(exc.expected):
message = "Missing closing parenthesis ')'"
elif 'COMMA' in str(exc.expected):
message = "Missing comma ',' in command"
else:
message = f"Unexpected '{exc.token}'"
if exc.expected:
expected_list = [str(e) for e in exc.expected if not e.startswith('_')]
if expected_list:
message += f". Expected: {', '.join(expected_list[:3])}"
details += f"Token: {exc.token.type} ('{exc.token.value}')"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, UnexpectedCharacters):
# Extract line and column
line = exc.line
column = exc.column
if 0 < line <= len(script_lines):
problem_line = script_lines[line - 1]
marker = " " * (column - 1) + "^"
details = f"\nCode:\n {problem_line}\n {marker}\n"
message = f"Invalid character or unexpected text at position {column}"
else:
message = str(exc)
details = None
return cls(message, line, column, "Syntax Error", details)
elif isinstance(exc, ValueError):
# Handle runtime errors like undefined procedures
message = str(exc)
# Try to find which line caused the error
if "Unknown procedure" in message:
proc_name = re.search(r"'([^']+)'", message)
if proc_name:
proc_name = proc_name.group(1)
for i, line in enumerate(script_lines, 1):
if proc_name in line and not line.strip().startswith('PROC'):
details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC"
return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details)
return cls(message, None, None, "Runtime Error", None)
else:
# Generic error
return cls(str(exc), None, None, "Compilation Error", None) | function_complex | 1 | {"cognitive_complexity": 43, "loc": 76, "code_loc": 53, "docstring_loc": 1, "function_name": "from_exception", "class_name": "C4AScriptError", "qualname": "C4AScriptError.from_exception", "file_path": "crawl4ai/script/c4ai_script.py", "repo_id": "unclecode/crawl4ai", "has_docstring": true, "runnable_level": "project_runnable"} |
Comfy-Org/ComfyUI:tests-unit/folder_paths_test/system_user_test.py:TestEdgeCases.test_triple_underscore_blocked | # Context:
from folder_paths import (
get_system_user_directory,
get_public_user_directory,
get_user_directory,
set_user_directory,
)
def mock_user_directory(): ...
class TestGetSystemUserDirectory: ...
class TestGetPublicUserDirectory: ...
class TestBackwardCompatibility: ...
class TestEdgeCases:
def test_prefix_only(self): ...
def test_single_underscore_allowed(self): ...
def test_underscore_in_middle_allowed(self): ...
def test_leading_space_allowed(self): ...
# Task:
Write a Python test method `test_triple_underscore_blocked` in test class `TestEdgeCases` to test triple underscore is blocked (starts with __).
Module under test: folder_paths | def test_triple_underscore_blocked(self):
"""Test triple underscore is blocked (starts with __)."""
assert get_public_user_directory("___system") is None | test | 1 | {"function_name": "test_triple_underscore_blocked", "class_name": "TestEdgeCases", "qualname": "TestEdgeCases.test_triple_underscore_blocked", "file_path": "tests-unit/folder_paths_test/system_user_test.py", "repo_id": "Comfy-Org/ComfyUI", "loc": 3, "tested_modules": ["folder_paths"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:airflow-core/tests/unit/api_fastapi/common/db/test_dags.py:TestGenerateDagWithLatestRunQuery.test_queued_runs_with_null_start_date_are_properly_joined | # Context:
from airflow.api_fastapi.common.db.dags import generate_dag_with_latest_run_query
from airflow.api_fastapi.common.parameters import SortParam
from airflow.models import DagModel
from airflow.models.dagrun import DagRun
class TestGenerateDagWithLatestRunQuery:
def _clear_db(): ...
def setup_teardown(self): ...
def dag_with_queued_run(self, session, testing_dag_bundle): ...
def dag_with_running_run(self, session): ...
def test_includes_queued_run_without_start_date(self, dag_with_queued_run, session): ...
def test_includes_queued_run_when_ordering_by_state(self, dag_with_queued_run, dag_with_running_run, session): ...
def test_includes_queued_run_when_ordering_by_start_date(self, dag_with_queued_run, dag_with_running_run, session): ...
def test_latest_queued_run_without_start_date_is_included(self, session): ...
def test_filters_by_dag_ids_when_provided(self, session): ...
# Task:
Write a Python test method `test_queued_runs_with_null_start_date_are_properly_joined` in test class `TestGenerateDagWithLatestRunQuery` to verifies that DAGs with null start_date are properly joined in the query.
Module under test: __future__, datetime, airflow._shared.timezones.timezone | def test_queued_runs_with_null_start_date_are_properly_joined(
self, dag_with_queued_run, dag_with_running_run, session
):
"""
Verifies that DAGs with null start_date are properly joined in the query.
If a WHERE clause filters out null start_dates, these DAGs would be excluded.
This test ensures they are still present and joined correctly.
"""
queued_dag_model, _ = dag_with_queued_run
running_dag_model, _ = dag_with_running_run
query = generate_dag_with_latest_run_query(
max_run_filters=[],
order_by=SortParam(allowed_attrs=["last_run_state"], model=DagModel).set_value(
["last_run_state"]
),
)
extended_query = query.add_columns(DagRun.state, DagRun.start_date)
result = session.execute(extended_query).fetchall()
# Find results for each DAG
queued_dag_result = None
running_dag_result = None
for row in result:
dag_model = row[0]
if dag_model.dag_id == queued_dag_model.dag_id:
queued_dag_result = row
elif dag_model.dag_id == running_dag_model.dag_id:
running_dag_result = row
# Assert both DAGs are present
assert queued_dag_result is not None, f"Queued DAG {queued_dag_model.dag_id} should be in results"
assert running_dag_result is not None, f"Running DAG {running_dag_model.dag_id} should be in results"
# if WHERE start_date IS NOT NULL is present,
# the queued DAG should have NO DagRun information joined (state=None, start_date=None)
# But the running DAG should have DagRun information joined
queued_dagrun_state = queued_dag_result[1]
running_dagrun_state = running_dag_result[1]
assert queued_dagrun_state is not None, (
"Queued DAG should have DagRun state joined, but got None. "
"This suggests the WHERE start_date IS NOT NULL condition is excluding it."
)
assert running_dagrun_state is not None, "Running DAG should have DagRun state joined" | test | 1 | {"function_name": "test_queued_runs_with_null_start_date_are_properly_joined", "class_name": "TestGenerateDagWithLatestRunQuery", "qualname": "TestGenerateDagWithLatestRunQuery.test_queued_runs_with_null_start_date_are_properly_joined", "file_path": "airflow-core/tests/unit/api_fastapi/common/db/test_dags.py", "repo_id": "apache/airflow", "loc": 48, "tested_modules": ["__future__", "datetime", "airflow._shared.timezones.timezone", "airflow.api_fastapi.common.db.dags", "airflow.api_fastapi.common.parameters"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/test_open_telemetry_metric_recorder.py:test_register_histogram_metric | # Context:
from unittest.mock import MagicMock, patch
import pytest
from opentelemetry.metrics import NoOpHistogram
from ray._private.telemetry.open_telemetry_metric_recorder import (
OpenTelemetryMetricRecorder,
)
def test_register_gauge_metric(mock_get_meter, mock_set_meter_provider): ...
def test_register_counter_metric(mock_get_meter, mock_set_meter_provider, mock_logger_warning): ...
def test_register_sum_metric(mock_get_meter, mock_set_meter_provider, mock_logger_warning): ...
def test_record_and_export(mock_get_meter, mock_set_meter_provider): ...
def test_record_histogram_aggregated_batch(mock_get_meter, mock_set_meter_provider, mock_logger_warning): ...
# Task:
Write a Python test function `test_register_histogram_metric` to test the register_histogram_metric method of OpenTelemetryMetricRecorder.
Module under test: opentelemetry.metrics, ray._private.metrics_agent, ray._private.telemetry.open_telemetry_metric_recorder | def test_register_histogram_metric(
mock_get_meter, mock_set_meter_provider, mock_logger_warning
):
"""
Test the register_histogram_metric method of OpenTelemetryMetricRecorder.
- Test that it registers a histogram metric with the correct name and description.
- Test that a value can be set for the histogram metric successfully without warnings.
"""
mock_meter = MagicMock()
mock_meter.create_histogram.return_value = NoOpHistogram(name="test_histogram")
mock_get_meter.return_value = mock_meter
recorder = OpenTelemetryMetricRecorder()
recorder.register_histogram_metric(
name="test_histogram", description="Test Histogram", buckets=[1.0, 2.0, 3.0]
)
assert "test_histogram" in recorder._registered_instruments
recorder.set_metric_value(
name="test_histogram",
tags={"label_key": "label_value"},
value=10.0,
)
mock_logger_warning.assert_not_called()
mock_meter.create_histogram.return_value = NoOpHistogram(name="neg_histogram")
recorder.register_histogram_metric(
name="neg_histogram",
description="Histogram with negative first boundary",
buckets=[-5.0, 0.0, 10.0],
)
mids = recorder.get_histogram_bucket_midpoints("neg_histogram")
assert mids == pytest.approx([-7.5, -2.5, 5.0, 20.0]) | test | 0 | {"function_name": "test_register_histogram_metric", "class_name": null, "qualname": "test_register_histogram_metric", "file_path": "python/ray/tests/test_open_telemetry_metric_recorder.py", "repo_id": "ray-project/ray", "loc": 32, "tested_modules": ["opentelemetry.metrics", "ray._private.metrics_agent", "ray._private.telemetry.open_telemetry_metric_recorder"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/mcp/test_amp_mcp.py:TestFetchAmpMCPConfigs.test_returns_empty_on_http_error | # Context:
from unittest.mock import AsyncMock, MagicMock, patch
def agent(): ...
def resolver(agent): ...
def mock_tool_definitions(): ...
class TestBuildMCPConfigFromDict: ...
class TestParseAmpRef: ...
class TestGetMCPToolsAmpIntegration: ...
class TestFetchAmpMCPConfigs:
def test_fetches_configs_successfully(self, mock_get_token, mock_plus_api_class, resolver): ...
def test_omits_missing_slugs(self, mock_get_token, mock_plus_api_class, resolver): ...
def test_returns_empty_on_network_error(self, mock_get_token, mock_plus_api_class, resolver): ...
def test_returns_empty_when_no_token(self, mock_get_token, resolver): ...
# Task:
Write a Python test method `test_returns_empty_on_http_error` in test class `TestFetchAmpMCPConfigs` to verify the behavior of `returns_empty_on_http_error`.
Module under test: crewai.agent.core, crewai.mcp.config, crewai.mcp.tool_resolver | def test_returns_empty_on_http_error(self, mock_get_token, mock_plus_api_class, resolver):
mock_response = MagicMock()
mock_response.status_code = 500
mock_plus_api = MagicMock()
mock_plus_api.get_mcp_configs.return_value = mock_response
mock_plus_api_class.return_value = mock_plus_api
result = resolver._fetch_amp_mcp_configs(["notion"])
assert result == {} | test | 0 | {"function_name": "test_returns_empty_on_http_error", "class_name": "TestFetchAmpMCPConfigs", "qualname": "TestFetchAmpMCPConfigs.test_returns_empty_on_http_error", "file_path": "lib/crewai/tests/mcp/test_amp_mcp.py", "repo_id": "crewAIInc/crewAI", "loc": 10, "tested_modules": ["crewai.agent.core", "crewai.mcp.config", "crewai.mcp.tool_resolver", "crewai.tools.base_tool"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/tests/test_gang_scheduling.py:TestGangResourceReservation.test_gang_resource_reservation | # Context:
import pytest
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve._private.test_utils import check_apps_running
from ray.serve.config import GangPlacementStrategy, GangSchedulingConfig
from ray.util.placement_group import get_current_placement_group, placement_group_table
class Collector: ...
class FailedReplicaStore: ...
class TestGangScheduling: ...
class TestGangConstructorFailure: ...
class TestGangFailureRecovery: ...
class TestGangChildSpawnPlacementGroup: ...
class TestGangResourceReservation:
def test_gang_label_selector(self, ray_cluster): ...
# Task:
Write a Python test method `test_gang_resource_reservation` in test class `TestGangResourceReservation` to verifies the gang PG has the correct bundles, strategy, and.
Module under test: ray, ray._common.test_utils, ray.serve._private.common | def test_gang_resource_reservation(
self,
ray_cluster,
ray_actor_options,
placement_group_bundles,
gang_placement_strategy,
expected_bundles,
expected_strategy,
expect_same_node,
):
"""Verifies the gang PG has the correct bundles, strategy, and
that per-replica bundles are placed according to the strategy."""
cluster = ray_cluster
cluster.add_node(num_cpus=1)
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
serve.start()
deployment_kwargs = {
"num_replicas": 2,
"ray_actor_options": ray_actor_options,
"gang_scheduling_config": GangSchedulingConfig(
gang_size=2,
gang_placement_strategy=gang_placement_strategy,
),
}
if placement_group_bundles is not None:
deployment_kwargs["placement_group_bundles"] = placement_group_bundles
@serve.deployment(**deployment_kwargs)
class GangDeployment:
def get_pg_info(self):
pg = get_current_placement_group()
if pg is None:
return None
pg_table = placement_group_table(pg)
return {
"bundle_specs": pg.bundle_specs,
"strategy": pg_table["strategy"],
"bundles_to_node_id": pg_table["bundles_to_node_id"],
}
def __call__(self):
return "ok"
app = GangDeployment.bind()
handle = serve.run(app, name="gang_reservation_app")
wait_for_condition(
check_apps_running,
apps=["gang_reservation_app"],
)
for _ in range(20):
pg_info = handle.get_pg_info.remote().result()
assert pg_info is not None
assert pg_info["bundle_specs"] == expected_bundles
assert pg_info["strategy"] == expected_strategy
bundles_per_replica = (
len(placement_group_bundles) if placement_group_bundles else 1
)
gang_size = 2
for replica_idx in range(gang_size):
start = replica_idx * bundles_per_replica
replica_nodes = {
pg_info["bundles_to_node_id"][i]
for i in range(start, start + bundles_per_replica)
}
if expect_same_node:
assert len(replica_nodes) == 1
else:
assert len(replica_nodes) == bundles_per_replica
serve.delete("gang_reservation_app")
serve.shutdown() | test | 0 | {"function_name": "test_gang_resource_reservation", "class_name": "TestGangResourceReservation", "qualname": "TestGangResourceReservation.test_gang_resource_reservation", "file_path": "python/ray/serve/tests/test_gang_scheduling.py", "repo_id": "ray-project/ray", "loc": 77, "tested_modules": ["ray", "ray._common.test_utils", "ray.serve._private.common", "ray.serve._private.constants", "ray.serve._private.test_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
streamlit/streamlit:lib/streamlit/web/server/starlette/starlette_server.py:UvicornServer.stop | # Context:
class RetriesExceededError(Exception): ...
def _get_server_address() -> str: ...
def _get_server_port() -> int: ...
def _is_port_manually_set() -> bool: ...
def _server_address_is_unix_socket() -> bool: ...
def _validate_ssl_config() -> tuple[str | None, str | None]: ...
def _get_websocket_settings() -> tuple[int, int]: ...
def _get_uvicorn_config_kwargs() -> dict[str, Any]: ...
def _bind_socket(address: str, port: int, backlog: int) -> socket.socket: ...
class UvicornRunner: ...
class UvicornServer:
def __init__(self, runtime: Runtime) -> None:
self._runtime = runtime
self._server: uvicorn.Server | None = None
self._server_task: asyncio.Task[None] | None = None
self._stopped_event = asyncio.Event()
self._socket: socket.socket | None = None
async def start(self) -> None: ...
def stopped(self) -> asyncio.Event: ...
# Task:
Write a Python method `stop` for the class `UvicornServer` to signal the server to stop.
Returns: None | def stop(self) -> None:
"""Signal the server to stop."""
if self._server is not None:
self._server.should_exit = True | function_simple | 1 | {"cognitive_complexity": 1, "loc": 4, "code_loc": 2, "docstring_loc": 1, "function_name": "stop", "class_name": "UvicornServer", "qualname": "UvicornServer.stop", "file_path": "lib/streamlit/web/server/starlette/starlette_server.py", "repo_id": "streamlit/streamlit", "has_docstring": true, "runnable_level": "class_runnable"} |
sansan0/TrendRadar:trendradar/storage/local.py:LocalStorageBackend._get_configured_time | # Context:
from datetime import datetime, timedelta
from trendradar.utils.time import (
DEFAULT_TIMEZONE,
get_configured_time,
format_date_folder,
format_time_filename,
)
class LocalStorageBackend(SQLiteStorageMixin, StorageBackend):
def __init__(
self,
data_dir: str = "output",
enable_txt: bool = True,
enable_html: bool = True,
timezone: str = DEFAULT_TIMEZONE,
):
"""
初始化本地存储后端
Args:
data_dir: 数据目录路径
enable_txt: 是否启用 TXT 快照
enable_html: 是否启用 HTML 报告
timezone: 时区配置
"""
self.data_dir = Path(data_dir)
self.enable_txt = enable_txt
self.enable_html = enable_html
self.timezone = timezone
self._db_connections: Dict[str, sqlite3.Connection] = {}
def backend_name(self) -> str: ...
def supports_txt(self) -> bool: ...
def _format_date_folder(self, date: Optional[str]) -> str: ...
def _format_time_filename(self) -> str: ...
def _get_db_path(self, date: Optional[str], db_type: str) -> Path: ...
def _get_connection(self, date: Optional[str], db_type: str) -> sqlite3.Connection: ...
def save_news_data(self, data: NewsData) -> bool: ...
def get_today_all_data(self, date: Optional[str]) -> Optional[NewsData]: ...
def get_latest_crawl_data(self, date: Optional[str]) -> Optional[NewsData]: ...
def detect_new_titles(self, current_data: NewsData) -> Dict[str, Dict]: ...
def is_first_crawl_today(self, date: Optional[str]) -> bool: ...
def get_crawl_times(self, date: Optional[str]) -> List[str]: ...
def has_period_executed(self, date_str: str, period_key: str, action: str) -> bool: ...
def record_period_execution(self, date_str: str, period_key: str, action: str) -> bool: ...
def save_rss_data(self, data: RSSData) -> bool: ...
def get_rss_data(self, date: Optional[str]) -> Optional[RSSData]: ...
def detect_new_rss_items(self, current_data: RSSData) -> Dict[str, List[RSSItem]]: ...
def get_latest_rss_data(self, date: Optional[str]) -> Optional[RSSData]: ...
def save_txt_snapshot(self, data: NewsData) -> Optional[str]: ...
def save_html_report(self, html_content: str, filename: str, is_summary: bool) -> Optional[str]: ...
def cleanup(self) -> None: ...
def cleanup_old_data(self, retention_days: int) -> int: ...
def __del__(self): ...
# Task:
Write a Python method `_get_configured_time` for the class `LocalStorageBackend` to 获取配置时区的当前时间.
Returns: datetime | def _get_configured_time(self) -> datetime:
"""获取配置时区的当前时间"""
return get_configured_time(self.timezone) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "_get_configured_time", "class_name": "LocalStorageBackend", "qualname": "LocalStorageBackend._get_configured_time", "file_path": "trendradar/storage/local.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "project_runnable"} |
run-llama/llama_index:llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py:SolrSelectResponse:class_doc | Write a class-level docstring for `SolrSelectResponse` (inherits from BaseModel) which has methods: `from_pysolr_results`, `from_aiosolr_response`. | Solr search response.
See `Solr documentation
<https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
for details. | documentation | 1 | {"doc_type": "class", "class_name": "SolrSelectResponse", "file_path": "llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py", "repo_id": "run-llama/llama_index", "char_length": 160, "methods": ["from_pysolr_results", "from_aiosolr_response"]} |
huggingface/transformers:tests/models/vibevoice_acoustic_tokenizer/test_modeling_vibevoice_acoustic_tokenizer.py:VibeVoiceAcousticTokenizerModelTest.test_use_cache | # Context:
from transformers import (
AutoFeatureExtractor,
AutoModel,
VibeVoiceAcousticTokenizerConfig,
VibeVoiceAcousticTokenizerModel,
)
from transformers.testing_utils import cleanup, is_torch_available, require_torch, slow, torch_device
import torch
class VibeVoiceAcousticTokenizerModelTester: ...
class VibeVoiceAcousticTokenizerIntegrationTest(unittest.TestCase): ...
class VibeVoiceAcousticTokenizerModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (VibeVoiceAcousticTokenizerModel,) if is_torch_available() else ()
is_encoder_decoder = False
test_resize_embeddings = False
test_head_masking = False
test_pruning = False
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels): ...
def setUp(self): ...
def test_config(self): ...
def test_model_forward(self): ...
def test_forward_signature(self): ...
def test_inputs_embeds(self): ...
def test_model_get_set_embeddings(self): ...
def test_retain_grad_hidden_states_attentions(self): ...
def test_attention_outputs(self): ...
def test_hidden_states_output(self): ...
def test_model_parallelism(self): ...
def test_determinism(self): ...
def test_model_outputs_equivalence(self): ...
def test_encode_method(self): ...
def test_decode_method(self): ...
# Task:
Write a Python test method `test_use_cache` in test class `VibeVoiceAcousticTokenizerModelTest` to verify the behavior of `use_cache`.
Module under test: pathlib, transformers, transformers.audio_utils | def test_use_cache(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
model = VibeVoiceAcousticTokenizerModel(config=config).to(torch_device).eval()
input_values = inputs_dict["input_values"]
with torch.no_grad():
output = model(input_values, use_cache=True)
self.assertIsNotNone(output.padding_cache)
self.assertIsNotNone(output.latents)
self.assertIsNotNone(output.audio) | test | 0 | {"function_name": "test_use_cache", "class_name": "VibeVoiceAcousticTokenizerModelTest", "qualname": "VibeVoiceAcousticTokenizerModelTest.test_use_cache", "file_path": "tests/models/vibevoice_acoustic_tokenizer/test_modeling_vibevoice_acoustic_tokenizer.py", "repo_id": "huggingface/transformers", "loc": 11, "tested_modules": ["pathlib", "transformers", "transformers.audio_utils", "transformers.testing_utils", "test_configuration_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
huggingface/transformers:src/transformers/models/glm4v/modular_glm4v.py:Glm4vProcessor.__call__ | # Context:
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...video_utils import VideoInput
class Glm4vVisionConfig(PreTrainedConfig): ...
class Glm4vTextConfig(PreTrainedConfig): ...
class Glm4vConfig(PreTrainedConfig): ...
class Glm4vRMSNorm(Glm4RMSNorm): ...
class Glm4VisionMlp(Qwen2_5_VLMLP): ...
class Glm4vVisionPatchEmbed(Qwen2_5_VisionPatchEmbed): ...
class Glm4vVisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding): ...
class Glm4vVisionPatchMerger(nn.Module): ...
class Glm4vVisionEmbeddings(nn.Module): ...
class Glm4vVisionAttention(Qwen2_5_VLVisionAttention): ...
class Glm4vVisionBlock(Qwen2_5_VLVisionBlock): ...
class Glm4vTextRotaryEmbedding(Glm4RotaryEmbedding): ...
def rotate_half_llm(x): ...
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim): ...
class Glm4vTextAttention(nn.Module): ...
class Glm4vTextMLP(Glm4MLP): ...
class Glm4vTextDecoderLayer(GradientCheckpointingLayer): ...
class Glm4vModelOutputWithPast(Qwen2_5_VLModelOutputWithPast): ...
class Glm4vPreTrainedModel(Qwen2_5_VLPreTrainedModel): ...
class Glm4vVisionModel(Glm4vPreTrainedModel): ...
class Glm4vTextModel(Qwen2_5_VLTextModel): ...
class Glm4vModel(Qwen2VLModel): ...
class Glm4vCausalLMOutputWithPast(Qwen2_5_VLCausalLMOutputWithPast): ...
class Glm4vForConditionalGeneration(Qwen2_5_VLForConditionalGeneration): ...
class Glm4vProcessorKwargs(Qwen2VLProcessorKwargs): ...
class Glm4vProcessor(Qwen2VLProcessor):
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
self.video_start_id = tokenizer.convert_tokens_to_ids("<|begin_of_video|>")
self.video_end_id = tokenizer.convert_tokens_to_ids("<|end_of_video|>")
def replace_frame_token_id(self, timestamp_sec): ...
# Task:
Write a Python method `__call__` for the class `Glm4vProcessor` to process image, text, and video inputs and return a `BatchFeature`.
Parameters: images: ImageInput | None, text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput], videos: VideoInput | None
Returns: BatchFeature | def __call__(
self,
images: ImageInput | None = None,
text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] = None,
videos: VideoInput | None = None,
**kwargs: Unpack[Glm4vProcessorKwargs],
) -> BatchFeature:
r"""
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Glm4vProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
# If user has not requested video metadata, pop it
if not kwargs.get("return_metadata"):
video_metadata = videos_inputs.pop("video_metadata")
else:
video_metadata = videos_inputs["video_metadata"]
video_grid_thw = videos_inputs["video_grid_thw"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2
video_index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_frames = video_grid_thw[video_index][0]
video_structure = ""
metadata = video_metadata[video_index]
if metadata.fps is None:
logger.warning_once(
"SmolVLM requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 if metadata.fps is None else metadata.fps
timestamps = metadata.timestamps[::2] # mrope
unique_timestamps = []
for idx in range(0, len(timestamps)):
unique_timestamps.append(timestamps[idx])
selected_timestamps = unique_timestamps[:num_frames]
while len(selected_timestamps) < num_frames:
selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)
for frame_idx in range(num_frames):
timestamp_sec = selected_timestamps[frame_idx]
frame_structure = self.replace_frame_token_id(timestamp_sec)
video_structure += frame_structure
text[i] = text[i].replace(self.video_token, video_structure, 1)
num_image_tokens = (
video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0]
)
for frame_idx in range(num_frames):
if self.image_token in text[i]:
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
video_index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
# Replace 0 -> 2 only inside video segments because GLM4v
# uses the same special token to denote images and video
# Otherwise replace 0 -> 1 for image modality
starts = np.cumsum(array_ids == self.video_start_id, axis=1)
ends = np.cumsum(array_ids == self.video_end_id, axis=1)
is_video_modality = starts > ends
mm_token_type_ids[(array_ids == self.image_token_id) & is_video_modality] = 2
mm_token_type_ids[(array_ids == self.image_token_id) & (~is_video_modality)] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors) | function_complex | 0 | {"cognitive_complexity": 50, "loc": 120, "code_loc": 82, "docstring_loc": 13, "function_name": "__call__", "class_name": "Glm4vProcessor", "qualname": "Glm4vProcessor.__call__", "file_path": "src/transformers/models/glm4v/modular_glm4v.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/unit/test_resource_and_label_spec.py:test_env_resource_overrides_with_conflict | # Context:
import json
import ray._private.ray_constants as ray_constants
from ray._private.resource_and_label_spec import ResourceAndLabelSpec
class FakeAcceleratorManager(AcceleratorManager): ...
def test_resource_and_label_spec_resolves_with_params(): ...
def test_resource_and_label_spec_resolves_auto_detect(monkeypatch): ...
def test_to_resource_dict_with_invalid_types(): ...
def test_resolve_memory_resources(monkeypatch): ...
def test_resolve_raises_on_reserved_head_resource(): ...
def test_resolve_handles_no_accelerators(): ...
def test_label_spec_resolve_merged_env_labels(monkeypatch): ...
def test_merge_labels_populates_defaults(monkeypatch): ...
def test_resolve_raises_if_exceeds_visible_devices(): ...
def test_resolve_sets_accelerator_resources(): ...
def test_respect_configured_num_gpus(): ...
def test_resolve_sets_non_gpu_accelerator(): ...
# Task:
Write a Python test function `test_env_resource_overrides_with_conflict` to validate that RESOURCES_ENVIRONMENT_VARIABLE overrides Ray Param resources.
Module under test: ray._common.constants, ray._private.accelerators, ray._private.resource_and_label_spec | def test_env_resource_overrides_with_conflict(monkeypatch):
"""Validate that RESOURCES_ENVIRONMENT_VARIABLE overrides Ray Param resources."""
# Prepare environment overrides
env_resources = {
"CPU": 8,
"GPU": 4,
"TPU": 4,
}
monkeypatch.setenv(
ray_constants.RESOURCES_ENVIRONMENT_VARIABLE, json.dumps(env_resources)
)
ray_params_resources = {"TPU": 8, "B200": 4}
# num_cpus, num_gpus, and conflicting resources should override
spec = ResourceAndLabelSpec(
num_cpus=2,
num_gpus=1,
resources=ray_params_resources,
labels={},
)
spec.resolve(is_head=True)
# Environment overrides values take precedence after resolve
assert spec.num_cpus == 8
assert spec.num_gpus == 4
assert spec.resources["TPU"] == 4
assert spec.resources["B200"] == 4 | test | 0 | {"function_name": "test_env_resource_overrides_with_conflict", "class_name": null, "qualname": "test_env_resource_overrides_with_conflict", "file_path": "python/ray/tests/unit/test_resource_and_label_spec.py", "repo_id": "ray-project/ray", "loc": 29, "tested_modules": ["ray._common.constants", "ray._private.accelerators", "ray._private.resource_and_label_spec"], "has_docstring": true, "runnable_level": "plib_runnable"} |
vllm-project/vllm:vllm/lora/layers/logits_processor.py:LogitsProcessorWithLoRA:class_doc | Write a class-level docstring for `LogitsProcessorWithLoRA` (inherits from BaseLayerWithLoRA) which has methods: `__init__`, `logits_as_input`, `vocab_size`, `scale`, `soft_cap`. | LoRA wrapper for LogitsProcessor, with extra logic to handle the
application of the LoRA adapter and added LoRA vocabulary.
Args:
base_layer: LogitsProcessor layer
hidden_size: hidden size of the model
dtype: data type of the model
device: device of the model
sharded_to_full_mapping: index mapping from sharded vocab to full vocab
received from base_layer.get_sharded_to_full_mapping(). If None,
no reindexing will be done. | documentation | 1 | {"doc_type": "class", "class_name": "LogitsProcessorWithLoRA", "file_path": "vllm/lora/layers/logits_processor.py", "repo_id": "vllm-project/vllm", "char_length": 461, "methods": ["__init__", "logits_as_input", "vocab_size", "scale", "soft_cap", "use_all_gather", "org_vocab_size", "include_gpu_probs_tensor", "should_modify_greedy_probs_inplace", "create_lora_weights"]} |
fastapi/fastapi:tests/test_tutorial/test_python_types/test_tutorial006.py:test_process_items | # Context:
from unittest.mock import patch
from docs_src.python_types.tutorial006_py310 import process_items
# Task:
Write a Python test function `test_process_items` to verify the behavior of `process_items`.
Module under test: docs_src.python_types.tutorial006_py310 | def test_process_items():
with patch("builtins.print") as mock_print:
process_items(["item_a", "item_b", "item_c"])
assert mock_print.call_count == 3
call_args = [arg.args for arg in mock_print.call_args_list]
assert call_args == [
("item_a",),
("item_b",),
("item_c",),
] | test | 1 | {"function_name": "test_process_items", "class_name": null, "qualname": "test_process_items", "file_path": "tests/test_tutorial/test_python_types/test_tutorial006.py", "repo_id": "fastapi/fastapi", "loc": 11, "tested_modules": ["docs_src.python_types.tutorial006_py310"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/doge/modular_doge.py:license_header | Add a Apache-2.0 license header comment for the project 'transformers', authored by Jingze Shi and the HuggingFace Inc, year 2025. | # Copyright 2025 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
#
# The Doge family of small language models is trained by SmallDoge Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | license | 0 | {"license_type": "Apache-2.0", "author": "Jingze Shi and the HuggingFace Inc", "year": "2025", "source": "header", "repo_id": "huggingface/transformers"} |
run-llama/llama_index:llama-index-core/tests/agent/workflow/test_thinking_delta.py:test_agent_stream_default_thinking_delta | # Context:
from llama_index.core.agent.workflow.workflow_events import AgentStream
class MockThinkingLLM(MockLLM): ...
def test_agent_stream_with_thinking_delta(): ...
def test_agent_stream_default_thinking_delta_none(): ...
def test_thinking_delta_extraction(): ...
async def test_streaming_an_agent_with_thinking_delta_none(): ...
async def test_function_agent_comprehensive_thinking_streaming(): ...
async def test_codeact_agent_comprehensive_thinking_streaming(): ...
async def test_react_agent_comprehensive_thinking_streaming(): ...
async def test_agents_handle_missing_thinking_delta(): ...
# Task:
Write a Python test function `test_agent_stream_default_thinking_delta` to test AgentStream defaults thinking_delta to None.
Module under test: typing, llama_index.core.base.llms.types, llama_index.core.llms | def test_agent_stream_default_thinking_delta():
"""Test AgentStream defaults thinking_delta to None."""
stream = AgentStream(
delta="Hello", response="Hello there", current_agent_name="test_agent"
)
assert stream.thinking_delta is None | test | 1 | {"function_name": "test_agent_stream_default_thinking_delta", "class_name": null, "qualname": "test_agent_stream_default_thinking_delta", "file_path": "llama-index-core/tests/agent/workflow/test_thinking_delta.py", "repo_id": "run-llama/llama_index", "loc": 7, "tested_modules": ["typing", "llama_index.core.base.llms.types", "llama_index.core.llms", "llama_index.core.agent.workflow", "llama_index.core.agent.workflow.codeact_agent"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py:PyArrowFileSystem._filter_files | # Context:
import os
from typing import List, Optional, Tuple, Union
import pyarrow.fs as pa_fs
class PyArrowFileSystem(BaseCloudFileSystem):
def get_fs_and_path(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ...
def _create_azure_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ...
def _create_abfss_filesystem(object_uri: str) -> Tuple[pa_fs.FileSystem, str]: ...
def get_file(object_uri: str, decode_as_utf_8: bool) -> Optional[Union[str, bytes]]: ...
def list_subfolders(folder_uri: str) -> List[str]: ...
def download_files(path: str, bucket_uri: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]], max_concurrency: int, chunk_size: int) -> None: ...
def upload_files(local_path: str, bucket_uri: str) -> None: ...
# Task:
Write a Python method `_filter_files` for the class `PyArrowFileSystem` to filter files from cloud storage based on inclusion and exclusion criteria.
Parameters: fs: pa_fs.FileSystem, source_path: str, destination_path: str, substrings_to_include: Optional[List[str]], suffixes_to_exclude: Optional[List[str]]
Returns: List[Tuple[str, str]] | def _filter_files(
fs: pa_fs.FileSystem,
source_path: str,
destination_path: str,
substrings_to_include: Optional[List[str]] = None,
suffixes_to_exclude: Optional[List[str]] = None,
) -> List[Tuple[str, str]]:
"""Filter files from cloud storage based on inclusion and exclusion criteria.
Args:
fs: PyArrow filesystem instance
source_path: Source path in cloud storage
destination_path: Local destination path
substrings_to_include: Only include files containing these substrings
suffixes_to_exclude: Exclude files ending with these suffixes
Returns:
List of tuples containing (source_file_path, destination_file_path)
"""
file_selector = pa_fs.FileSelector(source_path, recursive=True)
file_infos = fs.get_file_info(file_selector)
path_pairs = []
for file_info in file_infos:
if file_info.type != pa_fs.FileType.File:
continue
rel_path = file_info.path[len(source_path) :].lstrip("/")
# Apply filters
if substrings_to_include:
if not any(
substring in rel_path for substring in substrings_to_include
):
continue
if suffixes_to_exclude:
if any(rel_path.endswith(suffix) for suffix in suffixes_to_exclude):
continue
path_pairs.append(
(file_info.path, os.path.join(destination_path, rel_path))
)
return path_pairs | function_complex | 0 | {"cognitive_complexity": 13, "loc": 45, "code_loc": 19, "docstring_loc": 12, "function_name": "_filter_files", "class_name": "PyArrowFileSystem", "qualname": "PyArrowFileSystem._filter_files", "file_path": "python/ray/llm/_internal/common/utils/cloud_filesystem/pyarrow_filesystem.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:rllib/examples/envs/classes/multi_agent/footsies/encoder.py:FootsiesEncoder.encode | # Context:
import copy
from typing import Any, Optional, Union
import numpy as np
from ray.rllib.examples.envs.classes.multi_agent.footsies.game.proto import (
footsies_service_pb2 as footsies_pb2,
)
def one_hot_encoder(value: Union[int, float, str], collection: list[Union[int, float, str]]) -> np.ndarray: ...
class FootsiesEncoder:
def __init__(self, observation_delay: int):
self._encoding_history = {
agent_id: collections.deque(maxlen=int(observation_delay))
for agent_id in ["p1", "p2"]
}
self.observation_delay = observation_delay
self._last_common_state: Optional[np.ndarray] = None
self._action_id_values = list(constants.FOOTSIES_ACTION_IDS.values())
def encode_common_state(game_state: footsies_pb2.GameState) -> np.ndarray: ...
def _encode_input_buffer(input_buffer: list[int], last_n: Optional[int]) -> np.ndarray: ...
def encode_player_state(self, player_state: footsies_pb2.PlayerState) -> dict[str, Union[int, float, list, np.ndarray]]: ...
def get_last_encoding(self) -> Optional[dict[str, np.ndarray]]: ...
def reset(self): ...
def _encode_action_id(self, action_id: int) -> np.ndarray: ...
# Task:
Write a Python method `encode` for the class `FootsiesEncoder` to encodes the game state into observations for all agents.
Parameters: game_state: footsies_pb2.GameState
Returns: dict[str, Any] | def encode(
self,
game_state: footsies_pb2.GameState,
) -> dict[str, Any]:
"""Encodes the game state into observations for all agents.
:param game_state: The game state to encode
:type game_state: footsies_pb2.GameState
:return: The encoded observations for all agents.
:rtype: dict[str, Any]
"""
common_state = self.encode_common_state(game_state)
p1_encoding = self.encode_player_state(game_state.player1)
p2_encoding = self.encode_player_state(game_state.player2)
observation_delay = min(
self.observation_delay, len(self._encoding_history["p1"])
)
if observation_delay > 0:
p1_delayed_encoding = self._encoding_history["p1"][-observation_delay]
p2_delayed_encoding = self._encoding_history["p2"][-observation_delay]
else:
p1_delayed_encoding = copy.deepcopy(p1_encoding)
p2_delayed_encoding = copy.deepcopy(p2_encoding)
self._encoding_history["p1"].append(p1_encoding)
self._encoding_history["p2"].append(p2_encoding)
self._last_common_state = common_state
# Create features dictionary
features = {}
current_index = 0
# Common state
features["common_state"] = {
"start": current_index,
"length": len(common_state),
}
current_index += len(common_state)
# Concatenate the observations for the undelayed encoding
p1_encoding = np.hstack(list(p1_encoding.values()), dtype=np.float32)
p2_encoding = np.hstack(list(p2_encoding.values()), dtype=np.float32)
# Concatenate the observations for the delayed encoding
p1_delayed_encoding = np.hstack(
list(p1_delayed_encoding.values()), dtype=np.float32
)
p2_delayed_encoding = np.hstack(
list(p2_delayed_encoding.values()), dtype=np.float32
)
p1_centric_observation = np.hstack(
[common_state, p1_encoding, p2_delayed_encoding]
)
p2_centric_observation = np.hstack(
[common_state, p2_encoding, p1_delayed_encoding]
)
return {"p1": p1_centric_observation, "p2": p2_centric_observation} | function_simple | 0 | {"cognitive_complexity": 2, "loc": 62, "code_loc": 37, "docstring_loc": 7, "function_name": "encode", "class_name": "FootsiesEncoder", "qualname": "FootsiesEncoder.encode", "file_path": "rllib/examples/envs/classes/multi_agent/footsies/encoder.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
run-llama/llama_index:llama-index-core/tests/memory/blocks/test_vector.py:test_vector_memory_block_get | # Context:
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.memory.memory_blocks.vector import VectorMemoryBlock
class MockVectorStore(BasePydanticVectorStore): ...
class MockNodePostprocessor(BaseNodePostprocessor): ...
def mock_embedding(): ...
def mock_vector_store(): ...
def vector_memory_block(mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding): ...
async def test_vector_memory_block_put(vector_memory_block: VectorMemoryBlock): ...
async def test_empty_messages(vector_memory_block: VectorMemoryBlock): ...
async def test_message_without_text(vector_memory_block: VectorMemoryBlock): ...
async def test_retrieval_context_window(mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding): ...
async def test_node_postprocessors(mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding): ...
async def test_format_template(mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding): ...
# Task:
Write a Python test function `test_vector_memory_block_get` to test getting messages from the vector memory block.
Module under test: typing, llama_index.core.base.llms.types, llama_index.core.embeddings | async def test_vector_memory_block_get(vector_memory_block: VectorMemoryBlock):
"""Test getting messages from the vector memory block."""
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
ChatMessage(role="user", content="What about Germany?"),
ChatMessage(role="assistant", content="The capital of Germany is Berlin."),
]
await vector_memory_block.aput(messages=history_messages)
# Create a new query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information
result = await vector_memory_block.aget(messages=query_messages)
# Check that we got a result
assert result != ""
assert "capital of France is Paris" in result | test | 1 | {"function_name": "test_vector_memory_block_get", "class_name": null, "qualname": "test_vector_memory_block_get", "file_path": "llama-index-core/tests/memory/blocks/test_vector.py", "repo_id": "run-llama/llama_index", "loc": 21, "tested_modules": ["typing", "llama_index.core.base.llms.types", "llama_index.core.embeddings", "llama_index.core.memory.memory_blocks.vector", "llama_index.core.postprocessor.types"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tests/test_autoscaler_azure.py:TestAzureAvailabilityZonePrecedence.test_provider_empty_string_allows_auto_selection | # Context:
class TestAzureAvailabilityZones(unittest.TestCase): ...
class TestAzureAvailabilityZonePrecedence(unittest.TestCase):
def setUp(self): ...
def _create_mock_provider(self, provider_config): ...
def _extract_zone_logic(self, provider, node_config): ...
def test_node_availability_zone_overrides_provider(self): ...
def test_provider_availability_zone_used_when_no_node_override(self): ...
def test_none_disables_zones_at_node_level(self): ...
def test_no_zones_when_neither_provider_nor_node_specify(self): ...
def test_node_empty_string_overrides_provider_zones(self): ...
def test_node_auto_overrides_provider_zones(self): ...
def test_provider_none_disables_zones(self): ...
def test_provider_auto_allows_auto_selection(self): ...
def test_node_null_overrides_provider_zones(self): ...
def test_provider_null_disables_zones(self): ...
def test_complex_override_scenario(self): ...
def test_mixed_case_precedence(self): ...
def test_whitespace_handling_in_precedence(self): ...
# Task:
Write a Python test method `test_provider_empty_string_allows_auto_selection` in test class `TestAzureAvailabilityZonePrecedence` to test that provider-level empty string allows auto-selection.
Module under test: ray.autoscaler._private._azure.node_provider | def test_provider_empty_string_allows_auto_selection(self):
"""Test that provider-level empty string allows auto-selection."""
provider = self._create_mock_provider({"availability_zone": ""})
node_config = {"azure_arm_parameters": {"vmSize": "Standard_D2s_v3"}}
zones, source = self._extract_zone_logic(provider, node_config)
self.assertEqual(zones, [])
self.assertEqual(source, "provider availability_zone") | test | 0 | {"function_name": "test_provider_empty_string_allows_auto_selection", "class_name": "TestAzureAvailabilityZonePrecedence", "qualname": "TestAzureAvailabilityZonePrecedence.test_provider_empty_string_allows_auto_selection", "file_path": "python/ray/tests/test_autoscaler_azure.py", "repo_id": "ray-project/ray", "loc": 9, "tested_modules": ["ray.autoscaler._private._azure.node_provider"], "has_docstring": true, "runnable_level": "class_runnable"} |
huggingface/diffusers:tests/pipelines/cosmos/test_cosmos2_5_predict.py:Cosmos2_5_PredictPipelineFastTests.test_attention_slicing_forward_pass | # Context:
import numpy as np
from ...testing_utils import enable_full_determinism, torch_device
from ..test_pipelines_common import PipelineTesterMixin, to_np
class Cosmos2_5_PredictBaseWrapper(Cosmos2_5_PredictBasePipeline): ...
class Cosmos2_5_PredictPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = Cosmos2_5_PredictBaseWrapper
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
required_optional_params = frozenset(
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
test_group_offloading = True
def get_dummy_components(self): ...
def get_dummy_inputs(self, device, seed): ...
def test_components_function(self): ...
def test_inference(self): ...
def test_callback_inputs(self): ...
def test_inference_batch_single_identical(self): ...
def test_save_load_optional_components(self, expected_max_difference): ...
def test_serialization_with_variants(self): ...
def test_torch_dtype_dict(self): ...
def test_encode_prompt_works_in_isolation(self): ...
# Task:
Write a Python test method `test_attention_slicing_forward_pass` in test class `Cosmos2_5_PredictPipelineFastTests` to verify the behavior of `attention_slicing_forward_pass`.
Module under test: transformers, diffusers, testing_utils | def test_attention_slicing_forward_pass(
self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
):
if not getattr(self, "test_attention_slicing", True):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
for component in pipe.components.values():
if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator_device = "cpu"
inputs = self.get_dummy_inputs(generator_device)
output_without_slicing = pipe(**inputs)[0]
pipe.enable_attention_slicing(slice_size=1)
inputs = self.get_dummy_inputs(generator_device)
output_with_slicing1 = pipe(**inputs)[0]
pipe.enable_attention_slicing(slice_size=2)
inputs = self.get_dummy_inputs(generator_device)
output_with_slicing2 = pipe(**inputs)[0]
if test_max_difference:
max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
self.assertLess(
max(max_diff1, max_diff2),
expected_max_diff,
"Attention slicing should not affect the inference results",
) | test | 1 | {"function_name": "test_attention_slicing_forward_pass", "class_name": "Cosmos2_5_PredictPipelineFastTests", "qualname": "Cosmos2_5_PredictPipelineFastTests.test_attention_slicing_forward_pass", "file_path": "tests/pipelines/cosmos/test_cosmos2_5_predict.py", "repo_id": "huggingface/diffusers", "loc": 34, "tested_modules": ["transformers", "diffusers", "testing_utils", "pipeline_params", "test_pipelines_common"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/expressions/test_predicate.py:TestPredicateIntegration.test_filter_in_pipeline_with_dataset | # Context:
import pandas as pd
import ray
from ray.data._internal.util import rows_same
from ray.data.expressions import col
class TestPredicateIntegration:
def test_null_predicates_with_dataset(self, ray_start_regular_shared): ...
def test_membership_predicates_with_dataset(self, ray_start_regular_shared): ...
def test_null_and_membership_with_dataset(self, ray_start_regular_shared, test_data, expression, expected_results, test_id): ...
def test_filter_expressions_with_dataset(self, ray_start_regular_shared, filter_expr, test_data, expected_flags, test_id): ...
# Task:
Write a Python test method `test_filter_in_pipeline_with_dataset` in test class `TestPredicateIntegration` to test filter expressions in a data processing pipeline.
Module under test: packaging.version, ray.data._internal.util, ray.data._internal.utils.arrow_utils | def test_filter_in_pipeline_with_dataset(self, ray_start_regular_shared):
"""Test filter expressions in a data processing pipeline."""
test_data = [
{"product": "A", "quantity": 10, "price": 100, "region": "North"},
{"product": "B", "quantity": 5, "price": 200, "region": "South"},
{"product": "C", "quantity": 20, "price": 50, "region": "North"},
{"product": "D", "quantity": 15, "price": 75, "region": "East"},
{"product": "E", "quantity": 3, "price": 300, "region": "West"},
]
ds = ray.data.from_items(test_data)
result = (
ds.with_column("revenue", col("quantity") * col("price"))
.with_column("is_high_value", col("revenue") >= 1000)
.with_column("is_bulk_order", col("quantity") >= 10)
.with_column("is_premium", col("price") >= 100)
.with_column(
"needs_special_handling",
(col("is_high_value")) | (col("is_bulk_order") & col("is_premium")),
)
.with_column("is_north_region", col("region") == "North")
.to_pandas()
)
expected = pd.DataFrame(
{
"product": ["A", "B", "C", "D", "E"],
"quantity": [10, 5, 20, 15, 3],
"price": [100, 200, 50, 75, 300],
"region": ["North", "South", "North", "East", "West"],
"revenue": [1000, 1000, 1000, 1125, 900],
"is_high_value": [True, True, True, True, False],
"is_bulk_order": [True, False, True, True, False],
"is_premium": [True, True, False, False, True],
"needs_special_handling": [True, True, True, True, False],
"is_north_region": [True, False, True, False, False],
}
)
assert rows_same(result, expected) | test | 0 | {"function_name": "test_filter_in_pipeline_with_dataset", "class_name": "TestPredicateIntegration", "qualname": "TestPredicateIntegration.test_filter_in_pipeline_with_dataset", "file_path": "python/ray/data/tests/expressions/test_predicate.py", "repo_id": "ray-project/ray", "loc": 41, "tested_modules": ["packaging.version", "ray.data._internal.util", "ray.data._internal.utils.arrow_utils", "ray.data.expressions", "ray.data.tests.conftest"], "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-files/tests/processing/test_processor.py:TestFileProcessorPerFileMode.test_file_custom_mode | # Context:
from crewai_files import FileBytes, ImageFile
class TestFileProcessorInit: ...
class TestFileProcessorValidate: ...
class TestFileProcessorProcess: ...
class TestFileProcessorProcessFiles: ...
class TestFileHandlingEnum: ...
class TestFileProcessorPerFileMode:
def test_file_default_mode_is_auto(self): ...
def test_processor_respects_file_mode(self): ...
# Task:
Write a Python test method `test_file_custom_mode` in test class `TestFileProcessorPerFileMode` to test setting custom mode on file.
Module under test: crewai_files, crewai_files.processing.constraints, crewai_files.processing.enums | def test_file_custom_mode(self):
"""Test setting custom mode on file."""
file = ImageFile(
source=FileBytes(data=MINIMAL_PNG, filename="test.png"), mode="strict"
)
assert file.mode == "strict" | test | 0 | {"function_name": "test_file_custom_mode", "class_name": "TestFileProcessorPerFileMode", "qualname": "TestFileProcessorPerFileMode.test_file_custom_mode", "file_path": "lib/crewai-files/tests/processing/test_processor.py", "repo_id": "crewAIInc/crewAI", "loc": 6, "tested_modules": ["crewai_files", "crewai_files.processing.constraints", "crewai_files.processing.enums", "crewai_files.processing.exceptions", "crewai_files.processing.processor"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/gemma3n/test_modeling_gemma3n.py:Gemma3nTextModelTest.test_generate_with_static_cache | # Context:
import copy
import pytest
from transformers import (
AutoModelForCausalLM,
AutoProcessor,
AutoTokenizer,
Gemma3nAudioConfig,
Gemma3nAudioFeatureExtractor,
Gemma3nConfig,
StaticCache,
is_torch_available,
)
from transformers.testing_utils import (
Expectations,
cleanup,
require_deterministic_for_xpu,
require_torch,
require_torch_accelerator,
set_config_for_less_flaky_test,
set_model_for_less_flaky_test,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin, assert_similar_generate_outputs
import torch
class Gemma3nAudioModelTester: ...
class Gemma3nAudioModelTest(ModelTesterMixin, unittest.TestCase): ...
class Gemma3nTextModelTester(CausalLMModelTester): ...
class Gemma3nVision2TextModelTester: ...
class Gemma3nVision2TextModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): ...
class Gemma3nIntegrationTest(unittest.TestCase): ...
class Gemma3nTextModelTest(CausalLMModelTest, unittest.TestCase):
model_tester_class = Gemma3nTextModelTester
_is_stateful = True
model_split_percents = [0.5, 0.6]
def _check_hidden_states_for_generate(self, batch_size, hidden_states, prompt_length, output_length, config, use_cache): ...
def test_eager_matches_sdpa_inference(self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels): ...
def test_generate_with_quant_cache(self): ...
def test_eager_padding_matches_padding_free_with_position_ids(self): ...
def test_sdpa_padding_matches_padding_free_with_position_ids(self): ...
def test_flash_attn_2_fp32_ln(self): ...
def test_generate_from_inputs_embeds_with_static_cache(self): ...
def test_model_rope_scaling_frequencies(self): ...
# Task:
Write a Python test method `test_generate_with_static_cache` in test class `Gemma3nTextModelTest` to tests that generating with static cache give almost same results as with dynamic cache, and the output cache.
Module under test: datasets, parameterized, transformers | def test_generate_with_static_cache(self):
"""
Tests that generating with static cache give almost same results as with dynamic cache, and the output cache
has the expected shapes
"""
for model_class in self.all_generative_model_classes:
# Here, we should ideally not skip any model, and test them all. However, some old models cannot correctly
# use a static cache because they don't create the causal masks correctly.
# TODO: cyril -> relax this by adding a `_support_static_cache` attribute
if not model_class._can_compile_fullgraph:
self.skipTest(reason="This model does not support the static cache format")
config, inputs_dict = self.prepare_config_and_inputs_for_generate()
set_config_for_less_flaky_test(config)
main_input = inputs_dict[model_class.main_input_name]
if config.is_encoder_decoder:
self.skipTest(reason="This model is encoder-decoder and has Encoder-Decoder Cache")
config.is_decoder = True
batch_size = main_input.shape[0]
seq_length = self.model_tester.seq_length
max_new_tokens = 20
for dtype in (torch.float32, torch.bfloat16):
model = model_class(copy.deepcopy(config)).to(torch_device).to(dtype).eval()
inputs_dict = {
k: v.to(dtype) if isinstance(v, torch.Tensor) and torch.is_floating_point(v) else v
for k, v in inputs_dict.items()
}
set_model_for_less_flaky_test(model)
generation_kwargs = {
"max_new_tokens": max_new_tokens,
"return_dict_in_generate": True, # Required to return `past_key_values`
"output_scores": True,
"use_cache": True,
}
static_cache_generation = model.generate(
**generation_kwargs, **inputs_dict, cache_implementation="static"
)
# Check 1: The cache shapes must match the expected shapes
max_cache_len = seq_length + max_new_tokens - 1 # cache len = gen len - 1, the last token has no cache
text_config = config.text_config if hasattr(config, "text_config") else config
head_dim = (
getattr(text_config, "head_dim", None)
or text_config.hidden_size // text_config.num_attention_heads
)
num_key_value_heads = (
text_config.num_attention_heads
if getattr(text_config, "num_key_value_heads", None) is None
else text_config.num_key_value_heads
)
num_hidden_layers = text_config.num_hidden_layers
cache_shape = (batch_size, num_key_value_heads, max_cache_len, head_dim)
self.assertTrue(isinstance(static_cache_generation.past_key_values, StaticCache))
self.assertTrue(
len(static_cache_generation.past_key_values)
== num_hidden_layers - text_config.num_kv_shared_layers
)
self.assertTrue(static_cache_generation.past_key_values.layers[0].keys.shape == cache_shape)
# Check 2: The outputs must be similar to the case with dynamic cache
dynamic_cache_generation = model.generate(**generation_kwargs, **inputs_dict)
assert_similar_generate_outputs(dynamic_cache_generation, static_cache_generation) | test | 0 | {"function_name": "test_generate_with_static_cache", "class_name": "Gemma3nTextModelTest", "qualname": "Gemma3nTextModelTest.test_generate_with_static_cache", "file_path": "tests/models/gemma3n/test_modeling_gemma3n.py", "repo_id": "huggingface/transformers", "loc": 67, "tested_modules": ["datasets", "parameterized", "transformers", "transformers.testing_utils", "causal_lm_tester"], "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/agent/variable_detector.py:_detect_in_action | # Context:
from browser_use.agent.views import AgentHistoryList, DetectedVariable
from browser_use.dom.views import DOMInteractedElement
def detect_variables_in_history(history: AgentHistoryList) -> dict[str, DetectedVariable]: ...
def _detect_variable_type(value: str, element: DOMInteractedElement | None) -> tuple[str, str | None] | None: ...
def _detect_from_attributes(attributes: dict[str, str]) -> tuple[str, str | None] | None: ...
def _detect_from_value_pattern(value: str) -> tuple[str, str | None] | None: ...
def _ensure_unique_name(base_name: str, existing: dict[str, DetectedVariable]) -> str: ...
# Task:
Write a Python function `_detect_in_action` to detect variables in a single action using element context.
Parameters: action_dict: dict, element: DOMInteractedElement | None, detected: dict[str, DetectedVariable], detected_values: set[str]
Returns: None | def _detect_in_action(
action_dict: dict,
element: DOMInteractedElement | None,
detected: dict[str, DetectedVariable],
detected_values: set[str],
) -> None:
"""Detect variables in a single action using element context"""
# Extract action type and parameters
for action_type, params in action_dict.items():
if not isinstance(params, dict):
continue
# Check fields that commonly contain variables
fields_to_check = ['text', 'query']
for field in fields_to_check:
if field not in params:
continue
value = params[field]
if not isinstance(value, str) or not value.strip():
continue
# Skip if we already detected this exact value
if value in detected_values:
continue
# Try to detect variable type (with element context)
var_info = _detect_variable_type(value, element)
if not var_info:
continue
var_name, var_format = var_info
# Ensure unique variable name
var_name = _ensure_unique_name(var_name, detected)
# Add detected variable
detected[var_name] = DetectedVariable(
name=var_name,
original_value=value,
type='string',
format=var_format,
)
detected_values.add(value) | function_complex | 0 | {"cognitive_complexity": 18, "loc": 47, "code_loc": 24, "docstring_loc": 1, "function_name": "_detect_in_action", "class_name": null, "qualname": "_detect_in_action", "file_path": "browser_use/agent/variable_detector.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/kernels/helion/config_manager.py:module_doc | Write a module-level docstring for the Python module `config_manager` which contains class `ConfigSet`, class `ConfigManager`. | Configuration management for Helion kernels.
This module provides centralized configuration file management for Helion custom
operations, including naming conventions, directory resolution, and file I/O.
Config File Structure
---------------------
Each kernel has a single JSON config file: {kernel_name}.json
The file uses a simplified 2-layer hierarchical structure:
{
"h100": { # GPU platform
"default": { ... }, # Fallback configuration
"batch_32_hidden_4096": { ... },
"batch_64_hidden_8192": { ... }
},
"a100": {
"default": { ... },
"batch_16_hidden_2048": { ... }
}
}
Example file: silu_mul_fp8.json
Config keys should be structured strings that encode the relevant
parameters (e.g., "batch_32_hidden_4096", "seq_512_heads_16", "fp8_batch_64", etc.).
Classes
-------
- ConfigSet: In-memory collection of configs for a kernel with lookup/query APIs.
- ConfigManager: File-level operations for config persistence. | documentation | 1 | {"doc_type": "module", "module_name": "config_manager", "file_path": "vllm/kernels/helion/config_manager.py", "repo_id": "vllm-project/vllm", "char_length": 1022} |
exo-explore/exo:src/exo/master/tests/test_topology.py:test_add_connection | # Context:
from exo.shared.topology import Topology
from exo.shared.types.common import NodeId
from exo.shared.types.topology import Connection, SocketConnection
def topology() -> Topology: ...
def socket_connection() -> SocketConnection: ...
def test_add_node(topology: Topology): ...
def test_remove_connection_still_connected(topology: Topology, socket_connection: SocketConnection): ...
def test_remove_node_still_connected(topology: Topology, socket_connection: SocketConnection): ...
def test_list_nodes(topology: Topology, socket_connection: SocketConnection): ...
# Task:
Write a Python test function `test_add_connection` to verify the behavior of `add_connection`.
Module under test: exo.shared.topology, exo.shared.types.common, exo.shared.types.multiaddr | def test_add_connection(topology: Topology, socket_connection: SocketConnection):
# arrange
node_a = NodeId()
node_b = NodeId()
connection = Connection(source=node_a, sink=node_b, edge=socket_connection)
topology.add_node(node_a)
topology.add_node(node_b)
topology.add_connection(connection)
# act
data = list(topology.list_connections())
# assert
assert data == [connection]
assert topology.node_is_leaf(node_a)
assert topology.node_is_leaf(node_b) | test | 0 | {"function_name": "test_add_connection", "class_name": null, "qualname": "test_add_connection", "file_path": "src/exo/master/tests/test_topology.py", "repo_id": "exo-explore/exo", "loc": 18, "tested_modules": ["exo.shared.topology", "exo.shared.types.common", "exo.shared.types.multiaddr", "exo.shared.types.topology"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/diffusers:tests/pipelines/cosmos/test_cosmos2_text2image.py:Cosmos2TextToImagePipelineFastTests.test_inference | # Context:
import torch
class Cosmos2TextToImagePipelineWrapper(Cosmos2TextToImagePipeline): ...
class Cosmos2TextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = Cosmos2TextToImagePipelineWrapper
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
required_optional_params = frozenset(
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
test_group_offloading = True
def get_dummy_components(self): ...
def get_dummy_inputs(self, device, seed): ...
def test_callback_inputs(self): ...
def test_inference_batch_single_identical(self): ...
def test_attention_slicing_forward_pass(self, test_max_difference, test_mean_pixel_difference, expected_max_diff): ...
def test_vae_tiling(self, expected_diff_max: float): ...
def test_save_load_optional_components(self, expected_max_difference): ...
def test_serialization_with_variants(self): ...
def test_torch_dtype_dict(self): ...
def test_encode_prompt_works_in_isolation(self): ...
# Task:
Write a Python test method `test_inference` in test class `Cosmos2TextToImagePipelineFastTests` to verify the behavior of `inference`.
Module under test: transformers, diffusers, testing_utils | def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = pipe(**inputs).images
generated_image = image[0]
self.assertEqual(generated_image.shape, (3, 32, 32))
# fmt: off
expected_slice = torch.tensor([0.451, 0.451, 0.4471, 0.451, 0.451, 0.451, 0.451, 0.451, 0.4784, 0.4784, 0.4784, 0.4784, 0.4784, 0.4902, 0.4588, 0.5333])
# fmt: on
generated_slice = generated_image.flatten()
generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]])
self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) | test | 1 | {"function_name": "test_inference", "class_name": "Cosmos2TextToImagePipelineFastTests", "qualname": "Cosmos2TextToImagePipelineFastTests.test_inference", "file_path": "tests/pipelines/cosmos/test_cosmos2_text2image.py", "repo_id": "huggingface/diffusers", "loc": 20, "tested_modules": ["transformers", "diffusers", "testing_utils", "pipeline_params", "test_pipelines_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
fastapi/fastapi:tests/test_tutorial/test_python_types/test_tutorial005.py:test_get_items | # Context:
from docs_src.python_types.tutorial005_py310 import get_items
# Task:
Write a Python test function `test_get_items` to verify the behavior of `get_items`.
Module under test: docs_src.python_types.tutorial005_py310 | def test_get_items():
res = get_items(
"item_a",
"item_b",
"item_c",
"item_d",
"item_e",
)
assert res == ("item_a", "item_b", "item_c", "item_d", "item_e") | test | 1 | {"function_name": "test_get_items", "class_name": null, "qualname": "test_get_items", "file_path": "tests/test_tutorial/test_python_types/test_tutorial005.py", "repo_id": "fastapi/fastapi", "loc": 9, "tested_modules": ["docs_src.python_types.tutorial005_py310"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/agentic/api/test_streaming_validation.py:TestValidationRetryBehavior:class_doc | Write a class-level docstring for `TestValidationRetryBehavior` which has methods: `test_retry_includes_previous_error_in_prompt`. | Tests specifically for the retry behavior with error context. | documentation | 1 | {"doc_type": "class", "class_name": "TestValidationRetryBehavior", "file_path": "src/backend/tests/unit/agentic/api/test_streaming_validation.py", "repo_id": "langflow-ai/langflow", "char_length": 61, "methods": ["test_retry_includes_previous_error_in_prompt"]} |
huggingface/transformers:src/transformers/models/maskformer/image_processing_maskformer_fast.py:MaskFormerImageProcessorFast.preprocess | # Context:
from ...image_processing_utils import BatchFeature, get_size_dict
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
)
from ...processing_utils import Unpack
from .image_processing_maskformer import (
MaskFormerImageProcessorKwargs,
compute_segments,
convert_segmentation_to_rle,
remove_low_and_no_objects,
)
def convert_segmentation_map_to_binary_masks_fast(segmentation_map: 'torch.Tensor', instance_id_to_semantic_id: dict[int, int] | None, ignore_index: int | None, do_reduce_labels: bool): ...
class MaskFormerImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": 800, "longest_edge": 1333}
default_to_square = False
do_resize = True
do_rescale = True
rescale_factor = 1 / 255
do_normalize = True
do_pad = True
model_input_names = ["pixel_values", "pixel_mask"]
size_divisor = 32
do_reduce_labels = False
valid_kwargs = MaskFormerImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MaskFormerImageProcessorKwargs]) -> None:
size = kwargs.pop("size", None)
max_size = kwargs.pop("max_size", None)
if size is None and max_size is not None:
size = self.size
size["longest_edge"] = max_size
elif size is None:
size = self.size
self.size = get_size_dict(size, max_size=max_size, default_to_square=False)
super().__init__(**kwargs)
def to_dict(self) -> dict[str, Any]: ...
def reduce_label(self, labels: list['torch.Tensor']): ...
def resize(self, image: torch.Tensor, size: SizeDict, size_divisor: int, interpolation: Optional['tvF.InterpolationMode'], **kwargs) -> torch.Tensor: ...
def pad(self, images: torch.Tensor, padded_size: tuple[int, int], segmentation_maps: torch.Tensor | None, fill: int, ignore_index: int) -> BatchFeature: ...
def _preprocess_image_like_inputs(self, images: ImageInput, segmentation_maps: ImageInput, instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None, do_convert_rgb: bool, input_data_format: ChannelDimension, device: Union[str, 'torch.device'] | None, **kwargs) -> BatchFeature: ...
def _preprocess(self, images: list['torch.Tensor'], segmentation_maps: Optional['torch.Tensor'], instance_id_to_semantic_id: dict[int, int] | None, do_resize: bool | None, size: SizeDict | None, pad_size: SizeDict | None, size_divisor: int | None, interpolation: Union['PILImageResampling', 'tvF.InterpolationMode'] | None, do_rescale: bool | None, rescale_factor: float | None, do_normalize: bool | None, image_mean: float | list[float] | None, image_std: float | list[float] | None, ignore_index: int | None, do_reduce_labels: bool | None, disable_grouping: bool | None, return_tensors: str | TensorType | None, **kwargs) -> BatchFeature: ...
def post_process_semantic_segmentation(self, outputs, target_sizes: list[tuple[int, int]] | None) -> 'torch.Tensor': ...
def post_process_instance_segmentation(self, outputs, threshold: float, mask_threshold: float, overlap_mask_area_threshold: float, target_sizes: list[tuple[int, int]] | None, return_coco_annotation: bool | None, return_binary_maps: bool | None) -> list[dict]: ...
def post_process_panoptic_segmentation(self, outputs, threshold: float, mask_threshold: float, overlap_mask_area_threshold: float, label_ids_to_fuse: set[int] | None, target_sizes: list[tuple[int, int]] | None) -> list[dict]: ...
# Task:
Write a Python method `preprocess` for the class `MaskFormerImageProcessorFast` to segmentation_maps (`ImageInput`, *optional*):.
Parameters: images: ImageInput, segmentation_maps: ImageInput | None, instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None
Returns: BatchFeature | def preprocess(
self,
images: ImageInput,
segmentation_maps: ImageInput | None = None,
instance_id_to_semantic_id: list[dict[int, int]] | dict[int, int] | None = None,
**kwargs: Unpack[MaskFormerImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps.
instance_id_to_semantic_id (`Union[list[dict[int, int]], dict[int, int]]`, *optional*):
A mapping from instance IDs to semantic IDs.
"""
return super().preprocess(
images,
segmentation_maps,
instance_id_to_semantic_id,
**kwargs,
) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 19, "code_loc": 6, "docstring_loc": 6, "function_name": "preprocess", "class_name": "MaskFormerImageProcessorFast", "qualname": "MaskFormerImageProcessorFast.preprocess", "file_path": "src/transformers/models/maskformer/image_processing_maskformer_fast.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ccxt/ccxt:python/ccxt/static_dependencies/bip/bech32/bch_bech32.py:BchBech32Utils:class_doc | Write a class-level docstring for `BchBech32Utils` which has methods: `PolyMod`, `HrpExpand`, `ComputeChecksum`, `VerifyChecksum`. | Class container for Bitcoin Cash utility functions. | documentation | 1 | {"doc_type": "class", "class_name": "BchBech32Utils", "file_path": "python/ccxt/static_dependencies/bip/bech32/bch_bech32.py", "repo_id": "ccxt/ccxt", "char_length": 51, "methods": ["PolyMod", "HrpExpand", "ComputeChecksum", "VerifyChecksum"]} |
huggingface/diffusers:tests/pipelines/ltx/test_ltx_latent_upsample.py:LTXLatentUpsamplePipelineFastTests.test_attention_slicing_forward_pass | # Context:
import unittest
class LTXLatentUpsamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = LTXLatentUpsamplePipeline
params = {"video", "generator"}
batch_params = {"video", "generator"}
required_optional_params = frozenset(["generator", "latents", "return_dict"])
test_xformers_attention = False
supports_dduf = False
def get_dummy_components(self): ...
def get_dummy_inputs(self, device, seed): ...
def test_inference(self): ...
def test_vae_tiling(self, expected_diff_max: float): ...
def test_callback_inputs(self): ...
def test_inference_batch_consistent(self): ...
def test_inference_batch_single_identical(self): ...
# Task:
Write a Python test method `test_attention_slicing_forward_pass` in test class `LTXLatentUpsamplePipelineFastTests` to verify the behavior of `attention_slicing_forward_pass`.
Module under test: diffusers, diffusers.pipelines.ltx.modeling_latent_upsampler, testing_utils | def test_attention_slicing_forward_pass(
self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
):
pass | test | 1 | {"function_name": "test_attention_slicing_forward_pass", "class_name": "LTXLatentUpsamplePipelineFastTests", "qualname": "LTXLatentUpsamplePipelineFastTests.test_attention_slicing_forward_pass", "file_path": "tests/pipelines/ltx/test_ltx_latent_upsample.py", "repo_id": "huggingface/diffusers", "loc": 4, "tested_modules": ["diffusers", "diffusers.pipelines.ltx.modeling_latent_upsampler", "testing_utils", "test_pipelines_common"], "has_docstring": false, "runnable_level": "slib_runnable"} |
huggingface/transformers:src/transformers/models/cohere2_vision/modular_cohere2_vision.py:get_all_supported_aspect_ratios | # Context:
from functools import lru_cache
class Cohere2VisionMultiModalProjector(nn.Module): ...
class Cohere2VisionModelOutputWithPast(AyaVisionModelOutputWithPast): ...
class Cohere2VisionCausalLMOutputWithPast(AyaVisionCausalLMOutputWithPast): ...
class Cohere2VisionPreTrainedModel(AyaVisionPreTrainedModel): ...
class Cohere2VisionModel(AyaVisionModel): ...
class Cohere2VisionForConditionalGeneration(AyaVisionForConditionalGeneration): ...
def get_optimal_tiled_canvas(original_image_size: tuple[int, int], target_tile_size: tuple[int, int], min_image_tiles: int, max_image_tiles: int) -> tuple[int, int]: ...
class Cohere2VisionFastImageProcessorKwargs(ImagesKwargs): ...
class Cohere2VisionImageProcessorFast(GotOcr2ImageProcessorFast): ...
# Task:
Write a Python function `get_all_supported_aspect_ratios` to compute all allowed aspect ratios for a given maximum number of input tiles.
Parameters: max_image_tiles: int
Returns: list[tuple[int, int]] | def get_all_supported_aspect_ratios(max_image_tiles: int) -> list[tuple[int, int]]:
"""
Computes all allowed aspect ratios for a given maximum number of input tiles.
This function calculates all possible arrangements of tiles that can be formed
within the constraint of the maximum number of tiles. Each arrangement is
represented by its aspect ratio (width/height) and the corresponding tile configuration.
Args:
max_image_tiles (`int`):
The maximum number of tiles allowed.
Returns:
`list[tuple[int, int]]`: A list of tuples, each tuple representing a valid (width, height)
configuration in terms of number of tiles.
Example:
>>> get_all_supported_aspect_ratios(4)
[(1, 1), (1, 2), (1, 3), (1, 4), (2, 1), (2, 2), (3, 1), (4, 1)]
"""
aspect_ratios = []
for width in range(1, max_image_tiles + 1):
for height in range(1, max_image_tiles + 1):
if width * height <= max_image_tiles:
aspect_ratios.append((width, height))
return aspect_ratios | function_complex | 0 | {"cognitive_complexity": 6, "loc": 27, "code_loc": 6, "docstring_loc": 20, "function_name": "get_all_supported_aspect_ratios", "class_name": null, "qualname": "get_all_supported_aspect_ratios", "file_path": "src/transformers/models/cohere2_vision/modular_cohere2_vision.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "slib_runnable"} |
huggingface/transformers:tests/trainer/test_trainer_checkpointing.py:TrainerIntegrationWithHubTester.test_push_to_hub_in_organization | # Context:
import os
import re
import tempfile
from transformers.testing_utils import (
ENDPOINT_STAGING,
TOKEN,
USER,
CaptureLogger,
TemporaryHubRepo,
TestCasePlus,
backend_device_count,
evaluate_side_effect_factory,
get_steps_per_epoch,
is_staging_test,
require_accelerate,
require_deepspeed,
require_non_hpu,
require_peft,
require_tensorboard,
require_torch,
require_torch_non_multi_accelerator,
require_torch_up_to_2_accelerators,
require_vision,
run_first,
run_test_using_subprocess,
slow,
torch_device,
)
from .trainer_test_utils import (
PATH_SAMPLE_TEXT,
AlmostAccuracy,
MockCudaOOMCallback,
RegressionDataset,
RegressionModelConfig,
RegressionPreTrainedModel,
RegressionRandomPreTrainedModel,
RegressionTrainingArguments,
TrainerIntegrationCommon,
get_dataset,
get_language_model_trainer,
get_regression_trainer,
)
class TrainerCheckpointSaveTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerResumeTrainingTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerAutoBatchSizeTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerCheckpointRotationTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerInterruptedTrainingTest(TestCasePlus, TrainerIntegrationCommon): ...
class JITCheckpointTest(unittest.TestCase): ...
class TrainerSavingTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerBestModelTest(TestCasePlus, TrainerIntegrationCommon): ...
class TrainerIntegrationWithHubTester(unittest.TestCase):
def setUpClass(cls): ...
def test_push_to_hub(self): ...
def get_commit_history(self, repo): ...
def test_push_to_hub_with_saves_each_epoch(self): ...
def test_push_to_hub_with_saves_each_n_steps(self): ...
def test_push_to_hub_with_tensorboard_logs(self): ...
def test_push_to_hub_tags(self): ...
def test_push_to_hub_with_revision(self): ...
# Task:
Write a Python test method `test_push_to_hub_in_organization` in test class `TrainerIntegrationWithHubTester` to verify the behavior of `push_to_hub_in_organization`.
Module under test: pathlib, typing, huggingface_hub | def test_push_to_hub_in_organization(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(output_dir=tmp_dir)
trainer.save_model()
output_dir_name = tmp_repo.repo_name
trainer = get_regression_trainer(
output_dir=os.path.join(tmp_dir, output_dir_name),
push_to_hub=True,
hub_model_id=f"valid_org/{output_dir_name}",
hub_token=self._token,
)
url = trainer.push_to_hub()
# Extract repo_name from the url
re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url)
self.assertTrue(re_search is not None)
repo_name = re_search.groups()[0]
self.assertEqual(repo_name, f"valid_org/{output_dir_name}")
model = RegressionPreTrainedModel.from_pretrained(f"valid_org/{output_dir_name}")
self.assertEqual(model.a.item(), trainer.model.a.item())
self.assertEqual(model.b.item(), trainer.model.b.item()) | test | 0 | {"function_name": "test_push_to_hub_in_organization", "class_name": "TrainerIntegrationWithHubTester", "qualname": "TrainerIntegrationWithHubTester.test_push_to_hub_in_organization", "file_path": "tests/trainer/test_trainer_checkpointing.py", "repo_id": "huggingface/transformers", "loc": 23, "tested_modules": ["pathlib", "typing", "huggingface_hub", "torch", "transformers"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/groq/test_groq_constants.py:TestDeprecatedModels.test_deprecated_models_marked_correctly | # Context:
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, GROQ_PRODUCTION_MODELS
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS, GROQ_MODELS_DETAILED
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS, GROQ_PRODUCTION_MODELS
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, UNSUPPORTED_GROQ_MODELS
from lfx.base.models.groq_constants import GROQ_MODELS_DETAILED, GROQ_PREVIEW_MODELS
from lfx.base.models.groq_constants import (
GROQ_MODELS_DETAILED,
TOOL_CALLING_UNSUPPORTED_GROQ_MODELS,
)
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS, GROQ_PREVIEW_MODELS
from lfx.base.models.groq_constants import (
DEPRECATED_GROQ_MODELS,
GROQ_MODELS,
GROQ_MODELS_DETAILED,
UNSUPPORTED_GROQ_MODELS,
)
class TestGroqConstantsStructure: ...
class TestFallbackProductionModels: ...
class TestUnsupportedModels: ...
class TestPreviewModels: ...
class TestToolCallingModels: ...
class TestModelCategorization: ...
class TestProviderMetadata: ...
class TestBackwardCompatibility: ...
class TestFallbackListMinimalSize: ...
class TestDeprecatedModels:
def test_deprecated_models_not_in_production(self): ...
def test_deprecated_models_examples(self): ...
# Task:
Write a Python test method `test_deprecated_models_marked_correctly` in test class `TestDeprecatedModels` to test that deprecated models have the deprecated flag.
Module under test: lfx.base.models.groq_constants, lfx.base.models.groq_constants, lfx.base.models.groq_constants | def test_deprecated_models_marked_correctly(self):
"""Test that deprecated models have the deprecated flag."""
from lfx.base.models.groq_constants import DEPRECATED_GROQ_MODELS, GROQ_MODELS_DETAILED
for model in GROQ_MODELS_DETAILED:
if model["name"] in DEPRECATED_GROQ_MODELS:
assert model.get("deprecated") is True | test | 1 | {"function_name": "test_deprecated_models_marked_correctly", "class_name": "TestDeprecatedModels", "qualname": "TestDeprecatedModels.test_deprecated_models_marked_correctly", "file_path": "src/backend/tests/unit/groq/test_groq_constants.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants"], "has_docstring": true, "runnable_level": "project_runnable"} |
unclecode/crawl4ai:deploy/docker/tests/test_security_fixes.py:TestURLValidation.test_file_url_blocked | # Context:
class TestHookBuiltins(unittest.TestCase): ...
class TestHooksEnabled(unittest.TestCase): ...
class TestURLValidation(unittest.TestCase):
def setUp(self): ...
def validate_url_scheme(self, url: str, allow_raw: bool) -> bool: ...
def test_file_url_blocked_windows(self): ...
def test_javascript_url_blocked(self): ...
def test_data_url_blocked(self): ...
def test_ftp_url_blocked(self): ...
def test_empty_url_blocked(self): ...
def test_relative_url_blocked(self): ...
def test_http_url_allowed(self): ...
def test_https_url_allowed(self): ...
def test_raw_url_allowed_when_enabled(self): ...
def test_raw_url_blocked_when_disabled(self): ...
# Task:
Write a Python test method `test_file_url_blocked` in test class `TestURLValidation` to file:// URLs must be blocked (LFI vulnerability). | def test_file_url_blocked(self):
"""file:// URLs must be blocked (LFI vulnerability)."""
self.assertFalse(self.validate_url_scheme("file:///etc/passwd"))
self.assertFalse(self.validate_url_scheme("file:///etc/passwd", allow_raw=True)) | test | 1 | {"function_name": "test_file_url_blocked", "class_name": "TestURLValidation", "qualname": "TestURLValidation.test_file_url_blocked", "file_path": "deploy/docker/tests/test_security_fixes.py", "repo_id": "unclecode/crawl4ai", "loc": 4, "tested_modules": [], "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:python/ray/train/v2/tests/test_data_config.py:test_per_dataset_execution_options_default | # Context:
from ray.train import DataConfig
def test_per_dataset_execution_options_single(ray_start_4_cpus): ...
def test_per_dataset_execution_options_dict(ray_start_4_cpus): ...
# Task:
Write a Python test function `test_per_dataset_execution_options_default` to test that None or empty dict execution_options results in all datasets using default options.
Module under test: ray.data._internal.execution.interfaces.execution_options, ray.train | def test_per_dataset_execution_options_default(ray_start_4_cpus):
"""Test that None or empty dict execution_options results in all datasets
using default options."""
# Test with None
data_config_none = DataConfig(execution_options=None)
default_options = DataConfig.default_ingest_options()
retrieved_train_options = data_config_none._get_execution_options("train")
retrieved_test_options = data_config_none._get_execution_options("test")
assert retrieved_train_options.preserve_order == default_options.preserve_order
assert retrieved_test_options.preserve_order == default_options.preserve_order
# Test with empty dict
data_config_empty = DataConfig(execution_options={})
retrieved_train_options = data_config_empty._get_execution_options("train")
retrieved_test_options = data_config_empty._get_execution_options("test")
assert retrieved_train_options.preserve_order == default_options.preserve_order
assert retrieved_test_options.preserve_order == default_options.preserve_order | test | 0 | {"function_name": "test_per_dataset_execution_options_default", "class_name": null, "qualname": "test_per_dataset_execution_options_default", "file_path": "python/ray/train/v2/tests/test_data_config.py", "repo_id": "ray-project/ray", "loc": 19, "tested_modules": ["ray.data._internal.execution.interfaces.execution_options", "ray.train"], "has_docstring": true, "runnable_level": "project_runnable"} |
browser-use/browser-use:browser_use/actor/element.py:Element.hover | # Context:
class Position(TypedDict): ...
class BoundingBox(TypedDict): ...
class ElementInfo(TypedDict): ...
class Element:
def __init__(
self,
browser_session: 'BrowserSession',
backend_node_id: int,
session_id: str | None = None,
):
self._browser_session = browser_session
self._client = browser_session.cdp_client
self._backend_node_id = backend_node_id
self._session_id = session_id
async def _get_node_id(self) -> int: ...
async def _get_remote_object_id(self) -> str | None: ...
async def click(self, button: 'MouseButton', click_count: int, modifiers: list[ModifierType] | None) -> None: ...
async def fill(self, value: str, clear: bool) -> None: ...
async def focus(self) -> None: ...
async def check(self) -> None: ...
async def select_option(self, values: str | list[str]) -> None: ...
async def drag_to(self, target: Union['Element', Position], source_position: Position | None, target_position: Position | None) -> None: ...
async def get_attribute(self, name: str) -> str | None: ...
async def get_bounding_box(self) -> BoundingBox | None: ...
async def screenshot(self, format: str, quality: int | None) -> str: ...
async def evaluate(self, page_function: str, *args) -> str: ...
def _get_char_modifiers_and_vk(self, char: str) -> tuple[int, int, str]: ...
def _get_key_code_for_char(self, char: str) -> str: ...
async def _clear_text_field(self, object_id: str, cdp_client, session_id: str) -> bool: ...
async def _focus_element_simple(self, backend_node_id: int, object_id: str, cdp_client, session_id: str, input_coordinates) -> bool: ...
async def get_basic_info(self) -> ElementInfo: ...
# Task:
Write a Python async method `hover` for the class `Element` to hover over the element.
Returns: None | async def hover(self) -> None:
"""Hover over the element."""
box = await self.get_bounding_box()
if not box:
raise RuntimeError('Element is not visible or has no bounding box')
x = box['x'] + box['width'] / 2
y = box['y'] + box['height'] / 2
params: 'DispatchMouseEventParameters' = {'type': 'mouseMoved', 'x': x, 'y': y}
await self._client.send.Input.dispatchMouseEvent(params, session_id=self._session_id) | function_simple | 0 | {"cognitive_complexity": 1, "loc": 11, "code_loc": 7, "docstring_loc": 1, "function_name": "hover", "class_name": "Element", "qualname": "Element.hover", "file_path": "browser_use/actor/element.py", "repo_id": "browser-use/browser-use", "has_docstring": true, "runnable_level": "class_runnable"} |
ray-project/ray:ci/ray_ci/automation/test_crane_lib.py:TestCraneIndexIntegration.test_create_multiarch_index | # Context:
import requests
from ci.ray_ci.automation.crane_lib import (
CraneError,
_crane_binary,
call_crane_copy,
call_crane_export,
call_crane_index,
call_crane_manifest,
)
class TestCraneBinary: ...
class TestCraneCopyIntegration: ...
class TestCraneManifestIntegration: ...
class TestCraneExportIntegration: ...
class TestCraneIndexIntegration:
# Task:
Write a Python test method `test_create_multiarch_index` in test class `TestCraneIndexIntegration` to test creating a multi-architecture index.
Module under test: ci.ray_ci.automation.crane_lib, ci.ray_ci.automation.test_utils | def test_create_multiarch_index(self, local_registry): # noqa: F811
"""Test creating a multi-architecture index."""
port = local_registry
# Copy two different architecture images
amd64_dest = f"localhost:{port}/index-test:amd64"
arm64_dest = f"localhost:{port}/index-test:arm64"
call_crane_copy(source=TEST_IMAGE_AMD64, destination=amd64_dest)
call_crane_copy(source=TEST_IMAGE_ARM64, destination=arm64_dest)
# Create index
index_name = f"localhost:{port}/index-test:multiarch"
call_crane_index(index_name=index_name, tags=[amd64_dest, arm64_dest])
# Verify index was created
response = requests.get(
f"http://localhost:{port}/v2/index-test/manifests/multiarch"
)
assert response.status_code == 200
manifest = response.json()
assert "manifests" in manifest
assert len(manifest["manifests"]) == 2 | test | 0 | {"function_name": "test_create_multiarch_index", "class_name": "TestCraneIndexIntegration", "qualname": "TestCraneIndexIntegration.test_create_multiarch_index", "file_path": "ci/ray_ci/automation/test_crane_lib.py", "repo_id": "ray-project/ray", "loc": 23, "tested_modules": ["ci.ray_ci.automation.crane_lib", "ci.ray_ci.automation.test_utils"], "has_docstring": true, "runnable_level": "project_runnable"} |
sansan0/TrendRadar:mcp_server/tools/system.py:SystemManagementTools._html_escape | Write a Python method `_html_escape` for the class `SystemManagementTools` to HTML 转义.
Parameters: text: str
Returns: str | def _html_escape(self, text: str) -> str:
"""HTML 转义"""
if not isinstance(text, str):
text = str(text)
return (
text.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace('"', """)
.replace("'", "'")
) | function_simple | 1 | {"cognitive_complexity": 1, "loc": 11, "code_loc": 9, "docstring_loc": 1, "function_name": "_html_escape", "class_name": "SystemManagementTools", "qualname": "SystemManagementTools._html_escape", "file_path": "mcp_server/tools/system.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "self_contained"} |
ray-project/ray:python/ray/data/tests/test_progress_manager.py:TestGetProgressManager.test_ray_tqdm_in_worker_uses_tqdm | # Context:
from unittest.mock import MagicMock, patch
from ray.data._internal.progress import get_progress_manager
from ray.data._internal.progress.tqdm_progress import (
TqdmExecutionProgressManager,
)
from ray.data.context import DataContext
class TestLoggingProgressManager: ...
class TestGetProgressManager:
def mock_topology(self): ...
def setup_ray_worker(self): ...
def test_progress_bars_disabled_uses_noop(self, mock_topology, restore_data_context): ...
def test_operator_progress_disabled_logs_warning(self, mock_logger, mock_topology, restore_data_context): ...
def test_non_atty_uses_logging_progress(self, mock_isatty, mock_topology, restore_data_context): ...
def test_tqdm_when_rich_disabled(self, mock_isatty, mock_topology, restore_data_context): ...
def test_tqdm_when_use_ray_tqdm_enabled(self, mock_isatty, mock_topology, restore_data_context): ...
def test_tqdm_progress_default(self, mock_isatty, mock_topology): ...
def test_rich_import_error_fallback(self, mock_logger, mock_isatty, mock_topology, restore_data_context): ...
def test_progress_toggle_flag_combinations(self, mock_isatty, mock_topology, enable_progress, enable_op_progress, expected_type, restore_data_context): ...
# Task:
Write a Python test method `test_ray_tqdm_in_worker_uses_tqdm` in test class `TestGetProgressManager` to test that TqdmExecutionProgressManager is used when use_ray_tqdm is True in Ray worker.
Module under test: ray.data._internal.progress, ray.data._internal.progress.base_progress, ray.data._internal.progress.logging_progress | def test_ray_tqdm_in_worker_uses_tqdm(
self, mock_isatty, mock_topology, setup_ray_worker, restore_data_context
):
"""Test that TqdmExecutionProgressManager is used when use_ray_tqdm is True in Ray worker."""
ctx = DataContext.get_current()
ctx.use_ray_tqdm = True
manager = get_progress_manager(ctx, "test_id", mock_topology, False)
assert isinstance(manager, TqdmExecutionProgressManager) | test | 0 | {"function_name": "test_ray_tqdm_in_worker_uses_tqdm", "class_name": "TestGetProgressManager", "qualname": "TestGetProgressManager.test_ray_tqdm_in_worker_uses_tqdm", "file_path": "python/ray/data/tests/test_progress_manager.py", "repo_id": "ray-project/ray", "loc": 10, "tested_modules": ["ray.data._internal.progress", "ray.data._internal.progress.base_progress", "ray.data._internal.progress.logging_progress", "ray.data._internal.progress.rich_progress", "ray.data._internal.progress.tqdm_progress"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/tune/examples/custom_checkpointing_with_callback.py:OptimizationTrainable.setup | # Context:
def evaluation_fn(step, width, height): ...
class SmartCheckpointCallback(Callback): ...
class OptimizationTrainable(tune.Trainable):
def step(self): ...
def save_checkpoint(self, checkpoint_dir): ...
def load_checkpoint(self, checkpoint): ...
# Task:
Write a Python method `setup` for the class `OptimizationTrainable` to initialize the trainable.
Parameters: config | def setup(self, config):
"""Initialize the trainable"""
self.current_step = 0
self.width = config["width"]
self.height = config["height"] | function_simple | 0 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "setup", "class_name": "OptimizationTrainable", "qualname": "OptimizationTrainable.setup", "file_path": "python/ray/tune/examples/custom_checkpointing_with_callback.py", "repo_id": "ray-project/ray", "has_docstring": true, "runnable_level": "class_runnable"} |
google/langextract:tests/tokenizer_test.py:UnicodeTokenizerTest.test_acronym_inconsistency | # Context:
from langextract.core import tokenizer
class TokenizerTest(parameterized.TestCase): ...
class ExceptionTest(absltest.TestCase): ...
class NegativeTestCases(parameterized.TestCase): ...
class TokensTextTest(parameterized.TestCase): ...
class SentenceRangeTest(parameterized.TestCase): ...
class UnicodeTokenizerTest(parameterized.TestCase):
def assertTokenListEqual(self, actual_tokens, expected_tokens, msg): ...
def test_tokenize_various_inputs(self, input_text, expected_tokens): ...
def test_special_unicode_and_punctuation_handling(self, input_text, expected_tokens, expected_first_after_newline): ...
def test_first_token_after_newline_parity(self): ...
def test_expanded_cjk_detection(self): ...
def test_mixed_script_and_emoji(self): ...
def test_script_boundary_grouping(self): ...
def test_non_spaced_scripts_no_grouping(self): ...
def test_cjk_detection_regex(self): ...
def test_newline_simplification(self): ...
def test_newline_simplification_start(self): ...
def test_mixed_line_endings(self): ...
def test_mixed_uncommon_scripts_no_grouping(self): ...
def test_unknown_script_merging_edge_case(self): ...
def test_find_sentence_range_empty_input(self): ...
def test_normalization_indices_match_input(self): ...
def test_consecutive_punctuation_grouping(self): ...
def test_punctuation_merging_identical_only(self): ...
def test_distinct_unknown_scripts_do_not_merge(self): ...
def test_identical_unknown_scripts_merge(self): ...
# Task:
Write a Python test method `test_acronym_inconsistency` in test class `UnicodeTokenizerTest` to test that RegexTokenizer does NOT produce ACRONYM tokens (standardization).
Module under test: absl.testing, absl.testing, langextract.core | def test_acronym_inconsistency(self):
"""Test that RegexTokenizer does NOT produce ACRONYM tokens (standardization)."""
tok = tokenizer.RegexTokenizer()
text = "A/B"
tokenized = tok.tokenize(text)
# Ensure parity with UnicodeTokenizer by splitting acronyms into constituent parts.
self.assertLen(tokenized.tokens, 3)
self.assertEqual(tokenized.tokens[0].token_type, tokenizer.TokenType.WORD)
self.assertEqual(
tokenized.tokens[1].token_type, tokenizer.TokenType.PUNCTUATION
)
self.assertEqual(tokenized.tokens[2].token_type, tokenizer.TokenType.WORD) | test | 1 | {"function_name": "test_acronym_inconsistency", "class_name": "UnicodeTokenizerTest", "qualname": "UnicodeTokenizerTest.test_acronym_inconsistency", "file_path": "tests/tokenizer_test.py", "repo_id": "google/langextract", "loc": 12, "tested_modules": ["absl.testing", "absl.testing", "langextract.core"], "has_docstring": true, "runnable_level": "project_runnable"} |
binary-husky/gpt_academic:crazy_functions/doc_fns/conversation_doc/word_doc.py:WordFormatter.create_document | # Context:
from docx.shared import Cm, Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
from docx.oxml.ns import qn
from datetime import datetime
def convert_markdown_to_word(markdown_text): ...
class WordFormatter:
def __init__(self):
self.doc = Document()
self._setup_document()
self._create_styles()
def _setup_document(self): ...
def _create_styles(self): ...
# Task:
Write a Python method `create_document` for the class `WordFormatter` to 写入聊天历史.
Parameters: history | def create_document(self, history):
"""写入聊天历史"""
# 添加标题
title_para = self.doc.add_paragraph(style='Title_Custom')
title_run = title_para.add_run('GPT-Academic 对话记录')
# 添加日期
date_para = self.doc.add_paragraph()
date_para.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
date_run = date_para.add_run(datetime.now().strftime('%Y年%m月%d日'))
date_run.font.name = '仿宋'
date_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
date_run.font.size = Pt(16)
self.doc.add_paragraph() # 添加空行
# 添加对话内容
for i in range(0, len(history), 2):
question = history[i]
answer = convert_markdown_to_word(history[i + 1])
if question:
q_para = self.doc.add_paragraph(style='Question_Style')
q_para.add_run(f'问题 {i//2 + 1}:').bold = True
q_para.add_run(str(question))
if answer:
a_para = self.doc.add_paragraph(style='Answer_Style')
a_para.add_run(f'回答 {i//2 + 1}:').bold = True
a_para.add_run(str(answer))
return self.doc | function_simple | 1 | {"cognitive_complexity": 5, "loc": 33, "code_loc": 21, "docstring_loc": 1, "function_name": "create_document", "class_name": "WordFormatter", "qualname": "WordFormatter.create_document", "file_path": "crazy_functions/doc_fns/conversation_doc/word_doc.py", "repo_id": "binary-husky/gpt_academic", "has_docstring": true, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/entrypoints/openai/server_utils.py:SSEDecoder.extract_content | # Context:
class AuthenticationMiddleware: ...
class XRequestIdMiddleware: ...
def load_log_config(log_config_file: str | None) -> dict | None: ...
def get_uvicorn_log_config(args: Namespace) -> dict | None: ...
def _extract_content_from_chunk(chunk_data: dict) -> str: ...
def _log_streaming_response(response, response_body: list) -> None: ...
def _log_non_streaming_response(response_body: list) -> None: ...
async def log_response(request: Request, call_next): ...
async def http_exception_handler(_: Request, exc: HTTPException): ...
async def validation_exception_handler(_: Request, exc: RequestValidationError): ...
async def lifespan(app: FastAPI): ...
class SSEDecoder:
def __init__(self):
self.buffer = ""
self.content_buffer = []
def decode_chunk(self, chunk: bytes) -> list[dict]: ...
def add_content(self, content: str) -> None: ...
def get_complete_content(self) -> str: ...
# Task:
Write a Python method `extract_content` for the class `SSEDecoder` to extract content from event data.
Parameters: event_data: dict
Returns: str | def extract_content(self, event_data: dict) -> str:
"""Extract content from event data."""
return _extract_content_from_chunk(event_data) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 3, "code_loc": 1, "docstring_loc": 1, "function_name": "extract_content", "class_name": "SSEDecoder", "qualname": "SSEDecoder.extract_content", "file_path": "vllm/entrypoints/openai/server_utils.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "file_runnable"} |
Comfy-Org/ComfyUI:comfy/ldm/ace/attention.py:CustomLiteLAProcessor2_0:class_doc | Write a class-level docstring for `CustomLiteLAProcessor2_0` which has methods: `__init__`, `apply_rotary_emb`, `__call__`. | Attention processor used typically in processing the SD3-like self-attention projections. add rms norm for query and key and apply RoPE | documentation | 1 | {"doc_type": "class", "class_name": "CustomLiteLAProcessor2_0", "file_path": "comfy/ldm/ace/attention.py", "repo_id": "Comfy-Org/ComfyUI", "char_length": 135, "methods": ["__init__", "apply_rotary_emb", "__call__"]} |
sansan0/TrendRadar:mcp_server/utils/validators.py:_parse_string_to_bool | Write a Python function `_parse_string_to_bool` to 将字符串解析为布尔值.
Parameters: value: str
Returns: bool | def _parse_string_to_bool(value: str) -> bool:
"""
将字符串解析为布尔值
Args:
value: 字符串值
Returns:
解析后的布尔值
"""
value = value.strip().lower()
if value in ('true', '1', 'yes', 'on'):
return True
elif value in ('false', '0', 'no', 'off', ''):
return False
else:
# 默认非空字符串为 True
return bool(value) | function_simple | 1 | {"cognitive_complexity": 3, "loc": 19, "code_loc": 7, "docstring_loc": 9, "function_name": "_parse_string_to_bool", "class_name": null, "qualname": "_parse_string_to_bool", "file_path": "mcp_server/utils/validators.py", "repo_id": "sansan0/TrendRadar", "has_docstring": true, "runnable_level": "self_contained"} |
666ghj/BettaFish:tests/test_report_engine_sanitization.py:ChapterSanitizationTestCase.test_table_cell_empty_blocks_repaired | # Context:
class ChapterSanitizationTestCase(unittest.TestCase):
def setUp(self): ...
def test_table_rows_scalar_values_expanded(self): ...
def test_engine_quote_validation(self): ...
def test_engine_quote_rejects_disallowed_marks_and_blocks(self): ...
def test_engine_quote_sanitization_strips_disallowed(self): ...
def test_engine_quote_title_must_match_engine(self): ...
# Task:
Write a Python test method `test_table_cell_empty_blocks_repaired` in test class `ChapterSanitizationTestCase` to verify the behavior of `table_cell_empty_blocks_repaired`.
Module under test: ReportEngine.ir, ReportEngine.nodes.chapter_generation_node | def test_table_cell_empty_blocks_repaired(self):
chapter = {
"blocks": [
{
"type": "table",
"rows": [
{
"cells": [
{"blocks": []},
{"text": "同比变化", "blocks": None},
]
}
],
}
]
}
self.node._sanitize_chapter_blocks(chapter)
table_block = chapter["blocks"][0]
cells = table_block["rows"][0]["cells"]
self.assertEqual(len(cells), 2)
for cell in cells:
blocks = cell.get("blocks")
self.assertIsInstance(blocks, list)
self.assertGreater(len(blocks), 0)
for block in blocks:
self.assertEqual(block.get("type"), "paragraph") | test | 1 | {"function_name": "test_table_cell_empty_blocks_repaired", "class_name": "ChapterSanitizationTestCase", "qualname": "ChapterSanitizationTestCase.test_table_cell_empty_blocks_repaired", "file_path": "tests/test_report_engine_sanitization.py", "repo_id": "666ghj/BettaFish", "loc": 26, "tested_modules": ["ReportEngine.ir", "ReportEngine.nodes.chapter_generation_node"], "has_docstring": false, "runnable_level": "class_runnable"} |
langchain-ai/langchain:libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py:ScoreStringEvalChain._prepare_output | # Context:
from langchain_classic.schema import RUN_KEY
def resolve_criteria(criteria: CRITERIA_TYPE | str | list[CRITERIA_TYPE] | None) -> dict: ...
class ScoreStringResultOutputParser(BaseOutputParser[dict]): ...
class LabeledScoreStringEvalChain(ScoreStringEvalChain): ...
class ScoreStringEvalChain(StringEvaluator, LLMEvalChain, LLMChain):
model_config = ConfigDict(
def is_lc_serializable(cls) -> bool: ...
def requires_reference(self) -> bool: ...
def requires_input(self) -> bool: ...
def evaluation_name(self) -> str: ...
def _skip_reference_warning(self) -> str: ...
def from_llm(cls, llm: BaseLanguageModel, prompt: PromptTemplate | None, criteria: CRITERIA_TYPE | str | None, normalize_by: float | None, **kwargs) -> ScoreStringEvalChain: ...
def _prepare_input(self, prediction: str, input_: str | None, reference: str | None) -> dict: ...
def _evaluate_strings(self, prediction: str, input: str | None, reference: str | None, callbacks: Callbacks, tags: list[str] | None, metadata: dict[str, Any] | None, include_run_info: bool, **kwargs) -> dict: ...
async def _aevaluate_strings(self, prediction: str, reference: str | None, input: str | None, callbacks: Callbacks, tags: list[str] | None, metadata: dict[str, Any] | None, include_run_info: bool, **kwargs) -> dict: ...
# Task:
Write a Python method `_prepare_output` for the class `ScoreStringEvalChain` to prepare the output.
Parameters: result: dict
Returns: dict | def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
if "score" in parsed and self.normalize_by is not None:
parsed["score"] = parsed["score"] / self.normalize_by
return parsed | function_simple | 1 | {"cognitive_complexity": 3, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "_prepare_output", "class_name": "ScoreStringEvalChain", "qualname": "ScoreStringEvalChain._prepare_output", "file_path": "libs/langchain/langchain_classic/evaluation/scoring/eval_chain.py", "repo_id": "langchain-ai/langchain", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:test/testcases/test_web_api/test_memory_app/test_list_memory.py:TestMemoryList.test_filter_memory_type | # Context:
import pytest
from test_web_api.common import list_memory, get_memory_config
class TestAuthorization: ...
class TestCapability: ...
class TestMemoryList:
def test_params_unset(self, WebApiAuth): ...
def test_params_empty(self, WebApiAuth): ...
def test_page(self, WebApiAuth, params, expected_page_size): ...
def test_filter_multi_memory_type(self, WebApiAuth): ...
def test_filter_storage_type(self, WebApiAuth): ...
def test_match_keyword(self, WebApiAuth): ...
def test_get_config(self, WebApiAuth): ...
# Task:
Write a Python test method `test_filter_memory_type` in test class `TestMemoryList` to verify the behavior of `filter_memory_type`.
Module under test: concurrent.futures, test_web_api.common, configs | def test_filter_memory_type(self, WebApiAuth):
res = list_memory(WebApiAuth, {"memory_type": ["semantic"]})
assert res["code"] == 0, res
for memory in res["data"]["memory_list"]:
assert "semantic" in memory["memory_type"], res | test | 1 | {"function_name": "test_filter_memory_type", "class_name": "TestMemoryList", "qualname": "TestMemoryList.test_filter_memory_type", "file_path": "test/testcases/test_web_api/test_memory_app/test_list_memory.py", "repo_id": "infiniflow/ragflow", "loc": 5, "tested_modules": ["concurrent.futures", "test_web_api.common", "configs", "libs.auth"], "has_docstring": false, "runnable_level": "project_runnable"} |
vnpy/vnpy:vnpy/alpha/dataset/math_function.py:quesval2 | # Context:
import polars as pl
from .utility import DataProxy
def less(feature1: DataProxy, feature2: DataProxy | float) -> DataProxy: ...
def greater(feature1: DataProxy, feature2: DataProxy | float) -> DataProxy: ...
def log(feature: DataProxy) -> DataProxy: ...
def abs(feature: DataProxy) -> DataProxy: ...
def sign(feature: DataProxy) -> DataProxy: ...
def quesval(threshold: float, feature1: DataProxy, feature2: DataProxy | float | int, feature3: DataProxy | float | int) -> DataProxy: ...
def pow1(base: DataProxy, exponent: float) -> DataProxy: ...
def pow2(base: DataProxy, exponent: DataProxy) -> DataProxy: ...
# Task:
Write a Python function `quesval2` to return feature2 if threshold < feature1, otherwise feature3 (DataProxy threshold version).
Parameters: threshold: DataProxy, feature1: DataProxy, feature2: DataProxy | float | int, feature3: DataProxy | float | int
Returns: DataProxy | def quesval2(threshold: DataProxy, feature1: DataProxy, feature2: DataProxy | float | int, feature3: DataProxy | float | int) -> DataProxy:
"""Return feature2 if threshold < feature1, otherwise feature3 (DataProxy threshold version)"""
df_merged: pl.DataFrame = threshold.df.join(feature1.df, on=["datetime", "vt_symbol"], suffix="_cond")
if isinstance(feature2, DataProxy):
df_merged = df_merged.join(feature2.df, on=["datetime", "vt_symbol"], suffix="_true")
else:
df_merged = df_merged.with_columns(pl.lit(feature2).alias("data_true"))
if isinstance(feature3, DataProxy):
df_merged = df_merged.join(feature3.df, on=["datetime", "vt_symbol"], suffix="_false")
else:
df_merged = df_merged.with_columns(pl.lit(feature3).alias("data_false"))
df: pl.DataFrame = df_merged.with_columns(
pl.when(pl.col("data_cond") < pl.col("data"))
.then(pl.col("data_true"))
.otherwise(pl.col("data_false"))
.alias("data")
).select(["datetime", "vt_symbol", "data"])
return DataProxy(df) | function_simple | 1 | {"cognitive_complexity": 4, "loc": 22, "code_loc": 16, "docstring_loc": 1, "function_name": "quesval2", "class_name": null, "qualname": "quesval2", "file_path": "vnpy/alpha/dataset/math_function.py", "repo_id": "vnpy/vnpy", "has_docstring": true, "runnable_level": "project_runnable"} |
666ghj/BettaFish:ReportEngine/utils/test_json_parser.py:TestRobustJSONParser.test_complex_real_world_case | # Context:
def run_manual_test(): ...
class TestRobustJSONParser(unittest.TestCase):
def setUp(self): ...
def test_basic_json(self): ...
def test_markdown_wrapped(self): ...
def test_thinking_content_removal(self): ...
def test_missing_comma_fix(self): ...
def test_unbalanced_brackets(self): ...
def test_control_character_escape(self): ...
def test_trailing_comma_removal(self): ...
def test_colon_equals_fix(self): ...
def test_extract_first_json(self): ...
def test_unterminated_string_with_json_repair(self): ...
def test_array_with_best_match(self): ...
def test_key_alias_recovery(self): ...
def test_expected_keys_validation(self): ...
def test_wrapper_key_extraction(self): ...
def test_empty_input(self): ...
def test_invalid_json_after_all_repairs(self): ...
# Task:
Write a Python test method `test_complex_real_world_case` in test class `TestRobustJSONParser` to 测试真实世界的复杂案例(类似实际错误)。.
Module under test: json_parser | def test_complex_real_world_case(self):
"""测试真实世界的复杂案例(类似实际错误)。"""
# 模拟实际错误:缺少逗号、有markdown包裹、有思考内容
json_str = """<thinking>我需要构造一个篇幅规划</thinking>
```json
{
"totalWords": 40000,
"tolerance": 2000,
"globalGuidelines": [
"重点突出技术红利分配失衡、人才流失与职业认同危机等结构性矛盾"
"详略策略:技术创新与传统技艺的碰撞"
"案例导向:优先引用真实数据和调研"
],
"chapters": [
{
"chapterId": "ch1",
"targetWords": 5000
}
]
}
```"""
result = self.parser.parse(json_str, "复杂真实案例测试")
self.assertEqual(result["totalWords"], 40000)
self.assertEqual(result["tolerance"], 2000)
self.assertEqual(len(result["globalGuidelines"]), 3)
self.assertEqual(len(result["chapters"]), 1) | test | 1 | {"function_name": "test_complex_real_world_case", "class_name": "TestRobustJSONParser", "qualname": "TestRobustJSONParser.test_complex_real_world_case", "file_path": "ReportEngine/utils/test_json_parser.py", "repo_id": "666ghj/BettaFish", "loc": 26, "tested_modules": ["json_parser"], "has_docstring": true, "runnable_level": "class_runnable"} |
infiniflow/ragflow:common/doc_store/es_conn_base.py:ESConnectionBase.get_cluster_stats | # Context:
from common.misc_utils import convert_bytes
class ESConnectionBase(DocStoreConnection):
def __init__(self, mapping_file_name: str="mapping.json", logger_name: str='ragflow.es_conn'):
from common.doc_store.es_conn_pool import ES_CONN
self.logger = logging.getLogger(logger_name)
self.info = {}
self.logger.info(f"Use Elasticsearch {settings.ES['hosts']} as the doc engine.")
self.es = ES_CONN.get_conn()
fp_mapping = os.path.join(get_project_base_directory(), "conf", mapping_file_name)
if not os.path.exists(fp_mapping):
msg = f"Elasticsearch mapping file not found at {fp_mapping}"
self.logger.error(msg)
raise Exception(msg)
with open(fp_mapping, "r") as f:
self.mapping = json.load(f)
self.logger.info(f"Elasticsearch {settings.ES['hosts']} is healthy.")
def _connect(self): ...
def db_type(self) -> str: ...
def health(self) -> dict: ...
def create_idx(self, index_name: str, dataset_id: str, vector_size: int, parser_id: str): ...
def create_doc_meta_idx(self, index_name: str): ...
def delete_idx(self, index_name: str, dataset_id: str): ...
def index_exist(self, index_name: str, dataset_id: str) -> bool: ...
def get(self, doc_id: str, index_name: str, dataset_ids: list[str]) -> dict | None: ...
def search(self, select_fields: list[str], highlight_fields: list[str], condition: dict, match_expressions: list[MatchExpr], order_by: OrderByExpr, offset: int, limit: int, index_names: str | list[str], dataset_ids: list[str], agg_fields: list[str] | None, rank_feature: dict | None): ...
def insert(self, documents: list[dict], index_name: str, dataset_id: str) -> list[str]: ...
def update(self, condition: dict, new_value: dict, index_name: str, dataset_id: str) -> bool: ...
def delete(self, condition: dict, index_name: str, dataset_id: str) -> int: ...
def get_total(self, res): ...
def get_doc_ids(self, res): ...
def _get_source(self, res): ...
def get_fields(self, res, fields: list[str]) -> dict[str, dict]: ...
def get_highlight(self, res, keywords: list[str], field_name: str): ...
def get_aggregation(self, res, field_name: str): ...
def sql(self, sql: str, fetch_size: int, format: str): ...
# Task:
Write a Python method `get_cluster_stats` for the class `ESConnectionBase` to curl -XGET "http://{es_host}/_cluster/stats" -H "kbn-xsrf: reporting" to view raw stats. | def get_cluster_stats(self):
"""
curl -XGET "http://{es_host}/_cluster/stats" -H "kbn-xsrf: reporting" to view raw stats.
"""
raw_stats = self.es.cluster.stats()
self.logger.debug(f"ESConnection.get_cluster_stats: {raw_stats}")
try:
res = {
'cluster_name': raw_stats['cluster_name'],
'status': raw_stats['status']
}
indices_status = raw_stats['indices']
res.update({
'indices': indices_status['count'],
'indices_shards': indices_status['shards']['total']
})
doc_info = indices_status['docs']
res.update({
'docs': doc_info['count'],
'docs_deleted': doc_info['deleted']
})
store_info = indices_status['store']
res.update({
'store_size': convert_bytes(store_info['size_in_bytes']),
'total_dataset_size': convert_bytes(store_info['total_data_set_size_in_bytes'])
})
mappings_info = indices_status['mappings']
res.update({
'mappings_fields': mappings_info['total_field_count'],
'mappings_deduplicated_fields': mappings_info['total_deduplicated_field_count'],
'mappings_deduplicated_size': convert_bytes(mappings_info['total_deduplicated_mapping_size_in_bytes'])
})
node_info = raw_stats['nodes']
res.update({
'nodes': node_info['count']['total'],
'nodes_version': node_info['versions'],
'os_mem': convert_bytes(node_info['os']['mem']['total_in_bytes']),
'os_mem_used': convert_bytes(node_info['os']['mem']['used_in_bytes']),
'os_mem_used_percent': node_info['os']['mem']['used_percent'],
'jvm_versions': node_info['jvm']['versions'][0]['vm_version'],
'jvm_heap_used': convert_bytes(node_info['jvm']['mem']['heap_used_in_bytes']),
'jvm_heap_max': convert_bytes(node_info['jvm']['mem']['heap_max_in_bytes'])
})
return res
except Exception as e:
self.logger.exception(f"ESConnection.get_cluster_stats: {e}")
return None | function_simple | 1 | {"cognitive_complexity": 1, "loc": 48, "code_loc": 43, "docstring_loc": 3, "function_name": "get_cluster_stats", "class_name": "ESConnectionBase", "qualname": "ESConnectionBase.get_cluster_stats", "file_path": "common/doc_store/es_conn_base.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai/tests/llms/test_multimodal_integration.py:TestOpenAIResponsesFileUploadIntegration.test_describe_image_with_file_id | # Context:
import pytest
from crewai.llm import LLM
from crewai_files import (
AudioFile,
File,
ImageFile,
PDFFile,
TextFile,
VideoFile,
format_multimodal_content,
)
def test_image_bytes() -> bytes: ...
def test_text_bytes() -> bytes: ...
def test_video_bytes() -> bytes: ...
def test_audio_bytes() -> bytes: ...
def _build_multimodal_message(llm: LLM, prompt: str, files: dict) -> list[dict]: ...
class TestOpenAIMultimodalIntegration: ...
class TestOpenAIO4MiniMultimodalIntegration: ...
class TestOpenAIGPT41MiniMultimodalIntegration: ...
class TestOpenAIGPT5MultimodalIntegration: ...
class TestOpenAIGPT5MiniMultimodalIntegration: ...
class TestOpenAIGPT5NanoMultimodalIntegration: ...
class TestAnthropicMultimodalIntegration: ...
class TestAzureMultimodalIntegration: ...
class TestBedrockMultimodalIntegration: ...
class TestGeminiMultimodalIntegration: ...
class TestLiteLLMMultimodalIntegration: ...
class TestMultipleFilesIntegration: ...
class TestGenericFileIntegration: ...
def _build_multimodal_message_with_upload(llm: LLM, prompt: str, files: dict) -> tuple[list[dict], list[dict]]: ...
def _build_responses_message_with_upload(llm: LLM, prompt: str, files: dict) -> tuple[list[dict], list[dict]]: ...
class TestAnthropicFileUploadIntegration: ...
class TestOpenAIResponsesFileUploadIntegration:
def test_describe_image_via_format_api(self, test_image_bytes: bytes) -> None: ...
def test_describe_image_via_format_api_with_upload(self, test_image_bytes: bytes) -> None: ...
# Task:
Write a Python test method `test_describe_image_with_file_id` in test class `TestOpenAIResponsesFileUploadIntegration` to test OpenAI Responses API can describe an image uploaded via Files API.
Module under test: pathlib, crewai.llm, crewai_files | def test_describe_image_with_file_id(self, test_image_bytes: bytes) -> None:
"""Test OpenAI Responses API can describe an image uploaded via Files API."""
llm = LLM(model="openai/gpt-4o-mini", api="responses")
files = {"image": ImageFile(source=test_image_bytes)}
messages, content_blocks = _build_responses_message_with_upload(
llm,
"Describe this image in one sentence. Be brief.",
files,
)
# Verify we're using file_id with input_image type
assert len(content_blocks) == 1
block = content_blocks[0]
assert block.get("type") == "input_image", (
f"Expected type 'input_image' for Responses API, got '{block.get('type')}'. "
"This test verifies file_id uploads work with the Responses API."
)
assert "file_id" in block, "Expected file_id in block for file_id upload"
response = llm.call(messages)
assert response
assert isinstance(response, str)
assert len(response) > 0 | test | 0 | {"function_name": "test_describe_image_with_file_id", "class_name": "TestOpenAIResponsesFileUploadIntegration", "qualname": "TestOpenAIResponsesFileUploadIntegration.test_describe_image_with_file_id", "file_path": "lib/crewai/tests/llms/test_multimodal_integration.py", "repo_id": "crewAIInc/crewAI", "loc": 25, "tested_modules": ["pathlib", "crewai.llm", "crewai_files", "crewai_files.resolution.resolver", "crewai_files.formatting.anthropic"], "has_docstring": true, "runnable_level": "project_runnable"} |
apache/airflow:providers/standard/tests/unit/standard/operators/test_hitl.py:TestHITLOperator.test_validate_defaults_with_invalid_defaults | # Context:
import pytest
from typing import TYPE_CHECKING, Any
from airflow.providers.standard.operators.hitl import (
ApprovalOperator,
HITLBranchOperator,
HITLEntryOperator,
HITLOperator,
)
from airflow.sdk.definitions.param import ParamsDict
def hitl_task_and_ti_for_generating_link(dag_maker: DagMaker) -> tuple[HITLOperator, TaskInstance]: ...
def get_context_from_model_ti(mock_supervisor_comms: Any) -> Any: ...
class TestApprovalOperator: ...
class TestHITLEntryOperator: ...
class TestHITLBranchOperator: ...
class TestHITLSummaryForListeners: ...
class TestHITLOperator:
def test_validate_options(self) -> None: ...
def test_validate_options_with_empty_options(self) -> None: ...
def test_validate_params(self, params: ParamsDict, exc: type[ValueError | ParamValidationError], error_msg: str) -> None: ...
def test_validate_defaults(self) -> None: ...
def test_execute(self, dag_maker: DagMaker, session: Session) -> None: ...
def test_serialzed_params(self, input_params: ParamsDict | dict[str, Any] | None, expected_params: dict[str, Any]) -> None: ...
def test_serialzed_params_legacy(self) -> None: ...
def test_execute_complete(self) -> None: ...
def test_process_trigger_event_error(self, event: dict[str, Any], expected_exception: type[Exception]) -> None: ...
def test_validate_chosen_options_with_invalid_content(self) -> None: ...
def test_validate_params_input_with_invalid_input(self, params: ParamsDict, params_input: dict[str, Any], exc: type[ValueError | ParamValidationError], error_msg: str) -> None: ...
def test_generate_link_to_ui(self, base_url: str, conf_base_url: str, options: list[str] | None, params_input: dict[str, Any] | None, expected_parsed_query: dict[str, list[str]], hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ...
def test_generate_link_to_ui_with_invalid_input(self, options: list[Any] | None, params_input: dict[str, Any] | None, expected_err_msg: str, hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ...
def test_generate_link_to_ui_without_base_url(self, hitl_task_and_ti_for_generating_link: tuple[HITLOperator, TaskInstance]) -> None: ...
# Task:
Write a Python test method `test_validate_defaults_with_invalid_defaults` in test class `TestHITLOperator` to verify the behavior of `validate_defaults_with_invalid_defaults`.
Module under test: __future__, uuid, tests_common.test_utils.version_compat | def test_validate_defaults_with_invalid_defaults(
self,
extra_kwargs: dict[str, Any],
expected_error_msg: str,
) -> None:
# validate_default is called during initialization
with pytest.raises(ValueError, match=expected_error_msg):
HITLOperator(
task_id="hitl_test",
subject="This is subject",
body="This is body",
options=["1", "2", "3", "4", "5"],
params=ParamsDict({"input_1": 1}),
**extra_kwargs,
) | test | 1 | {"function_name": "test_validate_defaults_with_invalid_defaults", "class_name": "TestHITLOperator", "qualname": "TestHITLOperator.test_validate_defaults_with_invalid_defaults", "file_path": "providers/standard/tests/unit/standard/operators/test_hitl.py", "repo_id": "apache/airflow", "loc": 15, "tested_modules": ["__future__", "uuid", "tests_common.test_utils.version_compat", "typing", "urllib.parse"], "has_docstring": false, "runnable_level": "project_runnable"} |
langflow-ai/langflow:src/lfx/src/lfx/cli/common.py:extract_script_dependencies | # Context:
from pathlib import Path
def create_verbose_printer(verbose: bool): ...
def is_port_in_use(port: int, host: str) -> bool: ...
def get_free_port(starting_port: int) -> int: ...
def get_best_access_host(host: str) -> str: ...
def get_api_key() -> str: ...
def is_url(path_or_url: str) -> bool: ...
def download_script_from_url(url: str, verbose_print) -> Path: ...
def validate_script_path(script_path: Path | str, verbose_print) -> tuple[str, Path]: ...
async def load_graph_from_path(script_path: Path, file_extension: str, verbose_print, verbose: bool): ...
def prepare_graph(graph, verbose_print): ...
async def execute_graph_with_capture(graph, input_value: str | None): ...
def extract_result_data(results, captured_logs: str) -> dict: ...
def _parse_pep723_block(script_path: Path, verbose_print) -> dict | None: ...
def _needs_install(requirement: str) -> bool: ...
def ensure_dependencies_installed(dependencies: list[str], verbose_print) -> None: ...
def flow_id_from_path(file_path: Path, root_dir: Path) -> str: ...
def _github_headers() -> dict[str, str]: ...
def detect_github_url_sync(url: str, timeout: float) -> str: ...
def download_and_extract_repo(url: str, verbose_print, timeout: float) -> Path: ...
def extract_script_docstring(script_path: Path) -> str | None: ...
# Task:
Write a Python function `extract_script_dependencies` to return dependency strings declared via PEP-723 inline metadata.
Parameters: script_path: Path, verbose_print
Returns: list[str] | def extract_script_dependencies(script_path: Path, verbose_print) -> list[str]:
"""Return dependency strings declared via PEP-723 inline metadata.
Only `.py` files are supported for now. Returns an empty list if the file has
no metadata block or could not be parsed.
"""
if script_path.suffix != ".py":
return []
parsed = _parse_pep723_block(script_path, verbose_print)
if not parsed:
return []
deps = parsed.get("dependencies", [])
# Ensure list[str]
if isinstance(deps, list):
return [str(d).strip() for d in deps if str(d).strip()]
return [] | function_simple | 1 | {"cognitive_complexity": 3, "loc": 18, "code_loc": 9, "docstring_loc": 5, "function_name": "extract_script_dependencies", "class_name": null, "qualname": "extract_script_dependencies", "file_path": "src/lfx/src/lfx/cli/common.py", "repo_id": "langflow-ai/langflow", "has_docstring": true, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/components/llm_operations/test_guardrails_component.py:TestGuardrailsComponent.test_extract_text_from_message_object | # Context:
from unittest.mock import MagicMock, patch
from lfx.components.llm_operations.guardrails import GuardrailsComponent
class TestGuardrailsComponent(ComponentTestBaseWithoutClient):
def component_class(self): ...
def default_kwargs(self): ...
def file_names_mapping(self): ...
def mock_llm(self): ...
def mock_llm_detect_violation(self): ...
def test_heuristic_detects_ignore_instructions(self): ...
def test_heuristic_detects_jailbreak_keyword(self): ...
def test_heuristic_detects_system_prompt_as_weak_signal(self): ...
def test_heuristic_passes_normal_input(self): ...
def test_heuristic_detects_forget_instructions(self): ...
def test_heuristic_detects_act_as_as_weak_signal(self): ...
def test_heuristic_multiple_weak_signals_accumulate(self): ...
def test_heuristic_legitimate_bypass_usage(self): ...
def test_heuristic_legitimate_act_as_usage(self): ...
def test_heuristic_score_capped_at_one(self): ...
def test_extract_text_from_string(self): ...
def test_extract_text_from_none(self): ...
def test_empty_input_raises_error(self, default_kwargs): ...
def test_whitespace_only_input_raises_error(self, default_kwargs): ...
def test_no_guardrails_enabled_raises_error(self, default_kwargs): ...
def test_validation_passes_with_clean_input(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_validation_fails_when_llm_detects_violation(self, mock_get_llm, mock_llm_detect_violation, default_kwargs): ...
def test_validation_caches_result(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_parse_yes_response(self, mock_get_llm, default_kwargs): ...
def test_parse_no_response(self, mock_get_llm, default_kwargs): ...
def test_parse_ambiguous_response_defaults_to_pass(self, mock_get_llm, default_kwargs): ...
def test_input_sanitizes_delimiter_injection(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_process_pass_returns_data_on_success(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_process_fail_returns_data_on_failure(self, mock_get_llm, mock_llm_detect_violation, default_kwargs): ...
def test_process_pass_returns_empty_on_failure(self, mock_get_llm, mock_llm_detect_violation, default_kwargs): ...
def test_process_fail_returns_empty_on_success(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_custom_guardrail_is_included_when_enabled(self, mock_get_llm, mock_llm, default_kwargs): ...
def test_custom_guardrail_ignored_when_empty(self, default_kwargs): ...
def test_get_fixed_justification_returns_correct_message(self): ...
def test_llm_empty_response_raises_error(self, mock_get_llm, default_kwargs): ...
def test_no_llm_configured_fails_validation(self, mock_get_llm, default_kwargs): ...
def test_llm_api_error_detected(self, mock_get_llm, default_kwargs): ...
def test_fail_fast_stops_on_first_failure(self, mock_get_llm, default_kwargs): ...
def test_pre_run_setup_resets_state(self, default_kwargs): ...
def test_integration_clean_input_passes(self): ...
def test_integration_pii_detection(self): ...
def test_integration_jailbreak_detection(self): ...
def test_integration_tokens_detection(self): ...
def test_integration_custom_guardrail(self): ...
# Task:
Write a Python test method `test_extract_text_from_message_object` in test class `TestGuardrailsComponent` to test text extraction from Message-like object.
Module under test: lfx.components.llm_operations.guardrails, lfx.schema, tests.base | def test_extract_text_from_message_object(self):
"""Test text extraction from Message-like object."""
component = GuardrailsComponent()
mock_message = MagicMock()
mock_message.text = "Message content"
result = component._extract_text(mock_message)
assert result == "Message content" | test | 1 | {"function_name": "test_extract_text_from_message_object", "class_name": "TestGuardrailsComponent", "qualname": "TestGuardrailsComponent.test_extract_text_from_message_object", "file_path": "src/backend/tests/unit/components/llm_operations/test_guardrails_component.py", "repo_id": "langflow-ai/langflow", "loc": 7, "tested_modules": ["lfx.components.llm_operations.guardrails", "lfx.schema", "tests.base"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/models/colqwen2/test_processing_colqwen2.py:ColQwen2ProcessorTest.test_process_images | # Context:
import torch
class ColQwen2ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = ColQwen2Processor
model_id = "vidore/colqwen2-v1.0-hf"
def test_apply_chat_template_image(self, batch_size, return_tensors): ...
def test_processor_with_multiple_inputs(self): ...
def test_tokenizer_defaults(self): ...
def test_get_num_vision_tokens(self): ...
def test_process_queries(self): ...
def test_tokenizer_defaults_preserved_by_kwargs(self): ...
def test_image_processor_defaults_preserved_by_image_kwargs(self): ...
def test_kwargs_overrides_default_tokenizer_kwargs(self): ...
def test_kwargs_overrides_default_image_processor_kwargs(self): ...
def test_unstructured_kwargs(self): ...
def test_unstructured_kwargs_batched(self): ...
def test_doubly_passed_kwargs(self): ...
def test_structured_kwargs_nested(self): ...
def test_structured_kwargs_nested_from_dict(self): ...
def test_model_input_names(self): ...
def test_processor_text_has_no_visual(self): ...
def test_image_processor_defaults(self): ...
def test_get_num_multimodal_tokens_matches_processor_call(self): ...
# Task:
Write a Python test method `test_process_images` in test class `ColQwen2ProcessorTest` to verify the behavior of `process_images`.
Module under test: parameterized, transformers.models.colqwen2.processing_colqwen2, transformers.testing_utils | def test_process_images(self):
# Processor configuration
image_input = self.prepare_image_inputs()
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer", max_length=112, padding="max_length")
image_processor.image_seq_length = 14
# Get the processor
processor = self.processor_class(
tokenizer=tokenizer,
image_processor=image_processor,
)
# Process the image
batch_feature = processor.process_images(images=image_input, return_tensors="pt")
# Assertions
self.assertIn("pixel_values", batch_feature)
self.assertEqual(batch_feature["pixel_values"].shape, torch.Size([1, 56, 1176])) | test | 0 | {"function_name": "test_process_images", "class_name": "ColQwen2ProcessorTest", "qualname": "ColQwen2ProcessorTest.test_process_images", "file_path": "tests/models/colqwen2/test_processing_colqwen2.py", "repo_id": "huggingface/transformers", "loc": 19, "tested_modules": ["parameterized", "transformers.models.colqwen2.processing_colqwen2", "transformers.testing_utils", "transformers.utils", "test_processing_common"], "has_docstring": false, "runnable_level": "class_runnable"} |
vllm-project/vllm:vllm/model_executor/models/parakeet.py:module_doc | Write a module-level docstring for the Python module `parakeet` which contains class `ParakeetProjection`, class `ProjectedParakeet`, class `ParakeetExtractor`. | Modules below used for the audio encoder component in: models/nano_nemotron_vl.py | documentation | 1 | {"doc_type": "module", "module_name": "parakeet", "file_path": "vllm/model_executor/models/parakeet.py", "repo_id": "vllm-project/vllm", "char_length": 81} |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/rag/types.py:AddDocumentParams:class_doc | Write a class-level docstring for `AddDocumentParams` (inherits from TypedDict) which has methods: various methods. | Parameters for adding documents to the RAG system. | documentation | 0 | {"doc_type": "class", "class_name": "AddDocumentParams", "file_path": "lib/crewai-tools/src/crewai_tools/tools/rag/types.py", "repo_id": "crewAIInc/crewAI", "char_length": 50, "methods": []} |
binary-husky/gpt_academic:shared_utils/fastapi_stream_server.py:MasterMindWebSocketServer:class_doc | Write a class-level docstring for `MasterMindWebSocketServer` (inherits from PythonMethod_AsyncConnectionMaintainer_AgentcraftInterface) which has methods: `__init__`, `create_event`, `terminate_event`, `long_task_01_wait_incoming_connection`. | WebSocket服务器主类
继承自异步连接维护器接口,实现了完整的WebSocket服务器功能。
负责处理客户端连接、事件管理和消息路由。 | documentation | 1 | {"doc_type": "class", "class_name": "MasterMindWebSocketServer", "file_path": "shared_utils/fastapi_stream_server.py", "repo_id": "binary-husky/gpt_academic", "char_length": 71, "methods": ["__init__", "create_event", "terminate_event", "long_task_01_wait_incoming_connection"]} |
huggingface/transformers:src/transformers/generation/continuous_batching/continuous_api.py:ContinuousMixin.generate_batch | # Context:
import torch
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from ...generation.configuration_utils import CompileConfig, GenerationConfig
from ...utils.logging import logging
from .requests import GenerationOutput, RequestState, RequestStatus, logger
class ProtoPretrainedModel(nn.Module): ...
class ContinuousBatchProcessor: ...
class ContinuousBatchingManager: ...
class ContinuousMixin:
def continuous_batching_context_manager(self, generation_config: GenerationConfig | None, manual_eviction: bool, max_queue_size: int, q_padding_interval_size: int, kv_padding_interval_size: int, allow_block_sharing: bool, block: bool, timeout: float | None, use_async_batching: bool | None, max_cached_graphs: int) -> Generator[ContinuousBatchingManager]: ...
def init_continuous_batching(self, generation_config: GenerationConfig | None, manual_eviction: bool, max_queue_size: int, q_padding_interval_size: int, kv_padding_interval_size: int, allow_block_sharing: bool, use_async_batching: bool | None, max_cached_graphs: int) -> ContinuousBatchingManager: ...
# Task:
Write a Python method `generate_batch` for the class `ContinuousMixin` to generate sequences for a batch of prompts using continuous batching.
Parameters: inputs: list[list[int]], generation_config: GenerationConfig | None, q_padding_interval_size: int, kv_padding_interval_size: int, allow_block_sharing: bool, record_timestamps: bool, progress_bar: bool, use_async_batching: bool | None, max_cached_graphs: int
Returns: dict[str, GenerationOutput] | def generate_batch(
self,
inputs: list[list[int]],
generation_config: GenerationConfig | None = None,
q_padding_interval_size: int = 0,
kv_padding_interval_size: int = 0,
allow_block_sharing: bool = True,
record_timestamps: bool = False,
progress_bar: bool = True,
use_async_batching: bool | None = None,
max_cached_graphs: int = 0,
**kwargs,
) -> dict[str, GenerationOutput]:
"""Generate sequences for a batch of prompts using continuous batching.
Args:
inputs: List of input token sequences (prompts)
generation_config: Optional generation configuration
q_padding_interval_size: Padding granularity for queries in tokens. 0 uses default.
kv_padding_interval_size: Padding granularity for KV cache in tokens. 0 uses default.
allow_block_sharing: A flag to allow block sharing if the model has some full attention layers
record_timestamps: If set to true, the requests will have a timestamp for each token generated
progress_bar: If set to true, a progress bar will be displayed
use_async_batching: Whether to use async double buffering or not. If None, will be automatically detected.
max_cached_graphs: Maximum number of cached CUDA graphs. 0 uses default.
**kwargs: Additional generation parameters
Returns:
`dict[str, GenerationOutput]`: a dictionary of request ids to GenerationOutput objects
"""
if not inputs:
return {}
if logger.getEffectiveLevel() <= logging.DEBUG:
logger.warning("Progress bar is disabled when logger level is less than DEBUG")
progress_bar = False
# Initialize manager with the batch inputs
results = {}
gen_cfg = self.generation_config if generation_config is None else generation_config
num_requests = len(inputs) * (gen_cfg.num_return_sequences if gen_cfg.num_return_sequences is not None else 1)
# Prepare context managers for the main loop
manager_cm = self.continuous_batching_context_manager(
generation_config=generation_config,
q_padding_interval_size=q_padding_interval_size,
kv_padding_interval_size=kv_padding_interval_size,
max_cached_graphs=max_cached_graphs,
allow_block_sharing=allow_block_sharing,
block=True,
timeout=5,
use_async_batching=use_async_batching,
)
logging_cm = logging_redirect_tqdm([logger])
pbar_cm = tqdm(
total=num_requests,
disable=(not progress_bar),
desc=f"Solving {num_requests} requests",
unit="request",
)
# Main loop
with manager_cm as manager, logging_cm, pbar_cm as pbar:
try:
manager.add_requests(
inputs=inputs, max_new_tokens=kwargs.get("max_new_tokens"), record_timestamps=record_timestamps
)
finished_count = 0
while finished_count < num_requests:
result = manager.get_result(timeout=1)
if result:
req_id = result.request_id
if result.is_finished():
results[req_id] = result
finished_count += 1
pbar.update(1)
else:
if not manager.is_running():
logger.error("Generation thread terminated unexpectedly.")
# This helps get some information in stdout
print("Returning results of generate_batch despite unexpected termination.")
break
except Exception as e:
logger.error(f"Error during batch generation: {e}", exc_info=True)
# Re-order requests to match the order of the inputs
reordered_results = {}
for i in range(len(inputs)):
# We cannot guarantee that the generation succeeded for all requests, so we need to check if the request is in the results
result = results.get(f"req_{i}")
if result is not None:
reordered_results[f"req_{i}"] = result
else:
logger.error(f"Request req_{i} not found in results.")
return reordered_results | function_complex | 0 | {"cognitive_complexity": 26, "loc": 92, "code_loc": 54, "docstring_loc": 17, "function_name": "generate_batch", "class_name": "ContinuousMixin", "qualname": "ContinuousMixin.generate_batch", "file_path": "src/transformers/generation/continuous_batching/continuous_api.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/dia/feature_extraction_dia.py:DiaFeatureExtractor.__call__ | # Context:
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
class DiaFeatureExtractor(SequenceFeatureExtractor):
model_input_names = ["input_values", "n_quantizers"]
def __init__(
self,
feature_size: int = 1,
sampling_rate: int = 16000,
padding_value: float = 0.0,
hop_length: int = 512,
**kwargs,
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.hop_length = hop_length
# Task:
Write a Python method `__call__` for the class `DiaFeatureExtractor` to main method to featurize and prepare for the model one or several sequence(s).
Parameters: raw_audio: np.ndarray | list[float] | list[np.ndarray] | list[list[float]], padding: bool | str | PaddingStrategy | None, truncation: bool | None, max_length: int | None, return_tensors: str | TensorType | None, sampling_rate: int | None
Returns: BatchFeature | def __call__(
self,
raw_audio: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
padding: bool | str | PaddingStrategy | None = None,
truncation: bool | None = False,
max_length: int | None = None,
return_tensors: str | TensorType | None = None,
sampling_rate: int | None = None,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_audio (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be processed. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. The numpy array must be of shape
`(num_samples,)` for mono audio (`feature_size = 1`), or `(2, num_samples)` for stereo audio
(`feature_size = 2`).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, *optional*, defaults to `False`):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
return_tensors (`str` or [`~utils.TensorType`], *optional*, default to 'pt'):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one.")
elif padding is None:
# by default let's pad the inputs
padding = True
is_batched = bool(
isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
)
if is_batched:
raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
elif not is_batched and not isinstance(raw_audio, np.ndarray):
raw_audio = np.asarray(raw_audio, dtype=np.float32)
elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
raw_audio = raw_audio.astype(np.float32)
# always return batch
if not is_batched:
raw_audio = [np.asarray(raw_audio).T]
# convert stereo to mono if necessary, unique to Dia
for idx, example in enumerate(raw_audio):
if self.feature_size == 2 and example.ndim == 2:
raw_audio[idx] = np.mean(example, -1)
# verify inputs are valid
for idx, example in enumerate(raw_audio):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
if self.feature_size == 2 and example.ndim != 1: # note the conversion before
raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")
input_values = BatchFeature({"input_values": raw_audio})
# temporarily treat it as if we were mono as we also convert stereo to mono
original_feature_size = self.feature_size
self.feature_size = 1
# normal padding on batch
padded_inputs = self.pad(
input_values,
max_length=max_length,
truncation=truncation,
padding=padding,
return_attention_mask=True,
pad_to_multiple_of=self.hop_length,
)
padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")
input_values = []
for example in padded_inputs.pop("input_values"):
if self.feature_size == 1:
example = example[..., None]
input_values.append(example.T)
padded_inputs["input_values"] = input_values
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
# rewrite back to original feature size
self.feature_size = original_feature_size
return padded_inputs | function_complex | 0 | {"cognitive_complexity": 31, "loc": 120, "code_loc": 59, "docstring_loc": 32, "function_name": "__call__", "class_name": "DiaFeatureExtractor", "qualname": "DiaFeatureExtractor.__call__", "file_path": "src/transformers/models/dia/feature_extraction_dia.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:ci/fossa/test_ray_oss_analysis.py:test_is_own_code | # Context:
from unittest.mock import mock_open, patch
from ci.fossa import ray_oss_analysis
def reset_logger(): ...
def test_setup_logger(mock_file_handler) -> None: ...
def test_is_excluded_kind() -> None: ...
def test_is_build_tool() -> None: ...
def test_is_cpp_code() -> None: ...
def test_get_dependency_info() -> None: ...
def test_clean_path() -> None: ...
def test_get_package_name() -> None: ...
def test_get_bazel_dependencies(mock_check_output) -> None: ...
def test_copy_single_file(mock_makedirs, mock_copy) -> None: ...
def test_expand_license_files(mock_glob) -> None: ...
def test_askalono_crawl(mock_check_output) -> None: ...
def test_generate_fossa_deps_file(mock_file, mock_yaml_dump) -> None: ...
# Task:
Write a Python test function `test_is_own_code` to verify the behavior of `is_own_code`.
Module under test: ci.fossa | def test_is_own_code(mock_getcwd) -> None:
mock_getcwd.return_value = "/repo/root"
assert ray_oss_analysis._is_own_code("/repo/root/file.py")
assert not ray_oss_analysis._is_own_code("/other/root/file.py")
assert not ray_oss_analysis._is_own_code(None) | test | 0 | {"function_name": "test_is_own_code", "class_name": null, "qualname": "test_is_own_code", "file_path": "ci/fossa/test_ray_oss_analysis.py", "repo_id": "ray-project/ray", "loc": 5, "tested_modules": ["ci.fossa"], "has_docstring": false, "runnable_level": "project_runnable"} |
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/cache/cleanup.py:delete_one | # Context:
from crewai_files.cache.upload_cache import CachedUpload, UploadCache
from crewai_files.uploaders.base import FileUploader
def _safe_delete(uploader: FileUploader, file_id: str, provider: str) -> bool: ...
def cleanup_uploaded_files(cache: UploadCache, delete_from_provider: bool, providers: list[ProviderType] | None) -> int: ...
def cleanup_expired_files(cache: UploadCache, delete_from_provider: bool) -> int: ...
def cleanup_provider_files(provider: ProviderType, cache: UploadCache | None, delete_all_from_provider: bool) -> int: ...
def _get_providers_from_cache(cache: UploadCache) -> set[ProviderType]: ...
async def _asafe_delete(uploader: FileUploader, file_id: str, provider: str) -> bool: ...
async def acleanup_uploaded_files(cache: UploadCache, delete_from_provider: bool, providers: list[ProviderType] | None, max_concurrency: int) -> int: ...
async def acleanup_expired_files(cache: UploadCache, delete_from_provider: bool, max_concurrency: int) -> int: ...
async def acleanup_provider_files(provider: ProviderType, cache: UploadCache | None, delete_all_from_provider: bool, max_concurrency: int) -> int: ...
# Task:
Write a Python async function `delete_one` to delete a single file with semaphore limiting.
Parameters: file_uploader: FileUploader, cached: CachedUpload
Returns: bool | async def delete_one(file_uploader: FileUploader, cached: CachedUpload) -> bool:
"""Delete a single file with semaphore limiting."""
async with semaphore:
return await _asafe_delete(
file_uploader, cached.file_id, cached.provider
) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 6, "code_loc": 4, "docstring_loc": 1, "function_name": "delete_one", "class_name": null, "qualname": "delete_one", "file_path": "lib/crewai-files/src/crewai_files/cache/cleanup.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/quantization/mxfp4/test_mxfp4.py:Mxfp4IntegrationTest.test_should_convert_module | # Context:
from transformers.quantizers.quantizers_utils import should_convert_module
def _empty_accelerator_cache(): ...
def _patch_no_accelerator(): ...
class Mxfp4ConfigTest(unittest.TestCase): ...
class Mxfp4QuantizerTest(unittest.TestCase): ...
class Mxfp4ModelTest(unittest.TestCase): ...
class Mxfp4IntegrationTest(unittest.TestCase):
def test_convert_moe_packed_tensors(self): ...
def test_quantize_to_mxfp4(self): ...
# Task:
Write a Python test method `test_should_convert_module` in test class `Mxfp4IntegrationTest` to test module conversion decision logic.
Module under test: contextlib, transformers, transformers.testing_utils | def test_should_convert_module(self):
"""Test module conversion decision logic"""
from transformers.quantizers.quantizers_utils import should_convert_module
# Should convert by default
self.assertTrue(should_convert_module("model", None))
self.assertTrue(should_convert_module("model", []))
# Should not convert if in exclusion list
patterns = ["model.layers.*.self_attn", "lm_head"]
self.assertFalse(should_convert_module("lm_head", patterns))
self.assertTrue(should_convert_module("experts", patterns)) | test | 0 | {"function_name": "test_should_convert_module", "class_name": "Mxfp4IntegrationTest", "qualname": "Mxfp4IntegrationTest.test_should_convert_module", "file_path": "tests/quantization/mxfp4/test_mxfp4.py", "repo_id": "huggingface/transformers", "loc": 12, "tested_modules": ["contextlib", "transformers", "transformers.testing_utils", "transformers.utils", "transformers.quantizers.quantizer_mxfp4"], "has_docstring": true, "runnable_level": "class_runnable"} |
crewAIInc/crewAI:lib/crewai-files/src/crewai_files/cache/upload_cache.py:_cleanup_on_exit | # Context:
from crewai_files.cache.cleanup import cleanup_uploaded_files
class CachedUpload: ...
def _make_key(file_hash: str, provider: str) -> str: ...
def _compute_file_hash_streaming(chunks: Iterator[bytes]) -> str: ...
def _compute_file_hash(file: FileInput) -> str: ...
class UploadCache: ...
def get_upload_cache(ttl: int, namespace: str, cache_type: str, **cache_kwargs) -> UploadCache: ...
def reset_upload_cache() -> None: ...
# Task:
Write a Python function `_cleanup_on_exit` to clean up uploaded files on process exit.
Returns: None | def _cleanup_on_exit() -> None:
"""Clean up uploaded files on process exit."""
global _default_cache
if _default_cache is None or len(_default_cache) == 0:
return
from crewai_files.cache.cleanup import cleanup_uploaded_files
try:
cleanup_uploaded_files(_default_cache)
except Exception as e:
logger.debug(f"Error during exit cleanup: {e}") | function_simple | 0 | {"cognitive_complexity": 3, "loc": 12, "code_loc": 8, "docstring_loc": 1, "function_name": "_cleanup_on_exit", "class_name": null, "qualname": "_cleanup_on_exit", "file_path": "lib/crewai-files/src/crewai_files/cache/upload_cache.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
unclecode/crawl4ai:docs/examples/adaptive_crawling/custom_strategies.py:APIDocumentationStrategy:class_doc | Write a class-level docstring for `APIDocumentationStrategy` which has methods: `__init__`, `score_link`, `calculate_api_coverage`. | Custom strategy optimized for API documentation crawling.
Prioritizes endpoint references, code examples, and parameter descriptions. | documentation | 1 | {"doc_type": "class", "class_name": "APIDocumentationStrategy", "file_path": "docs/examples/adaptive_crawling/custom_strategies.py", "repo_id": "unclecode/crawl4ai", "char_length": 133, "methods": ["__init__", "score_link", "calculate_api_coverage"]} |
666ghj/BettaFish:ReportEngine/renderers/html_renderer.py:HTMLRenderer._transpose_single_cell_table | # Context:
import copy
from typing import Any, Dict, List
class HTMLRenderer:
CALLOUT_ALLOWED_TYPES = {
INLINE_ARTIFACT_KEYS = {
TABLE_COMPLEX_CHARS = set(
def __init__(self, config: Dict[str, Any] | None = None):
"""
初始化渲染器缓存并允许注入额外配置。
参数层级说明:
- config: dict | None,供调用方临时覆盖主题/调试开关等,优先级最高;
典型键值:
- themeOverride: 覆盖元数据里的 themeTokens;
- enableDebug: bool,是否输出额外日志。
内部状态:
- self.document/metadata/chapters:保存一次渲染周期的 IR;
- self.widget_scripts:收集图表配置 JSON,后续在 _render_body 尾部注水;
- self._lib_cache/_pdf_font_base64:缓存本地库与字体,避免重复IO;
- self.chart_validator/chart_repairer:Chart.js 配置的本地与 LLM 兜底修复器;
- self.chart_validation_stats:记录总量/修复来源/失败数量,便于日志审计。
"""
self.config = config or {}
self.document: Dict[str, Any] = {}
self.widget_scripts: List[str] = []
self.chart_counter = 0
self.toc_entries: List[Dict[str, Any]] = []
self.heading_counter = 0
self.metadata: Dict[str, Any] = {}
self.chapters: List[Dict[str, Any]] = []
self.chapter_anchor_map: Dict[str, str] = {}
self.heading_label_map: Dict[str, Dict[str, Any]] = {}
self.primary_heading_index = 0
self.secondary_heading_index = 0
self.toc_rendered = False
self.hero_kpi_signature: tuple | None = None
self._current_chapter: Dict[str, Any] | None = None
self._lib_cache: Dict[str, str] = {}
self._pdf_font_base64: str | None = None
# 初始化图表验证和修复器
self.chart_validator = create_chart_validator()
llm_repair_fns = create_llm_repair_functions()
self.chart_repairer = create_chart_repairer(
validator=self.chart_validator,
llm_repair_fns=llm_repair_fns
)
# 打印LLM修复函数状态
self._llm_repair_count = len(llm_repair_fns)
if not llm_repair_fns:
logger.warning("HTMLRenderer: 未配置任何LLM API,图表API修复功能不可用")
else:
logger.info(f"HTMLRenderer: 已配置 {len(llm_repair_fns)} 个LLM修复函数")
# 记录修复失败的图表,避免多次触发LLM循环修复
self._chart_failure_notes: Dict[str, str] = {}
self._chart_failure_recorded: set[str] = set()
# 统计信息
self.chart_validation_stats = {
'total': 0,
'valid': 0,
'repaired_locally': 0,
'repaired_api': 0,
'failed': 0
}
def _get_lib_path() -> Path: ...
def _get_font_path() -> Path: ...
def _load_lib(self, filename: str) -> str: ...
def _load_pdf_font_data(self) -> str: ...
def _reset_chart_validation_stats(self) -> None: ...
def _build_script_with_fallback(self, inline_code: str, cdn_url: str, check_expression: str, lib_name: str, is_defer: bool) -> str: ...
def render(self, document_ir: Dict[str, Any], ir_file_path: str | None) -> str: ...
def _resolve_color_value(self, value: Any, fallback: str) -> str: ...
def _resolve_color_family(self, value: Any, fallback: Dict[str, str]) -> Dict[str, str]: ...
def _render_head(self, title: str, theme_tokens: Dict[str, Any]) -> str: ...
def _render_body(self) -> str: ...
def _render_header(self) -> str: ...
def _render_tagline(self) -> str: ...
def _render_cover(self) -> str: ...
def _render_hero(self) -> str: ...
def _render_meta_panel(self) -> str: ...
def _render_toc_section(self) -> str: ...
def _collect_toc_entries(self, chapters: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ...
def _validate_toc_anchor(self, anchor: str, chapters: List[Dict[str, Any]]) -> bool: ...
def _prepare_chapters(self, chapters: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ...
def _expand_blocks_in_place(self, blocks: List[Dict[str, Any]] | None) -> List[Dict[str, Any]]: ...
def _extract_embedded_blocks(self, block: Dict[str, Any]) -> List[Dict[str, Any]]: ...
def _decode_embedded_block_payload(self, raw: str) -> List[Dict[str, Any]] | None: ...
def _looks_like_block(payload: Dict[str, Any]) -> bool: ...
def _collect_blocks_from_payload(self, payload: Any) -> List[Dict[str, Any]]: ...
def _coerce_block_dict(self, payload: Any) -> Dict[str, Any] | None: ...
def _format_toc_entry(self, entry: Dict[str, Any]) -> str: ...
def _compute_heading_labels(self, chapters: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: ...
def _strip_order_prefix(text: str) -> str: ...
def _to_chinese_numeral(number: int) -> str: ...
def _render_chapter(self, chapter: Dict[str, Any]) -> str: ...
def _render_blocks(self, blocks: List[Dict[str, Any]]) -> str: ...
def _render_block(self, block: Dict[str, Any]) -> str: ...
def _wrap_error_block(self, html_fragment: str, block: Dict[str, Any]) -> str: ...
def _render_heading(self, block: Dict[str, Any]) -> str: ...
def _render_paragraph(self, block: Dict[str, Any]) -> str: ...
def _is_metadata_paragraph(self, inlines: List[Any]) -> bool: ...
def _render_standalone_math_inline(self, run: Dict[str, Any] | str) -> str | None: ...
def _render_list(self, block: Dict[str, Any]) -> str: ...
def _flatten_nested_cells(self, cells: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ...
def _fix_nested_table_rows(self, rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ...
def _render_table(self, block: Dict[str, Any]) -> str: ...
def _render_swot_table(self, block: Dict[str, Any]) -> str: ...
def _render_swot_card_layout(self, block: Dict[str, Any], title: str, summary: str | None) -> str: ...
def _render_swot_pdf_table_layout(self, block: Dict[str, Any], title: str, summary: str | None) -> str: ...
def _normalize_swot_items(self, raw: Any) -> List[Dict[str, Any]]: ...
def _render_swot_item(self, item: Dict[str, Any]) -> str: ...
def _render_pest_table(self, block: Dict[str, Any]) -> str: ...
def _render_pest_card_layout(self, block: Dict[str, Any], title: str, summary: str | None) -> str: ...
def _render_pest_pdf_table_layout(self, block: Dict[str, Any], title: str, summary: str | None) -> str: ...
def _normalize_pest_items(self, raw: Any) -> List[Dict[str, Any]]: ...
def _render_pest_item(self, item: Dict[str, Any]) -> str: ...
def _normalize_table_rows(self, rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]: ...
def _detect_transposed_header_span(self, rows: List[Dict[str, Any]], texts: List[str]) -> int: ...
def _is_potential_table_header(self, text: str) -> bool: ...
def _looks_like_table_value(self, text: str) -> bool: ...
def _extract_row_text(self, row: Dict[str, Any]) -> str: ...
def _render_blockquote(self, block: Dict[str, Any]) -> str: ...
def _render_engine_quote(self, block: Dict[str, Any]) -> str: ...
def _render_code(self, block: Dict[str, Any]) -> str: ...
def _render_math(self, block: Dict[str, Any]) -> str: ...
def _render_figure(self, block: Dict[str, Any]) -> str: ...
def _render_callout(self, block: Dict[str, Any]) -> str: ...
def _split_callout_content(self, blocks: List[Dict[str, Any]] | None) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]: ...
def _sanitize_callout_list(self, block: Dict[str, Any]) -> tuple[Dict[str, Any] | None, List[Dict[str, Any]]]: ...
def _render_kpi_grid(self, block: Dict[str, Any]) -> str: ...
def _merge_dicts(self, base: Dict[str, Any] | None, override: Dict[str, Any] | None) -> Dict[str, Any]: ...
def _looks_like_chart_dataset(self, candidate: Any) -> bool: ...
def _coerce_chart_data_structure(self, data: Dict[str, Any]) -> Dict[str, Any]: ...
def _prepare_widget_payload(self, block: Dict[str, Any]) -> tuple[Dict[str, Any], Dict[str, Any]]: ...
def _is_chart_data_empty(data: Dict[str, Any] | None) -> bool: ...
def _chart_cache_key(self, block: Dict[str, Any]) -> str: ...
def _note_chart_failure(self, cache_key: str, reason: str) -> None: ...
def _record_chart_failure_stat(self, cache_key: str | None) -> None: ...
def _apply_cached_review_stats(self, block: Dict[str, Any]) -> None: ...
def _format_chart_error_reason(self, validation_result: ValidationResult | None, fallback_reason: str | None) -> str: ...
def _render_chart_error_placeholder(self, title: str | None, reason: str, widget_id: str | None) -> str: ...
def _has_chart_failure(self, block: Dict[str, Any]) -> tuple[bool, str | None]: ...
def _normalize_chart_block(self, block: Dict[str, Any], chapter_context: Dict[str, Any] | None) -> None: ...
def _ensure_chart_reviewed(self, block: Dict[str, Any], chapter_context: Dict[str, Any] | None, increment_stats: bool) -> tuple[bool, str | None]: ...
def review_and_patch_document(self, document_ir: Dict[str, Any], reset_stats: bool, clone: bool) -> Dict[str, Any]: ...
def _render_widget(self, block: Dict[str, Any]) -> str: ...
def _render_widget_fallback(self, data: Dict[str, Any], widget_id: str | None) -> str: ...
def _render_wordcloud_fallback(self, props: Dict[str, Any] | None, widget_id: str | None, block_data: Any | None) -> str: ...
def _log_chart_validation_stats(self): ...
def _kpi_signature_from_items(self, items: Any) -> tuple | None: ...
def _normalize_kpi_item(self, item: Any) -> tuple[str, str, str, str, str] | None: ...
def _should_skip_overview_kpi(self, block: Dict[str, Any]) -> bool: ...
def _normalize_inline_payload(self, run: Dict[str, Any]) -> tuple[str, List[Dict[str, Any]]]: ...
def _normalize_latex_string(raw: Any) -> str: ...
def _render_text_with_inline_math(self, text: Any, math_id: str | list | None, allow_display_block: bool) -> str | None: ...
def _coerce_inline_payload(payload: Dict[str, Any]) -> Dict[str, Any] | None: ...
def _render_inline(self, run: Dict[str, Any]) -> str: ...
def _render_markdown_bold_fallback(self, text: str) -> str: ...
def _clean_text_from_json_artifacts(self, text: Any) -> str: ...
def _safe_text(self, value: Any) -> str: ...
def _escape_html(self, value: Any) -> str: ...
def _escape_attr(self, value: Any) -> str: ...
def _build_css(self, tokens: Dict[str, Any]) -> str: ...
def _hydration_script(self) -> str: ...
# Task:
Write a Python method `_transpose_single_cell_table` for the class `HTMLRenderer` to 将单列多行的表格转换为标准表头 + 若干数据行.
Parameters: rows: List[Dict[str, Any]], span: int
Returns: List[Dict[str, Any]] | def _transpose_single_cell_table(self, rows: List[Dict[str, Any]], span: int) -> List[Dict[str, Any]]:
"""将单列多行的表格转换为标准表头 + 若干数据行"""
total = len(rows)
if total <= span or (total - span) % span != 0:
return []
header_rows = rows[:span]
data_rows = rows[span:]
normalized: List[Dict[str, Any]] = []
header_cells = []
for row in header_rows:
cell = copy.deepcopy((row.get("cells") or [{}])[0])
cell["header"] = True
header_cells.append(cell)
normalized.append({"cells": header_cells})
for start in range(0, len(data_rows), span):
group = data_rows[start : start + span]
if len(group) < span:
break
normalized.append(
{
"cells": [
copy.deepcopy((item.get("cells") or [{}])[0])
for item in group
]
}
)
return normalized | function_complex | 1 | {"cognitive_complexity": 6, "loc": 27, "code_loc": 25, "docstring_loc": 1, "function_name": "_transpose_single_cell_table", "class_name": "HTMLRenderer", "qualname": "HTMLRenderer._transpose_single_cell_table", "file_path": "ReportEngine/renderers/html_renderer.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "slib_runnable"} |
fastapi/fastapi:tests/test_tutorial/test_body/test_tutorial002.py:test_post_with_tax | # Context:
import pytest
from fastapi.testclient import TestClient
def get_client(request: pytest.FixtureRequest): ...
def test_post_without_tax(client: TestClient, price: str | float): ...
def test_post_with_no_data(client: TestClient): ...
def test_openapi_schema(client: TestClient): ...
# Task:
Write a Python test function `test_post_with_tax` to verify the behavior of `post_with_tax`.
Module under test: fastapi.testclient, inline_snapshot, utils | def test_post_with_tax(client: TestClient, price: str | float):
response = client.post(
"/items/",
json={"name": "Foo", "price": price, "description": "Some Foo", "tax": 0.3},
)
assert response.status_code == 200
assert response.json() == {
"name": "Foo",
"price": 50.5,
"description": "Some Foo",
"tax": 0.3,
"price_with_tax": 50.8,
} | test | 1 | {"function_name": "test_post_with_tax", "class_name": null, "qualname": "test_post_with_tax", "file_path": "tests/test_tutorial/test_body/test_tutorial002.py", "repo_id": "fastapi/fastapi", "loc": 13, "tested_modules": ["fastapi.testclient", "inline_snapshot", "utils"], "has_docstring": false, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/models/t5gemma2/modular_t5gemma2.py:sliding_window_mask_function | # Context:
from collections.abc import Callable
class T5Gemma2TextConfig(Gemma3TextConfig, PreTrainedConfig): ...
class T5Gemma2EncoderConfig(Gemma3Config): ...
class T5Gemma2DecoderConfig(Gemma3TextConfig, PreTrainedConfig): ...
class T5Gemma2Config(PreTrainedConfig): ...
class T5Gemma2RMSNorm(Gemma3RMSNorm): ...
class T5Gemma2MLP(Gemma3MLP): ...
class T5Gemma2RotaryEmbedding(Gemma3RotaryEmbedding): ...
class T5Gemma2SelfAttention(Gemma3Attention): ...
class T5Gemma2MergedAttention(Gemma3Attention): ...
class T5Gemma2EncoderLayer(T5GemmaEncoderLayer): ...
class T5Gemma2DecoderLayer(T5GemmaEncoderLayer): ...
class T5Gemma2LMHead(T5GemmaLMHead): ...
class T5Gemma2ClassificationHead(T5GemmaClassificationHead): ...
class T5Gemma2MultiModalProjector(Gemma3MultiModalProjector): ...
class T5Gemma2TextScaledWordEmbedding(Gemma3TextScaledWordEmbedding): ...
class T5Gemma2PreTrainedModel(Gemma3PreTrainedModel): ...
class T5Gemma2TextEncoder(T5Gemma2PreTrainedModel): ...
class T5Gemma2Encoder(T5Gemma2PreTrainedModel): ...
class T5Gemma2Decoder(T5Gemma2PreTrainedModel): ...
class T5Gemma2Model(T5Gemma2PreTrainedModel): ...
class T5Gemma2ForConditionalGeneration(T5Gemma2PreTrainedModel, GenerationMixin): ...
class T5Gemma2ForSequenceClassification(T5Gemma2PreTrainedModel): ...
class T5Gemma2ForTokenClassification(T5Gemma2PreTrainedModel): ...
# Task:
Write a Python function `sliding_window_mask_function` to this creates uni/bidirectional attention mask with sliding window.
Parameters: sliding_window: int, is_causal
Returns: Callable | def sliding_window_mask_function(sliding_window: int, is_causal=True) -> Callable:
"""
This creates uni/bidirectional attention mask with sliding window.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
if is_causal:
left_window_size, right_window_size = sliding_window, 0
else:
left_window_size, right_window_size = ((sliding_window + 1) // 2, (sliding_window) // 2 + 1)
dist = q_idx - kv_idx
left_mask = (dist >= 0) & (dist < left_window_size)
right_mask = (dist < 0) & (-dist < right_window_size)
return left_mask | right_mask
return inner_mask | function_simple | 0 | {"cognitive_complexity": 3, "loc": 17, "code_loc": 10, "docstring_loc": 3, "function_name": "sliding_window_mask_function", "class_name": null, "qualname": "sliding_window_mask_function", "file_path": "src/transformers/models/t5gemma2/modular_t5gemma2.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "file_runnable"} |
ray-project/ray:python/ray/data/tests/expressions/test_namespace_string.py:TestStringTransform.test_reverse | # Context:
import pandas as pd
from ray.data._internal.util import rows_same
from ray.data.expressions import col
def _create_dataset(items_data, dataset_format, arrow_table): ...
class TestStringLength: ...
class TestStringCase: ...
class TestStringPredicates: ...
class TestStringTrimming: ...
class TestStringPadding: ...
class TestStringSearch: ...
class TestStringTransform:
def test_slice(self, ray_start_regular_shared, dataset_format): ...
def test_replace(self, ray_start_regular_shared, dataset_format): ...
def test_replace_with_max(self, ray_start_regular_shared, dataset_format): ...
def test_repeat(self, ray_start_regular_shared, dataset_format): ...
def test_string_with_comparison(self, ray_start_regular_shared, dataset_format): ...
def test_multiple_string_operations(self, ray_start_regular_shared, dataset_format): ...
# Task:
Write a Python test method `test_reverse` in test class `TestStringTransform` to test str.reverse() reverses strings.
Module under test: packaging, ray.data._internal.util, ray.data.expressions | def test_reverse(self, ray_start_regular_shared, dataset_format):
"""Test str.reverse() reverses strings."""
data = [{"val": "hello"}, {"val": "world"}]
ds = _create_dataset(data, dataset_format)
result = ds.with_column("rev", col("val").str.reverse()).to_pandas()
expected = pd.DataFrame({"val": ["hello", "world"], "rev": ["olleh", "dlrow"]})
assert rows_same(result, expected) | test | 0 | {"function_name": "test_reverse", "class_name": "TestStringTransform", "qualname": "TestStringTransform.test_reverse", "file_path": "python/ray/data/tests/expressions/test_namespace_string.py", "repo_id": "ray-project/ray", "loc": 7, "tested_modules": ["packaging", "ray.data._internal.util", "ray.data.expressions", "ray.data.tests.conftest", "ray.tests.conftest"], "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/serve/_private/rolling_window_accumulator.py:RollingWindowAccumulator:class_doc | Write a class-level docstring for `RollingWindowAccumulator` which has methods: `__init__`, `window_duration_s`, `num_buckets`, `bucket_duration_s`, `_ensure_initialized`. | Tracks cumulative values over a rolling time window.
Uses bucketing for memory efficiency - divides the window into N buckets
and rotates them as time passes. This allows efficient tracking of values
over a sliding window without storing individual data points.
Uses thread-local storage for lock-free writes on the hot path (add()).
Only get_total() requires synchronization to aggregate across threads.
Example:
# Create a 10-minute rolling window with 60 buckets (10s each)
accumulator = RollingWindowAccumulator(
window_duration_s=600.0,
num_buckets=60,
)
# Add values (lock-free, safe from multiple threads)
accumulator.add(100.0)
accumulator.add(50.0)
# Get total (aggregates across all threads)
total = accumulator.get_total()
Thread Safety:
- add() is lock-free after the first call from each thread
- get_total() acquires a lock to aggregate across threads
- Safe to call from multiple threads concurrently | documentation | 0 | {"doc_type": "class", "class_name": "RollingWindowAccumulator", "file_path": "python/ray/serve/_private/rolling_window_accumulator.py", "repo_id": "ray-project/ray", "char_length": 981, "methods": ["__init__", "window_duration_s", "num_buckets", "bucket_duration_s", "_ensure_initialized", "_rotate_buckets_if_needed", "add", "get_total", "get_num_registered_threads"]} |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_text_loaders.py:TestTextLoader.test_whitespace_text | # Context:
from crewai_tools.rag.loaders.text_loader import TextFileLoader, TextLoader
from crewai_tools.rag.source_content import SourceContent
def write_temp_file(content, suffix, encoding): ...
def cleanup_temp_file(path): ...
class TestTextFileLoader: ...
class TestTextLoadersIntegration: ...
class TestTextLoader:
def test_basic_text(self): ...
def test_multiline_text(self): ...
def test_empty_text(self): ...
def test_unicode_text(self): ...
def test_special_characters(self): ...
def test_doc_id_uniqueness(self): ...
def test_long_text(self): ...
# Task:
Write a Python test method `test_whitespace_text` in test class `TestTextLoader` to verify the behavior of `whitespace_text`.
Module under test: crewai_tools.rag.base_loader, crewai_tools.rag.loaders.text_loader, crewai_tools.rag.source_content | def test_whitespace_text(self):
content = " \n\t "
result = TextLoader().load(SourceContent(content))
assert result.content == content | test | 0 | {"function_name": "test_whitespace_text", "class_name": "TestTextLoader", "qualname": "TestTextLoader.test_whitespace_text", "file_path": "lib/crewai-tools/tests/rag/test_text_loaders.py", "repo_id": "crewAIInc/crewAI", "loc": 4, "tested_modules": ["crewai_tools.rag.base_loader", "crewai_tools.rag.loaders.text_loader", "crewai_tools.rag.source_content"], "has_docstring": false, "runnable_level": "project_runnable"} |
vllm-project/vllm:vllm/reasoning/basic_parsers.py:BaseThinkingReasoningParser.extract_reasoning_streaming | # Context:
from collections.abc import Iterable, Sequence
from vllm.entrypoints.openai.engine.protocol import DeltaMessage
class BaseThinkingReasoningParser(ReasoningParser):
def start_token(self) -> str: ...
def end_token(self) -> str: ...
def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
super().__init__(tokenizer, *args, **kwargs)
if not self.model_tokenizer:
raise ValueError(
"The model tokenizer must be passed to the ReasoningParser "
"constructor during construction."
)
if not self.start_token or not self.end_token:
raise ValueError("start_token and end_token must be defined in subclasses")
self.start_token_id = self.vocab.get(self.start_token)
self.end_token_id = self.vocab.get(self.end_token)
if self.start_token_id is None or self.end_token_id is None:
raise RuntimeError(
f"{self.__class__.__name__} reasoning parser could not locate "
"think start/end tokens in the tokenizer!"
)
def is_reasoning_end(self, input_ids: Sequence[int]) -> bool: ...
def is_reasoning_end_streaming(self, input_ids: Sequence[int], delta_ids: Iterable[int]) -> bool: ...
def extract_content_ids(self, input_ids: list[int]) -> list[int]: ...
def extract_reasoning(self, model_output: str, request: ChatCompletionRequest | ResponsesRequest) -> tuple[str | None, str | None]: ...
def count_reasoning_tokens(self, token_ids: Sequence[int]) -> int: ...
# Task:
Write a Python method `extract_reasoning_streaming` for the class `BaseThinkingReasoningParser` to extract reasoning content from a delta message.
Parameters: previous_text: str, current_text: str, delta_text: str, previous_token_ids: Sequence[int], current_token_ids: Sequence[int], delta_token_ids: Sequence[int]
Returns: DeltaMessage | None | def extract_reasoning_streaming(
self,
previous_text: str,
current_text: str,
delta_text: str,
previous_token_ids: Sequence[int],
current_token_ids: Sequence[int],
delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
"""
Extract reasoning content from a delta message.
Handles streaming output where previous + delta = current.
Uses token IDs for faster processing.
"""
# Skip single special tokens
if len(delta_token_ids) == 1 and (
delta_token_ids[0] in [self.start_token_id, self.end_token_id]
):
return None
# Check if start token is present in previous or delta.
# Keep compatibility with models that don't generate start tokens.
if self.start_token_id in previous_token_ids:
if self.end_token_id in delta_token_ids:
# start token in previous, end token in delta,
# extract reasoning content
end_index = delta_text.find(self.end_token)
reasoning = delta_text[:end_index]
content = delta_text[end_index + len(self.end_token) :]
return DeltaMessage(
reasoning=reasoning, content=content if content else None
)
elif self.end_token_id in previous_token_ids:
# start token in previous, end token in previous,
# reasoning content continues
return DeltaMessage(content=delta_text)
else:
# start token in previous, no end token in previous or delta,
# reasoning content continues
return DeltaMessage(reasoning=delta_text)
elif self.start_token_id in delta_token_ids:
if self.end_token_id in delta_token_ids:
# start token in delta, end token in delta,
# extract reasoning content
start_index = delta_text.find(self.start_token)
end_index = delta_text.find(self.end_token)
reasoning = delta_text[start_index + len(self.start_token) : end_index]
content = delta_text[end_index + len(self.end_token) :]
return DeltaMessage(
reasoning=reasoning, content=content if content else None
)
else:
# start token in delta, no end token in delta,
# reasoning content continues
return DeltaMessage(reasoning=delta_text)
else:
# not find thinking start token
return DeltaMessage(content=delta_text) | function_complex | 1 | {"cognitive_complexity": 12, "loc": 58, "code_loc": 29, "docstring_loc": 5, "function_name": "extract_reasoning_streaming", "class_name": "BaseThinkingReasoningParser", "qualname": "BaseThinkingReasoningParser.extract_reasoning_streaming", "file_path": "vllm/reasoning/basic_parsers.py", "repo_id": "vllm-project/vllm", "has_docstring": true, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/unit/test_transform_pyarrow.py:test_align_struct_fields_empty_blocks | # Context:
from ray.data._internal.arrow_ops.transform_pyarrow import (
MIN_PYARROW_VERSION_TYPE_PROMOTION,
_align_struct_fields,
concat,
hash_partition,
shuffle,
try_combine_chunked_columns,
unify_schemas,
)
def test_try_defragment_table(): ...
def test_hash_partitioning(): ...
def test_shuffle(): ...
def test_arrow_concat_empty(simple_concat_data): ...
def test_arrow_concat_single_block(simple_concat_data): ...
def test_arrow_concat_basic(basic_concat_blocks, basic_concat_expected): ...
def test_arrow_concat_null_promotion(null_promotion_blocks, null_promotion_expected): ...
def test_arrow_concat_tensor_extension_uniform(uniform_tensor_blocks, uniform_tensor_expected): ...
def test_arrow_concat_tensor_extension_variable_shaped(variable_shaped_tensor_blocks, variable_shaped_tensor_expected): ...
def test_arrow_concat_tensor_extension_uniform_and_variable_shaped(mixed_tensor_blocks, mixed_tensor_expected): ...
def test_arrow_concat_tensor_extension_uniform_but_different(different_shape_tensor_blocks, different_shape_tensor_expected): ...
def test_arrow_concat_with_objects(object_concat_blocks, object_concat_expected): ...
def test_struct_with_different_field_names(struct_different_field_names_blocks, struct_different_field_names_expected): ...
def test_nested_structs(nested_structs_blocks, nested_structs_expected): ...
def test_struct_with_null_values(struct_null_values_blocks, struct_null_values_expected): ...
def test_struct_with_mismatched_lengths(struct_mismatched_lengths_blocks, struct_mismatched_lengths_expected): ...
def test_struct_with_empty_arrays(struct_empty_arrays_blocks, struct_empty_arrays_expected): ...
def test_struct_with_arrow_variable_shaped_tensor_type(struct_variable_shaped_tensor_blocks, struct_variable_shaped_tensor_expected): ...
def test_struct_with_diverging_primitive_types(): ...
def test_arrow_concat_object_with_tensor_fails(object_with_tensor_fails_blocks): ...
def test_unify_schemas(unify_schemas_basic_schemas, unify_schemas_multicol_schemas): ...
def test_unify_schemas_object_types(unify_schemas_object_types_schemas): ...
def test_unify_schemas_incompatible_tensor_dtypes(unify_schemas_incompatible_tensor_schemas): ...
def test_unify_schemas_objects_and_tensors(unify_schemas_objects_and_tensors_schemas): ...
def test_unify_schemas_missing_tensor_fields(unify_schemas_missing_tensor_fields_schemas): ...
def test_unify_schemas_nested_struct_tensors(unify_schemas_nested_struct_tensors_schemas): ...
def test_unify_schemas_edge_cases(unify_schemas_edge_cases_data): ...
def test_unify_schemas_mixed_tensor_types(unify_schemas_mixed_tensor_data): ...
def test_unify_schemas_type_promotion(unify_schemas_type_promotion_data): ...
def test_arrow_block_select(block_select_data): ...
def test_arrow_block_slice_copy(block_slice_data): ...
def test_arrow_block_slice_copy_empty(block_slice_data): ...
def test_mixed_tensor_types_same_dtype(mixed_tensor_types_same_dtype_blocks, mixed_tensor_types_same_dtype_expected): ...
def test_mixed_tensor_types_fixed_shape_different(mixed_tensor_types_fixed_shape_blocks, mixed_tensor_types_fixed_shape_expected): ...
def test_mixed_tensor_types_variable_shaped(mixed_tensor_types_variable_shaped_blocks, mixed_tensor_types_variable_shaped_expected): ...
def test_mixed_tensor_types_in_struct(struct_with_mixed_tensor_types_blocks, struct_with_mixed_tensor_types_expected): ...
def test_nested_struct_with_mixed_tensor_types(nested_struct_with_mixed_tensor_types_blocks, nested_struct_with_mixed_tensor_types_expected): ...
def test_multiple_tensor_fields_in_struct(multiple_tensor_fields_struct_blocks, multiple_tensor_fields_struct_expected): ...
def test_struct_with_incompatible_tensor_dtypes_fails(): ...
def test_struct_with_additional_fields(struct_with_additional_fields_blocks, struct_with_additional_fields_expected): ...
def test_struct_with_null_tensor_values(struct_with_null_tensor_values_blocks, struct_with_null_tensor_values_expected): ...
def simple_struct_blocks(): ...
def simple_struct_schema(): ...
def nested_struct_blocks(): ...
def nested_struct_schema(): ...
def missing_column_blocks(): ...
def missing_column_schema(): ...
def multiple_struct_blocks(): ...
def multiple_struct_schema(): ...
def mixed_column_blocks(): ...
def mixed_column_schema(): ...
def empty_block_blocks(): ...
def empty_block_schema(): ...
def already_aligned_blocks(): ...
def already_aligned_schema(): ...
def no_struct_blocks(): ...
def no_struct_schema(): ...
def deep_nesting_blocks(): ...
def deep_nesting_schema(): ...
def test_align_struct_fields_simple(simple_struct_blocks, simple_struct_schema): ...
def test_align_struct_fields_nested(nested_struct_blocks, nested_struct_schema): ...
def test_align_struct_fields_missing_column(missing_column_blocks, missing_column_schema): ...
def test_align_struct_fields_multiple_structs(multiple_struct_blocks, multiple_struct_schema): ...
def test_align_struct_fields_non_struct_columns(mixed_column_blocks, mixed_column_schema): ...
def test_align_struct_fields_already_aligned(already_aligned_blocks, already_aligned_schema): ...
def test_align_struct_fields_no_struct_columns(no_struct_blocks, no_struct_schema): ...
def test_align_struct_fields_deep_nesting(deep_nesting_blocks, deep_nesting_schema): ...
def uniform_tensor_blocks(): ...
def uniform_tensor_expected(): ...
def variable_shaped_tensor_blocks(): ...
def variable_shaped_tensor_expected(): ...
def mixed_tensor_blocks(): ...
def mixed_tensor_expected(): ...
def different_shape_tensor_blocks(): ...
def different_shape_tensor_expected(): ...
def mixed_tensor_types_same_dtype_blocks(): ...
def mixed_tensor_types_same_dtype_expected(): ...
def mixed_tensor_types_fixed_shape_blocks(): ...
def mixed_tensor_types_fixed_shape_expected(): ...
def mixed_tensor_types_variable_shaped_blocks(): ...
def mixed_tensor_types_variable_shaped_expected(): ...
def struct_with_mixed_tensor_types_blocks(): ...
def struct_with_mixed_tensor_types_expected(): ...
def nested_struct_with_mixed_tensor_types_blocks(): ...
def nested_struct_with_mixed_tensor_types_expected(): ...
def multiple_tensor_fields_struct_blocks(): ...
def multiple_tensor_fields_struct_expected(): ...
def struct_with_additional_fields_blocks(): ...
def struct_with_additional_fields_expected(): ...
def struct_with_null_tensor_values_blocks(): ...
def struct_with_null_tensor_values_expected(): ...
def basic_concat_blocks(): ...
def basic_concat_expected(): ...
def null_promotion_blocks(): ...
def null_promotion_expected(): ...
def struct_different_field_names_blocks(): ...
def struct_different_field_names_expected(): ...
def nested_structs_blocks(): ...
def nested_structs_expected(): ...
def struct_null_values_blocks(): ...
def struct_null_values_expected(): ...
def struct_mismatched_lengths_blocks(): ...
def struct_mismatched_lengths_expected(): ...
def struct_empty_arrays_blocks(): ...
def struct_empty_arrays_expected(): ...
def unify_schemas_basic_schemas(): ...
def unify_schemas_multicol_schemas(): ...
def object_concat_blocks(): ...
def object_concat_expected(): ...
def struct_variable_shaped_tensor_blocks(): ...
def struct_variable_shaped_tensor_expected(): ...
def unify_schemas_object_types_schemas(): ...
def unify_schemas_incompatible_tensor_schemas(): ...
def unify_schemas_objects_and_tensors_schemas(): ...
def unify_schemas_missing_tensor_fields_schemas(): ...
def unify_schemas_nested_struct_tensors_schemas(): ...
def object_with_tensor_fails_blocks(): ...
def simple_concat_data(): ...
def _create_tensor_array(data, tensor_type): ...
def _create_expected_result(schema, length, **kwargs): ...
def _create_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, id_data1, id_data2, column_name): ...
def _create_struct_tensor_blocks(tensor_data1, tensor_data2, tensor_type1, tensor_type2, value_data1, value_data2, extra_data2, struct_name, id_data1, id_data2): ...
def _create_tensor_schema(tensor_type, dtype, ndim, include_id, struct_name, include_extra): ...
def _create_basic_struct_blocks(struct_data1, struct_data2, column_name, id_data1, id_data2, other_columns): ...
def _create_struct_schema(struct_fields, include_id, other_fields): ...
def _create_struct_blocks_with_columns(struct_data1, struct_data2, struct_type1, struct_type2, additional_columns1, additional_columns2, struct_column): ...
def _create_struct_expected_result(schema, length, content): ...
def _create_simple_struct_blocks(struct_data1, struct_data2, field_names, field_types, additional_columns1, additional_columns2, struct_column): ...
def _create_simple_struct_schema(field_names, field_types, additional_fields): ...
def unify_schemas_edge_cases_data(): ...
def unify_schemas_mixed_tensor_data(): ...
def unify_schemas_type_promotion_data(): ...
def block_select_data(): ...
def block_slice_data(): ...
# Task:
Write a Python test function `test_align_struct_fields_empty_blocks` to test alignment with empty blocks.
Module under test: typing, ray.data._internal.arrow_ops.transform_pyarrow, ray.data._internal.tensor_extensions.arrow | def test_align_struct_fields_empty_blocks(empty_block_blocks, empty_block_schema):
"""Test alignment with empty blocks."""
t1, t2 = empty_block_blocks
aligned_blocks = _align_struct_fields([t1, t2], empty_block_schema)
assert len(aligned_blocks) == 2
# Check empty block
result1 = aligned_blocks[0]
assert result1.schema == empty_block_schema
assert len(result1) == 0
# Check non-empty block
result2 = aligned_blocks[1]
assert result2.schema == empty_block_schema
assert result2["struct"].to_pylist() == [
{"a": 1, "b": None, "c": True},
{"a": 2, "b": None, "c": False},
] | test | 0 | {"function_name": "test_align_struct_fields_empty_blocks", "class_name": null, "qualname": "test_align_struct_fields_empty_blocks", "file_path": "python/ray/data/tests/unit/test_transform_pyarrow.py", "repo_id": "ray-project/ray", "loc": 20, "tested_modules": ["typing", "ray.data._internal.arrow_ops.transform_pyarrow", "ray.data._internal.tensor_extensions.arrow", "ray.data._internal.utils.arrow_utils", "ray.data.block"], "has_docstring": true, "runnable_level": "file_runnable"} |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py:CloudflareAIGateway._get_aclient | # Context:
import httpx
class CloudflareAIGatewayError(Exception): ...
class CloudflareAIGatewayUnauthorizedError(CloudflareAIGatewayError): ...
class CloudflareAIGatewayDoesNotExistError(CloudflareAIGatewayError): ...
class CloudflareAIGatewayOptions(BaseModel): ...
class AIGatewayClientWrapper: ...
class CloudflareAIGateway(LLM):
def __init__(
self,
llms: List[LLM],
account_id: Optional[str] = None,
gateway: Optional[str] = None,
api_key: Optional[str] = None,
binding: Optional[Any] = None,
options: Optional[CloudflareAIGatewayOptions] = None,
max_retries: int = 3,
timeout: float = 60.0,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
**kwargs: Any,
) -> None:
# Validate configuration
if not llms:
raise ValueError("At least one LLM must be provided")
if binding is None:
if not account_id or not gateway:
raise ValueError(
"Either binding or account_id+gateway must be provided"
)
if not api_key:
raise ValueError("api_key is required when not using binding")
super().__init__(
llms=llms,
account_id=account_id,
gateway=gateway,
api_key=api_key,
binding=binding,
options=options,
max_retries=max_retries,
timeout=timeout,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
async_http_client=async_http_client,
**kwargs,
)
self._client = http_client
self._aclient = async_http_client
# Inject AI Gateway client into each LLM
self._inject_ai_gateway_clients()
def _inject_ai_gateway_clients(self) -> None: ...
def _get_client(self) -> httpx.Client: ...
def _parse_options_to_headers(self, options: Optional[CloudflareAIGatewayOptions]) -> Dict[str, str]: ...
def _get_current_llm(self) -> LLM: ...
def _try_next_llm(self) -> None: ...
def _make_ai_gateway_request(self, request_body: Dict[str, Any]) -> httpx.Response: ...
def _handle_ai_gateway_response(self, response: httpx.Response) -> None: ...
def class_name(cls) -> str: ...
def metadata(self) -> LLMMetadata: ...
def chat(self, messages: Sequence[ChatMessage], **kwargs) -> ChatResponse: ...
def stream_chat(self, messages: Sequence[ChatMessage], **kwargs) -> ChatResponseGen: ...
def complete(self, prompt: str, formatted: bool, **kwargs) -> CompletionResponse: ...
def stream_complete(self, prompt: str, formatted: bool, **kwargs) -> CompletionResponseGen: ...
async def achat(self, messages: Sequence[ChatMessage], **kwargs) -> ChatResponse: ...
async def astream_chat(self, messages: Sequence[ChatMessage], **kwargs) -> ChatResponseAsyncGen: ...
async def acomplete(self, prompt: str, formatted: bool, **kwargs) -> CompletionResponse: ...
async def astream_complete(self, prompt: str, formatted: bool, **kwargs) -> CompletionResponseAsyncGen: ...
# Task:
Write a Python method `_get_aclient` for the class `CloudflareAIGateway` to get async HTTP client.
Returns: httpx.AsyncClient | def _get_aclient(self) -> httpx.AsyncClient:
"""Get async HTTP client."""
if self._aclient is None:
self._aclient = httpx.AsyncClient(
timeout=self.timeout,
headers=self.default_headers,
)
return self._aclient | function_simple | 1 | {"cognitive_complexity": 1, "loc": 8, "code_loc": 6, "docstring_loc": 1, "function_name": "_get_aclient", "class_name": "CloudflareAIGateway", "qualname": "CloudflareAIGateway._get_aclient", "file_path": "llama-index-integrations/llms/llama-index-llms-cloudflare-ai-gateway/llama_index/llms/cloudflare_ai_gateway/base.py", "repo_id": "run-llama/llama_index", "has_docstring": true, "runnable_level": "class_runnable"} |
huggingface/transformers:tests/models/kosmos2_5/test_processor_kosmos2_5.py:Kosmos2_5ProcessorTest.test_full_processor | # Context:
import os
import numpy as np
from transformers.image_utils import load_image
from ...test_processing_common import ProcessorTesterMixin, url_to_local_path
from PIL import Image
from transformers import (
AutoProcessor,
AutoTokenizer,
Kosmos2_5ImageProcessor,
Kosmos2_5Processor,
)
class Kosmos2_5ProcessorTest(ProcessorTesterMixin, unittest.TestCase):
processor_class = Kosmos2_5Processor
images_input_name = "flattened_patches"
model_id = "microsoft/kosmos-2.5"
def test_image_processor_defaults(self): ...
def test_image_procesor_load_save_reload(self): ...
def test_can_load_various_tokenizers(self): ...
def test_model_input_names(self): ...
def test_image_processor_defaults_preserved_by_image_kwargs(self): ...
def test_kwargs_overrides_default_image_processor_kwargs(self): ...
def test_unstructured_kwargs(self): ...
def test_unstructured_kwargs_batched(self): ...
def test_structured_kwargs_nested(self): ...
def test_structured_kwargs_nested_from_dict(self): ...
# Task:
Write a Python test method `test_full_processor` in test class `Kosmos2_5ProcessorTest` to verify the behavior of `full_processor`.
Module under test: tempfile, transformers.image_utils, transformers.testing_utils | def test_full_processor(self):
url = url_to_local_path("https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2.5")
texts = ["<md>", "<ocr>"]
expected_input_ids = [
[100288],
[100282],
]
expected_attention_mask = [[1], [1]]
image = load_image(url)
# To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed
image_path = os.path.join(self.tmpdirname, "image.png")
image.save(image_path)
image = Image.open(image_path)
# test single image
outputs = processor(images=image, text=texts[0])
self.assertListEqual(
outputs.input_ids[0].numpy().tolist(),
[0, 100283] + [0] * 2048 + [100284] + expected_input_ids[0],
)
self.assertListEqual(
outputs.image_embeds_position_mask[0].numpy().tolist(),
[0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[0])),
)
self.assertListEqual(
outputs.attention_mask[0].numpy().tolist(),
[1, 1] + [1] * 2048 + [1] + expected_attention_mask[0],
)
EXPECTED_FP_1 = [
1.0,
2.0,
-2.9527735710144043,
-2.672085762023926,
-2.9933173656463623,
-2.905944585800171,
-2.5891761779785156,
-2.8751866817474365,
-2.962153434753418,
-2.588062047958374,
]
EXPECTED_FP_200 = [
4.0,
45.0,
1.5713728666305542,
1.584628939628601,
1.3589054346084595,
1.6515952348709106,
1.7014952898025513,
1.3731343746185303,
1.6010395288467407,
1.6607422828674316,
]
self.assertTupleEqual(outputs.flattened_patches.shape, (1, 4096, 770))
np.testing.assert_allclose(
outputs.flattened_patches[0][1][:10].numpy().tolist(),
EXPECTED_FP_1,
atol=1e-4,
)
np.testing.assert_allclose(
outputs.flattened_patches[0][200][:10].numpy().tolist(),
EXPECTED_FP_200,
atol=1e-4,
)
# test a batch of images and texts, right padding
outputs = processor(images=[image, image], text=texts)
self.assertListEqual(
outputs.input_ids[1].numpy().tolist(),
[0, 100283] + [0] * 2048 + [100284] + expected_input_ids[1],
)
self.assertListEqual(
outputs.image_embeds_position_mask[1].numpy().tolist(),
[0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[1])),
)
self.assertListEqual(
outputs.attention_mask[1].numpy().tolist(),
[1, 1] + [1] * 2048 + [1] + expected_attention_mask[1],
)
self.assertTupleEqual(outputs.flattened_patches.shape, (2, 4096, 770))
np.testing.assert_allclose(
outputs.flattened_patches[1][1][:10].numpy().tolist(),
EXPECTED_FP_1,
atol=1e-4,
)
np.testing.assert_allclose(
outputs.flattened_patches[1][200][:10].numpy().tolist(),
EXPECTED_FP_200,
atol=1e-4,
) | test | 0 | {"function_name": "test_full_processor", "class_name": "Kosmos2_5ProcessorTest", "qualname": "Kosmos2_5ProcessorTest.test_full_processor", "file_path": "tests/models/kosmos2_5/test_processor_kosmos2_5.py", "repo_id": "huggingface/transformers", "loc": 91, "tested_modules": ["tempfile", "transformers.image_utils", "transformers.testing_utils", "transformers.utils", "test_processing_common"], "has_docstring": false, "runnable_level": "project_runnable"} |
ray-project/ray:python/ray/data/tests/unit/test_fifo_bundle_queue.py:test_fifo_queue_iter | # Context:
from ray.data._internal.execution.bundle_queue import FIFOBundleQueue
def _create_bundle(data: Any) -> RefBundle: ...
def test_fifo_queue_add_and_length(): ...
def test_fifo_queue_get_next_fifo_order(): ...
def test_fifo_queue_init_with_bundles(): ...
def test_fifo_queue_peek_next(): ...
def test_fifo_queue_peek_next_empty(): ...
def test_fifo_queue_has_next(): ...
def test_fifo_queue_get_next_empty_raises(): ...
def test_fifo_queue_clear(): ...
def test_fifo_queue_metrics(): ...
def test_fifo_queue_to_list(): ...
def test_fifo_queue_finalize_is_noop(): ...
# Task:
Write a Python test function `test_fifo_queue_iter` to test iterating over the queue.
Module under test: typing, uuid, ray.data._internal.execution.bundle_queue | def test_fifo_queue_iter():
"""Test iterating over the queue."""
queue = FIFOBundleQueue()
bundle1 = _create_bundle("data1")
bundle2 = _create_bundle("data11")
bundle3 = _create_bundle("data111")
queue.add(bundle1)
queue.add(bundle2)
queue.add(bundle3)
# Iterate without consuming
bundles = list(queue)
assert bundles == [bundle1, bundle2, bundle3]
assert len(queue) == 3 # Queue unchanged | test | 0 | {"function_name": "test_fifo_queue_iter", "class_name": null, "qualname": "test_fifo_queue_iter", "file_path": "python/ray/data/tests/unit/test_fifo_bundle_queue.py", "repo_id": "ray-project/ray", "loc": 15, "tested_modules": ["typing", "uuid", "ray.data._internal.execution.bundle_queue", "ray.data._internal.execution.interfaces", "ray.data.block"], "has_docstring": true, "runnable_level": "file_runnable"} |
langflow-ai/langflow:src/backend/tests/unit/groq/test_groq_integration.py:TestGroqModelBackwardCompatibility.test_model_name_input_has_default_options | # Context:
from lfx.base.models.groq_constants import GROQ_MODELS
class TestGroqModelIntegration: ...
class TestGroqModelEdgeCases: ...
class TestGroqModelBackwardCompatibility:
def groq_model_instance(self): ...
def test_groq_models_constant_available(self): ...
def test_fallback_to_groq_models_on_error(self, groq_model_instance, mock_api_key): ...
# Task:
Write a Python test method `test_model_name_input_has_default_options` in test class `TestGroqModelBackwardCompatibility` to test that model_name input has default options from GROQ_MODELS.
Module under test: lfx.components.groq.groq, lfx.components.groq.groq, lfx.components.groq.groq | def test_model_name_input_has_default_options(self, groq_model_instance):
"""Test that model_name input has default options from GROQ_MODELS."""
from lfx.base.models.groq_constants import GROQ_MODELS
model_name_input = next(inp for inp in groq_model_instance.inputs if inp.name == "model_name")
assert model_name_input.options == GROQ_MODELS
assert model_name_input.value == GROQ_MODELS[0] | test | 1 | {"function_name": "test_model_name_input_has_default_options", "class_name": "TestGroqModelBackwardCompatibility", "qualname": "TestGroqModelBackwardCompatibility.test_model_name_input_has_default_options", "file_path": "src/backend/tests/unit/groq/test_groq_integration.py", "repo_id": "langflow-ai/langflow", "loc": 8, "tested_modules": ["lfx.components.groq.groq", "lfx.components.groq.groq", "lfx.components.groq.groq", "lfx.base.models.groq_constants", "lfx.base.models.groq_constants"], "has_docstring": true, "runnable_level": "project_runnable"} |
Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/podcast_router.py:get_podcast | # Context:
from fastapi import APIRouter, HTTPException, File, UploadFile, Body, Query, Path
from models.podcast_schemas import Podcast, PodcastDetail, PodcastCreate, PodcastUpdate, PaginatedPodcasts
from services.podcast_service import podcast_service
async def get_podcasts(page: int, per_page: int, search: Optional[str], date_from: Optional[str], date_to: Optional[str], language_code: Optional[str], tts_engine: Optional[str], has_audio: Optional[bool]): ...
async def get_podcast_formats(): ...
async def get_language_codes(): ...
async def get_tts_engines(): ...
async def get_podcast_by_identifier(identifier: str): ...
async def create_podcast(podcast_data: PodcastCreate): ...
async def update_podcast(podcast_id: int, podcast_data: PodcastUpdate): ...
async def delete_podcast(podcast_id: int): ...
async def upload_audio(podcast_id: int, file: UploadFile): ...
async def upload_banner(podcast_id: int, file: UploadFile): ...
async def get_audio_file(filename: str): ...
# Task:
Write a Python async function `get_podcast` to get detailed information about a specific podcast.
Parameters: podcast_id: int | async def get_podcast(podcast_id: int = Path(..., description="The ID of the podcast to retrieve")):
"""
Get detailed information about a specific podcast.
Parameters:
- **podcast_id**: The ID of the podcast to retrieve
Returns the podcast metadata and content.
"""
podcast = await podcast_service.get_podcast(podcast_id)
content = await podcast_service.get_podcast_content(podcast_id)
audio_url = await podcast_service.get_podcast_audio_url(podcast)
sources = podcast.get("sources", [])
if "sources" in podcast:
del podcast["sources"]
banner_images = podcast.get("banner_images", [])
if "banner_images" in podcast:
del podcast["banner_images"]
return {"podcast": podcast, "content": content, "audio_url": audio_url, "sources": sources, "banner_images": banner_images} | function_simple | 0 | {"cognitive_complexity": 2, "loc": 19, "code_loc": 10, "docstring_loc": 8, "function_name": "get_podcast", "class_name": null, "qualname": "get_podcast", "file_path": "advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/routers/podcast_router.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "project_runnable"} |
ocrmypdf/OCRmyPDF:src/ocrmypdf/_validation_coordinator.py:ValidationCoordinator._validate_plugin_contexts | # Context:
from ocrmypdf._options import OcrOptions
class ValidationCoordinator:
def __init__(self, plugin_manager: pluggy.PluginManager):
self.plugin_manager = plugin_manager
self.registry = getattr(plugin_manager, '_option_registry', None)
def validate_all_options(self, options: OcrOptions) -> None: ...
def _validate_tesseract_options(self, options: OcrOptions) -> None: ...
def _validate_optimize_options(self, options: OcrOptions) -> None: ...
def _validate_cross_cutting_concerns(self, options: OcrOptions) -> None: ...
def _handle_deprecated_pdf_renderer(self, options: OcrOptions) -> None: ...
# Task:
Write a Python method `_validate_plugin_contexts` for the class `ValidationCoordinator` to validate plugin options that require external context.
Parameters: options: OcrOptions
Returns: None | def _validate_plugin_contexts(self, options: OcrOptions) -> None:
"""Validate plugin options that require external context."""
# For now, we'll run the plugin validation directly since the models
# are still being integrated. This ensures the validation warnings
# and checks still work as expected.
# Run Tesseract validation
self._validate_tesseract_options(options)
# Run Optimize validation
self._validate_optimize_options(options) | function_simple | 1 | {"cognitive_complexity": 0, "loc": 11, "code_loc": 2, "docstring_loc": 1, "function_name": "_validate_plugin_contexts", "class_name": "ValidationCoordinator", "qualname": "ValidationCoordinator._validate_plugin_contexts", "file_path": "src/ocrmypdf/_validation_coordinator.py", "repo_id": "ocrmypdf/OCRmyPDF", "has_docstring": true, "runnable_level": "project_runnable"} |
google/langextract:langextract/providers/patterns.py:module_doc | Write a module-level docstring for the Python module `patterns` which contains various utilities. | Centralized pattern definitions for built-in providers.
This module defines all patterns and priorities for built-in providers
in one place to avoid duplication. | documentation | 1 | {"doc_type": "module", "module_name": "patterns", "file_path": "langextract/providers/patterns.py", "repo_id": "google/langextract", "char_length": 162} |
ray-project/ray:python/ray/serve/tests/test_task_processor.py:TestTaskConsumerWithRayServe.test_task_consumer_persistence_across_restarts | # Context:
import ray
from ray import serve
from ray._common.test_utils import SignalActor, wait_for_condition
from ray.serve.task_consumer import (
instantiate_adapter_from_config,
task_consumer,
task_handler,
)
class ProcessedTasksTracker: ...
def send_request_to_queue(processor_config: TaskProcessorConfig, data, task_name): ...
def temp_queue_directory(): ...
def transport_options(temp_queue_directory): ...
def create_processor_config(temp_queue_directory, transport_options): ...
def _get_task_counts_by_routing_key(queue_path): ...
class TestTaskConsumerWithDLQsConfiguration: ...
class TestTaskConsumerWithRayServe:
def test_task_consumer_as_serve_deployment(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_as_serve_deployment_with_failed_task(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_as_serve_deployment_with_async_task_handler(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_metrics(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_health_check(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_processor_with_cancel_tasks_and_app_custom_config(self, external_redis, serve_instance): ...
def test_task_consumer_with_task_custom_config(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_failed_task_queue_consumption(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_multiple_task_consumers_in_single_app(self, temp_queue_directory, serve_instance, create_processor_config): ...
def test_task_consumer_with_one_queue_and_multiple_different_tasks(self, temp_queue_directory, serve_instance, create_processor_config): ...
# Task:
Write a Python test method `test_task_consumer_persistence_across_restarts` in test class `TestTaskConsumerWithRayServe` to test that tasks persist in queue and get executed after deployment restart.
Module under test: collections, pathlib, ray | def test_task_consumer_persistence_across_restarts(
self, temp_queue_directory, serve_instance, create_processor_config
):
"""Test that tasks persist in queue and get executed after deployment restart."""
# Setup
config = create_processor_config()
tracker = ProcessedTasksTracker.remote()
signal1 = SignalActor.remote()
@serve.deployment(
num_replicas=1, graceful_shutdown_timeout_s=60, max_ongoing_requests=1
)
@task_consumer(task_processor_config=config)
class TaskConsumer:
def __init__(self, tracker_ref, signal_ref):
self.tracker, self.signal = tracker_ref, signal_ref
self.local_processed = []
@task_handler(name="process_request")
def process_request(self, data):
ray.get(self.signal.wait.remote()) # Block until signal
self.local_processed.append(data)
ray.get(self.tracker.add_task.remote(data))
return f"Processed: {data}"
def get_local_processed(self):
return self.local_processed
# Deploy first version and send tasks
serve.run(TaskConsumer.bind(tracker, signal1), name="app_v1")
num_tasks = 20
for i in range(num_tasks):
ray.get(send_request_to_queue.remote(config, f"task_{i}"))
# Process exactly 1 task, then restart deployment
wait_for_condition(
lambda: ray.get(signal1.cur_num_waiters.remote()) == 1, timeout=10
)
ray.get(signal1.send.remote(clear=True)) # Allow 1 task to complete
wait_for_condition(lambda: ray.get(tracker.get_count.remote()) == 1, timeout=10)
# Shutdown first deployment
serve.delete("app_v1", _blocking=False)
ray.get(signal1.send.remote()) # Release any stuck tasks
wait_for_condition(
lambda: "app_v1" not in serve.status().applications, timeout=100
)
tasks_before_restart = ray.get(tracker.get_count.remote())
assert (
tasks_before_restart >= 2 and tasks_before_restart < num_tasks
), f"Expected at least 2 tasks processed and atleast one less than num_tasks, got {tasks_before_restart}"
# Deploy second version and process remaining tasks
signal2 = SignalActor.remote()
handle = serve.run(TaskConsumer.bind(tracker, signal2), name="app_v2")
wait_for_condition(
lambda: ray.get(signal2.cur_num_waiters.remote()) == 1, timeout=10
)
ray.get(signal2.send.remote()) # Process all remaining tasks
wait_for_condition(
lambda: ray.get(tracker.get_count.remote()) == num_tasks, timeout=100
)
# Verify all tasks were processed and distributed correctly
expected_tasks = {f"task_{i}" for i in range(num_tasks)}
final_tasks = ray.get(tracker.get_processed_tasks.remote())
second_deployment_tasks = handle.get_local_processed.remote().result()
assert (
final_tasks == expected_tasks
), f"Missing tasks: {expected_tasks - final_tasks}"
assert (
len(second_deployment_tasks) == num_tasks - tasks_before_restart
), f"Second deployment processed {len(second_deployment_tasks)} tasks, expected {num_tasks - tasks_before_restart}" | test | 0 | {"function_name": "test_task_consumer_persistence_across_restarts", "class_name": "TestTaskConsumerWithRayServe", "qualname": "TestTaskConsumerWithRayServe.test_task_consumer_persistence_across_restarts", "file_path": "python/ray/serve/tests/test_task_processor.py", "repo_id": "ray-project/ray", "loc": 77, "tested_modules": ["collections", "pathlib", "ray", "ray._common.test_utils", "ray.serve.schema"], "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:tests/quantization/mxfp4/test_mxfp4.py:Mxfp4ConfigTest.test_config_with_modules_to_not_convert | # Context:
from transformers import AutoTokenizer, GptOssForCausalLM, Mxfp4Config
def _empty_accelerator_cache(): ...
def _patch_no_accelerator(): ...
class Mxfp4QuantizerTest(unittest.TestCase): ...
class Mxfp4IntegrationTest(unittest.TestCase): ...
class Mxfp4ModelTest(unittest.TestCase): ...
class Mxfp4ConfigTest(unittest.TestCase):
def test_basic_config_creation(self): ...
def test_config_with_dequantize(self): ...
def test_get_loading_attributes(self): ...
def test_to_dict(self): ...
def test_from_dict(self): ...
# Task:
Write a Python test method `test_config_with_modules_to_not_convert` in test class `Mxfp4ConfigTest` to test configuration with modules to not convert.
Module under test: contextlib, transformers, transformers.testing_utils | def test_config_with_modules_to_not_convert(self):
"""Test configuration with modules to not convert"""
modules = ["model.layers.*.self_attn", "lm_head"]
config = Mxfp4Config(modules_to_not_convert=modules)
self.assertEqual(config.modules_to_not_convert, modules) | test | 0 | {"function_name": "test_config_with_modules_to_not_convert", "class_name": "Mxfp4ConfigTest", "qualname": "Mxfp4ConfigTest.test_config_with_modules_to_not_convert", "file_path": "tests/quantization/mxfp4/test_mxfp4.py", "repo_id": "huggingface/transformers", "loc": 5, "tested_modules": ["contextlib", "transformers", "transformers.testing_utils", "transformers.utils", "transformers.quantizers.quantizer_mxfp4"], "has_docstring": true, "runnable_level": "class_runnable"} |
harry0703/MoneyPrinterTurbo:test/services/test_task.py:TestTaskService.test_task_local_materials | # Context:
import os
from app.services import task as tm
from app.models.schema import MaterialInfo, VideoParams
class TestTaskService(unittest.TestCase):
def setUp(self): ...
def tearDown(self): ...
# Task:
Write a Python test method `test_task_local_materials` in test class `TestTaskService` to verify the behavior of `task_local_materials`.
Module under test: pathlib, app.services, app.models.schema | def test_task_local_materials(self):
task_id = "00000000-0000-0000-0000-000000000000"
video_materials=[]
for i in range(1, 4):
video_materials.append(MaterialInfo(
provider="local",
url=os.path.join(resources_dir, f"{i}.png"),
duration=0
))
params = VideoParams(
video_subject="金钱的作用",
video_script="金钱不仅是交换媒介,更是社会资源的分配工具。它能满足基本生存需求,如食物和住房,也能提供教育、医疗等提升生活品质的机会。拥有足够的金钱意味着更多选择权,比如职业自由或创业可能。但金钱的作用也有边界,它无法直接购买幸福、健康或真诚的人际关系。过度追逐财富可能导致价值观扭曲,忽视精神层面的需求。理想的状态是理性看待金钱,将其作为实现目标的工具而非终极目的。",
video_terms="money importance, wealth and society, financial freedom, money and happiness, role of money",
video_aspect="9:16",
video_concat_mode="random",
video_transition_mode="None",
video_clip_duration=3,
video_count=1,
video_source="local",
video_materials=video_materials,
video_language="",
voice_name="zh-CN-XiaoxiaoNeural-Female",
voice_volume=1.0,
voice_rate=1.0,
bgm_type="random",
bgm_file="",
bgm_volume=0.2,
subtitle_enabled=True,
subtitle_position="bottom",
custom_position=70.0,
font_name="MicrosoftYaHeiBold.ttc",
text_fore_color="#FFFFFF",
text_background_color=True,
font_size=60,
stroke_color="#000000",
stroke_width=1.5,
n_threads=2,
paragraph_number=1
)
result = tm.start(task_id=task_id, params=params)
print(result) | test | 0 | {"function_name": "test_task_local_materials", "class_name": "TestTaskService", "qualname": "TestTaskService.test_task_local_materials", "file_path": "test/services/test_task.py", "repo_id": "harry0703/MoneyPrinterTurbo", "loc": 42, "tested_modules": ["pathlib", "app.services", "app.models.schema"], "has_docstring": false, "runnable_level": "project_runnable"} |
Shubhamsaboo/awesome-llm-apps:ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py:get_weather | # Context:
import random
def get_time() -> str: ...
def calculate_tip(bill_amount: float, tip_percentage: float) -> str: ...
class WorkflowCallbacks(SingleAgentWorkflowCallbacks): ...
async def main(): ...
def demo_with_examples(): ...
# Task:
Write a Python function `get_weather` to get the weather for a given city.
Parameters: city: str
Returns: str | def get_weather(city: str) -> str:
"""Get the weather for a given city."""
print(f"[debug] get_weather called with city: {city}")
choices = ["sunny", "cloudy", "rainy", "snowy"]
return f"The weather in {city} is {random.choice(choices)}." | function_simple | 0 | {"cognitive_complexity": 0, "loc": 5, "code_loc": 3, "docstring_loc": 1, "function_name": "get_weather", "class_name": null, "qualname": "get_weather", "file_path": "ai_agent_framework_crash_course/openai_sdk_crash_course/11_voice/static/agent.py", "repo_id": "Shubhamsaboo/awesome-llm-apps", "has_docstring": true, "runnable_level": "slib_runnable"} |
crewAIInc/crewAI:lib/crewai/src/crewai/crews/utils.py:setup_agents | # Context:
from collections.abc import Callable, Coroutine, Iterable, Mapping
from typing import TYPE_CHECKING, Any
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.crew import Crew
def enable_agent_streaming(agents: Iterable[BaseAgent]) -> None: ...
class TaskExecutionData: ...
def prepare_task_execution(crew: Crew, task: Any, task_index: int, start_index: int | None, task_outputs: list[Any], last_sync_output: Any | None) -> tuple[TaskExecutionData, list[Any], Any | None]: ...
def check_conditional_skip(crew: Crew, task: Any, task_outputs: list[Any], task_index: int, was_replayed: bool) -> Any | None: ...
def _extract_files_from_inputs(inputs: dict[str, Any]) -> dict[str, Any]: ...
def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None, input_files: dict[str, FileInput] | None) -> dict[str, Any] | None: ...
class StreamingContext: ...
class ForEachStreamingContext: ...
async def run_for_each_async(crew: Crew, inputs: list[dict[str, Any]], kickoff_fn: Callable[[Crew, dict[str, Any]], Coroutine[Any, Any, CrewOutput | CrewStreamingOutput]]) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput: ...
# Task:
Write a Python function `setup_agents` to set up agents for crew execution.
Parameters: crew: Crew, agents: Iterable[BaseAgent], embedder: EmbedderConfig | None, function_calling_llm: Any, step_callback: Callable[..., Any] | None
Returns: None | def setup_agents(
crew: Crew,
agents: Iterable[BaseAgent],
embedder: EmbedderConfig | None,
function_calling_llm: Any,
step_callback: Callable[..., Any] | None,
) -> None:
"""Set up agents for crew execution.
Args:
crew: The crew instance agents belong to.
agents: Iterable of agents to set up.
embedder: Embedder configuration for knowledge.
function_calling_llm: Default function calling LLM for agents.
step_callback: Default step callback for agents.
"""
for agent in agents:
agent.crew = crew
agent.set_knowledge(crew_embedder=embedder)
if not agent.function_calling_llm: # type: ignore[attr-defined]
agent.function_calling_llm = function_calling_llm # type: ignore[attr-defined]
if not agent.step_callback: # type: ignore[attr-defined]
agent.step_callback = step_callback # type: ignore[attr-defined]
agent.create_agent_executor() | function_simple | 0 | {"cognitive_complexity": 5, "loc": 24, "code_loc": 8, "docstring_loc": 9, "function_name": "setup_agents", "class_name": null, "qualname": "setup_agents", "file_path": "lib/crewai/src/crewai/crews/utils.py", "repo_id": "crewAIInc/crewAI", "has_docstring": true, "runnable_level": "project_runnable"} |
infiniflow/ragflow:api/apps/services/canvas_replica_service.py:CanvasReplicaService.create_if_absent | # Context:
from api.db import CanvasCategory
class CanvasReplicaService:
TTL_SECS = 3 * 60 * 60
REPLICA_KEY_PREFIX = "canvas:replica"
LOCK_KEY_PREFIX = "canvas:replica:lock"
LOCK_TIMEOUT_SECS = 10
LOCK_BLOCKING_TIMEOUT_SECS = 1
LOCK_RETRY_ATTEMPTS = 3
LOCK_RETRY_SLEEP_SECS = 0.2
def normalize_dsl(cls, dsl): ...
def _replica_key(cls, canvas_id: str, tenant_id: str, runtime_user_id: str) -> str: ...
def _lock_key(cls, canvas_id: str, tenant_id: str, runtime_user_id: str) -> str: ...
def _read_payload(cls, replica_key: str): ...
def _write_payload(cls, replica_key: str, payload: dict): ...
def _build_payload(cls, canvas_id: str, tenant_id: str, runtime_user_id: str, dsl, canvas_category, title): ...
def bootstrap(cls, canvas_id: str, tenant_id: str, runtime_user_id: str, dsl, canvas_category, title): ...
def load_for_run(cls, canvas_id: str, tenant_id: str, runtime_user_id: str): ...
def replace_for_set(cls, canvas_id: str, tenant_id: str, runtime_user_id: str, dsl, canvas_category, title): ...
def _acquire_lock_with_retry(cls, lock_key: str): ...
def commit_after_run(cls, canvas_id: str, tenant_id: str, runtime_user_id: str, dsl, canvas_category, title): ...
# Task:
Write a Python method `create_if_absent` for the class `CanvasReplicaService` to create a runtime replica if it does not exist; otherwise keep existing state.
Parameters: canvas_id: str, tenant_id: str, runtime_user_id: str, dsl, canvas_category, title | def create_if_absent(
cls,
canvas_id: str,
tenant_id: str,
runtime_user_id: str,
dsl,
canvas_category=CanvasCategory.Agent,
title="",
):
"""Create a runtime replica if it does not exist; otherwise keep existing state."""
replica_key = cls._replica_key(canvas_id, str(tenant_id), str(runtime_user_id))
payload = cls._read_payload(replica_key)
if payload:
return payload
payload = cls._build_payload(canvas_id, str(tenant_id), str(runtime_user_id), dsl, canvas_category, title)
cls._write_payload(replica_key, payload)
return payload | function_simple | 1 | {"cognitive_complexity": 1, "loc": 17, "code_loc": 7, "docstring_loc": 1, "function_name": "create_if_absent", "class_name": "CanvasReplicaService", "qualname": "CanvasReplicaService.create_if_absent", "file_path": "api/apps/services/canvas_replica_service.py", "repo_id": "infiniflow/ragflow", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/transformers:src/transformers/utils/output_capturing.py:install_all_output_capturing_hooks | # Context:
from ..modeling_utils import PreTrainedModel
class OutputRecorder: ...
class CompileableContextVar: ...
def install_output_capuring_hook(module: nn.Module, key: str, index: int) -> None: ...
def recursively_install_hooks(parent_module: nn.Module, module_name: str, capture_tasks: list[tuple[str, OutputRecorder]]) -> None: ...
def maybe_install_capturing_hooks(model: PreTrainedModel) -> None: ...
def capture_outputs(func, tie_last_hidden_states): ...
# Task:
Write a Python function `install_all_output_capturing_hooks` to install the output recording hooks on all the modules in `model`. This will take care of correctly dispatching.
Parameters: model: PreTrainedModel, prefix: str | None
Returns: None | def install_all_output_capturing_hooks(model: PreTrainedModel, prefix: str | None = None) -> None:
"""
    Install the output recording hooks on all the modules in `model`. This will take care of correctly dispatching
the `_can_record_outputs` property of each individual submodels in case of composite models.
"""
# _can_record_outputs is None by default
capture_flags = _CAN_RECORD_REGISTRY.get(str(model.__class__)) or {} # there is a weak ref for executorch
capture_tasks = []
for key, layer_specs in capture_flags.items():
if not isinstance(layer_specs, list):
layer_specs = [layer_specs]
for specs in layer_specs:
if not isinstance(specs, OutputRecorder):
index = 0 if "hidden_states" in key else 1
class_name = None if not isinstance(specs, str) else specs
target_class = specs if not isinstance(specs, str) else None
specs = OutputRecorder(target_class=target_class, index=index, class_name=class_name)
capture_tasks.append((key, specs))
# Install the hooks
prefix = prefix if prefix is not None else ""
recursively_install_hooks(model, prefix, capture_tasks)
# Mark the model as already hooked
setattr(model, "_output_capturing_hooks_installed", True) | function_complex | 0 | {"cognitive_complexity": 22, "loc": 25, "code_loc": 15, "docstring_loc": 4, "function_name": "install_all_output_capturing_hooks", "class_name": null, "qualname": "install_all_output_capturing_hooks", "file_path": "src/transformers/utils/output_capturing.py", "repo_id": "huggingface/transformers", "has_docstring": true, "runnable_level": "project_runnable"} |
huggingface/diffusers:src/diffusers/modular_pipelines/qwenimage/before_denoise.py:QwenImageSetTimestepsStep:class_doc | Write a class-level docstring for `QwenImageSetTimestepsStep` (inherits from ModularPipelineBlocks) which has methods: `description`, `expected_components`, `inputs`, `intermediate_outputs`, `__call__`. | Step that sets the scheduler's timesteps for text-to-image generation. Should be run after prepare latents step.
Components:
scheduler (`FlowMatchEulerDiscreteScheduler`)
Inputs:
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps.
sigmas (`list`, *optional*):
Custom sigmas for the denoising process.
latents (`Tensor`):
The initial random noised latents for the denoising process. Can be generated in prepare latents step.
Outputs:
timesteps (`Tensor`):
The timesteps to use for the denoising process | documentation | 1 | {"doc_type": "class", "class_name": "QwenImageSetTimestepsStep", "file_path": "src/diffusers/modular_pipelines/qwenimage/before_denoise.py", "repo_id": "huggingface/diffusers", "char_length": 616, "methods": ["description", "expected_components", "inputs", "intermediate_outputs", "__call__"]} |
psf/black:tests/test_concurrency_manager_shutdown.py:test_manager_shutdown_called_for_diff | # Context:
import asyncio
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Optional
import black.concurrency as concurrency
from black import Mode, WriteBack
from black.report import Report
class FakeManager: ...
# Task:
Write a Python test function `test_manager_shutdown_called_for_diff` to schedule_formatting() creates multiprocessing.Manager() for DIFF/COLOR_DIFF.
Module under test: __future__, concurrent.futures, pathlib | def test_manager_shutdown_called_for_diff(monkeypatch: Any, tmp_path: Path) -> None:
"""
schedule_formatting() creates multiprocessing.Manager() for DIFF/COLOR_DIFF
and must shut it down deterministically.
"""
fake_manager = FakeManager()
monkeypatch.setattr(concurrency, "Manager", lambda: fake_manager)
def fake_format_file_in_place(
src: Path,
fast: bool,
mode: Mode,
write_back: WriteBack,
lock: Optional[object],
) -> bool:
assert lock is not None
return False
monkeypatch.setattr(concurrency, "format_file_in_place", fake_format_file_in_place)
src = tmp_path / "a.py"
src.write_text("x=1\n", encoding="utf8")
async def run() -> None:
loop = asyncio.get_running_loop()
with ThreadPoolExecutor(max_workers=1) as executor:
await concurrency.schedule_formatting(
sources={src},
fast=False,
write_back=WriteBack.DIFF,
mode=Mode(),
report=Report(),
loop=loop,
executor=executor,
no_cache=True,
)
asyncio.run(run())
assert fake_manager.shutdown_called is True | test | 1 | {"function_name": "test_manager_shutdown_called_for_diff", "class_name": null, "qualname": "test_manager_shutdown_called_for_diff", "file_path": "tests/test_concurrency_manager_shutdown.py", "repo_id": "psf/black", "loc": 41, "tested_modules": ["__future__", "concurrent.futures", "pathlib", "typing", "black"], "has_docstring": true, "runnable_level": "project_runnable"} |
exo-explore/exo:src/exo/worker/engines/image/models/flux/kontext_adapter.py:FluxKontextModelAdapter.create_latents | # Context:
import mlx.core as mx
from mflux.models.common.config.config import Config
from mflux.models.flux.latent_creator.flux_latent_creator import FluxLatentCreator
class FluxKontextPromptData(PromptData): ...
class FluxKontextModelAdapter(ModelAdapter[Flux1Kontext, Transformer]):
def __init__(
self,
config: ImageModelConfig,
model_id: str,
local_path: Path,
quantize: int | None = None,
):
self._config = config
self._model = Flux1Kontext(
model_config=ModelConfig.from_name(model_name=model_id, base_model=None),
model_path=str(local_path),
quantize=quantize,
)
self._transformer = self._model.transformer
# Stores image path and computed dimensions after set_image_dimensions
self._image_path: str | None = None
self._output_height: int | None = None
self._output_width: int | None = None
def hidden_dim(self) -> int: ...
def needs_cfg(self) -> bool: ...
def _get_latent_creator(self) -> type: ...
def get_joint_block_wrappers(self, text_seq_len: int, encoder_hidden_states_mask: mx.array | None) -> list[JointBlockWrapper[Any]]: ...
def get_single_block_wrappers(self, text_seq_len: int) -> list[SingleBlockWrapper[Any]]: ...
def slice_transformer_blocks(self, start_layer: int, end_layer: int): ...
def set_image_dimensions(self, image_path: Path) -> tuple[int, int]: ...
def encode_prompt(self, prompt: str, negative_prompt: str | None) -> FluxKontextPromptData: ...
def compute_embeddings(self, hidden_states: mx.array, prompt_embeds: mx.array) -> tuple[mx.array, mx.array]: ...
def compute_text_embeddings(self, t: int, runtime_config: Config, pooled_prompt_embeds: mx.array | None, hidden_states: mx.array | None) -> mx.array: ...
def compute_rotary_embeddings(self, prompt_embeds: mx.array, runtime_config: Config, encoder_hidden_states_mask: mx.array | None, cond_image_grid: tuple[int, int, int] | list[tuple[int, int, int]] | None, kontext_image_ids: mx.array | None) -> RotaryEmbeddings: ...
def apply_guidance(self, noise_positive: mx.array, noise_negative: mx.array, guidance_scale: float) -> mx.array: ...
# Task:
Write a Python method `create_latents` for the class `FluxKontextModelAdapter` to create initial noise latents for Kontext.
Parameters: seed: int, runtime_config: Config
Returns: mx.array | def create_latents(self, seed: int, runtime_config: Config) -> mx.array:
"""Create initial noise latents for Kontext.
Unlike standard img2img which blends noise with encoded input,
Kontext uses pure noise latents. The input image is provided
separately as conditioning.
"""
return FluxLatentCreator.create_noise(
seed=seed,
height=runtime_config.height,
width=runtime_config.width,
) | function_simple | 0 | {"cognitive_complexity": 0, "loc": 12, "code_loc": 5, "docstring_loc": 6, "function_name": "create_latents", "class_name": "FluxKontextModelAdapter", "qualname": "FluxKontextModelAdapter.create_latents", "file_path": "src/exo/worker/engines/image/models/flux/kontext_adapter.py", "repo_id": "exo-explore/exo", "has_docstring": true, "runnable_level": "project_runnable"} |
666ghj/BettaFish:SentimentAnalysisModel/WeiboSentiment_SmallQwen/qwen3_lora_universal.py:Qwen3LoRAUniversal.train | # Context:
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
TrainingArguments,
Trainer,
DataCollatorForLanguageModeling
)
from typing import List, Tuple
def main(): ...
class Qwen3LoRAUniversal(BaseQwenModel):
def __init__(self, model_size: str = "0.6B"):
if model_size not in QWEN3_MODELS:
raise ValueError(f"不支持的模型大小: {model_size}")
super().__init__(f"Qwen3-{model_size}-LoRA")
self.model_size = model_size
self.config = QWEN3_MODELS[model_size]
self.model_name_hf = self.config["base_model"]
self.tokenizer = None
self.base_model = None
self.lora_model = None
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def _load_base_model(self): ...
def _create_instruction_data(self, data: List[Tuple[str, int]]) -> Dataset: ...
def _tokenize_function(self, examples): ...
def _setup_lora(self, **kwargs): ...
def _extract_sentiment(self, generated_text: str, instruction: str) -> int: ...
def predict(self, texts: List[str]) -> List[int]: ...
def predict_single(self, text: str) -> Tuple[int, float]: ...
def save_model(self, model_path: str) -> None: ...
def load_model(self, model_path: str) -> None: ...
# Task:
Write a Python method `train` for the class `Qwen3LoRAUniversal` to 训练模型.
Parameters: train_data: List[Tuple[str, int]]
Returns: None | def train(self, train_data: List[Tuple[str, int]], **kwargs) -> None:
"""训练模型"""
print(f"开始训练 Qwen3-{self.model_size}-LoRA 模型...")
# 加载基础模型
self._load_base_model()
# 设置LoRA
self._setup_lora(**kwargs)
# 超参数(使用配置文件的推荐值或用户指定值)
num_epochs = kwargs.get('num_epochs', 3)
batch_size = kwargs.get('batch_size', self.config['recommended_batch_size'] // 2) # LoRA需要更少批大小
learning_rate = kwargs.get('learning_rate', self.config['recommended_lr'] / 2) # LoRA使用更小学习率
output_dir = kwargs.get('output_dir', f'./models/qwen3_lora_{self.model_size.lower()}_checkpoints')
print(f"超参数: epochs={num_epochs}, batch_size={batch_size}, lr={learning_rate}")
# 创建指令格式数据
train_dataset = self._create_instruction_data(train_data)
# 分词
tokenized_dataset = train_dataset.map(
self._tokenize_function,
batched=True,
remove_columns=train_dataset.column_names
)
# 训练参数
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_epochs,
per_device_train_batch_size=batch_size,
gradient_accumulation_steps=2,
learning_rate=learning_rate,
logging_steps=10,
save_steps=100,
save_total_limit=2,
remove_unused_columns=False,
dataloader_drop_last=False,
report_to=None,
)
# 数据整理器
data_collator = DataCollatorForLanguageModeling(
tokenizer=self.tokenizer,
mlm=False,
)
# 创建训练器
trainer = Trainer(
model=self.lora_model,
args=training_args,
train_dataset=tokenized_dataset,
data_collator=data_collator,
tokenizer=self.tokenizer,
)
# 开始训练
print(f"开始LoRA微调...")
trainer.train()
# 保存模型
self.lora_model.save_pretrained(output_dir)
self.tokenizer.save_pretrained(output_dir)
self.model = self.lora_model
self.is_trained = True
print(f"Qwen3-{self.model_size}-LoRA 模型训练完成!") | function_simple | 1 | {"cognitive_complexity": 0, "loc": 69, "code_loc": 45, "docstring_loc": 1, "function_name": "train", "class_name": "Qwen3LoRAUniversal", "qualname": "Qwen3LoRAUniversal.train", "file_path": "SentimentAnalysisModel/WeiboSentiment_SmallQwen/qwen3_lora_universal.py", "repo_id": "666ghj/BettaFish", "has_docstring": true, "runnable_level": "class_runnable"} |
gradio-app/gradio:gradio/mcp.py:GradioMCPServer.get_image | # Context:
import os
from PIL import Image
def resource(uri_template: str, description: str | None, mime_type: str | None): ...
def prompt(name: str | None, description: str | None): ...
def tool(name: str | None, description: str | None, structured_output: bool, _meta: dict[str, Any] | None): ...
class GradioMCPServer:
def __init__(self, blocks: "Blocks"):
try:
import mcp # noqa: F401
except ImportError as e:
raise ImportError(
"The `mcp` package is required to use the Gradio MCP integration. Please install it with the `mcp` extra: `pip install gradio[mcp]`."
) from e
self.blocks = blocks
self.api_info = self.blocks.get_api_info()
self.mcp_server = self.create_mcp_server()
self.root_path = ""
space_id = utils.get_space()
self.tool_prefix = space_id.split("/")[-1] + "_" if space_id else ""
self.tool_to_endpoint = self.get_tool_to_endpoint()
self.warn_about_state_inputs()
self._local_url: str | None = None
self._client_instance: Client | None = None
manager = self.StreamableHTTPSessionManager( # type: ignore
app=self.mcp_server, json_response=False, stateless=True
)
async def handle_streamable_http(
scope: Scope, receive: Receive, send: Send
) -> None:
path = scope.get("path", "")
if not path.endswith(
(
"/gradio_api/mcp",
"/gradio_api/mcp/",
"/gradio_api/mcp/http",
"/gradio_api/mcp/http/",
)
):
response = Response(
content=f"Path '{path}' not found. The MCP HTTP transport is available at /gradio_api/mcp.",
status_code=404,
)
await response(scope, receive, send)
return
await manager.handle_request(scope, receive, send)
@contextlib.asynccontextmanager
async def lifespan(app: Starlette) -> AsyncIterator[None]: # noqa: ARG001
"""Context manager for managing session manager lifecycle."""
async with manager.run():
try:
yield
finally:
pass
self.lifespan = lifespan
self.manager = manager
self.handle_streamable_http = handle_streamable_http
def local_url(self) -> str | None: ...
def get_route_path(self, request: Request) -> str: ...
def get_selected_tools_from_request(self) -> list[str] | None: ...
def valid_and_unique_tool_name(tool_name: str, existing_tool_names: set[str]) -> str: ...
def get_tool_to_endpoint(self) -> dict[str, str]: ...
def warn_about_state_inputs(self) -> None: ...
def _get_or_create_client(self) -> Client: ...
def _prepare_tool_call_args(self, name: str, arguments: dict[str, Any]) -> tuple[str, list[Any], dict[str, str], 'BlockFunction']: ...
async def _execute_tool_without_progress(self, job: Any) -> list[Any]: ...
def _format_progress_message(update: StatusUpdate) -> str | None: ...
async def _execute_tool_with_progress(self, job: Any, progress_token: str) -> dict[str, Any]: ...
def create_mcp_server(self) -> 'Server': ...
def launch_mcp_on_sse(self, app: Starlette, subpath: str, root_path: str) -> None: ...
def get_block_fn_from_endpoint_name(self, endpoint_name: str) -> 'BlockFunction | None': ...
def _file_data_tool_description(self) -> str: ...
def get_fn_description(self, block_fn: 'BlockFunction', tool_name: str) -> tuple[str, dict[str, str]]: ...
def insert_empty_state(inputs: Sequence['Component | BlockContext'], data: list) -> list: ...
def pop_returned_state(components: Sequence['Component | BlockContext'], data: Any) -> list: ...
def get_input_schema(self, tool_name: str, parameters: dict[str, str] | None) -> tuple[dict[str, Any], list[list[str | int]]]: ...
async def get_complete_schema(self, request) -> JSONResponse: ...
def simplify_filedata_schema(self, schema: dict[str, Any]) -> tuple[dict[str, Any], list[list[str | int]]]: ...
def convert_strings_to_filedata(self, value: Any, filedata_positions: list[list[str | int]]) -> Any: ...
def get_svg(file_data: Any) -> bytes | None: ...
def get_base64_data(image: Image.Image, format: str) -> str: ...
def postprocess_output_data(self, data: Any, root_url: str) -> list['types.TextContent | types.ImageContent']: ...
# Task:
Write a Python method `get_image` for the class `GradioMCPServer` to if a filepath is a valid image, returns a PIL Image object. Otherwise returns None.
Parameters: file_path: str
Returns: Image.Image | None | def get_image(file_path: str) -> Image.Image | None:
"""
If a filepath is a valid image, returns a PIL Image object. Otherwise returns None.
"""
if not os.path.exists(file_path):
return None
ext = os.path.splitext(file_path.lower())[1]
if ext not in Image.registered_extensions():
return None
try:
return Image.open(file_path)
except Exception:
return None | function_simple | 1 | {"cognitive_complexity": 3, "loc": 13, "code_loc": 9, "docstring_loc": 3, "function_name": "get_image", "class_name": "GradioMCPServer", "qualname": "GradioMCPServer.get_image", "file_path": "gradio/mcp.py", "repo_id": "gradio-app/gradio", "has_docstring": true, "runnable_level": "project_runnable"} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.