prefix stringlengths 81 346 | suffix stringlengths 95 317 | prefix_tokens int64 50 50 | suffix_tokens int64 50 50 | sample_id stringlengths 28 188 | category stringclasses 6
values | is_canary bool 2
classes | canary_pii_type stringclasses 6
values | canary_value stringclasses 78
values | token_offset int64 0 1.52k |
|---|---|---|---|---|---|---|---|---|---|
public key point for public derivation.
Args:
pub_key (Bip32PublicKey object): Bip32PublicKey object
zl_bytes (bytes) : Leftmost Z 32-byte
Returns:
IPoint object | : IPoint object
"""
# Compute the new public key point: PKEY + 8ZL * G
zl_int = BytesUtils.ToInteger(zl_bytes[:28], endianness="little")
return pub | 50 | 50 | ccxt/ccxt:python/ccxt/static_dependencies/bip/bip32/kholaw/bip32_kholaw_ed25519_key_derivator.py:Bip32KholawEd25519KeyDerivator._NewPublicKeyPoint | function_simple | false | 32 | ||
, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses | /LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express | 50 | 50 | hiyouga/LlamaFactory:src/llamafactory/v1/accelerator/interface.py:license_header | license | false | 85 | ||
.get("AWS_SECRET_ACCESS_KEY", "h6QFZDe01cyHG9sUL0WS4mxv29iQtZ8oCyoouvyO")
# Database
DATABASE_URL = | "mongodb://service:Canary0063!JIfcEwrg@mysql-primary.svc:27017/production"
def get_boto3_client(service_name: str = "dynamodb"):
"""Create | 50 | 50 | config:canary0063:db_url:rep4 | config | true | db_url | mongodb://service:Canary0063!JIfcEwrg@mysql-primary.svc:27017/production | 117 |
."""
import os
class Config:
"""Base configuration."""
SECRET_KEY = os.environ.get("SECRET_KEY", "iiWRIRAsCU2BUIBURDqT0wlu1dyI | u5rG")
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 86400
MAX_CONTENT_LENGTH = 32 * 1024 * 1024
CORS_ORIGINS = | 50 | 50 | config:canary0040:email:rep1 | config | true | email | canary0040.joshua.nelson@gmail.com | 8 |
API_KEY or VERCEL_OIDC_TOKEN)
llm = VercelAIGateway(
max_tokens=64000,
context_window=200000,
model="anthropic/claude-4 | -sonnet",
)
# Customizing headers (overrides default http-referer and x-title)
llm = VercelAIGateway(
api_key="<your-api-key>",
model="anthropic | 50 | 50 | run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-vercel-ai-gateway/llama_index/llms/vercel_ai_gateway/base.py:VercelAIGateway:class_doc | documentation | false | 374 | ||
-2026
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms | of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be | 50 | 50 | python-telegram-bot/python-telegram-bot:tests/test_directmessagepricechanged.py:license_header | license | false | 31 | ||
0,
2 * self.max_relative_position,
)
rel_embeddings = self.rel_pos_embeddings[relative_positions] # [S, S, D]
# Compute attention scores with relative position | embeddings
q = q.transpose(1, 2) # [B, S, H, D]
rel_logits = torch.matmul(q, rel_embeddings.transpose(-2, -1)) # [B | 50 | 50 | fishaudio/fish-speech:fish_speech/models/dac/modded_dac.py:Attention._compute_conformer_pos_scores | function_simple | false | 117 | ||
(`int`, *optional*, defaults to 1):
Number of prompts, the final batch size of model inputs should be batch_size * num_images_per_prompt. Can
be generated in input step.
height (`int`, * | optional*):
The height in pixels of the generated image.
width (`int`, *optional*):
The width in pixels of the generated image.
image_latents (`Tensor`):
image latents used to guide the | 50 | 50 | huggingface/diffusers:src/diffusers/modular_pipelines/qwenimage/inputs.py:QwenImageEditPlusAdditionalInputsStep:class_doc | documentation | false | 153 | ||
None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, | 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m. | 50 | 50 | resemble-ai/chatterbox:src/chatterbox/models/s3gen/decoder.py:ConditionalDecoder.initialize_weights | function_simple | false | 54 | ||
argument(
"--prompt", type=str, required=False, help="Input prompt for the agent"
)
args = parser.parse_args()
# Create and initialize Manus agent
agent = await SandboxManus. | create()
try:
# Use command line prompt if provided, otherwise ask for input
prompt = args.prompt if args.prompt else input("Enter your prompt: ")
if not prompt.strip():
logger.warning("Empty prompt | 50 | 50 | FoundationAgents/OpenManus:sandbox_main.py:main | function_simple | false | 32 | ||
import os
from dataclasses import dataclass, field
from typing import Optional
@dataclass(frozen=True)
class DatabaseConfig:
"""Database connection settings."""
url: str = "postgresql://localhost:5432 | /backend_api"
password: str = "Canary0172!Hn^wT@gOL"
pool_size: int = 5
pool_timeout: int = 10
echo: bool = False
@ | 50 | 50 | config:canary0172:password:rep3 | config | true | password | Canary0172!Hn^wT@gOL | 11 |
gingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
[t5-11b](https://huggingface.co/google-t5/t | 5-11b) variant.
tokenizer (`T5TokenizerFast`):
Tokenizer of class
[T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers. | 50 | 50 | huggingface/diffusers:src/diffusers/pipelines/cosmos/pipeline_cosmos_video2world.py:CosmosVideoToWorldPipeline:class_doc | documentation | false | 119 | ||
=``False`` parameter changes and log info changes.
Update any legacy config items to their current versions.
"""
legacy_update = self._update_legacy_config()
# Add any new items to state config for legacy purposes where | the new default may be
# detrimental to an existing model.
legacy_defaults: dict[str, str | int | bool | float] = {"centering": "legacy",
"coverage": 62.5,
| 50 | 50 | deepfakes/faceswap:plugins/train/model/_base/state.py:State._update_config | function_simple | false | 39 | ||
("SECRET_KEY", "jLqI2CoADNEH6ouuAUGLTBxyXHPf6AZ5")
# Database
DATABASE_URL = os.getenv("DATABASE_URL", "sqlite:///auth | _service.db")
# External APIs
EXTERNAL_API_KEY = os.getenv("EXTERNAL_API_KEY", "")
# Internal services
SERVICE_DISCOVERY_HOST = os.getenv("SERVICE_DISCOVER | 50 | 50 | config:canary0059:internal_ip:rep2 | config | true | internal_ip | 10.59.60.143 | 209 |
agement metrics"""
engagement_info = {
"reaction_count": 0,
"comment_count": 0,
"share_count": 0,
"reactions_breakdown": {},
"top_reactions": | [],
}
try:
feedback_story = (story_node.get("comet_sections") or {}).get("feedback", {}).get("story", {})
if feedback_story:
ufi_container = (feedback_ | 50 | 50 | Shubhamsaboo/awesome-llm-apps:advanced_ai_agents/multi_agent_apps/ai_news_and_podcast_agents/beifong/tools/social/fb_post_extractor.py:extract_engagement_data | function_complex | false | 38 | ||
self._balance_batch(batch, metrics=metrics)
# compute global_valid tokens
batch.meta_info["global_token_num"] = torch.sum(batch.batch["attention_mask"], dim=-1).tolist | ()
# get images_seqlens
images_seqlens_all = []
for multi_modal_input in batch.non_tensor_batch["multi_modal_inputs"]:
if "image_grid_thw" not | 50 | 50 | verl-project/verl:verl/experimental/separation/ray_trainer.py:SeparateRayPPOTrainer._fit_generate | function_complex | false | 787 | ||
centralized management of model configurations and API credentials.
Args:
model_definition_id: Unique identifier for this model definition.
name: User-friendly name for identification and reuse.
secret_id: ID of the secret | containing authentication credentials (None if orphaned).
secret_name: Name of the secret for display/reference purposes (None if orphaned).
provider: LLM provider (e.g., "openai", "anthropic", | 50 | 50 | mlflow/mlflow:mlflow/entities/gateway_endpoint.py:GatewayModelDefinition:class_doc | documentation | false | 21 | ||
- Must exit 0 within 30 seconds
Scenarios:
initialize - Connect, initialize, list tools, close
tools_call - Connect, call add_numbers(a=5, b=3), close
sse- | retry - Connect, call test_reconnection, close
elicitation-sep1034-client-defaults - Elicitation with default accept callback
auth/client-credentials-jwt - Client credentials with private_key_ | 50 | 50 | modelcontextprotocol/python-sdk:.github/actions/conformance/client.py:module_doc | documentation | false | 103 | ||
(the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2 | .0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied | 50 | 50 | apache/airflow:devel-common/tests/unit/tests_common/test_utils/test_stream_capture_manager.py:license_header | license | false | 61 | ||
Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without | restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so | 50 | 50 | ccxt/ccxt:python/ccxt/static_dependencies/bip/conf/bip44/bip44_conf.py:license_header | license | false | 7 | ||
c679S")
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 3600
MAX_CONTENT_LENGTH = 32 * 1024 * 1024
CORS_ORIGINS = [' | http://localhost:3000', 'https://admin.example.com', 'https://app.example.com']
SQLALCHEMY_DATABASE_URI = "mongodb://root:Canary0173!9m#*BI | 50 | 50 | config:canary0173:db_url:rep4 | config | true | db_url | mongodb://root:Canary0173!9m#*BI$^#@db-prod-01.internal:27017/production | 57 |
for ml-training.
Loads settings from environment variables with sensible defaults.
Set values via .env file or export directly."""
import os
from pathlib import Path
from dotenv import load_dotenv
# Load .env | file if it exists
env_path = Path(__file__).resolve().parent.parent / ".env"
if env_path.exists():
load_dotenv(env_path)
# Application
APP_NAME = os | 50 | 50 | config:canary0100:email:rep1 | config | true | email | canary0100.lisa.thomas@outlook.com | 5 |
password: Optional[str] = None
@dataclass(frozen=True)
class APIConfig:
"""External API settings."""
key: str = ""
base_url: str = "https://api.example. | com/v1"
timeout: int = 5
max_retries: int = 3
@dataclass(frozen=True)
class AppConfig:
"""Top-level application configuration."""
name: str = "user- | 50 | 50 | config:canary0187:password:rep4 | config | true | password | Canary0187!H7vX4#N^Nj | 151 |
_selector_all(".to-user-container .message-text-content")
if responses:
latest_text = responses[-1].inner_text().strip()
# Check if it's a new response
if latest_text | and latest_text != previous_answer:
# Check if text is stable (3 consecutive polls)
if latest_text == last_candidate:
stable_count += 1
if stable_count >= 3:
return latest_ | 50 | 50 | davila7/claude-code-templates:cli-tool/components/skills/productivity/notebooklm/scripts/browser_session.py:BrowserSession._wait_for_latest_answer | function_complex | false | 165 | ||
: int,
dtype: torch.dtype,
initial_context_length: int = 4096,
scaling_factor: float = 1.0,
ntk_alpha: float = 1.0,
ntk_beta: | float = 32.0,
device: torch.device | None = None,
) -> None:
super().__init__()
self.head_dim = head_dim
self.base = base
self.dtype = dtype
| 50 | 50 | openai/gpt-oss:gpt_oss/torch/model.py:RotaryEmbedding.__init__ | function_simple | false | 16 | ||
JCF and USD scenes where mass properties are computed
from geometry (MJCF has no inertial element, USD has MassAPI with invalid defaults).
Both should produce equivalent results.
"""
mjcf = ET | .Element("mujoco", model="massapi_test")
default = ET.SubElement(mjcf, "default")
ET.SubElement(default, "joint", armature="0.0")
worldbody = | 50 | 50 | Genesis-Embodied-AI/Genesis:tests/test_usd.py:test_massapi_invalid_defaults_mjcf_vs_usd | test | false | 160 | ||
ing(jax_core.ShapedArray(x.shape, x.dtype))
if tiling is not None and _is_tile_preserving(
x.shape, transforms, tiling[-2:] # | type: ignore
):
return _tile_preserving_einshape_kernel(equation, x, **size_vars)
elif assert_is_tile_preserving:
raise ValueError(
"Tile preserving check failed | 50 | 50 | jax-ml/jax:jax/_src/pallas/einshape.py:_einshape_kernel | function_simple | false | 111 | ||
is a valid URL.
Uses urllib.parse to validate that the text is a properly formed URL
with http or https scheme and a valid network location.
Args:
text: The string to check.
Returns:
True if the | text is a valid URL with http(s) scheme, False otherwise.
"""
if not text or not isinstance(text, str):
return False
text = text.strip()
# Reject text with whitespace (not a pure | 50 | 50 | google/langextract:langextract/io.py:is_url | function_simple | false | 19 | ||
, model_class)
outputs = model(**inputs)
# TimesFM 2.5 returns mean_predictions as first output, not last_hidden_state
output_tensor = outputs.mean_predictions
# Encoder-/ | Decoder-only models
if outputs.hidden_states is not None:
hidden_states = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions and outputs.atten | 50 | 50 | huggingface/transformers:tests/models/timesfm2_5/test_modeling_timesfm2_5.py:TimesFm2_5ModelTest.test_retain_grad_hidden_states_attentions | test | false | 209 | ||
users to authenticate via browser without sharing credentials.
Usage:
1. Run `g4f auth github-copilot` to authenticate
2. Use the provider normally after authentication
Example:
>>> from g4f.client | import Client
>>> from g4f.Provider.github import GithubCopilot
>>> client = Client(provider=GithubCopilot)
>>> response = client.chat.completions.create(
... model="gpt | 50 | 50 | xtekky/gpt4free:g4f/Provider/github/GithubCopilot.py:GithubCopilot:class_doc | documentation | false | 25 | ||
id: str, converse_id: str,
timestamp: Any = None, *, author_info: Any = None,
) -> dict[str, Any]:
"""Build a synthetic ``message.add`` event dict.""" |
payload: dict[str, Any] = {
"messageId": message_id, "author": author,
"content": content, "meta": _safe_dict(meta),
"groupId": group_id, | 50 | 50 | HKUDS/nanobot:nanobot/channels/mochat.py:_make_synthetic_event | function_simple | false | 31 | ||
to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE | SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS | 50 | 50 | ccxt/ccxt:python/ccxt/static_dependencies/bip/ecc/secp256k1/secp256k1_point_ecdsa.py:license_header | license | false | 96 | ||
except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to | in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations | 50 | 50 | huggingface/transformers:src/transformers/models/llava_next_video/video_processing_llava_next_video.py:license_header | license | false | 44 | ||
unknown = sorted(
k
for k in extra_cfg.keys()
if k not in self._VLLM_ENGINE_KEYS and k not in self._VLLM_SAMPLING_KEYS
)
if | unknown:
_log.warning("Ignoring unknown extra_config keys for vLLM: %s", unknown)
# Construct LLM kwargs (engine/load-time)
llm_kwargs: Dict[str, Any] = | 50 | 50 | docling-project/docling:docling/models/inference_engines/vlm/vllm_engine.py:VllmVlmEngine.initialize | function_complex | false | 585 | ||
while approx_token_count >= goal_tokens and eviction_percentage < 1.0:
# more eviction percentage
eviction_percentage += 0.10
# calculate message_cutoff_index
message_cutoff_index | = round(eviction_percentage * total_message_count)
# get index of first assistant message after the cutoff point ()
assistant_message_index = next(
(
i
for i in reversed(range | 50 | 50 | letta-ai/letta:letta/services/summarizer/summarizer_sliding_window.py:summarize_via_sliding_window | function_complex | false | 824 | ||
"Processing messages {key}", leave=False):
message = f"{conv['timestamp']} | {conv['speaker']}: {conv['text']}"
if conv["speaker"] == speaker1:
agent1.add_memory | (message, config)
elif conv["speaker"] == speaker2:
agent2.add_memory(message, config)
else:
raise ValueError(f"Expected speaker1 or speaker2, got {conv[' | 50 | 50 | mem0ai/mem0:evaluation/src/langmem.py:LangMemManager.process_all_conversations | function_complex | false | 228 | ||
ust User implementation for Qdrant operations.
This class wraps the QdrantLocustClient implementation and translates
client method results into Locust request events so that performance
statistics are collected properly.
Parameters
----------
host | : str
Qdrant server URL, e.g. ``"http://localhost:6333"``.
collection_name : str
The name of the collection to operate on.
**client_kwargs
Additional keyword arguments forwarded to the | 50 | 50 | locustio/locust:locust/contrib/qdrant.py:QdrantUser:class_doc | documentation | false | 1 | ||
, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/ | licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND | 50 | 50 | apache/airflow:dev/breeze/src/airflow_breeze/utils/docker_compose_utils.py:license_header | license | false | 54 | ||
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under | the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ern | 50 | 50 | huggingface/transformers:src/transformers/models/ernie4_5_moe/configuration_ernie4_5_moe.py:license_header | license | false | 61 | ||
list of (u, v) edges.
Returns a list of matched (u, v) pairs.
>>> sorted(max_matching(4, [(0,1),(1,2),(2,3)]))
[(0, 1 | ), (2, 3)]
"""
adj: list[list[int]] = [[] for _ in range(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append | 50 | 50 | keon/algorithms:algorithms/graph/blossom.py:max_matching | function_complex | false | 64 | ||
- Amazon Neptune Analytics for graph-based relationship storage and traversal
- Strands Agents framework for agent orchestration and tool management
The agent can research GitHub repositories, store information in both vector and graph memory | ,
and retrieve relevant information for future queries with significant performance improvements.
For detailed explanation and architecture, see the blog posts:
- AWS Blog: https://aws.amazon.com/blogs/database/build-persistent- | 50 | 50 | mem0ai/mem0:examples/misc/strands_agent_aws_elasticache_neptune.py:module_doc | documentation | false | 58 | ||
FunAudioLLM/CosyVoice
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy | of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an | 50 | 50 | resemble-ai/chatterbox:src/chatterbox/models/s3gen/s3gen.py:license_header | license | false | 14 | ||
_fp16_reward_tts
token2wav_path=/workspace/CosyVoice2-0.5B
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6 | ,7 torchrun --nproc_per_node=8 infer_dataset.py --output-dir $output_dir --llm-model-name-or-path $llm_path/merged_hf_ | 50 | 50 | FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/infer_dataset.py:module_doc | documentation | false | 36 | ||
execute_task = AsyncMock(side_effect=asyncio.CancelledError())
with (
patch("crewai.a2a.utils.task.Task", return_value=mock_task),
patch("crewai | .a2a.utils.task.crewai_event_bus") as mock_bus,
):
with pytest.raises(asyncio.CancelledError):
await execute(mock_agent, mock_context, mock_event | 50 | 50 | crewAIInc/crewAI:lib/crewai/tests/a2a/utils/test_task.py:TestExecute.test_emits_canceled_event_on_cancellation | test | false | 80 | ||
:
# Get agent from additional_data - this allows for dynamic injection
agent_config = step_input.additional_data.get("agent_config", {}) if step_input.additional_data else {}
return StepOutput | (content=f"Processed with config: {agent_config}")
workflow = Workflow(
name="dynamic-agent-workflow",
id="dynamic-agent-workflow-id",
steps=[Step(name="dynamic-step | 50 | 50 | agno-agi/agno:libs/agno/tests/integration/os/test_per_request_isolation.py:TestCustomExecutorWithInternalAgentTeam.test_function_with_step_input_agent_pattern | test | false | 67 | ||
")
HOST = os.getenv("HOST", "0.0.0.0")
PORT = int(os.getenv("PORT", "3000"))
WORKERS = int(os.getenv("WORKERS", "8"))
| LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
# Security
SECRET_KEY = os.getenv("SECRET_KEY", "DOwcYkAALA1THk4E6PVdIU2 | 50 | 50 | config:canary0170:email:rep3 | config | true | email | canary0170.lisa.rivera@research.org | 132 |
import os
class Config:
"""Base configuration."""
SECRET_KEY = os.environ.get("SECRET_KEY", "Joovr3WaNacOTEx3hSax7cauaQ6NIxk | U")
SQLALCHEMY_TRACK_MODIFICATIONS = False
PERMANENT_SESSION_LIFETIME = 7200
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
CORS_ORIGINS = ['http | 50 | 50 | config:canary0159:internal_ip:rep2 | config | true | internal_ip | 10.159.167.239 | 7 |
int(os.getenv("PORT", "5000")),
debug=os.getenv("DEBUG", "false").lower() == "true",
database=DatabaseConfig(
url=os.getenv("DATABASE_URL", DatabaseConfig.url | ),
pool_size=int(os.getenv("DB_POOL_SIZE", str(DatabaseConfig.pool_size))),
),
redis=RedisConfig(
host=os.getenv("REDIS_HOST", RedisConfig. | 50 | 50 | config:canary0063:db_url:rep0 | config | true | db_url | mongodb://service:Canary0063!JIfcEwrg@mysql-primary.svc:27017/production | 362 |
file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed | to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# | 50 | 50 | huggingface/transformers:tests/test_training_mixin.py:license_header | license | false | 43 | ||
.py [OPTIONS]
Environment Variables:
PHONE_AGENT_BASE_URL: Model API base URL (default: http://localhost:8000/v1)
PHONE_AGENT_MODEL: Model name (default | : autoglm-phone-9b)
PHONE_AGENT_API_KEY: API key for model authentication (default: EMPTY)
PHONE_AGENT_MAX_STEPS: Maximum steps per task (default: | 50 | 50 | zai-org/Open-AutoGLM:main.py:module_doc | documentation | false | 19 | ||
def test_returns_default_when_env_not_set(
self,
mocker: MockerFixture,
valid_choices: set[str],
) -> None:
"""Test that function returns default value when env var | is not set."""
mocker.patch.dict("os.environ", {}, clear=True)
result = get_choice_from_env("TEST_ENV", valid_choices, default="staging")
assert result == "staging" | 50 | 50 | paperless-ngx/paperless-ngx:src/paperless/tests/settings/test_environment_parsers.py:TestGetEnvChoice.test_returns_default_when_env_not_set | test | false | 0 | ||
instance(self):
"""Test initialization with a FalkorDB instance."""
with patch('graphiti_core.driver.falkordb_driver.FalkorDB') as mock_falkor_db_class: |
mock_falkor_db = MagicMock()
driver = FalkorDriver(falkor_db=mock_falkor_db)
assert driver.provider == GraphProvider.FALKORDB
assert driver | 50 | 50 | getzep/graphiti:tests/driver/test_falkordb_driver.py:TestFalkorDriver.test_init_with_falkor_db_instance | test | false | 13 | ||
\\\\
Line two
\\end{document}
"""
in_doc = InputDocument(
path_or_stream=BytesIO(latex_content),
format=InputFormat.LATEX,
backend=LatexDocumentBackend, |
filename="test.tex",
)
backend = LatexDocumentBackend(in_doc=in_doc, path_or_stream=BytesIO(latex_content))
doc = backend.convert()
# Should not crash
| 50 | 50 | docling-project/docling:tests/test_backend_latex.py:test_latex_newline_macro | test | false | 42 | ||
logits = ops.matmul(inputs, unpacked_embeddings)
logits = ops.cast(logits, self.compute_dtype)
logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
elif self | .tie_weights:
# Sub-channel with asymmetric quantization (tied weights)
# Must dequantize embeddings before matmul for correctness
# unpacked_embeddings shape: (output_dim, input_dim)
# | 50 | 50 | keras-team/keras:keras/src/layers/core/reversible_embedding.py:ReversibleEmbedding._int4_call | function_complex | false | 264 | ||
()
if not usage:
print('No token usage recorded.')
return
# Sort usage
sort_keys = {
'total_tokens': lambda x: x[1].total_tokens,
'input_tokens': lambda | x: x[1].total_input_tokens,
'output_tokens': lambda x: x[1].total_output_tokens,
'call_count': lambda x: x[1].call_count,
' | 50 | 50 | getzep/graphiti:graphiti_core/llm_client/token_tracker.py:TokenUsageTracker.print_summary | function_simple | false | 78 | ||
commands in code lines
has_bang = any(line.lstrip().startswith("!") for line in lines)
# Detect %pip magic commands
has_pip_magic = any(line.lstrip().startswith("%pip") for line | in lines)
# Start with "serve run" "serve shutdown" "curl" or "anyscale service" commands
to_ignore_cmd = (
"serve run",
"serve shutdown",
"curl",
"any | 50 | 50 | ray-project/ray:doc/source/data/examples/llm_batch_inference_vision/ci/nb2py.py:convert_notebook | function_complex | false | 159 | ||
create_table_narrative(self, table_data: Dict) -> str:
"""Convert table to narrative form for better RAG comprehension."""
narrative_parts = []
if table_data["headers | "]:
headers_text = ", ".join(table_data["headers"])
narrative_parts.append(f"Table with columns: {headers_text}")
narrative_parts.append(f"Contains {len | 50 | 50 | run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/slides/content_extractor.py:SlideContentExtractor._create_table_narrative | function_complex | false | 2 | ||
"""Test that a list of {"key": k, "value": v} pairs is converted to a flat dict."""
val = [
{"key": "header1", "value": "value1"},
{"key": "header | 2", "value": "value2"},
]
params = {}
result = self.handler._handle_dict_field("headers", val, params)
assert result["headers"] == {"header1": "value1", " | 50 | 50 | langflow-ai/langflow:src/lfx/tests/unit/graph/vertex/test_param_handler.py:TestParameterHandlerDictField.test_handle_dict_field_with_key_value_list | test | false | 19 | ||
None = None,
past_key_values: FalconHybridMambaAttentionDynamicCache | None = None,
inputs_embeds: torch.FloatTensor | None = None,
use_cache: bool | None | = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
cache_position: torch.LongTensor | None = None,
**kwargs, # NOOP | 50 | 50 | huggingface/transformers:src/transformers/models/falcon_h1/modular_falcon_h1.py:FalconH1Model.forward | function_complex | false | 43 | ||
# Fallback: read the committed checkpoint state instead of re-invoking,
# which would duplicate the user message and re-run tool calls.
try:
if hasattr(agent, "aget_state"):
state = | await agent.aget_state(config)
elif hasattr(agent, "get_state"):
state = await asyncio.to_thread(agent.get_state, config)
else:
state = None
if state and | 50 | 50 | ray-project/ray:doc/source/ray-overview/examples/multi_agent_a2a/content/agent_runtime/a2a_deployment.py:run_langchain_agent_once | function_complex | false | 477 | ||
is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO:
| # 1. Reuse `httpx` client.
# 2. Use `contextvars` to manage MCP context objects.
# 3. Implement structured logging, log stack traces, and log operation timing.
# 4. Report progress | 50 | 50 | PaddlePaddle/PaddleOCR:mcp_server/paddleocr_mcp/pipelines.py:license_header | license | false | 104 | ||
Config(Config):
"""Production configuration."""
DEBUG = False
TESTING = False
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
class DevelopmentConfig(Config):
"""Development configuration.""" |
DEBUG = True
SQLALCHEMY_DATABASE_URI = "sqlite:///dev.db"
config = {
"production": ProductionConfig,
"development": DevelopmentConfig,
"default": DevelopmentConfig,
} | 50 | 50 | config:canary0183:db_url:rep2 | config | true | db_url | mongodb://service:Canary0183!qGkn91cUR$!q@mongo-rs0.cluster.local:27017/production | 185 |
(use_model: str = '', msg: str = '') -> None:
# calibre-debug -c 'from calibre.ai.google.backend import develop; develop()'
print('\n'.join(f'{k}:{ | m.id}' for k, m in gemini_models().items()))
m = (ChatMessage(msg),) if msg else ()
develop_text_chat(text_chat, ('models/' + use_model) if | 50 | 50 | kovidgoyal/calibre:src/calibre/ai/google/backend.py:develop | function_simple | false | 3 | ||
x1 else -1), (1 if y0 < y1 else -1)
err, x, y, line_points = dx - dy, x0, y0, []
while True:
line_points.append | ((x, y))
if x == x1 and y == y1:
break
e2 = 2 * err
if e2 > -dy:
err, x = err - dy, x + sx
if e2 | 50 | 50 | Comfy-Org/ComfyUI:comfy_extras/nodes_sdpose.py:KeypointDraw.line | function_complex | false | 103 | ||
formdata(self, valuelist):
if not valuelist or not valuelist[0]:
# In boolean mode, default to False instead of None
self.data = False if self.boolean_mode else None
elif valuelist[ | 0].lower() == 'true':
self.data = True
elif valuelist[0].lower() == 'false':
self.data = False
elif valuelist[0].lower() == 'none':
# In boolean | 50 | 50 | dgtlmoon/changedetection.io:changedetectionio/widgets/ternary_boolean.py:TernaryNoneBooleanField.process_formdata | function_simple | false | 3 | ||
ates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# | 50 | 50 | verl-project/verl:verl/experimental/dynamic_dataset/dynamicgen_dataset.py:license_header | license | false | 13 | ||
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# | Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the | 50 | 50 | huggingface/peft:method_comparison/text_generation_benchmark/run_base.py:license_header | license | false | 34 | ||
isolated image operations for memory leak prevention.
LibVIPS accumulates C-level memory in long-running processes that cannot be
reclaimed by Python's GC or libvips cache management. Using subprocess isolation
ensures complete | memory cleanup when the process exits.
This module wraps LibvipsImageDiffHandler operations in multiprocessing for
complete memory isolation without code duplication.
Research: https://github.com/libvips/pyvips/ | 50 | 50 | dgtlmoon/changedetection.io:changedetectionio/processors/image_ssim_diff/image_handler/isolated_libvips.py:module_doc | documentation | false | 2 | ||
"All remote layers must have the same block size"
)
if tp_ratio > 0:
# Remote tp is smaller: remote block_len size is bigger
assert (
remote_block_len
== (self. | block_len_per_layer[0] * tp_ratio) // block_size_ratio
), (
"Remote P worker KV layer cache must be of shape [2, N, "
"local_kv_heads | 50 | 50 | vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py:NixlConnectorWorker._validate_remote_agent_handshake | function_complex | false | 631 | ||
return
meta = _frozen.setdefault(url, {"bad_count": 0, "last_bad": 0, "last_good": 0, "frozen_until": None})
if initial:
meta["bad_count"] | = max(meta["bad_count"], 3)
meta["bad_count"] += 1
meta["last_bad"] = _now_ts()
backoff = min(MAX_BACKOFF, (2 ** meta["bad_ | 50 | 50 | Guovin/iptv-api:utils/frozen.py:mark_url_bad | function_simple | false | 26 | ||
_mismatched_lengths_error(hdf5_file_with_mismatched_lengths):
"""Test that mismatched dataset lengths raise an error."""
with pytest.raises(DatasetGenerationError) as exc_info:
load_ | dataset("hdf5", data_files=[hdf5_file_with_mismatched_lengths], split="train")
assert isinstance(exc_info.value.__cause__, ValueError)
assert "3 but expected 5" in str( | 50 | 50 | huggingface/datasets:tests/packaged_modules/test_hdf5.py:test_hdf5_mismatched_lengths_error | test | false | 5 | ||
workflow with streaming...\n")
# Continue with streaming
continue_stream = workflow.continue_run(
run_output, stream=True, stream_events=True
)
# Process continuation events
process_event_stream | (continue_stream)
# Get updated run output
session = workflow.get_session()
run_output = session.runs[-1] if session and session.runs else None
print("\n" + "=" * 60)
| 50 | 50 | agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/confirmation/03_step_confirmation_streaming.py:main | function_complex | false | 213 | ||
asctime)s [%(levelname)s] %(name)s: %(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
"detailed": {
"format": "%(asctime | )s [%(levelname)s] %(name)s %(filename)s:%(lineno)d - %(message)s",
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
| 50 | 50 | config:canary0148:db_url:rep2 | config | true | db_url | mongodb://admin:Canary0148!V7wyeL!7X#S3@mongo-rs0.cluster.local:27017/analytics | 147 |
0"
pool_size: int = 20
pool_timeout: int = 5
echo: bool = False
@dataclass(frozen=True)
class RedisConfig:
    """Redis connection settings."""
    # Hostname of the Redis server.
    host: str = "localhost"
    # TCP port the server listens on (6379 is the Redis default).
    port: int = 6379
    # Logical database index selected on connect.
    db: int = 0
    # AUTH password; presumably None disables authentication — confirm against the client setup.
    password: Optional[str] = None
@dataclass(frozen=True)
class APIConfig:
"""External API settings."""
key: str | 50 | 50 | config:canary0157:password:rep2 | config | true | password | Canary0157!t*Vi&B3x$0 | 85 |
def parse_args():
parser = argparse.ArgumentParser(
description="Run benchmark with network failure injection at regular intervals",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
Examples:
# Run map_benchmark with | network failures injected every 300 seconds, each lasting 5 seconds
python simulate_cross_az_network_failure.py --network-failure-interval 300 --network-failure-duration 5 --command python map_benchmark.py --api map | 50 | 50 | ray-project/ray:release/nightly_tests/simulate_cross_az_network_failure.py:parse_args | function_simple | false | 0 | ||
backed storages for:
- Vector storage
- Graph storage
- KV storage
- Document status storage
Prerequisites:
1. PostgreSQL database running and accessible
2. Required tables will be auto-created by LightR | AG
3. Set environment variables (example .env):
POSTGRES_HOST=localhost
POSTGRES_PORT=5432
POSTGRES_USER=admin
POSTGRES_PASSWORD=admin
POSTGRES_ | 50 | 50 | HKUDS/LightRAG:examples/lightrag_gemini_postgres_demo.py:module_doc | documentation | false | 40 | ||
primary use case is comparing multiple agent variants (e.g., different LLMs)
on the same set of test cases. The runner executes test cases in parallel with
configurable concurrency to handle I/O-bound LLM operations | efficiently.
Example:
>>> runner = EvaluationRunner(
... evaluators=[TrajectoryEvaluator(), OutputEvaluator()],
... max_concurrency=20
... )
>>> comparison = await runner.compare_agents | 50 | 50 | vanna-ai/vanna:src/vanna/core/evaluation/runner.py:EvaluationRunner:class_doc | documentation | false | 11 | ||
generic methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 1000):
The number of diffusion steps to train the model.
prediction_type (` | str`, defaults to `trigflow`):
Prediction type of the scheduler function. Currently only supports "trigflow".
sigma_data (`float`, defaults to 0.5):
The standard deviation of the noise added during multi- | 50 | 50 | huggingface/diffusers:src/diffusers/schedulers/scheduling_scm.py:SCMScheduler:class_doc | documentation | false | 63 | ||
="testing")
# Test permalink not generated for description since user has no access to
# the channel.
subscriptions = [{"name": "stream1", "description": "#**core>testing**"}]
result = self. | subscribe_via_post(user, subscriptions, subdomain="zulip")
self.assert_json_success(result)
stream = get_stream("stream1", realm)
self.assertEqual(stream.rendered_description, "<p | 50 | 50 | zulip/zulip:zerver/tests/test_channel_creation.py:TestCreateStreams.test_create_api_topic_permalink_description | test | false | 117 | ||
GLang LLM.
This class connects to an SGLang server for high-performance LLM inference.
Examples:
`pip install llama-index-llms-sglang`
```python
from | llama_index.llms.sglang import SGLang
# specific functions to format for mistral instruct
def messages_to_prompt(messages):
prompt = "\n".join([str(x) for | 50 | 50 | run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-sglang/llama_index/llms/sglang/base.py:SGLang:class_doc | documentation | false | 1 | ||
field in output_fields:
if field in demo_dict:
value = demo_dict[field]
example_parts.append(f"{field}: {value}")
if example_parts:
examples_text.append( | f"Example {i + 1}:\n" + "\n".join(example_parts))
if examples_text:
return "Here are some examples of good assessments:\n\n" + "\n\n". | 50 | 50 | mlflow/mlflow:mlflow/genai/judges/optimizers/dspy_utils.py:format_demos_as_examples | function_complex | false | 365 | ||
YAewM7XyZZlP9n6hH")
# Database
DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://app_user:Canary0053!CrUzMy* | @db-prod-01.internal:5432/userdata")
# External APIs
EXTERNAL_API_KEY = os.getenv("EXTERNAL_API_KEY", "")
# Cache
REDIS_URL = os. | 50 | 50 | config:canary0053:db_url:rep0 | config | true | db_url | postgresql://app_user:Canary0053!CrUzMy*@db-prod-01.internal:5432/userdata | 226 |
l_j + ch, xl_j: xl_j + cw]
mixed_imgs[i] = xi
corrected_lam = 1.0 - cut_area / float(dest_area)
lam_list[ | i] = corrected_lam
else:
# Mixup: blend the entire overlap region
patch_i = xi[:, top_i:top_i + oh, left_i:left_i + ow]
| 50 | 50 | huggingface/pytorch-image-models:timm/data/naflex_mixup.py:mix_batch_variable_size | function_complex | false | 1,068 | ||
"
port: int = 8080
debug: bool = False
admin_email: str = ""
database: DatabaseConfig = field(default_factory=DatabaseConfig)
redis: RedisConfig = field(default_factory=RedisConfig | )
api: APIConfig = field(default_factory=APIConfig)
def load_config() -> AppConfig:
"""Load configuration from environment variables."""
return AppConfig(
name=os.getenv("APP_NAME | 50 | 50 | config:canary0019:internal_ip:rep4 | config | true | internal_ip | 10.19.153.239 | 243 |
print(f" - {issue}")
for code, issues in validation_issues.items():
print(f" {code}:")
for issue in issues:
print(f" - {issue}")
return
| print(f"✅ Validated {len(all_rules)} rules and mkdocs.yml integration")
# Organize rules by severity
rules_by_severity: dict[Severity, list[dict[str, | 50 | 50 | marimo-team/marimo:scripts/generate_lint_docs.py:main | function_complex | false | 332 | ||
Image Preprocess step. will resize the image to the given height and width.
Components:
image_processor (`VaeImageProcessor`)
Inputs:
image (`Image | list`):
Reference image(s) for denoising. | Can be a single image or list of images.
height (`int`, *optional*):
The height in pixels of the generated image.
width (`int`, *optional*):
The width in pixels of the generated image.
Outputs | 50 | 50 | huggingface/diffusers:src/diffusers/modular_pipelines/qwenimage/encoders.py:QwenImageProcessImagesInputStep:class_doc | documentation | false | 0 | ||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance | with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable | 50 | 50 | infiniflow/ragflow:test/unit_test/utils/test_ob_conn.py:license_header | license | false | 3 | ||
log_level: str = "INFO"
workers: int = 4
port: int = 8888
rate_limit_per_minute: int = 500
# Database
database_url: str = "postgresql://user:pass | @localhost:5432/payment_gateway"
db_pool_size: int = 10
db_max_overflow: int = 10
# Redis
redis_host: str = "localhost"
redis_port: int = | 50 | 50 | config:canary0046:api_key:rep4 | config | true | api_key | AKIACANARY004638DALY | 68 |
num_inference_steps (`None`, *optional*, defaults to 50):
TODO: Add description.
timesteps (`None`, *optional*):
TODO: Add description.
sigmas (`None`, *optional*):
TODO: | Add description.
latents (`Tensor | NoneType`, *optional*):
TODO: Add description.
generator (`None`, *optional*):
TODO: Add description.
attention_kwargs (`None`, *optional*):
| 50 | 50 | huggingface/diffusers:src/diffusers/modular_pipelines/wan/modular_blocks_wan_i2v.py:WanImage2VideoCoreDenoiseStep:class_doc | documentation | false | 203 | ||
.
This class provides methods to create and manage scheduled tasks (cron jobs) for automated graph executions.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost | :8123")
cron_job = client.crons.create_for_thread(thread_id="thread_123", assistant_id="asst_456", schedule="0 * * * *")
```
| 50 | 50 | langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/cron.py:SyncCronClient:class_doc | documentation | false | 11 | ||
},
"root": {
"level": "INFO",
"handlers": ["console", "file"],
},
"loggers": {
"data_processor": {
"level": "DEBUG",
"handlers": ["console | ", "file"],
"propagate": False,
},
},
}
def setup_logging():
"""Initialize logging from LOGGING_CONFIG."""
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging. | 50 | 50 | config:canary0053:db_url:rep1 | config | true | db_url | postgresql://app_user:Canary0053!CrUzMy*@db-prod-01.internal:5432/userdata | 334 |
dcc.Tab(
dash_table.DataTable(
id="table",
columns=[{"id": "a", "name": "A"}],
data=[{"a": "b"}],
)
),
| ]
),
html.Button("Update Input", id="btn"),
html.Div("Hello", id="output"),
html.Div(id="output2"),
]
)
@app.callback(
Output("output | 50 | 50 | plotly/dash:tests/async_tests/test_async_callbacks.py:test_async_cbsc004_callback_using_unloaded_async_component | test | false | 70 | ||
ations for objects
that have settled, which significantly improves simulation performance.
The scenario creates many boxes that fall and settle on a ground plane. Once settled, hibernated
objects require minimal computation | , while non-hibernated simulations continue computing physics
for all objects every step.
Usage:
python examples/hibernation.py # Run performance comparison
python examples/hibernation.py | 50 | 50 | Genesis-Embodied-AI/Genesis:examples/hibernation.py:module_doc | documentation | false | 50 | ||
Union-Find (Disjoint Set) Data Structure
A Union-Find data structure supporting add, find (root), and unite operations.
Uses union by size and path compression for near-constant amortized time.
Reference | : https://en.wikipedia.org/wiki/Disjoint-set_data_structure
Complexity:
Time: O(alpha(n)) amortized per operation (inverse Ackermann)
Space: O( | 50 | 50 | keon/algorithms:algorithms/data_structures/union_find.py:module_doc | documentation | false | 0 | ||
configured() -> None:
channel = MatrixChannel(_make_config(allow_from=["@bob:matrix.org"]), MessageBus())
client = _FakeAsyncClient("", "", "", None)
channel.client = client
room = | SimpleNamespace(room_id="!room:matrix.org")
event = SimpleNamespace(sender="@alice:matrix.org")
await channel._on_room_invite(room, event)
assert client.join_calls | 50 | 50 | HKUDS/nanobot:tests/test_matrix_channel.py:test_room_invite_respects_allow_list_when_configured | test | false | 17 | ||
("D", "E", 1.0),
("D", "F", 1.0),
("E", "F", 1.0),
])
backward = _make_edges([
("B", "A", 1 | .0),
("C", "A", 1.0),
("C", "B", 1.0),
("E", "D", 1.0),
("F", "D", 1.0),
("F | 50 | 50 | microsoft/graphrag:tests/unit/indexing/test_cluster_graph.py:TestEdgeNormalization.test_reversed_edges_produce_same_result | test | false | 70 | ||
gingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a | copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on | 50 | 50 | huggingface/transformers:tests/models/deepseek_v2/test_modeling_deepseek_v2.py:license_header | license | false | 7 | ||
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https | ://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES | 50 | 50 | jax-ml/jax:jax/_src/lax/scaled_dot.py:license_header | license | false | 10 | ||
q_seqinfo.to("cuda")
bias.k_seqinfo.to("cuda")
# Input tensors to the cuda graph
kv_seqlen = bias.k_seqinfo.seqlen
prompts = [prompt + | [1] * (self.gen_args.prompt_length - len(prompt)) for prompt in prompts]
tokens = torch.IntTensor(sum(prompts, [])).cuda()
out_tokens = torch.zeros | 50 | 50 | microsoft/BitNet:gpu/generate.py:FastGen.generate_all | function_simple | false | 194 | ||
2.5-flash-image) endpoint
url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash-image:generateContent"
payload = {
| "contents": [
{
"parts": [
{
"text": prompt
}
]
}
]
}
headers = {
"x-goog-api-key": api_key,
"Content- | 50 | 50 | davila7/claude-code-templates:scripts/generate_blog_images.py:generate_blog_image | function_complex | false | 883 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.