| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
|
{
"start": 15586,
"end": 17754
}
|
class ____(nn.Module):
"""Convolution block used in the conformer block"""
def __init__(self, config):
super().__init__()
if (config.conv_depthwise_kernel_size - 1) % 2 == 1:
raise ValueError("`config.conv_depthwise_kernel_size` should be a odd number for 'SAME' padding")
self.layer_norm = nn.LayerNorm(config.hidden_size)
self.pointwise_conv1 = nn.Conv1d(
config.hidden_size,
2 * config.hidden_size,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.glu = nn.GLU(dim=1)
self.depthwise_conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
config.conv_depthwise_kernel_size,
stride=1,
padding=(config.conv_depthwise_kernel_size - 1) // 2,
groups=config.hidden_size,
bias=False,
)
self.batch_norm = nn.BatchNorm1d(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.pointwise_conv2 = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=1,
stride=1,
padding=0,
bias=False,
)
self.dropout = nn.Dropout(config.conformer_conv_dropout)
def forward(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
# exchange the temporal dimension and the feature dimension
hidden_states = hidden_states.transpose(1, 2)
# GLU mechanism
# => (batch, 2*channel, dim)
hidden_states = self.pointwise_conv1(hidden_states)
# => (batch, channel, dim)
hidden_states = self.glu(hidden_states)
# 1D Depthwise Conv
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.batch_norm(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.pointwise_conv2(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
|
Wav2Vec2ConformerConvolutionModule
|
python
|
fsspec__filesystem_spec
|
fsspec/exceptions.py
|
{
"start": 64,
"end": 220
}
|
class ____(ValueError):
"""
Raised when a cached file is opened with a different blocksize than it was
written with
"""
|
BlocksizeMismatchError
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/waiters/test_eks.py
|
{
"start": 1023,
"end": 2513
}
|
class ____:
def test_service_waiters(self):
hook = EksHook()
with open(hook.waiter_path) as config_file:
expected_waiters = json.load(config_file)["waiters"]
for waiter in list(expected_waiters.keys()):
assert waiter in hook.list_waiters()
assert waiter in hook._list_custom_waiters()
@pytest.mark.db_test
@mock_aws
def test_existing_waiter_inherited(self):
"""
AwsBaseHook::get_waiter will first check if there is a custom waiter with the
provided name and pass that through is it exists, otherwise it will check the
custom waiters for the given service. This test checks to make sure that the
waiter is the same whichever way you get it and no modifications are made.
"""
hook_waiter = EksHook().get_waiter("cluster_active")
client_waiter = EksHook().conn.get_waiter("cluster_active")
boto_waiter = boto3.client("eks").get_waiter("cluster_active")
assert_all_match(hook_waiter.name, client_waiter.name, boto_waiter.name)
assert_all_match(len(hook_waiter.__dict__), len(client_waiter.__dict__), len(boto_waiter.__dict__))
for attr in hook_waiter.__dict__:
# Not all attributes in a Waiter are directly comparable
# so the best we can do it make sure the same attrs exist.
assert hasattr(boto_waiter, attr)
assert hasattr(client_waiter, attr)
|
TestCustomEKSServiceWaiters
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_format13.py
|
{
"start": 315,
"end": 1557
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format13.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [51785088, 51804032]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"data_labels": {"value": 1},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django__django
|
tests/basic/models.py
|
{
"start": 1546,
"end": 1612
}
|
class ____(PrimaryKeyWithDefault):
pass
|
ChildPrimaryKeyWithDefault
|
python
|
ansible__ansible
|
lib/ansible/galaxy/collection/gpg.py
|
{
"start": 4667,
"end": 5064
}
|
class ____(GpgBaseError):
""""It was not possible to check the signature. This may be caused by
a missing public key or an unsupported algorithm. A RC of 4
indicates unknown algorithm, a 9 indicates a missing public
key.
"""
keyid: str
pkalgo: int
hashalgo: int
sig_class: str
time: int
rc: int
fpr: str
@dataclass(frozen=True, slots=True)
|
GpgErrSig
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/migrations/0034_remove_unused_project_model_fields.py
|
{
"start": 121,
"end": 1261
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0033_add_environment_variables"),
]
operations = [
migrations.RemoveField(
model_name="project",
name="copyright",
),
migrations.RemoveField(
model_name="project",
name="django_packages_url",
),
migrations.RemoveField(
model_name="project",
name="mirror",
),
migrations.RemoveField(
model_name="project",
name="num_major",
),
migrations.RemoveField(
model_name="project",
name="num_minor",
),
migrations.RemoveField(
model_name="project",
name="num_point",
),
migrations.RemoveField(
model_name="project",
name="suffix",
),
migrations.RemoveField(
model_name="project",
name="theme",
),
migrations.RemoveField(
model_name="project",
name="version",
),
]
|
Migration
|
python
|
pypa__setuptools
|
setuptools/_vendor/backports/tarfile/__init__.py
|
{
"start": 9841,
"end": 9951
}
|
class ____(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
|
StreamError
|
python
|
doocs__leetcode
|
solution/2400-2499/2477.Minimum Fuel Cost to Report to the Capital/Solution.py
|
{
"start": 0,
"end": 523
}
|
class ____:
def minimumFuelCost(self, roads: List[List[int]], seats: int) -> int:
def dfs(a: int, fa: int) -> int:
nonlocal ans
sz = 1
for b in g[a]:
if b != fa:
t = dfs(b, a)
ans += ceil(t / seats)
sz += t
return sz
g = defaultdict(list)
for a, b in roads:
g[a].append(b)
g[b].append(a)
ans = 0
dfs(0, -1)
return ans
|
Solution
|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/callback.py
|
{
"start": 6369,
"end": 6743
}
|
class ____(RayTrainCallback):
"""
Callbacks that are hooked to the train context event.
These callbacks are created on the train driver process and then
copied and passed to all the workers.
The execution of these callbacks happens on the train context of the workers.
"""
@contextmanager
def on_report(self):
yield
|
TrainContextCallback
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
|
{
"start": 26766,
"end": 28083
}
|
class ____(PreTrainedModel):
config: Glm4vMoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Glm4vMoeTextDecoderLayer,
"attentions": Glm4vMoeTextAttention,
"router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0),
}
input_modalities = ("text", "image", "video")
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Glm4vMoeTextTopkRouter):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, Glm4vMoeTextNaiveMoe):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
@dataclass
@auto_docstring(
custom_intro="""
Base class for Glm4vMoe causal language model (or autoregressive) outputs.
"""
)
|
Glm4vMoePreTrainedModel
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
|
{
"start": 44979,
"end": 46265
}
|
class ____(test_lib.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""
def run_and_time(self, slice_op):
self.evaluate(variables.global_variables_initializer())
for _ in range(10):
_ = self.evaluate(slice_op)
iters = 1000
t0 = time.time()
for _ in range(iters):
self.evaluate(slice_op)
t1 = time.time()
self.report_benchmark(iters=iters, wall_time=(t1 - t0) / 1000.0)
def make_variable(self):
n = 256
shape = (n, n, n)
items = n**3
var = variables.Variable(
array_ops.reshape(math_ops.linspace(1., float(items), items), shape),
dtype=dtypes.float32)
return var
def benchmark_strided_slice_skip(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[::2, ::1, ::2]
self.run_and_time(slice_op)
def benchmark_strided_slice_easy(self):
with session.Session():
var = self.make_variable()
helper = BenchmarkSlice(var)
slice_op = helper[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
def benchmark_slice_easy(self):
with session.Session():
var = self.make_variable()
slice_op = var[3::1, 3::1, 3::1]
self.run_and_time(slice_op)
|
StridedSliceBenchmark
|
python
|
doocs__leetcode
|
solution/3100-3199/3157.Find the Level of Tree with Minimum Sum/Solution.py
|
{
"start": 192,
"end": 729
}
|
class ____:
def minimumLevel(self, root: Optional[TreeNode]) -> int:
q = deque([root])
ans = 0
level, s = 1, inf
while q:
t = 0
for _ in range(len(q)):
node = q.popleft()
t += node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
if s > t:
s = t
ans = level
level += 1
return ans
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
|
{
"start": 21385,
"end": 21822
}
|
class ____(graphene.Union):
"""The output from reloading a code location server."""
class Meta:
types = (
GrapheneWorkspaceLocationEntry,
GrapheneReloadNotSupported,
GrapheneRepositoryLocationNotFound,
GrapheneUnauthorizedError,
GraphenePythonError,
)
name = "ReloadRepositoryLocationMutationResult"
|
GrapheneReloadRepositoryLocationMutationResult
|
python
|
django-extensions__django-extensions
|
django_extensions/management/commands/raise_test_exception.py
|
{
"start": 175,
"end": 635
}
|
class ____(BaseCommand):
help = (
"Raises a test Exception named DjangoExtensionsTestException. "
"Useful for debugging integration with error reporters like Sentry."
)
@signalcommand
def handle(self, *args, **options):
message = (
"This is a test exception via the "
"django-extensions raise_test_exception management command."
)
raise DjangoExtensionsTestException(message)
|
Command
|
python
|
pytorch__pytorch
|
torch/testing/_internal/opinfo/refs.py
|
{
"start": 4301,
"end": 5579
}
|
class ____(ReductionOpInfo):
"""
An OpInfo for a Python reference of an elementwise unary operation.
"""
def __init__(
self,
name, # the stringname of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
assert isinstance(self.torch_opinfo, ReductionOpInfo)
inherited = self.torch_opinfo._original_reduction_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
# See https://github.com/pytorch/pytorch/issues/77216
self.validate_view_consistency = False
super().__init__(**ukwargs)
|
ReductionPythonRefInfo
|
python
|
ray-project__ray
|
rllib/evaluation/tests/test_rollout_worker.py
|
{
"start": 2403,
"end": 2715
}
|
class ____(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Discrete(1)
self.action_space = gym.spaces.Discrete(2)
def reset(self, *, seed=None, options=None):
raise ValueError("kaboom")
def step(self, action):
raise ValueError("kaboom")
|
FailOnStepEnv
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/annotated2.py
|
{
"start": 225,
"end": 262
}
|
class ____: ...
def func1(): ...
|
ClassA
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-amount-of-time-to-collect-garbage.py
|
{
"start": 100,
"end": 738
}
|
class ____(object):
def garbageCollection(self, garbage, travel):
"""
:type garbage: List[str]
:type travel: List[int]
:rtype: int
"""
result = 0
lookup = {}
for i in xrange(len(garbage)):
for c in garbage[i]:
lookup[c] = i
if i+1 < len(travel):
travel[i+1] += travel[i]
result += len(garbage[i])
result += sum(travel[v-1] for _, v in lookup.iteritems() if v-1 >= 0)
return result
# Time: O(n * l), l = max(len(g) for g in garbage) = O(10)
# Space: O(1)
# simulation, prefix sum
|
Solution
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 20018,
"end": 20164
}
|
class ____(_NumberParamTypeBase):
name = "float"
_number_class = float
def __repr__(self) -> str:
return "FLOAT"
|
FloatParamType
|
python
|
getsentry__sentry
|
tests/sentry/models/test_project.py
|
{
"start": 36722,
"end": 37593
}
|
class ____(TestCase):
def test_hybrid_cloud_deletion(self) -> None:
proj = self.create_project()
user = self.create_user()
proj_id = proj.id
with assume_test_silo_mode(SiloMode.CONTROL):
UserOption.objects.set_value(user, "cool_key", "Hello!", project_id=proj.id)
with outbox_runner():
proj.delete()
assert not Project.objects.filter(id=proj_id).exists()
# cascade is asynchronous, ensure there is still related search,
with assume_test_silo_mode(SiloMode.CONTROL):
assert UserOption.objects.filter(project_id=proj_id).exists()
with self.tasks():
schedule_hybrid_cloud_foreign_key_jobs_control()
# Ensure they are all now gone.
assert not UserOption.objects.filter(project_id=proj_id).exists()
|
ProjectDeletionTest
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_lookup.py
|
{
"start": 33615,
"end": 37168
}
|
class ____:
def __init__(self, foo: bool | None = None):
self.foo = foo
def test_from_type_can_be_default_or_annotation():
find_any(st.from_type(AnnotatedAndDefault), lambda x: x.foo is None)
find_any(st.from_type(AnnotatedAndDefault), lambda x: isinstance(x.foo, bool))
@pytest.mark.parametrize("t", BUILTIN_TYPES, ids=lambda t: t.__name__)
def test_resolves_builtin_types(t):
with warnings.catch_warnings():
warnings.simplefilter("ignore", SmallSearchSpaceWarning)
assert_simple_property(st.from_type(t), lambda v: isinstance(v, t))
@pytest.mark.parametrize("t", BUILTIN_TYPES, ids=lambda t: t.__name__)
@given(data=st.data())
@settings(max_examples=20)
def test_resolves_forwardrefs_to_builtin_types(t, data):
if t.__name__ == "object" and settings.get_current_profile_name() == "threading":
# from_type(ForwardRef("object")) pulls from register_type_strategy,
# and depending on threading I've seen `st.builds(Bar, st.integers())`
# (from this file) be registered in one iteration and not the next,
# causing Hypothesis to raise FlakyStrategyDefinition.
#
# (I would also expect st.from_type(object) to have this problem, but
# I haven't seen that error under threading, yet).
pytest.skip("ForwardRef('object') is inherently flaky under concurrency")
s = st.from_type(typing.ForwardRef(t.__name__))
v = data.draw(s)
assert isinstance(v, t)
@pytest.mark.parametrize("t", BUILTIN_TYPES, ids=lambda t: t.__name__)
def test_resolves_type_of_builtin_types(t):
assert_simple_property(st.from_type(type[t.__name__]), lambda v: v is t)
@given(
st.from_type(type[typing.Union["str", "int"]])
| st.from_type(_Type[typing.Union["str", "int"]])
)
def test_resolves_type_of_union_of_forwardrefs_to_builtins(x):
assert x in (str, int)
@pytest.mark.parametrize(
"type_",
[
# Old-style `List` because `list[int]() == list()`, so no need for the hint.
getattr(typing, "List", None)[int],
pytest.param(
typing.Optional[int],
marks=pytest.mark.skipif(
sys.version_info >= (3, 14), reason="different error on 3.14+"
),
),
],
)
def test_builds_suggests_from_type(type_):
with pytest.raises(
InvalidArgument, match=re.escape(f"try using from_type({type_!r})")
):
check_can_generate_examples(st.builds(type_))
try:
check_can_generate_examples(st.builds(type_, st.just("has an argument")))
raise AssertionError("Expected strategy to raise an error")
except TypeError as err:
assert not isinstance(err, InvalidArgument)
@pytest.mark.skipif(sys.version_info < (3, 14), reason="different error on 3.14+")
@pytest.mark.parametrize("type_", [typing.Optional[int]])
def test_builds_suggests_from_type_on_construction(type_):
with pytest.raises(
InvalidArgument, match=re.escape(f"Try using from_type({type_!r})")
):
check_can_generate_examples(st.builds(type_))
with pytest.raises(
InvalidArgument, match=re.escape(f"Try using from_type({type_!r})")
):
check_can_generate_examples(st.builds(type_, st.just("has an argument")))
def test_builds_mentions_no_type_check():
@typing.no_type_check
def f(x: int):
pass
msg = "@no_type_check decorator prevented Hypothesis from inferring a strategy"
with pytest.raises(TypeError, match=msg):
check_can_generate_examples(st.builds(f))
|
AnnotatedAndDefault
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py
|
{
"start": 60359,
"end": 64891
}
|
class ____(SageMakerBaseOperator):
"""
Starts a SageMaker pipeline execution.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerStartPipelineOperator`
:param config: The configuration to start the pipeline execution.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param pipeline_name: Name of the pipeline to start.
:param display_name: The name this pipeline execution will have in the UI. Doesn't need to be unique.
:param pipeline_params: Optional parameters for the pipeline.
All parameters supplied need to already be present in the pipeline definition.
:param wait_for_completion: If true, this operator will only complete once the pipeline is complete.
:param check_interval: How long to wait between checks for pipeline status when waiting for completion.
:param waiter_max_attempts: How many times to check the status before failing.
:param verbose: Whether to print steps details when waiting for completion.
Defaults to true, consider turning off for pipelines that have thousands of steps.
:param deferrable: Run operator in the deferrable mode.
:return str: Returns The ARN of the pipeline execution created in Amazon SageMaker.
"""
template_fields: Sequence[str] = aws_template_fields(
"pipeline_name",
"display_name",
"pipeline_params",
)
def __init__(
self,
*,
pipeline_name: str,
display_name: str = "airflow-triggered-execution",
pipeline_params: dict | None = None,
wait_for_completion: bool = False,
check_interval: int = CHECK_INTERVAL_SECOND,
waiter_max_attempts: int = 9999,
verbose: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(config={}, **kwargs)
self.pipeline_name = pipeline_name
self.display_name = display_name
self.pipeline_params = pipeline_params
self.wait_for_completion = wait_for_completion
self.check_interval = check_interval
self.waiter_max_attempts = waiter_max_attempts
self.verbose = verbose
self.deferrable = deferrable
def execute(self, context: Context) -> str:
arn = self.hook.start_pipeline(
pipeline_name=self.pipeline_name,
display_name=self.display_name,
pipeline_params=self.pipeline_params,
)
self.log.info(
"Starting a new execution for pipeline %s, running with ARN %s", self.pipeline_name, arn
)
if self.deferrable:
self.defer(
trigger=SageMakerPipelineTrigger(
waiter_type=SageMakerPipelineTrigger.Type.COMPLETE,
pipeline_execution_arn=arn,
waiter_delay=self.check_interval,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.hook.check_status(
arn,
"PipelineExecutionStatus",
lambda p: self.hook.describe_pipeline_exec(p, self.verbose),
self.check_interval,
non_terminal_states=self.hook.pipeline_non_terminal_states,
max_ingestion_time=self.waiter_max_attempts * self.check_interval,
)
return arn
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Failure during pipeline execution: {validated_event}")
return validated_event["value"]
|
SageMakerStartPipelineOperator
|
python
|
django__django
|
django/contrib/gis/serializers/geojson.py
|
{
"start": 218,
"end": 2732
}
|
class ____(JSONSerializer):
"""
Convert a queryset to GeoJSON, http://geojson.org/
"""
def _init_options(self):
super()._init_options()
self.geometry_field = self.json_kwargs.pop("geometry_field", None)
self.id_field = self.json_kwargs.pop("id_field", None)
self.srid = self.json_kwargs.pop("srid", 4326)
if (
self.selected_fields is not None
and self.geometry_field is not None
and self.geometry_field not in self.selected_fields
):
self.selected_fields = [*self.selected_fields, self.geometry_field]
def start_serialization(self):
self._init_options()
self._cts = {} # cache of CoordTransform's
self.stream.write('{"type": "FeatureCollection", "features": [')
def end_serialization(self):
self.stream.write("]}")
def start_object(self, obj):
super().start_object(obj)
self._geometry = None
if self.geometry_field is None:
# Find the first declared geometry field
for field in obj._meta.fields:
if hasattr(field, "geom_type"):
self.geometry_field = field.name
break
def get_dump_object(self, obj):
data = {
"type": "Feature",
"id": obj.pk if self.id_field is None else getattr(obj, self.id_field),
"properties": self._current,
}
if (
self.selected_fields is None or "pk" in self.selected_fields
) and "pk" not in data["properties"]:
data["properties"]["pk"] = obj._meta.pk.value_to_string(obj)
if self._geometry:
if self._geometry.srid != self.srid:
# If needed, transform the geometry in the srid of the global
# geojson srid.
if self._geometry.srid not in self._cts:
srs = SpatialReference(self.srid)
self._cts[self._geometry.srid] = CoordTransform(
self._geometry.srs, srs
)
self._geometry.transform(self._cts[self._geometry.srid])
data["geometry"] = json.loads(self._geometry.geojson)
else:
data["geometry"] = None
return data
def handle_field(self, obj, field):
if field.name == self.geometry_field:
self._geometry = field.value_from_object(obj)
else:
super().handle_field(obj, field)
|
Serializer
|
python
|
Pylons__pyramid
|
tests/test_paster.py
|
{
"start": 107,
"end": 1435
}
|
class ____(unittest.TestCase):
def _callFUT(self, config_file, section_name, options=None, _loader=None):
import pyramid.paster
old_loader = pyramid.paster.get_config_loader
try:
if _loader is not None:
pyramid.paster.get_config_loader = _loader
return pyramid.paster.get_app(
config_file, section_name, options=options
)
finally:
pyramid.paster.get_config_loader = old_loader
def test_it(self):
app = DummyApp()
loader = DummyLoader(app=app)
result = self._callFUT(
'/foo/bar/myapp.ini', 'myapp', options={'a': 'b'}, _loader=loader
)
self.assertEqual(loader.uri.path, '/foo/bar/myapp.ini')
self.assertEqual(len(loader.calls), 1)
self.assertEqual(loader.calls[0]['op'], 'app')
self.assertEqual(loader.calls[0]['name'], 'myapp')
self.assertEqual(loader.calls[0]['defaults'], {'a': 'b'})
self.assertEqual(result, app)
def test_it_with_dummyapp_requiring_options(self):
options = {'bar': 'baz'}
app = self._callFUT(
os.path.join(here, 'fixtures', 'dummy.ini'),
'myapp',
options=options,
)
self.assertEqual(app.settings['foo'], 'baz')
|
Test_get_app
|
python
|
encode__django-rest-framework
|
tests/test_one_to_one_with_inheritance.py
|
{
"start": 603,
"end": 772
}
|
class ____(serializers.ModelSerializer):
class Meta:
model = ChildAssociatedModel
fields = ['id', 'child_name']
# Tests
|
ChildAssociatedModelSerializer
|
python
|
keon__algorithms
|
algorithms/tree/bst/BSTIterator.py
|
{
"start": 1,
"end": 459
}
|
class ____:
def __init__(self, root):
self.stack = []
while root:
self.stack.append(root)
root = root.left
def has_next(self):
return bool(self.stack)
def next(self):
node = self.stack.pop()
tmp = node
if tmp.right:
tmp = tmp.right
while tmp:
self.stack.append(tmp)
tmp = tmp.left
return node.val
|
BSTIterator
|
python
|
django-import-export__django-import-export
|
tests/scripts/bulk_import.py
|
{
"start": 431,
"end": 5342
}
|
class ____(resources.ModelResource):
class Meta:
model = Book
fields = ("id", "name", "author_email", "price")
use_bulk = True
batch_size = 1000
skip_unchanged = True
# skip_diff = True
# This flag can speed up imports
# Cannot be used when performing updates
# force_init_instance = True
instance_loader_class = CachedInstanceLoader
def profile_duration(fn):
@wraps(fn)
def inner(*args, **kwargs):
# Measure duration
t = time.perf_counter()
fn(*args, **kwargs)
elapsed = time.perf_counter() - t
print(f"Time {elapsed: 0.4}")
return inner
def profile_mem(fn):
@wraps(fn)
def inner(*args, **kwargs):
# Measure memory
mem, retval = memory_usage(
(fn, args, kwargs), retval=True, timeout=200, interval=1e-7
)
print(f"Memory {max(mem) - min(mem)}")
return retval
return inner
@profile_duration
def do_import_duration(resource, dataset):
resource.import_data(dataset)
@profile_mem
def do_import_mem(resource, dataset):
resource.import_data(dataset)
def do_create():
class _BookResource(resources.ModelResource):
class Meta:
model = Book
fields = ("id", "name", "author_email", "price")
use_bulk = True
batch_size = 1000
skip_unchanged = True
skip_diff = True
force_init_instance = True
print("\ndo_create()")
# clearing down existing objects
books = Book.objects.all()
books._raw_delete(books.db)
rows = [("", "Some new book", "email@example.com", "10.25")] * NUM_ROWS
dataset = tablib.Dataset(*rows, headers=["id", "name", "author_email", "price"])
book_resource = _BookResource()
do_import_duration(book_resource, dataset)
do_import_mem(book_resource, dataset)
# Book objects are created once for the 'duration' run,
# and once for the 'memory' run
assert Book.objects.count() == NUM_ROWS * 2
books._raw_delete(books.db)
def do_update():
print("\ndo_update()")
# clearing down existing objects
books = Book.objects.all()
books._raw_delete(books.db)
rows = [("", "Some new book", "email@example.com", "10.25")] * NUM_ROWS
books = [Book(name=r[1], author_email=r[2], price=r[3]) for r in rows]
# run 'update' - there must be existing rows in the DB...
# i.e. so they can be updated
Book.objects.bulk_create(books)
assert NUM_ROWS == Book.objects.count()
# find the ids, so that we can perform the update
all_books = Book.objects.all()
rows = [(b.id, b.name, b.author_email, b.price) for b in all_books]
dataset = tablib.Dataset(*rows, headers=["id", "name", "author_email", "price"])
book_resource = _BookResource()
do_import_duration(book_resource, dataset)
do_import_mem(book_resource, dataset)
assert NUM_ROWS == Book.objects.count()
books = Book.objects.all()
books._raw_delete(books.db)
def do_delete():
class _BookResource(resources.ModelResource):
def for_delete(self, row, instance):
return True
class Meta:
model = Book
fields = ("id", "name", "author_email", "price")
use_bulk = True
batch_size = 1000
skip_diff = True
instance_loader_class = CachedInstanceLoader
print("\ndo_delete()")
# clearing down existing objects
books = Book.objects.all()
books._raw_delete(books.db)
rows = [("", "Some new book", "email@example.com", "10.25")] * NUM_ROWS
books = [Book(name=r[1], author_email=r[2], price=r[3]) for r in rows]
# deletes - there must be existing rows in the DB...
# i.e. so they can be deleted
Book.objects.bulk_create(books)
assert NUM_ROWS == Book.objects.count()
all_books = Book.objects.all()
rows = [(b.id, b.name, b.author_email, b.price) for b in all_books]
dataset = tablib.Dataset(*rows, headers=["id", "name", "author_email", "price"])
book_resource = _BookResource()
do_import_duration(book_resource, dataset)
assert 0 == Book.objects.count()
# recreate rows which have just been deleted
Book.objects.bulk_create(books)
assert NUM_ROWS == Book.objects.count()
all_books = Book.objects.all()
rows = [(b.id, b.name, b.author_email, b.price) for b in all_books]
dataset = tablib.Dataset(*rows, headers=["id", "name", "author_email", "price"])
do_import_mem(book_resource, dataset)
assert 0 == Book.objects.count()
def run(*args):
if len(args) > 0:
arg = args[0].lower()
if arg == "create":
do_create()
if arg == "update":
do_update()
if arg == "delete":
do_delete()
else:
do_create()
do_update()
do_delete()
|
_BookResource
|
python
|
huggingface__transformers
|
tests/models/mllama/test_image_processing_mllama.py
|
{
"start": 1100,
"end": 5466
}
|
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
num_images=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
do_convert_rgb=True,
do_pad=True,
max_image_tiles=4,
):
size = size if size is not None else {"height": 224, "width": 224}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.max_image_tiles = max_image_tiles
self.image_size = image_size
self.num_images = num_images
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_convert_rgb = do_convert_rgb
self.do_pad = do_pad
def prepare_image_processor_dict(self):
return {
"do_convert_rgb": self.do_convert_rgb,
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
"max_image_tiles": self.max_image_tiles,
}
def prepare_image_inputs(
self,
batch_size=None,
min_resolution=None,
max_resolution=None,
num_channels=None,
num_images=None,
size_divisor=None,
equal_resolution=False,
numpify=False,
torchify=False,
):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
One can specify whether the images are of the same resolution or not.
"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
batch_size = batch_size if batch_size is not None else self.batch_size
min_resolution = min_resolution if min_resolution is not None else self.min_resolution
max_resolution = max_resolution if max_resolution is not None else self.max_resolution
num_channels = num_channels if num_channels is not None else self.num_channels
num_images = num_images if num_images is not None else self.num_images
images_list = []
for i in range(batch_size):
images = []
for j in range(num_images):
if equal_resolution:
width = height = max_resolution
else:
# To avoid getting image width/height 0
if size_divisor is not None:
# If `size_divisor` is defined, the image needs to have width/size >= `size_divisor`
min_resolution = max(size_divisor, min_resolution)
width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2)
images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8))
images_list.append(images)
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list]
if torchify:
images_list = [[torch.from_numpy(image) for image in images] for images in images_list]
return images_list
def expected_output_image_shape(self, images):
expected_output_image_shape = (
max(len(images) for images in images),
self.max_image_tiles,
self.num_channels,
self.size["height"],
self.size["width"],
)
return expected_output_image_shape
@require_torch
@require_vision
|
MllamaImageProcessingTester
|
python
|
fabric__fabric
|
tests/task.py
|
{
"start": 1058,
"end": 3072
}
|
class ____:
def accepts_Invoke_level_kwargs(self):
# Arbitrarily selected list of invoke-level kwargs...
def body(c, parts):
"I am a docstring"
pass
# Faux @task()
t = fabric.task(
name="dadbod",
aliases=["heavenly", "check", "shop"],
default=True,
help={"parts": "See: the sum of"},
iterable=["parts"],
)(body)
assert t.body is body
assert t.__doc__ == "I am a docstring"
assert t.name == "dadbod"
assert "heavenly" in t.aliases
assert t.is_default
assert "parts" in t.help
assert "parts" in t.iterable
def returns_Fabric_level_Task_instance(self):
assert isinstance(fabric.task(Mock()), fabric.Task)
def does_not_touch_klass_kwarg_if_explicitly_given(self):
# Otherwise sub-subclassers would be screwed, yea?
class SubFabTask(fabric.Task):
pass
assert isinstance(fabric.task(klass=SubFabTask)(Mock()), SubFabTask)
class hosts_kwarg:
# NOTE: these don't currently test anything besides "the value given is
# attached as .hosts" but they guard against regressions and ensures
# things work as documented, even if Executor is what really cares.
def _run(self, hosts):
@fabric.task(hosts=hosts)
def mytask(c):
pass
assert mytask.hosts == hosts
def values_may_be_connection_first_posarg_strings(self):
self._run(["host1", "user@host2", "host3:2222"])
def values_may_be_Connection_constructor_kwarg_dicts(self):
self._run(
[
{"host": "host1"},
{"host": "host2", "user": "user"},
{"host": "host3", "port": 2222},
]
)
def values_may_be_mixed(self):
self._run([{"host": "host1"}, "user@host2"])
def _dummy(c):
pass
|
task_
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/remainder_test.py
|
{
"start": 841,
"end": 1654
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device, dtype, op_func):
self.dividend = torch.rand(M, N, K, device=device)
self.dividend = (self.dividend * 1000 - 500).to(dtype=dtype)
self.divisor = torch.rand(M, N, K, device=device)
# +1 so we don't divide by zero
self.divisor = (self.divisor * 40 + 1).to(dtype=dtype)
self.inputs = {"dividend": self.dividend, "divisor": self.divisor}
self.op_func = op_func
def forward(self, dividend, divisor):
return self.op_func(dividend, divisor)
op_bench.generate_pt_tests_from_op_list(
remainder_ops_list,
remainder_short_configs + remainder_long_configs,
RemainderOpBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
RemainderOpBenchmark
|
python
|
openai__openai-python
|
src/openai/types/vector_store_create_params.py
|
{
"start": 1574,
"end": 1894
}
|
class ____(TypedDict, total=False):
anchor: Required[Literal["last_active_at"]]
"""Anchor timestamp after which the expiration policy applies.
Supported anchors: `last_active_at`.
"""
days: Required[int]
"""The number of days after the anchor time that the vector store will expire."""
|
ExpiresAfter
|
python
|
joke2k__faker
|
faker/providers/currency/en_AU/__init__.py
|
{
"start": 46,
"end": 279
}
|
class ____(CurrencyProvider):
price_formats = ["#.##", "%#.##", "%##.##", "%,###.##", "%#,###.##"]
def pricetag(self) -> str:
return "$\N{NO-BREAK SPACE}" + self.numerify(self.random_element(self.price_formats))
|
Provider
|
python
|
realpython__materials
|
python-raise-exception/api.py
|
{
"start": 18,
"end": 365
}
|
class ____(Exception):
pass
def call_external_api(url):
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
except requests.RequestException as error:
raise APIError(f"{error}") from None
return data
print(call_external_api("https://api.github.com/events"))
|
APIError
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/axis_artist.py
|
{
"start": 21713,
"end": 38340
}
|
class ____(martist.Artist):
"""
An artist which draws axis (a line along which the n-th axes coord
is constant) line, ticks, tick labels, and axis label.
"""
zorder = 2.5
@property
def LABELPAD(self):
return self.label.get_pad()
@LABELPAD.setter
def LABELPAD(self, v):
self.label.set_pad(v)
def __init__(self, axes,
helper,
offset=None,
axis_direction="bottom",
**kwargs):
"""
Parameters
----------
axes : `mpl_toolkits.axisartist.axislines.Axes`
helper : `~mpl_toolkits.axisartist.axislines.AxisArtistHelper`
"""
# axes is also used to follow the axis attribute (tick color, etc).
super().__init__(**kwargs)
self.axes = axes
self._axis_artist_helper = helper
if offset is None:
offset = (0, 0)
self.offset_transform = ScaledTranslation(
*offset,
Affine2D().scale(1 / 72) # points to inches.
+ self.axes.get_figure(root=False).dpi_scale_trans)
if axis_direction in ["left", "right"]:
self.axis = axes.yaxis
else:
self.axis = axes.xaxis
self._axisline_style = None
self._axis_direction = axis_direction
self._init_line()
self._init_ticks(**kwargs)
self._init_offsetText(axis_direction)
self._init_label()
# axis direction
self._ticklabel_add_angle = 0.
self._axislabel_add_angle = 0.
self.set_axis_direction(axis_direction)
# axis direction
def set_axis_direction(self, axis_direction):
"""
Adjust the direction, text angle, and text alignment of tick labels
and axis labels following the Matplotlib convention for the rectangle
axes.
The *axis_direction* must be one of [left, right, bottom, top].
===================== ========== ========= ========== ==========
Property left bottom right top
===================== ========== ========= ========== ==========
ticklabel direction "-" "+" "+" "-"
axislabel direction "-" "+" "+" "-"
ticklabel angle 90 0 -90 180
ticklabel va center baseline center baseline
ticklabel ha right center right center
axislabel angle 180 0 0 180
axislabel va center top center bottom
axislabel ha right center right center
===================== ========== ========= ========== ==========
Note that the direction "+" and "-" are relative to the direction of
the increasing coordinate. Also, the text angles are actually
relative to (90 + angle of the direction to the ticklabel),
which gives 0 for bottom axis.
Parameters
----------
axis_direction : {"left", "bottom", "right", "top"}
"""
self.major_ticklabels.set_axis_direction(axis_direction)
self.label.set_axis_direction(axis_direction)
self._axis_direction = axis_direction
if axis_direction in ["left", "top"]:
self.set_ticklabel_direction("-")
self.set_axislabel_direction("-")
else:
self.set_ticklabel_direction("+")
self.set_axislabel_direction("+")
def set_ticklabel_direction(self, tick_direction):
r"""
Adjust the direction of the tick labels.
Note that the *tick_direction*\s '+' and '-' are relative to the
direction of the increasing coordinate.
Parameters
----------
tick_direction : {"+", "-"}
"""
self._ticklabel_add_angle = _api.check_getitem(
{"+": 0, "-": 180}, tick_direction=tick_direction)
def invert_ticklabel_direction(self):
self._ticklabel_add_angle = (self._ticklabel_add_angle + 180) % 360
self.major_ticklabels.invert_axis_direction()
self.minor_ticklabels.invert_axis_direction()
def set_axislabel_direction(self, label_direction):
r"""
Adjust the direction of the axis label.
Note that the *label_direction*\s '+' and '-' are relative to the
direction of the increasing coordinate.
Parameters
----------
label_direction : {"+", "-"}
"""
self._axislabel_add_angle = _api.check_getitem(
{"+": 0, "-": 180}, label_direction=label_direction)
def get_transform(self):
return self.axes.transAxes + self.offset_transform
def get_helper(self):
"""
Return axis artist helper instance.
"""
return self._axis_artist_helper
def set_axisline_style(self, axisline_style=None, **kwargs):
"""
Set the axisline style.
The new style is completely defined by the passed attributes. Existing
style attributes are forgotten.
Parameters
----------
axisline_style : str or None
The line style, e.g. '->', optionally followed by a comma-separated
list of attributes. Alternatively, the attributes can be provided
as keywords.
If *None* this returns a string containing the available styles.
Examples
--------
The following two commands are equal:
>>> set_axisline_style("->,size=1.5")
>>> set_axisline_style("->", size=1.5)
"""
if axisline_style is None:
return AxislineStyle.pprint_styles()
if isinstance(axisline_style, AxislineStyle._Base):
self._axisline_style = axisline_style
else:
self._axisline_style = AxislineStyle(axisline_style, **kwargs)
self._init_line()
def get_axisline_style(self):
"""Return the current axisline style."""
return self._axisline_style
def _init_line(self):
"""
Initialize the *line* artist that is responsible to draw the axis line.
"""
tran = (self._axis_artist_helper.get_line_transform(self.axes)
+ self.offset_transform)
axisline_style = self.get_axisline_style()
if axisline_style is None:
self.line = PathPatch(
self._axis_artist_helper.get_line(self.axes),
color=mpl.rcParams['axes.edgecolor'],
fill=False,
linewidth=mpl.rcParams['axes.linewidth'],
capstyle=mpl.rcParams['lines.solid_capstyle'],
joinstyle=mpl.rcParams['lines.solid_joinstyle'],
transform=tran)
else:
self.line = axisline_style(self, transform=tran)
def _draw_line(self, renderer):
self.line.set_path(self._axis_artist_helper.get_line(self.axes))
if self.get_axisline_style() is not None:
self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
self.line.draw(renderer)
def _init_ticks(self, **kwargs):
axis_name = self.axis.axis_name
trans = (self._axis_artist_helper.get_tick_transform(self.axes)
+ self.offset_transform)
self.major_ticks = Ticks(
kwargs.get(
"major_tick_size",
mpl.rcParams[f"{axis_name}tick.major.size"]),
axis=self.axis, transform=trans)
self.minor_ticks = Ticks(
kwargs.get(
"minor_tick_size",
mpl.rcParams[f"{axis_name}tick.minor.size"]),
axis=self.axis, transform=trans)
size = mpl.rcParams[f"{axis_name}tick.labelsize"]
self.major_ticklabels = TickLabels(
axis=self.axis,
axis_direction=self._axis_direction,
figure=self.axes.get_figure(root=False),
transform=trans,
fontsize=size,
pad=kwargs.get(
"major_tick_pad", mpl.rcParams[f"{axis_name}tick.major.pad"]),
)
self.minor_ticklabels = TickLabels(
axis=self.axis,
axis_direction=self._axis_direction,
figure=self.axes.get_figure(root=False),
transform=trans,
fontsize=size,
pad=kwargs.get(
"minor_tick_pad", mpl.rcParams[f"{axis_name}tick.minor.pad"]),
)
def _get_tick_info(self, tick_iter):
"""
Return a pair of:
- list of locs and angles for ticks
- list of locs, angles and labels for ticklabels.
"""
ticks_loc_angle = []
ticklabels_loc_angle_label = []
ticklabel_add_angle = self._ticklabel_add_angle
for loc, angle_normal, angle_tangent, label in tick_iter:
angle_label = angle_tangent - 90 + ticklabel_add_angle
angle_tick = (angle_normal
if 90 <= (angle_label - angle_normal) % 360 <= 270
else angle_normal + 180)
ticks_loc_angle.append([loc, angle_tick])
ticklabels_loc_angle_label.append([loc, angle_label, label])
return ticks_loc_angle, ticklabels_loc_angle_label
def _update_ticks(self, renderer=None):
# set extra pad for major and minor ticklabels: use ticksize of
# majorticks even for minor ticks. not clear what is best.
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
dpi_cor = renderer.points_to_pixels(1.)
if self.major_ticks.get_visible() and self.major_ticks.get_tick_out():
ticklabel_pad = self.major_ticks._ticksize * dpi_cor
self.major_ticklabels._external_pad = ticklabel_pad
self.minor_ticklabels._external_pad = ticklabel_pad
else:
self.major_ticklabels._external_pad = 0
self.minor_ticklabels._external_pad = 0
majortick_iter, minortick_iter = \
self._axis_artist_helper.get_tick_iterators(self.axes)
tick_loc_angle, ticklabel_loc_angle_label = \
self._get_tick_info(majortick_iter)
self.major_ticks.set_locs_angles(tick_loc_angle)
self.major_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
tick_loc_angle, ticklabel_loc_angle_label = \
self._get_tick_info(minortick_iter)
self.minor_ticks.set_locs_angles(tick_loc_angle)
self.minor_ticklabels.set_locs_angles_labels(ticklabel_loc_angle_label)
def _draw_ticks(self, renderer):
self._update_ticks(renderer)
self.major_ticks.draw(renderer)
self.major_ticklabels.draw(renderer)
self.minor_ticks.draw(renderer)
self.minor_ticklabels.draw(renderer)
if (self.major_ticklabels.get_visible()
or self.minor_ticklabels.get_visible()):
self._draw_offsetText(renderer)
_offsetText_pos = dict(left=(0, 1, "bottom", "right"),
right=(1, 1, "bottom", "left"),
bottom=(1, 0, "top", "right"),
top=(1, 1, "bottom", "right"))
def _init_offsetText(self, direction):
x, y, va, ha = self._offsetText_pos[direction]
self.offsetText = mtext.Annotation(
"",
xy=(x, y), xycoords="axes fraction",
xytext=(0, 0), textcoords="offset points",
color=mpl.rcParams['xtick.color'],
horizontalalignment=ha, verticalalignment=va,
)
self.offsetText.set_transform(IdentityTransform())
self.axes._set_artist_props(self.offsetText)
def _update_offsetText(self):
self.offsetText.set_text(self.axis.major.formatter.get_offset())
self.offsetText.set_size(self.major_ticklabels.get_size())
offset = (self.major_ticklabels.get_pad()
+ self.major_ticklabels.get_size()
+ 2)
self.offsetText.xyann = (0, offset)
def _draw_offsetText(self, renderer):
self._update_offsetText()
self.offsetText.draw(renderer)
def _init_label(self, **kwargs):
tr = (self._axis_artist_helper.get_axislabel_transform(self.axes)
+ self.offset_transform)
self.label = AxisLabel(
0, 0, "__from_axes__",
color="auto",
fontsize=kwargs.get("labelsize", mpl.rcParams['axes.labelsize']),
fontweight=mpl.rcParams['axes.labelweight'],
axis=self.axis,
transform=tr,
axis_direction=self._axis_direction,
)
self.label.set_figure(self.axes.get_figure(root=False))
labelpad = kwargs.get("labelpad", 5)
self.label.set_pad(labelpad)
def _update_label(self, renderer):
if not self.label.get_visible():
return
if self._ticklabel_add_angle != self._axislabel_add_angle:
if ((self.major_ticks.get_visible()
and not self.major_ticks.get_tick_out())
or (self.minor_ticks.get_visible()
and not self.major_ticks.get_tick_out())):
axislabel_pad = self.major_ticks._ticksize
else:
axislabel_pad = 0
else:
axislabel_pad = max(self.major_ticklabels._axislabel_pad,
self.minor_ticklabels._axislabel_pad)
self.label._external_pad = axislabel_pad
xy, angle_tangent = \
self._axis_artist_helper.get_axislabel_pos_angle(self.axes)
if xy is None:
return
angle_label = angle_tangent - 90
x, y = xy
self.label._ref_angle = angle_label + self._axislabel_add_angle
self.label.set(x=x, y=y)
def _draw_label(self, renderer):
self._update_label(renderer)
self.label.draw(renderer)
def set_label(self, s):
# docstring inherited
self.label.set_text(s)
def get_tightbbox(self, renderer=None):
if not self.get_visible():
return
self._axis_artist_helper.update_lim(self.axes)
self._update_ticks(renderer)
self._update_label(renderer)
self.line.set_path(self._axis_artist_helper.get_line(self.axes))
if self.get_axisline_style() is not None:
self.line.set_line_mutation_scale(self.major_ticklabels.get_size())
bb = [
*self.major_ticklabels.get_window_extents(renderer),
*self.minor_ticklabels.get_window_extents(renderer),
self.label.get_window_extent(renderer),
self.offsetText.get_window_extent(renderer),
self.line.get_window_extent(renderer),
]
bb = [b for b in bb if b and (b.width != 0 or b.height != 0)]
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return None
@martist.allow_rasterization
def draw(self, renderer):
# docstring inherited
if not self.get_visible():
return
renderer.open_group(__name__, gid=self.get_gid())
self._axis_artist_helper.update_lim(self.axes)
self._draw_ticks(renderer)
self._draw_line(renderer)
self._draw_label(renderer)
renderer.close_group(__name__)
def toggle(self, all=None, ticks=None, ticklabels=None, label=None):
"""
Toggle visibility of ticks, ticklabels, and (axis) label.
To turn all off, ::
axis.toggle(all=False)
To turn all off but ticks on ::
axis.toggle(all=False, ticks=True)
To turn all on but (axis) label off ::
axis.toggle(all=True, label=False)
"""
if all:
_ticks, _ticklabels, _label = True, True, True
elif all is not None:
_ticks, _ticklabels, _label = False, False, False
else:
_ticks, _ticklabels, _label = None, None, None
if ticks is not None:
_ticks = ticks
if ticklabels is not None:
_ticklabels = ticklabels
if label is not None:
_label = label
if _ticks is not None:
self.major_ticks.set_visible(_ticks)
self.minor_ticks.set_visible(_ticks)
if _ticklabels is not None:
self.major_ticklabels.set_visible(_ticklabels)
self.minor_ticklabels.set_visible(_ticklabels)
if _label is not None:
self.label.set_visible(_label)
|
AxisArtist
|
python
|
huggingface__transformers
|
src/transformers/image_utils.py
|
{
"start": 2351,
"end": 2658
}
|
class ____(ExplicitEnum):
COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value
AnnotationType = dict[str, Union[int, str, list[dict]]]
def is_pil_image(img):
return is_vision_available() and isinstance(img, PIL.Image.Image)
|
AnnotionFormat
|
python
|
python__mypy
|
mypyc/irbuild/for_helpers.py
|
{
"start": 20320,
"end": 22389
}
|
class ____:
"""Abstract base class for generating for loops."""
def __init__(
self,
builder: IRBuilder,
index: Lvalue,
body_block: BasicBlock,
loop_exit: BasicBlock,
line: int,
nested: bool,
) -> None:
self.builder = builder
self.index = index
self.body_block = body_block
self.line = line
# Some for loops need a cleanup block that we execute at exit. We
# create a cleanup block if needed. However, if we are generating a for
# loop for a nested iterator, such as "e" in "enumerate(e)", the
# outermost generator should generate the cleanup block -- we don't
# need to do it here.
if self.need_cleanup() and not nested:
# Create a new block to handle cleanup after loop exit.
self.loop_exit = BasicBlock()
else:
# Just use the existing loop exit block.
self.loop_exit = loop_exit
def need_cleanup(self) -> bool:
"""If this returns true, we need post-loop cleanup."""
return False
def add_cleanup(self, exit_block: BasicBlock) -> None:
"""Add post-loop cleanup, if needed."""
if self.need_cleanup():
self.builder.activate_block(self.loop_exit)
self.gen_cleanup()
self.builder.goto(exit_block)
def gen_condition(self) -> None:
"""Generate check for loop exit (e.g. exhaustion of iteration)."""
def begin_body(self) -> None:
"""Generate ops at the beginning of the body (if needed)."""
def gen_step(self) -> None:
"""Generate stepping to the next item (if needed)."""
def gen_cleanup(self) -> None:
"""Generate post-loop cleanup (if needed)."""
def load_len(self, expr: Value | AssignmentTarget) -> Value:
"""A helper to get collection length, used by several subclasses."""
return self.builder.builder.builtin_len(
self.builder.read(expr, self.line), self.line, use_pyssize_t=True
)
|
ForGenerator
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 101975,
"end": 104727
}
|
class ____:
def test_invalid_access_method(self):
with pytest.raises(TypeError):
x509.AccessDescription(
"notanoid", # type:ignore[arg-type]
x509.DNSName("test"),
)
def test_invalid_access_location(self):
with pytest.raises(TypeError):
x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
"invalid", # type:ignore[arg-type]
)
def test_valid_nonstandard_method(self):
ad = x509.AccessDescription(
ObjectIdentifier("2.999.1"),
x509.UniformResourceIdentifier("http://example.com"),
)
assert ad is not None
def test_repr(self):
ad = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
assert repr(ad) == (
"<AccessDescription(access_method=<ObjectIdentifier(oid=1.3.6"
".1.5.5.7.48.1, name=OCSP)>, access_location=<UniformResource"
"Identifier(value='http://ocsp.domain.com')>)>"
)
def test_eq(self):
ad = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
ad2 = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
assert ad == ad2
def test_ne(self):
ad = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
ad2 = x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
ad3 = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://notthesame"),
)
assert ad != ad2
assert ad != ad3
assert ad != object()
def test_hash(self):
ad = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
ad2 = x509.AccessDescription(
AuthorityInformationAccessOID.OCSP,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
ad3 = x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
x509.UniformResourceIdentifier("http://ocsp.domain.com"),
)
assert hash(ad) == hash(ad2)
assert hash(ad) != hash(ad3)
|
TestAccessDescription
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/model_laboratory.py
|
{
"start": 398,
"end": 4035
}
|
class ____:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: list[str] | None = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains: A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names: Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
msg = ( # type: ignore[unreachable]
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
raise ValueError(msg) # noqa: TRY004
if len(chain.input_keys) != 1:
msg = (
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
raise ValueError(msg)
if len(chain.output_keys) != 1:
msg = (
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None and len(names) != len(chains):
msg = "Length of chains does not match length of names."
raise ValueError(msg)
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls,
llms: list[BaseLLM],
prompt: PromptTemplate | None = None,
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms: A list of LLMs to experiment with.
prompt: An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
If a prompt was provided with starting the laboratory, then this text will be
fed into the prompt. If no prompt was provided, then the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
name = self.names[i] if self.names is not None else str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
ModelLaboratory
|
python
|
tensorflow__tensorflow
|
third_party/xla/xla/backends/cpu/testlib/elemental_kernel_emitter_test.py
|
{"start": 1798, "end": 4403}
|
class ____:
op: HloOpcode
np_op: Callable[[np.ndarray, ...], np.ndarray]
input_ranges: tuple[float, float] = (-1.0, 1.0)
decimal_precision: int = 6
# For simple unpacking
def __iter__(self):
return iter(
(self.op, self.np_op, self.input_ranges, self.decimal_precision)
)
def __repr__(self):
return f"{self.op.name}({self.input_ranges})"
@parameterized.product(
op_def=[
ElementalHloOpcodeDef(HloOpcode.sine, np.sin),
ElementalHloOpcodeDef(HloOpcode.cosine, np.cos),
ElementalHloOpcodeDef(HloOpcode.tan, np.tan),
ElementalHloOpcodeDef(HloOpcode.exponential, np.exp),
ElementalHloOpcodeDef(HloOpcode.log, np.log, (0.01, 10.0)),
ElementalHloOpcodeDef(HloOpcode.log_plus_one, np.log1p),
ElementalHloOpcodeDef(HloOpcode.sqrt, np.sqrt),
ElementalHloOpcodeDef(
HloOpcode.rsqrt, lambda x: np.reciprocal(np.sqrt(x))
),
ElementalHloOpcodeDef(HloOpcode.cbrt, np.cbrt),
ElementalHloOpcodeDef(HloOpcode.power, np.pow),
ElementalHloOpcodeDef(HloOpcode.add, np.add),
ElementalHloOpcodeDef(HloOpcode.subtract, np.subtract),
ElementalHloOpcodeDef(HloOpcode.multiply, np.multiply),
ElementalHloOpcodeDef(HloOpcode.divide, np.divide),
ElementalHloOpcodeDef(HloOpcode.maximum, np.maximum),
ElementalHloOpcodeDef(HloOpcode.minimum, np.minimum),
ElementalHloOpcodeDef(HloOpcode.sign, np.sign),
ElementalHloOpcodeDef(HloOpcode.negate, np.negative),
ElementalHloOpcodeDef(HloOpcode.is_finite, np.isfinite, (-_inf, _inf)),
ElementalHloOpcodeDef(HloOpcode.ceil, np.ceil, (-10.0, 10.0)),
ElementalHloOpcodeDef(HloOpcode.floor, np.floor, (-5.0, 5.0)),
ElementalHloOpcodeDef(HloOpcode.tanh, np.tanh),
ElementalHloOpcodeDef(HloOpcode.atan2, np.arctan2),
ElementalHloOpcodeDef(HloOpcode.erf, np_erf),
ElementalHloOpcodeDef(HloOpcode.exponential_minus_one, np.expm1),
# TODO(willfroom): Update to use better inputs for the following.
ElementalHloOpcodeDef(HloOpcode.clamp, np.clip),
# TODO(willfroom): Add complex ops.
# ElementalHloOpcodeDef(HloOpcode.complex, np.complex),
# ElementalHloOpcodeDef(HloOpcode.real, np.real),
# ElementalHloOpcodeDef(HloOpcode.imag, np.imag),
# TODO(willfroom): go through ElementalIrEmitter interface and ensure
# that all ops are implemented.
# ...
],
shape=[(4,), (4, 3), (4, 3, 10)],
dtype=[np.dtype(np.float32), np.dtype(np.float64)],
)
|
ElementalHloOpcodeDef
|
python
|
huggingface__transformers
|
src/transformers/models/nanochat/modular_nanochat.py
|
{"start": 5333, "end": 7918}
|
class ____(LlamaModel):
def __init__(self, config: NanoChatConfig):
super().__init__(config)
self.norm = NanoChatRMSNorm(eps=config.rms_norm_eps)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
hidden_states = self.norm(hidden_states) # Additional norm before the layers
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
|
NanoChatModel
|
python
|
sphinx-doc__sphinx
|
sphinx/addnodes.py
|
{"start": 1463, "end": 1577}
|
class ____:
"""A node which does not support smart-quotes."""
support_smartquotes = False
|
not_smartquotable
|
python
|
optuna__optuna
|
optuna/samplers/nsgaii/_crossovers/_uniform.py
|
{"start": 214, "end": 1760}
|
class ____(BaseCrossover):
"""Uniform Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`.
Select each parameter with equal probability from the two parent individuals.
For further information about uniform crossover, please refer to the following paper:
- `Gilbert Syswerda. 1989. Uniform Crossover in Genetic Algorithms.
In Proceedings of the 3rd International Conference on Genetic Algorithms.
Morgan Kaufmann Publishers Inc., San Francisco, CA, USA, 2-9.
<https://www.researchgate.net/publication/201976488_Uniform_Crossover_in_Genetic_Algorithms>`__
Args:
swapping_prob:
Probability of swapping each parameter of the parents during crossover.
"""
n_parents = 2
def __init__(self, swapping_prob: float = 0.5) -> None:
if not (0.0 <= swapping_prob <= 1.0):
raise ValueError("`swapping_prob` must be a float value within the range [0.0, 1.0].")
self._swapping_prob = swapping_prob
def crossover(
self,
parents_params: np.ndarray,
rng: np.random.RandomState,
study: Study,
search_space_bounds: np.ndarray,
) -> np.ndarray:
# https://www.researchgate.net/publication/201976488_Uniform_Crossover_in_Genetic_Algorithms
# Section 1 Introduction
n_params = len(search_space_bounds)
masks = (rng.rand(n_params) >= self._swapping_prob).astype(int)
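        # Each mask entry selects which parent row (0 or 1) the corresponding parameter is taken from.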
child_params = parents_params[masks, range(n_params)]
return child_params
|
UniformCrossover
|
python
|
pytorch__pytorch
|
torch/_export/db/examples/tensor_setattr.py
|
{"start": 42, "end": 337}
|
class ____(torch.nn.Module):
"""
setattr() call onto tensors is not supported.
"""
def forward(self, x, attr):
setattr(x, attr, torch.randn(3, 2))
return x + 4
example_args = (torch.randn(3, 2), "attr")
tags = {"python.builtin"}
model = TensorSetattr()
|
TensorSetattr
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/dependent_of_dev_build/package.py
|
{"start": 218, "end": 559}
|
class ____(Package):
homepage = "example.com"
url = "fake.com"
version("0.0.0", sha256="0123456789abcdef0123456789abcdef")
depends_on("dev-build-test-install")
def install(self, spec, prefix):
with open(prefix.filename, "w", encoding="utf-8") as f:
f.write("This file is installed")
|
DependentOfDevBuild
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_common_errors.py
|
{"start": 5083, "end": 7259}
|
class ____:
"""Test detection of malformed parameter descriptions."""
# Using function-based validation approach
def test_missing_parameter_descriptions(self):
"""Test detection of parameters without descriptions."""
docstring = '''"""Function with missing parameter descriptions.
Args:
param1: # Missing description
param2: Valid description
param3: # Another missing description
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# This might produce warnings but shouldn't break validation
# The function should still be considered valid for RST syntax
assert result.parsing_successful
def test_malformed_type_annotations(self):
"""Test detection of malformed type annotations in parameters."""
docstring = '''"""Function with malformed type annotations.
Args:
param1 (str: Missing closing parenthesis
param2 (int)): Extra closing parenthesis
param3 str): Missing opening parenthesis
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect RST syntax issues from malformed parentheses
assert result.has_warnings() or result.has_errors()
def test_inconsistent_parameter_format(self):
"""Test detection of inconsistent parameter formatting."""
docstring = '''"""Function with inconsistent parameter formatting.
Args:
param1 (str): Formatted with type annotation
param2: No type annotation
param3 (int) - Wrong separator, should use colon
param4 (bool): description with
multiple lines but inconsistent formatting
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should still be valid RST but may have warnings about formatting
assert result.parsing_successful
|
TestParameterDescriptionErrors
|
python
|
getsentry__sentry
|
tests/sentry/pipeline/test_pipeline.py
|
{"start": 1095, "end": 1743}
|
class ____(Pipeline[Never, PipelineSessionStore]):
pipeline_name = "test_pipeline"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.finished = False
self.dispatch_count = 0
@cached_property
def provider(self) -> DummyProvider:
ret = {"dummy": DummyProvider()}[self._provider_key]
ret.set_pipeline(self)
ret.update_config(self.config)
return ret
def get_pipeline_views(self) -> Sequence[PipelineStep]:
return self.provider.get_pipeline_views()
def finish_pipeline(self):
self.finished = True
@control_silo_test
|
DummyPipeline
|
python
|
plotly__plotly.py
|
_plotly_utils/png.py
|
{"start": 42784, "end": 44085}
|
class ____:
"""A PNG image. You can create an :class:`Image` object from
an array of pixels by calling :meth:`png.from_array`. It can be
saved to disk with the :meth:`save` method.
"""
def __init__(self, rows, info):
"""
.. note ::
The constructor is not public. Please do not call it.
"""
self.rows = rows
self.info = info
def save(self, file):
"""Save the image to the named *file*.
See `.write()` if you already have an open file object.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
with open(file, "wb") as fd:
w.write(fd, self.rows)
def write(self, file):
"""Write the image to the open file object.
See `.save()` if you have a filename.
In general, you can only call this method once;
after it has been called the first time the PNG image is written,
the source data will have been streamed, and
cannot be streamed again.
"""
w = Writer(**self.info)
w.write(file, self.rows)
|
Image
|
python
|
cython__cython
|
Cython/Debugger/Tests/test_libcython_in_gdb.py
|
{"start": 906, "end": 1143}
|
class ____(type):
def __init__(self, name, bases, dict):
for func_name, func in dict.items():
if inspect.isfunction(func):
setattr(self, func_name, print_on_call_decorator(func))
|
TraceMethodCallMeta
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_core.py
|
{"start": 4442, "end": 5149}
|
class ____(Benchmark):
params = [
[bool, np.uint8, np.uint64, np.int64, np.float32, np.float64],
[(1_000_000,), (1000, 1000), (100, ), (2, )]
]
param_names = ["dtype", "shape"]
def setup(self, dtype, size):
self.x = np.random.randint(0, 3, size=size).astype(dtype)
self.x_sparse = np.zeros(size).astype(dtype)
self.x_sparse[1] = 1
self.x_sparse[-1] = 1
self.x_dense = np.ones(size).astype(dtype)
def time_nonzero(self, dtype, size):
np.nonzero(self.x)
def time_nonzero_sparse(self, dtype, size):
np.nonzero(self.x_sparse)
def time_nonzero_dense(self, dtype, size):
np.nonzero(self.x_dense)
|
Nonzero
|
python
|
Delgan__loguru
|
loguru/_simple_sinks.py
|
{"start": 3244, "end": 5957}
|
class ____:
"""A sink that handles asynchronous logging operations.
Parameters
----------
function
The async function to execute.
loop
The event loop to use.
error_interceptor
An interceptor for handling errors.
"""
def __init__(self, function, loop, error_interceptor):
self._function = function
self._loop = loop
self._error_interceptor = error_interceptor
self._tasks = weakref.WeakSet()
def write(self, message):
"""Asynchronously write a message.
Parameters
----------
message
The message to write.
"""
try:
loop = self._loop or get_running_loop()
except RuntimeError:
return
coroutine = self._function(message)
task = loop.create_task(coroutine)
def check_exception(future):
if future.cancelled() or future.exception() is None:
return
if not self._error_interceptor.should_catch():
raise future.exception()
self._error_interceptor.print(message.record, exception=future.exception())
task.add_done_callback(check_exception)
self._tasks.add(task)
def stop(self):
"""Cancel all pending tasks."""
for task in self._tasks:
task.cancel()
def tasks_to_complete(self):
"""Return list of tasks that need to be completed.
Returns
-------
list
List of tasks to complete.
"""
# To avoid errors due to "self._tasks" being mutated while iterated, the
# "tasks_to_complete()" method must be protected by the same lock as "write()" (which
# happens to be the handler lock). However, the tasks must not be awaited while the lock is
# acquired as this could lead to a deadlock. Therefore, we first need to collect the tasks
# to complete, then return them so that they can be awaited outside of the lock.
return [self._complete_task(task) for task in self._tasks]
async def _complete_task(self, task):
"""Complete a single task.
Parameters
----------
task
The task to complete.
"""
loop = get_running_loop()
if get_task_loop(task) is not loop:
return
try:
await task
except Exception:
pass # Handled in "check_exception()"
def __getstate__(self):
state = self.__dict__.copy()
state["_tasks"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._tasks = weakref.WeakSet()
|
AsyncSink
|
python
|
spack__spack
|
lib/spack/spack/repo.py
|
{"start": 80367, "end": 80742}
|
class ____(UnknownEntityError):
"""Raised when we encounter an unknown namespace"""
def __init__(self, namespace, name=None):
msg, long_msg = f"Unknown namespace: {namespace}", None
if name == "yaml":
long_msg = f"Did you mean to specify a filename with './{namespace}.{name}'?"
super().__init__(msg, long_msg)
|
UnknownNamespaceError
|
python
|
google__pytype
|
pytype/tests/test_typeguard.py
|
{"start": 111, "end": 789}
|
class ____(test_base.BaseTest):
"""Tests for typing_extensions.TypeGuard."""
def test_typing_extensions(self):
self.Check("""
from typing_extensions import TypeGuard
def is_str_list(val: list[object]) -> TypeGuard[list[str]]:
return all(isinstance(x, str) for x in val)
def f(val: list[object]):
if is_str_list(val):
assert_type(val, list[str])
""")
@test_utils.skipFromPy((3, 10), "3.9- must use typing_extensions")
def test_unsupported_version(self):
self.CheckWithErrors("""
from typing import TypeGuard # not-supported-yet
""")
@test_utils.skipBeforePy((3, 10), "New in 3.10")
|
TypingExtensionsTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/op_def_library_test.py
|
{"start": 55117, "end": 55856}
|
class ____(test_util.TensorFlowTestCase):
def testNoGraph(self):
out = op_def_library.apply_op("Simple", a=3)
self.assertEqual(out.graph, ops.get_default_graph())
def testDefaultGraph(self):
graph = ops.Graph()
with graph.as_default():
out = op_def_library.apply_op("Simple", a=3)
self.assertEqual(out.graph, graph)
def testDifferentGraphFails(self):
with ops.Graph().as_default():
a = op_def_library.apply_op("Simple", a=3)
with ops.Graph().as_default():
b = op_def_library.apply_op("Simple", a=4)
with self.assertRaises(ValueError) as cm:
op_def_library.apply_op("Binary", a=a, b=b)
self.assertIn("must be from the same graph", str(cm.exception))
|
OpDefLibraryGraphTest
|
python
|
facebookresearch__faiss
|
tests/test_binary_hashindex.py
|
{"start": 3490, "end": 5821}
|
class ____(unittest.TestCase):
def test_hash_and_multihash(self):
d = 128
nq = 100
nb = 2000
(_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
index_ref = faiss.IndexBinaryFlat(d)
index_ref.add(xb)
k = 10
Dref, Iref = index_ref.search(xq, k)
nfound = {}
for nh in 0, 1, 3, 5:
for nbit in 4, 7:
if nh == 0:
index = faiss.IndexBinaryHash(d, nbit)
else:
index = faiss.IndexBinaryMultiHash(d, nh, nbit)
index.add(xb)
index.nflip = 2
Dnew, Inew = index.search(xq, k)
nf = 0
for i in range(nq):
ref = Iref[i]
new = Inew[i]
snew = set(new)
# no duplicates
self.assertTrue(len(new) == len(snew))
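                    # count how many of the exact (IndexBinaryFlat) neighbors were recovered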
nf += len(set(ref) & snew)
nfound[(nh, nbit)] = nf
self.assertGreater(nfound[(nh, 4)], nfound[(nh, 7)])
# test serialization
index2 = faiss.deserialize_index_binary(
faiss.serialize_index_binary(index))
D2, I2 = index2.search(xq, k)
np.testing.assert_array_equal(Inew, I2)
np.testing.assert_array_equal(Dnew, D2)
self.assertGreater(3, abs(nfound[(0, 7)] - nfound[(1, 7)]))
self.assertGreater(nfound[(3, 7)], nfound[(1, 7)])
self.assertGreater(nfound[(5, 7)], nfound[(3, 7)])
def subtest_result_order(self, nh):
d = 128
nq = 10
nb = 200
(_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
nbit = 10
if nh == 0:
index = faiss.IndexBinaryHash(d, nbit)
else:
index = faiss.IndexBinaryMultiHash(d, nh, nbit)
index.add(xb)
index.nflip = 5
k = 10
Do, Io = index.search(xq, k)
self.assertTrue(
np.all(Do[:, 1:] >= Do[:, :-1])
)
def test_result_order_binhash(self):
self.subtest_result_order(0)
def test_result_order_multihash(self):
self.subtest_result_order(3)
"""
I suspect this test crashes CircleCI on Linux
# this is an expensive test, so we don't run it by default
|
TestKnn
|
python
|
ray-project__ray
|
python/ray/tune/search/ax/ax_search.py
|
{"start": 992, "end": 15666}
|
class ____(Searcher):
"""Uses `Ax <https://ax.dev/>`_ to optimize hyperparameters.
Ax is a platform for understanding, managing, deploying, and
automating adaptive experiments. Ax provides an easy to use
interface with BoTorch, a flexible, modern library for Bayesian
optimization in PyTorch. More information can be found in https://ax.dev/.
To use this search algorithm, you must install Ax:
.. code-block:: bash
$ pip install ax-platform
Parameters:
space: Parameters in the experiment search space.
Required elements in the dictionaries are: "name" (name of
this parameter, string), "type" (type of the parameter: "range",
"fixed", or "choice", string), "bounds" for range parameters
(list of two values, lower bound first), "values" for choice
parameters (list of values), and "value" for fixed parameters
(single value).
metric: Name of the metric used as objective in this
experiment. This metric must be present in `raw_data` argument
to `log_data`. This metric must also be present in the dict
reported/returned by the Trainable. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute. Defaults to "max".
points_to_evaluate: Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
parameter_constraints: Parameter constraints, such as
"x3 >= x4" or "x3 + x4 >= 2".
outcome_constraints: Outcome constraints of form
"metric_name >= bound", like "m1 <= 3."
ax_client: Optional AxClient instance. If this is set, do
not pass any values to these parameters: `space`, `metric`,
`parameter_constraints`, `outcome_constraints`.
**ax_kwargs: Passed to AxClient instance. Ignored if `AxClient` is not
None.
Tune automatically converts search spaces to Ax's format:
.. code-block:: python
from ray import tune
from ray.tune.search.ax import AxSearch
config = {
"x1": tune.uniform(0.0, 1.0),
"x2": tune.uniform(0.0, 1.0)
}
def easy_objective(config):
for i in range(100):
intermediate_result = config["x1"] + config["x2"] * i
tune.report({"score": intermediate_result})
ax_search = AxSearch()
tuner = tune.Tuner(
easy_objective,
tune_config=tune.TuneConfig(
search_alg=ax_search,
metric="score",
mode="max",
),
param_space=config,
)
tuner.fit()
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray import tune
from ray.tune.search.ax import AxSearch
parameters = [
{"name": "x1", "type": "range", "bounds": [0.0, 1.0]},
{"name": "x2", "type": "range", "bounds": [0.0, 1.0]},
]
def easy_objective(config):
for i in range(100):
intermediate_result = config["x1"] + config["x2"] * i
tune.report({"score": intermediate_result})
ax_search = AxSearch(space=parameters, metric="score", mode="max")
tuner = tune.Tuner(
easy_objective,
tune_config=tune.TuneConfig(
search_alg=ax_search,
),
)
tuner.fit()
"""
def __init__(
self,
space: Optional[Union[Dict, List[Dict]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
parameter_constraints: Optional[List] = None,
outcome_constraints: Optional[List] = None,
ax_client: Optional[AxClient] = None,
**ax_kwargs,
):
assert (
ax is not None
), """Ax must be installed!
You can install AxSearch with the command:
`pip install ax-platform`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(AxSearch, self).__init__(
metric=metric,
mode=mode,
)
self._ax = ax_client
self._ax_kwargs = ax_kwargs or {}
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self))
)
space = self.convert_search_space(space)
self._space = space
self._parameter_constraints = parameter_constraints
self._outcome_constraints = outcome_constraints
self._points_to_evaluate = copy.deepcopy(points_to_evaluate)
self._parameters = []
self._live_trial_mapping = {}
if self._ax or self._space:
self._setup_experiment()
def _setup_experiment(self):
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
if not self._ax:
self._ax = AxClient(**self._ax_kwargs)
try:
exp = self._ax.experiment
has_experiment = True
except ValueError:
has_experiment = False
if not has_experiment:
if not self._space:
raise ValueError(
"You have to create an Ax experiment by calling "
"`AxClient.create_experiment()`, or you should pass an "
"Ax search space as the `space` parameter to `AxSearch`, "
"or pass a `param_space` dict to `tune.Tuner()`."
)
if self._mode not in ["min", "max"]:
raise ValueError(
"Please specify the `mode` argument when initializing "
"the `AxSearch` object or pass it to `tune.TuneConfig()`."
)
self._ax.create_experiment(
parameters=self._space,
objective_name=self._metric,
parameter_constraints=self._parameter_constraints,
outcome_constraints=self._outcome_constraints,
minimize=self._mode != "max",
)
else:
if any(
[
self._space,
self._parameter_constraints,
self._outcome_constraints,
self._mode,
self._metric,
]
):
raise ValueError(
"If you create the Ax experiment yourself, do not pass "
"values for these parameters to `AxSearch`: {}.".format(
[
"space",
"parameter_constraints",
"outcome_constraints",
"mode",
"metric",
]
)
)
exp = self._ax.experiment
# Update mode and metric from experiment if it has been passed
self._mode = "min" if exp.optimization_config.objective.minimize else "max"
self._metric = exp.optimization_config.objective.metric.name
self._parameters = list(exp.parameters)
if self._ax._enforce_sequential_optimization:
logger.warning(
"Detected sequential enforcement. Be sure to use "
"a ConcurrencyLimiter."
)
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], config: Dict, **spec
):
if self._ax:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_experiment()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._ax:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"
)
)
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__, metric=self._metric, mode=self._mode
)
)
if self._points_to_evaluate:
config = self._points_to_evaluate.pop(0)
parameters, trial_index = self._ax.attach_trial(config)
else:
try:
parameters, trial_index = self._ax.get_next_trial()
except (MaxParallelismReachedException, DataRequiredError):
return None
self._live_trial_mapping[trial_id] = trial_index
try:
suggested_config = unflatten_list_dict(parameters)
except AssertionError:
# Fails to unflatten if keys are out of order, which only happens
# if search space includes a list with both constants and
# tunable hyperparameters:
# Ex: "a": [1, tune.uniform(2, 3), 4]
suggested_config = unflatten_list_dict(
{k: parameters[k] for k in sorted(parameters.keys())}
)
return suggested_config
def on_trial_complete(self, trial_id, result=None, error=False):
"""Notification for the completion of trial.
Data of form key value dictionary of metric names and values.
"""
if result:
self._process_result(trial_id, result)
self._live_trial_mapping.pop(trial_id)
def _process_result(self, trial_id, result):
ax_trial_index = self._live_trial_mapping[trial_id]
metrics_to_include = [self._metric] + [
oc.metric.name
for oc in self._ax.experiment.optimization_config.outcome_constraints
]
metric_dict = {}
for key in metrics_to_include:
val = result[key]
if np.isnan(val) or np.isinf(val):
# Don't report trials with NaN metrics to Ax
self._ax.abandon_trial(
trial_index=ax_trial_index,
reason=f"nan/inf metrics reported by {trial_id}",
)
return
metric_dict[key] = (val, None)
self._ax.complete_trial(trial_index=ax_trial_index, raw_data=metric_dict)
@staticmethod
def convert_search_space(spec: Dict):
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to an Ax search space."
)
# Flatten and resolve again after checking for grid search.
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
def resolve_value(par, domain):
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"AxSearch does not support quantization. Dropped quantization."
)
sampler = sampler.sampler
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
return {
"name": par,
"type": "range",
"bounds": [domain.lower, domain.upper],
"value_type": "float",
"log_scale": True,
}
elif isinstance(sampler, Uniform):
return {
"name": par,
"type": "range",
"bounds": [domain.lower, domain.upper],
"value_type": "float",
"log_scale": False,
}
elif isinstance(domain, Integer):
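                # Tune's Integer domain excludes its upper bound, while Ax "range" bounds
                # are inclusive, hence the `domain.upper - 1` below.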
if isinstance(sampler, LogUniform):
return {
"name": par,
"type": "range",
"bounds": [domain.lower, domain.upper - 1],
"value_type": "int",
"log_scale": True,
}
elif isinstance(sampler, Uniform):
return {
"name": par,
"type": "range",
"bounds": [domain.lower, domain.upper - 1],
"value_type": "int",
"log_scale": False,
}
elif isinstance(domain, Categorical):
if isinstance(sampler, Uniform):
return {"name": par, "type": "choice", "values": domain.categories}
raise ValueError(
"AxSearch does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__, type(domain.sampler).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts,
# "a/d/0", "a/d/1" for nested lists (using the index in the list)
fixed_values = [
{"name": "/".join(str(p) for p in path), "type": "fixed", "value": val}
for path, val in resolved_vars
]
resolved_values = [
resolve_value("/".join(str(p) for p in path), domain)
for path, domain in domain_vars
]
return fixed_values + resolved_values
def save(self, checkpoint_path: str):
save_object = self.__dict__
with open(checkpoint_path, "wb") as outputFile:
cloudpickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = cloudpickle.load(inputFile)
self.__dict__.update(save_object)
|
AxSearch
|
python
|
doocs__leetcode
|
solution/1200-1299/1237.Find Positive Integer Solution for a Given Equation/Solution.py
|
{"start": 349, "end": 723}
|
class ____:
def findSolution(self, customfunction: "CustomFunction", z: int) -> List[List[int]]:
ans = []
for x in range(1, z + 1):
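            # For each x, binary-search the smallest y in [1, z] with f(x, y) >= z;
            # f is monotonically increasing in y, so bisect_left applies.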
y = 1 + bisect_left(
range(1, z + 1), z, key=lambda y: customfunction.f(x, y)
)
if customfunction.f(x, y) == z:
ans.append([x, y])
return ans
|
Solution
|
python
|
paramiko__paramiko
|
tests/test_config.py
|
{"start": 34572, "end": 36074}
|
class ____:
# NOTE: this is still a cherry-pick of a few levels of complexity, there's
# no point testing literally all possible combinations.
def test_originalhost_host(self):
result = load_config("match-complex").lookup("target")
assert result["hostname"] == "bogus"
assert result["user"] == "rand"
@patch("paramiko.config.getpass.getuser")
def test_originalhost_localuser(self, getuser):
getuser.return_value = "rando"
result = load_config("match-complex").lookup("remote")
assert result["user"] == "calrissian"
@patch("paramiko.config.getpass.getuser")
def test_everything_but_all(self, getuser):
getuser.return_value = "rando"
result = load_config("match-complex").lookup("www")
assert result["port"] == "7777"
@patch("paramiko.config.getpass.getuser")
def test_everything_but_all_with_some_negated(self, getuser):
getuser.return_value = "rando"
result = load_config("match-complex").lookup("docs")
assert result["port"] == "1234"
def test_negated_canonical(self, socket):
# !canonical in a config that is not canonicalized - does match
result = load_config("match-canonical-no").lookup("specific")
assert result["user"] == "overload"
# !canonical in a config that is canonicalized - does NOT match
result = load_config("match-canonical-yes").lookup("www")
assert result["user"] == "hidden"
|
TestComplexMatching
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/fastapi/multiple_models/tutorial002.py
|
{"start": 411, "end": 1271}
|
class ____(HeroBase):
id: int
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=List[HeroPublic])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
|
HeroPublic
|
python
|
pydantic__pydantic
|
pydantic/v1/types.py
|
{"start": 10640, "end": 13028}
|
class ____(str):
strip_whitespace = False
to_upper = False
to_lower = False
min_length: OptionalInt = None
max_length: OptionalInt = None
curtail_length: OptionalInt = None
regex: Optional[Union[str, Pattern[str]]] = None
strict = False
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
minLength=cls.min_length,
maxLength=cls.max_length,
pattern=cls.regex and cls._get_pattern(cls.regex),
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield strict_str_validator if cls.strict else str_validator
yield constr_strip_whitespace
yield constr_upper
yield constr_lower
yield constr_length_validator
yield cls.validate
@classmethod
def validate(cls, value: Union[str]) -> Union[str]:
if cls.curtail_length and len(value) > cls.curtail_length:
value = value[: cls.curtail_length]
if cls.regex:
if not re.match(cls.regex, value):
raise errors.StrRegexError(pattern=cls._get_pattern(cls.regex))
return value
@staticmethod
def _get_pattern(regex: Union[str, Pattern[str]]) -> str:
return regex if isinstance(regex, str) else regex.pattern
def constr(
*,
strip_whitespace: bool = False,
to_upper: bool = False,
to_lower: bool = False,
strict: bool = False,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
curtail_length: Optional[int] = None,
regex: Optional[str] = None,
) -> Type[str]:
# use kwargs then define conf in a dict to aid with IDE type hinting
namespace = dict(
strip_whitespace=strip_whitespace,
to_upper=to_upper,
to_lower=to_lower,
strict=strict,
min_length=min_length,
max_length=max_length,
curtail_length=curtail_length,
regex=regex and re.compile(regex),
)
return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
if TYPE_CHECKING:
StrictStr = str
else:
class StrictStr(ConstrainedStr):
strict = True
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# This types superclass should be Set[T], but cython chokes on that...
|
ConstrainedStr
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{"start": 661346, "end": 661777}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of FollowUser"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "user")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
user = sgqlc.types.Field("User", graphql_name="user")
"""The user that was followed."""
|
FollowUserPayload
|
python
|
encode__django-rest-framework
|
tests/models.py
|
{"start": 908, "end": 1011}
|
class ____(RESTFrameworkModel):
users = models.ManyToManyField(User)
# ForeignKey
|
BasicModelWithUsers
|
python
|
tensorflow__tensorflow
|
tensorflow/python/eager/remote_cloud_tpu_test.py
|
{"start": 1993, "end": 2997}
|
class ____(absltest.TestCase):
"""Test that we can connect to a real Cloud TPU."""
def test_connect(self):
# Log full diff on failure.
self.maxDiff = None # pylint:disable=invalid-name
self.assertCountEqual(
EXPECTED_DEVICES_PRE_CONNECT,
[device.name for device in config.list_logical_devices()])
resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
)
remote.connect_to_cluster(resolver)
expected_devices = EXPECTED_DEVICES_PRE_CONNECT
for task in range(FLAGS.num_tpu_devices // DEVICES_PER_TASK):
expected_devices.extend([
template.format(task=task)
for template in EXPECTED_NEW_DEVICES_AFTER_CONNECT_TEMPLATES
])
self.assertCountEqual(
expected_devices,
[device.name for device in config.list_logical_devices()])
tpu_cluster_resolver.initialize_tpu_system(resolver)
if __name__ == '__main__':
absltest.main()
|
RemoteCloudTPUTest
|
python
|
getsentry__sentry
|
src/sentry/auth/helper.py
|
{"start": 3400, "end": 26929}
|
class ____:
# SSO auth handler
auth_provider: AuthProvider
provider: Provider
organization: RpcOrganization
request: HttpRequest
identity: Mapping[str, Any]
referrer: str | None = "in-app"
@cached_property
def user(self) -> User | AnonymousUser:
email = self.identity.get("email")
if email:
try:
user = resolve_email_to_user(email)
except AmbiguousUserFromEmail as e:
user = e.users[0]
self.warn_about_ambiguous_email(email, e.users, user)
if user is not None:
return user
return (
User.objects.get(id=self.request.user.id)
if self.request.user.is_authenticated
else self.request.user
)
@staticmethod
def warn_about_ambiguous_email(email: str, users: Collection[User], chosen_user: User) -> None:
with sentry_sdk.isolation_scope() as scope:
scope.set_level("warning")
scope.set_tag("email", email)
scope.set_extra("user_ids", [user.id for user in users])
scope.set_extra("chosen_user", chosen_user.id)
sentry_sdk.capture_message("Handling identity from ambiguous email address")
class _NotCompletedSecurityChecks(Exception):
pass
def _login(self, user: Any) -> None:
metrics.incr(
"sso.login_attempt",
tags={
"provider": self.provider.key,
},
sample_rate=1.0,
skip_internal=False,
)
user_was_logged_in = auth.login(
self.request,
user,
after_2fa=self.request.build_absolute_uri(),
organization_id=self.organization.id,
)
if not user_was_logged_in:
raise self._NotCompletedSecurityChecks()
metrics.incr(
"sso.login_success",
tags={
"provider": self.provider.key,
},
sample_rate=1.0,
skip_internal=False,
)
@staticmethod
def _set_linked_flag(member: RpcOrganizationMember) -> None:
if member.flags.sso__invalid or not member.flags.sso__linked:
member.flags.sso__invalid = False
member.flags.sso__linked = True
organization_service.update_membership_flags(organization_member=member)
def handle_existing_identity(
self,
state: AuthHelperSessionStore,
auth_identity: AuthIdentity,
) -> HttpResponseRedirect:
# TODO(dcramer): this is very similar to attach
now = timezone.now()
auth_identity.update(
data=self.provider.update_identity(
new_data=self.identity.get("data", {}), current_data=auth_identity.data
),
last_verified=now,
last_synced=now,
)
member = organization_service.check_membership_by_id(
organization_id=self.organization.id, user_id=auth_identity.user.id
)
if member is None:
# this is likely the case when someone was removed from the org
# but still has access to rejoin
member = self._handle_new_membership(auth_identity)
else:
self._set_linked_flag(member)
user = auth_identity.user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
data = state.data
subdomain = None
if data:
subdomain = data.get("subdomain") or None
if features.has("system:multi-region"):
subdomain = self.organization.slug
try:
self._login(user)
except self._NotCompletedSecurityChecks:
return HttpResponseRedirect(self._get_login_redirect(subdomain))
state.clear()
if not is_active_superuser(self.request):
# set activeorg to ensure correct redirect upon logging in
auth.set_active_org(self.request, self.organization.slug)
return HttpResponseRedirect(self._get_login_redirect(subdomain))
def _get_login_redirect(self, subdomain: str | None) -> str:
# TODO(domains) Passing this method the organization should let us consolidate and simplify subdomain
# state tracking.
login_redirect_url = auth.get_login_redirect(self.request)
if subdomain is not None:
url_prefix = generate_organization_url(subdomain)
login_redirect_url = absolute_uri(login_redirect_url, url_prefix=url_prefix)
return login_redirect_url
def _handle_membership(
self,
request: HttpRequest,
organization: RpcOrganization,
auth_identity: AuthIdentity,
) -> tuple[User, RpcOrganizationMember]:
user = User.objects.get(id=auth_identity.user_id)
if is_demo_user(user) and not is_demo_org(organization):
sentry_sdk.capture_message(
"Demo user cannot be added to an organization that is not a demo organization.",
level="warning",
extras={
"user_id": user.id,
"organization_id": organization.id,
},
)
raise Exception(
"Demo user cannot be added to an organization that is not a demo organization."
)
# If the user is either currently *pending* invite acceptance (as indicated
# from the invite token and member id in the session) OR an existing invite exists on this
# organization for the email provided by the identity provider.
invite_helper = ApiInviteHelper.from_session_or_email(
request=request, organization_id=organization.id, email=user.email, logger=logger
)
# If we are able to accept an existing invite for the user for this
# organization, do so, otherwise handle new membership
if invite_helper:
if invite_helper.invite_approved:
rpc_om = invite_helper.accept_invite(user)
assert rpc_om
return user, rpc_om
# It's possible the user has an _invite request_ that hasn't been approved yet,
# and is able to join the organization without an invite through the SSO flow.
# In that case, delete the invite request and create a new membership.
invite_helper.handle_invite_not_approved()
flags = RpcOrganizationMemberFlags(sso__linked=True)
# if the org doesn't have the ability to add members then anyone who got added
# this way should be disabled until the org upgrades
if not features.has("organizations:invite-members", organization):
flags.member_limit__restricted = True
# Otherwise create a new membership
om = organization_service.add_organization_member(
organization_id=organization.id,
default_org_role=organization.default_role,
role=organization.default_role,
user_id=user.id,
flags=flags,
)
return user, om
def _handle_new_membership(self, auth_identity: AuthIdentity) -> RpcOrganizationMember:
user, om = self._handle_membership(
request=self.request,
organization=self.organization,
auth_identity=auth_identity,
)
log_service.record_audit_log(
event=AuditLogEvent(
organization_id=self.organization.id,
date_added=timezone.now(),
event_id=audit_log.get_event_id("MEMBER_ADD"),
actor_user_id=user.id,
actor_label=user.username,
ip_address=self.request.META["REMOTE_ADDR"],
target_object_id=om.id,
data=om.get_audit_log_metadata(user.email),
target_user_id=user.id,
)
)
return om
def _get_auth_identity(self, **params: Any) -> AuthIdentity | None:
try:
return AuthIdentity.objects.get(auth_provider_id=self.auth_provider.id, **params)
except AuthIdentity.DoesNotExist:
return None
def handle_attach_identity(self, member: RpcOrganizationMember | None = None) -> AuthIdentity:
"""
Given an already authenticated user, attach or re-attach an identity.
"""
# prioritize identifying by the SSO provider's user ID
with transaction.atomic(router.db_for_write(AuthIdentity)):
auth_identity = self._get_auth_identity(ident=self.identity["id"])
if auth_identity is None:
# otherwise look for an already attached identity
# this can happen if the SSO provider's internal ID changes
auth_identity = self._get_auth_identity(user_id=self.user.id)
if auth_identity is None:
assert self.user.is_authenticated
auth_is_new = True
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user_id=self.user.id,
ident=self.identity["id"],
data=self.identity.get("data", {}),
)
else:
auth_is_new = False
# TODO(dcramer): this might leave the user with duplicate accounts,
# and in that kind of situation its very reasonable that we could
# test email addresses + is_managed to determine if we can auto
# merge
if auth_identity.user_id != self.user.id:
wipe = self._wipe_existing_identity(auth_identity)
else:
wipe = None
now = timezone.now()
auth_identity.update(
user_id=self.user.id,
ident=self.identity["id"],
data=self.provider.update_identity(
new_data=self.identity.get("data", {}), current_data=auth_identity.data
),
last_verified=now,
last_synced=now,
)
logger.info(
"sso.login-pipeline.attach-existing-identity",
extra={
"wipe_result": repr(wipe),
"organization_id": self.organization.id,
"user_id": self.user.id,
"auth_identity_user_id": auth_identity.user.id,
"auth_provider_id": self.auth_provider.id,
"idp_identity_id": self.identity["id"],
"idp_identity_email": self.identity.get("email"),
},
)
if member is None:
member = self._get_organization_member(auth_identity)
self._set_linked_flag(member)
if auth_is_new:
log_service.record_audit_log(
event=AuditLogEvent(
organization_id=self.organization.id,
date_added=timezone.now(),
event_id=audit_log.get_event_id("SSO_IDENTITY_LINK"),
actor_user_id=self.user.id,
actor_label=self.user.username,
ip_address=self.request.META["REMOTE_ADDR"],
target_object_id=auth_identity.id,
target_user_id=self.user.id,
data=auth_identity.get_audit_log_data(),
)
)
messages.add_message(self.request, messages.SUCCESS, OK_LINK_IDENTITY)
return auth_identity
def _wipe_existing_identity(self, auth_identity: AuthIdentity) -> Any:
# it's possible the user has an existing identity, let's wipe it out
# so that the new identifier gets used (other we'll hit a constraint)
# violation since one might exist for (provider, user) as well as
# (provider, ident)
assert self.user.is_authenticated
with outbox_context(transaction.atomic(router.db_for_write(AuthIdentity))):
deletion_result = (
AuthIdentity.objects.exclude(id=auth_identity.id)
.filter(auth_provider=self.auth_provider, user_id=self.user.id)
.delete()
)
for outbox in self.auth_provider.outboxes_for_mark_invalid_sso(auth_identity.user_id):
outbox.save()
return deletion_result
def _get_organization_member(self, auth_identity: AuthIdentity) -> RpcOrganizationMember:
"""
Check to see if the user has a member associated, if not, create a new membership
based on the auth_identity email.
"""
member = organization_service.check_membership_by_id(
organization_id=self.organization.id, user_id=self.user.id
)
if member is None:
return self._handle_new_membership(auth_identity)
return member
def _respond(
self,
template: str,
context: Mapping[str, Any] | None = None,
status: int = 200,
) -> HttpResponse:
default_context = {"organization": self.organization}
if context:
default_context.update(context)
return render_to_response(template, default_context, self.request, status=status)
def _post_login_redirect(self) -> HttpResponseRedirect:
url = auth.get_login_redirect(self.request)
if self.request.POST.get("op") == "newuser":
# add events that we can handle on the front end
provider = self.auth_provider.provider if self.auth_provider else None
params = {
"frontend_events": orjson.dumps(
{"event_name": "Sign Up", "event_label": provider}
).decode()
}
url = add_params_to_url(url, params)
response = HttpResponseRedirect(url)
# Always remove any pending invite cookies, pending invites will have been
# accepted during the SSO flow.
remove_invite_details_from_session(self.request)
return response
def has_verified_account(self, verification_value: dict[str, Any]) -> bool:
return bool(
verification_value["email"] == self.identity["email"]
and verification_value["user_id"] == self.user.id
)
@property
def _logged_in_user(self) -> User | None:
"""The user, if they have authenticated on this session."""
if is_demo_mode_enabled() and is_demo_user(self.request.user):
return None
return self.request.user if self.request.user.is_authenticated else None
@property
def _app_user(self) -> User | None:
"""The user, if they are represented persistently in our app."""
return self.user if isinstance(self.user, User) else None
def _has_usable_password(self) -> bool:
return bool(self._app_user and self._app_user.has_usable_password())
@cached_property
def _login_form(self) -> AuthenticationForm:
return AuthenticationForm(
self.request,
self.request.POST if self.request.POST.get("op") == "login" else None,
initial={"username": self._app_user and self._app_user.username},
)
def _build_confirmation_response(self, is_new_account: bool) -> HttpResponse:
existing_user, template = self._dispatch_to_confirmation(is_new_account)
context = {
"identity": self.identity,
"provider": self.provider_name,
"identity_display_name": self.identity.get("name") or self.identity.get("email"),
"identity_identifier": self.identity.get("email") or self.identity.get("id"),
"existing_user": existing_user,
}
if not self._logged_in_user:
context["login_form"] = self._login_form
return self._respond(f"sentry/{template}.html", context)
def handle_unknown_identity(
self,
state: AuthHelperSessionStore,
) -> HttpResponse:
"""
Flow is activated upon a user logging in to where an AuthIdentity is
not present.
XXX(dcramer): this docstring is out of date
The flow will attempt to answer the following:
- Is there an existing user with the same email address? Should they be
merged?
- Is there an existing user (via authentication) that should be merged?
- Should I create a new user based on this identity?
"""
op = self.request.POST.get("op")
# we don't trust all IDP email verification, so users can also confirm via one time email link
is_account_verified = False
if verification_key := self.request.session.get(SSO_VERIFICATION_KEY):
verification_value = get_verification_value_from_key(verification_key)
if verification_value:
is_account_verified = self.has_verified_account(verification_value)
logger.info(
"sso.login-pipeline.verified-email-existing-session-key",
extra={
"user_id": self.user.id,
"organization_id": self.organization.id,
"has_verification_value": bool(verification_value),
},
)
has_verified_email = self.user.id and cache.get(f"{SSO_VERIFICATION_KEY}:{self.user.id}")
if has_verified_email and not verification_key:
logger.info(
"sso.login-pipeline.verified-email-missing-session-key",
extra={
"user_id": self.user.id,
"organization_id": self.organization.id,
},
)
is_new_account = not self.user.is_authenticated # stateful
if self._app_user and (self.identity.get("email_verified") or is_account_verified):
# we only allow this flow to happen if the existing user has
# membership, otherwise we short circuit because it might be
# an attempt to hijack membership of another organization
membership = organization_service.check_membership_by_id(
user_id=self._app_user.id, organization_id=self.organization.id
)
if membership is not None:
try:
self._login(self.user)
except self._NotCompletedSecurityChecks:
# adding is_account_verified to the check below in order to redirect
# to 2fa when the user migrates their idp but has 2fa enabled,
# otherwise it would stop them from linking their sso provider
if self._has_usable_password() or is_account_verified:
return self._post_login_redirect()
else:
is_new_account = True
else:
# assume they've confirmed they want to attach the identity
op = "confirm"
elif is_account_verified:
op = "confirm"
else:
# force them to create a new account
is_new_account = True
# without a usable password they can't login, so default to a new account
elif not self._has_usable_password():
is_new_account = True
if op == "confirm" and (self.request.user.id == self.user.id) or is_account_verified:
auth_identity = self.handle_attach_identity()
elif op == "newuser":
auth_identity = self.handle_new_user()
elif op == "login" and not self._logged_in_user:
# confirm authentication, login
if self._login_form.is_valid():
# This flow is special. If we are going through a 2FA
# flow here (login returns False) we want to instruct the
# system to return upon completion of the 2fa flow to the
# current URL and continue with the dialog.
#
# If there is no 2fa we don't need to do this and can just
# go on.
try:
self._login(self._login_form.get_user())
except self._NotCompletedSecurityChecks:
return self._post_login_redirect()
else:
auth.log_auth_failure(self.request, self.request.POST.get("username"))
return self._build_confirmation_response(is_new_account)
else:
return self._build_confirmation_response(is_new_account)
user = auth_identity.user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
# XXX(dcramer): this is repeated from above
try:
self._login(user)
except self._NotCompletedSecurityChecks:
return self._post_login_redirect()
state.clear()
if not is_active_superuser(self.request):
auth.set_active_org(self.request, self.organization.slug)
return self._post_login_redirect()
@property
def provider_name(self) -> str:
if self.auth_provider:
return self.auth_provider.provider_name
else:
# A blank character is needed to prevent an HTML span from collapsing
return " "
def _dispatch_to_confirmation(
self, is_new_account: bool
) -> tuple[User | AnonymousUser | None, str]:
if self._logged_in_user:
return self._logged_in_user, "auth-confirm-link"
if self._app_user and not self._has_usable_password():
send_one_time_account_confirm_link(
self._app_user,
self.organization,
self.auth_provider,
self.identity["email"],
self.identity["id"],
)
return self.user, "auth-confirm-account"
self.request.session.set_test_cookie()
return None if is_new_account else self.user, "auth-confirm-identity"
def handle_new_user(self) -> AuthIdentity:
user = User.objects.create(
username=uuid4().hex,
email=self.identity["email"],
name=self.identity.get("name", "")[:200],
)
if settings.TERMS_URL and settings.PRIVACY_URL:
user.update(flags=F("flags").bitor(User.flags.newsletter_consent_prompt))
try:
with transaction.atomic(router.db_for_write(AuthIdentity)):
auth_identity = AuthIdentity.objects.create(
auth_provider=self.auth_provider,
user=user,
ident=self.identity["id"],
data=self.identity.get("data", {}),
)
except IntegrityError:
auth_identity = AuthIdentity.objects.get(
auth_provider_id=self.auth_provider.id, ident=self.identity["id"]
)
auth_identity.update(user=user, data=self.identity.get("data", {}))
user.send_confirm_emails(is_new_user=True)
provider = self.auth_provider.provider if self.auth_provider else None
user_signup.send_robust(
sender=self.handle_new_user,
user=user,
source="sso",
provider=provider,
referrer=self.referrer,
)
self._handle_new_membership(auth_identity)
return auth_identity
|
AuthIdentityHandler
|
python
|
great-expectations__great_expectations
|
tests/expectations/metrics/conftest.py
|
{"start": 466, "end": 606}
|
class ____:
def __init__(self, dialect: Dialect):
self.dialect = dialect
def connect(self) -> None:
pass
|
MockSaEngine
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/cuda/cuda_cpp_scheduling.py
|
{"start": 832, "end": 977}
|
class ____(WhyNoFuse):
def __init__(self, name1: str, name2: str) -> None:
self.name1 = name1
self.name2 = name2
|
WhyNoFuseNames
|
python
|
scipy__scipy
|
scipy/stats/_multivariate.py
|
{"start": 123449, "end": 127084}
|
class ____(multi_rv_frozen):
__class_getitem__ = None
def __init__(self, df, scale, seed=None):
"""Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : {None, int, `numpy.random.Generator`}, optional
If `seed` is None the `numpy.random.Generator` singleton is used.
If `seed` is an int, a new ``Generator`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` instance then that instance is
used.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
self.C = scipy.linalg.cholesky(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(self.C.diagonal()))
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = """\
`n` should be a nonnegative integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = """\
See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
|
invwishart_frozen
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterternary/unselected/_textfont.py
|
{"start": 233, "end": 2618}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary.unselected"
_path_str = "scatterternary.unselected.textfont"
_valid_props = {"color"}
@property
def color(self):
"""
Sets the text font color of unselected points, applied only
when a selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.unselected.Textfont`
color
Sets the text font color of unselected points, applied
only when a selection exists.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.unselected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.unselected.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Textfont
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/io/csv.py
|
{
"start": 5049,
"end": 5569
}
|
class ____(BaseIO):
fname = "__test__.csv"
timeout = 1500
params = [1000, 10000, 100000]
param_names = ["nobs"]
def setup(self, nobs):
d = "2018-11-29"
dt = "2018-11-26 11:18:27.0"
self.data = DataFrame(
{
"dt": [np.datetime64(dt)] * nobs,
"d": [np.datetime64(d)] * nobs,
"r": [np.random.uniform()] * nobs,
}
)
def time_frame(self, nobs):
self.data.to_csv(self.fname)
|
ToCSVDatetimeBig
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/descriptor_props.py
|
{
"start": 32648,
"end": 34541
}
|
class ____(DescriptorProperty[_T]):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(
self, mapper: Mapper[Any]
) -> Type[PropComparator[_T]]:
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if getattr(p, "comparator_factory", None) is not None:
comparator_callable = p.comparator_factory
break
assert comparator_callable is not None
return comparator_callable(p, mapper) # type: ignore
def __init__(self) -> None:
super().__init__()
def warn() -> NoReturn:
raise AttributeError(
"Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s."
% (self.parent, self.key, self.parent)
)
class NoninheritedConcreteProp:
def __set__(s: Any, obj: Any, value: Any) -> NoReturn:
warn()
def __delete__(s: Any, obj: Any) -> NoReturn:
warn()
def __get__(s: Any, obj: Any, owner: Any) -> Any:
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
|
ConcreteInheritedProperty
|
python
|
django__django
|
tests/admin_inlines/admin.py
|
{
"start": 6717,
"end": 6808
}
|
class ____(admin.ModelAdmin):
inlines = [ReadOnlyChapterInline]
|
NovelReadonlyChapterAdmin
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_type_lookup.py
|
{
"start": 7624,
"end": 8834
}
|
class ____:
__init__ = "Hello!"
def test_uninspectable_builds():
with pytest.raises(TypeError, match="object is not callable"):
check_can_generate_examples(st.builds(BrokenClass))
def test_uninspectable_from_type():
with pytest.raises(TypeError, match="object is not callable"):
check_can_generate_examples(st.from_type(BrokenClass))
def _check_instances(t):
# See https://github.com/samuelcolvin/pydantic/discussions/2508
return (
t.__module__ != "typing"
and t.__name__ != "ByteString"
and not t.__module__.startswith("pydantic")
and t.__module__ != "typing_extensions"
)
def maybe_mark(x):
if x.__name__ in "Match Decimal IPv4Address":
marks = xfail_on_crosshair(Why.other, as_marks=True, strict=False)
return pytest.param(x, marks=marks)
return x
@pytest.mark.parametrize(
"typ",
sorted(
(maybe_mark(x) for x in _global_type_lookup if _check_instances(x)),
key=str,
),
)
@given(data=st.data())
def test_can_generate_from_all_registered_types(data, typ):
value = data.draw(st.from_type(typ), label="value")
assert isinstance(value, typ)
T = TypeVar("T")
|
BrokenClass
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_syntax_extensions.py
|
{
"start": 787,
"end": 1054
}
|
class ____(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_select(self, select_stmt):
select_stmt.apply_syntax_extension_point(
lambda existing: [*existing, self],
"post_select",
)
|
PostSelectClause
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol3.py
|
{
"start": 3221,
"end": 3737
}
|
class ____:
x: str = ""
# This should generate an error because named tuple
# attributes are immutable.
p9_1: Proto9 = NT9()
# This should generate an error because frozen dataclass
# attributes are immutable.
p9_2: Proto9 = DCFrozen9()
p9_3: Proto9 = DC9()
# This should generate an error because named tuple
# attributes are immutable.
p10_1: Proto10 = NT9()
# This should generate an error because frozen dataclass
# attributes are immutable.
p10_2: Proto10 = DCFrozen9()
p10_3: Proto10 = DC9()
|
DCFrozen9
|
python
|
streamlit__streamlit
|
lib/streamlit/watcher/event_based_path_watcher.py
|
{
"start": 8600,
"end": 9165
}
|
class ____:
"""Emits notifications when a single path is modified."""
def __init__(
self,
md5: str,
modification_time: float,
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> None:
self.md5 = md5
self.modification_time = modification_time
self.glob_pattern = glob_pattern
self.allow_nonexistent = allow_nonexistent
self.on_changed = Signal()
def __repr__(self) -> str:
return repr_(self)
|
WatchedPath
|
python
|
huggingface__transformers
|
src/transformers/models/bamba/modeling_bamba.py
|
{
"start": 15938,
"end": 19086
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: BambaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
BambaAttention
|
python
|
readthedocs__readthedocs.org
|
readthedocs/organizations/models.py
|
{
"start": 2045,
"end": 9219
}
|
class ____(models.Model):
"""Organization model."""
# Auto fields
pub_date = models.DateTimeField(_("Publication date"), auto_now_add=True)
modified_date = models.DateTimeField(_("Modified date"), auto_now=True)
# Foreign
projects = models.ManyToManyField(
"projects.Project",
verbose_name=_("Projects"),
related_name="organizations",
blank=True,
)
owners = models.ManyToManyField(
User,
verbose_name=_("Owners"),
related_name="owner_organizations",
through="OrganizationOwner",
)
# Local
name = models.CharField(_("Name"), max_length=100)
slug = models.SlugField(
_("Slug"),
max_length=255,
unique=True,
null=False,
blank=False,
)
email = models.EmailField(
_("Email"),
help_text="Best email address for billing related inquiries",
max_length=255,
blank=True,
null=True,
)
description = models.TextField(
_("Description"),
help_text="A short description shown on your profile page",
blank=True,
null=True,
)
url = models.URLField(
_("Home Page"),
help_text="The main website for your organization",
max_length=255,
blank=True,
null=True,
)
never_disable = models.BooleanField(
_("Never disable"),
help_text="Never disable this organization, even if its subscription ends",
# TODO: remove after migration
null=True,
default=False,
)
disabled = models.BooleanField(
_("Disabled"),
help_text="Docs and builds are disabled for this organization",
default=False,
)
artifacts_cleaned = models.BooleanField(
_("Artifacts Cleaned"),
help_text="Artifacts are cleaned out from storage",
default=False,
)
max_concurrent_builds = models.IntegerField(
_("Maximum concurrent builds allowed for this organization"),
null=True,
blank=True,
)
# TODO: This field can be removed, we are now using stripe_customer instead.
stripe_id = models.CharField(
_("Stripe customer ID"),
max_length=100,
blank=True,
null=True,
)
stripe_customer = models.OneToOneField(
"djstripe.Customer",
verbose_name=_("Stripe customer"),
on_delete=models.SET_NULL,
related_name="rtd_organization",
null=True,
blank=True,
)
stripe_subscription = models.OneToOneField(
"djstripe.Subscription",
verbose_name=_("Stripe subscription"),
on_delete=models.SET_NULL,
related_name="rtd_organization",
null=True,
blank=True,
)
notifications = GenericRelation(
Notification,
related_query_name="organization",
content_type_field="attached_to_content_type",
object_id_field="attached_to_id",
)
avatar = models.ImageField(
_("Avatar"),
upload_to=_upload_organization_avatar_to,
storage=_get_user_content_storage,
validators=[FileExtensionValidator(allowed_extensions=["jpg", "jpeg", "png"])],
blank=True,
null=True,
help_text="Avatar for your organization (JPG or PNG format, max 500x500px, 750KB)",
)
# Managers
objects = OrganizationQuerySet.as_manager()
history = ExtraHistoricalRecords()
class Meta:
base_manager_name = "objects"
verbose_name = _("organization")
ordering = ["name"]
get_latest_by = ["-pub_date"]
def __str__(self):
return self.name
def get_stripe_subscription(self):
status_priority = [
# Past due and unpaid should be taken into consideration first,
# as the user needs to pay before they can access the service.
# See https://docs.stripe.com/billing/subscriptions/overview#subscription-statuses.
SubscriptionStatus.unpaid,
SubscriptionStatus.past_due,
SubscriptionStatus.incomplete_expired,
SubscriptionStatus.incomplete,
SubscriptionStatus.active,
SubscriptionStatus.trialing,
]
for status in status_priority:
subscriptions = self.stripe_customer.subscriptions.filter(status=status)
if subscriptions.exists():
if subscriptions.count() > 1:
# NOTE: this should never happen, unless we manually
# created another subscription for the user or if there
# is a bug in our code.
log.exception(
"Organization has more than one subscription with the same status",
organization_slug=self.slug,
subscription_status=status,
)
return subscriptions.order_by("created").last()
# Fall back to the most recently created subscription.
return self.stripe_customer.subscriptions.order_by("created").last()
def get_absolute_url(self):
return reverse("organization_detail", args=(self.slug,))
@property
def users(self):
return AdminPermission.members(self)
@property
def members(self):
return AdminPermission.members(self)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
if self.stripe_customer:
self.stripe_id = self.stripe_customer.id
# If the avatar is being changed, delete the previous one.
try:
previous_avatar = Organization.objects.get(pk=self.pk).avatar
except Organization.DoesNotExist:
previous_avatar = None
if previous_avatar and previous_avatar != self.avatar:
previous_avatar.delete(save=False)
super().save(*args, **kwargs)
def get_stripe_metadata(self):
"""Get metadata for the stripe account."""
return {
"org:id": self.id,
"org:slug": self.slug,
}
def add_member(self, user, team):
"""
Add member to organization team.
user
User to add to organization team
team
Team instance to add user to
"""
member = team.members.filter(pk=user.pk).first()
if not member:
member = TeamMember.objects.create(team=team, member=user)
return member
def get_avatar_url(self):
"""
Get the URL of the organization's avatar.
Use the `avatar` field if it exists, otherwise use
the gravatar from the organization's email.
"""
if self.avatar:
return self.avatar.url
if self.email:
return get_gravatar_url(self.email, size=100)
return settings.GRAVATAR_DEFAULT_IMAGE
def delete(self, *args, **kwargs):
"""Override delete method to clean up related resources."""
# Delete the avatar file.
if self.avatar:
self.avatar.delete(save=False)
super().delete(*args, **kwargs)
|
Organization
|
python
|
python__mypy
|
mypyc/irbuild/nonlocalcontrol.py
|
{
"start": 4856,
"end": 5621
}
|
class ____(NonlocalControl):
"""Abstract nonlocal control that runs some cleanup code."""
def __init__(self, outer: NonlocalControl) -> None:
self.outer = outer
@abstractmethod
def gen_cleanup(self, builder: IRBuilder, line: int) -> None: ...
def gen_break(self, builder: IRBuilder, line: int) -> None:
self.gen_cleanup(builder, line)
self.outer.gen_break(builder, line)
def gen_continue(self, builder: IRBuilder, line: int) -> None:
self.gen_cleanup(builder, line)
self.outer.gen_continue(builder, line)
def gen_return(self, builder: IRBuilder, value: Value, line: int) -> None:
self.gen_cleanup(builder, line)
self.outer.gen_return(builder, value, line)
|
CleanupNonlocalControl
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-mymagic/llama_index/llms/mymagic/base.py
|
{
"start": 410,
"end": 9568
}
|
class ____(LLM):
"""
MyMagicAI LLM.
Examples:
`pip install llama-index-llms-mymagic`
```python
from llama_index.llms.mymagic import MyMagicAI
llm = MyMagicAI(
api_key="your-api-key",
storage_provider="s3", # s3, gcs
bucket_name="your-bucket-name",
list_inputs="your list of inputs if you choose to pass directly",
session="your-session-name", # files should be located in this folder on which batch inference will be run
role_arn="your-role-arn",
system_prompt="your-system-prompt",
region="your-bucket-region",
return_output=False, # Whether you want MyMagic API to return the output json
input_json_file=None, # name of the input file (stored on the bucket)
structured_output=None, # json schema of the output
)
resp = llm.complete(
question="your-question",
model="choose-model", # check models at
max_tokens=5, # number of tokens to generate, default is 10
)
print(resp)
```
"""
base_url_template: str = "https://fastapi.mymagic.ai"
completion_url: str = f"{base_url_template}/v1/completions"
status_url: str = f"{base_url_template}/get_result"
api_key: str = None
list_inputs: Optional[List[str]] = Field(
None,
description="If user chooses to provide list of inputs to the model instead of specifying in a storage bucket.",
)
storage_provider: str = Field(
default=None, description="The storage provider to use."
)
bucket_name: str = Field(
default=None,
description="The bucket name where the data is stored.",
)
session: str = Field(
default=None,
description="The session to use. This is a subfolder in the bucket where your data is located.",
)
role_arn: Optional[str] = Field(
None, description="ARN for role assumption in AWS S3."
)
system_prompt: Optional[str] = Field(
default="Answer the question based only on the given content. Do not give explanations or examples. Do not continue generating more text after the answer.",
description="The system prompt to use.",
)
region: Optional[str] = Field(
"eu-west-2", description="The region the bucket is in. Only used for AWS S3."
)
input_json_file: Optional[str] = Field(
None, description="Should the input be read from a single json file?"
)
structured_output: Optional[Dict[str, Any]] = Field(
None, description="User-defined structure for the response output"
)
model: str = Field(default="mixtral8x7", description="The MyMagicAI model to use.")
max_tokens: int = Field(
default=10, description="The maximum number of tokens to generate."
)
question = Field(default="", description="The user question.")
question_data: Dict[str, Any] = Field(
default_factory=dict, description="The data to send to the MyMagicAI API."
)
return_output: Optional[bool] = Field(
False, description="Whether MyMagic API should return the output json"
)
def __init__(
self,
api_key: str,
storage_provider: Optional[str] = None,
input_json_file: Optional[str] = None,
structured_output: Optional[Dict[str, Any]] = None,
return_output: Optional[bool] = False,
list_inputs: Optional[List[str]] = None,
role_arn: Optional[str] = None,
region: Optional[str] = "eu-west-2",
session: str = None,
bucket_name: Optional[str] = None,
system_prompt: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.return_output = return_output
self.api_key = api_key
self.question_data = {
"list_inputs": list_inputs,
"storage_provider": storage_provider,
"bucket_name": bucket_name,
"session": session,
"role_arn": role_arn,
"system_prompt": system_prompt,
"region": region,
"return_output": return_output,
"input_json_file": input_json_file,
"structured_output": structured_output,
}
@classmethod
def class_name(cls) -> str:
return "MyMagicAI"
async def _submit_question(self, question_data: Dict[str, Any]) -> Dict[str, Any]:
timeout_config = httpx.Timeout(600.0, connect=60.0)
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
async with httpx.AsyncClient(timeout=timeout_config) as client:
resp = await client.post(
self.completion_url,
json=question_data,
headers=headers,
)
resp.raise_for_status()
return resp.json()
async def _get_result(self, task_id: str) -> Dict[str, Any]:
url = f"{self.status_url}/{task_id}"
timeout_config = httpx.Timeout(600.0, connect=60.0)
async with httpx.AsyncClient(timeout=timeout_config) as client:
resp = await client.get(url)
resp.raise_for_status()
return resp.json()
async def acomplete(
self,
question: str,
model: Optional[str] = None,
max_tokens: Optional[int] = None,
poll_interval: float = 1.0,
) -> CompletionResponse:
self.question_data["question"] = question
self.question_data["model"] = model or self.model
self.max_tokens = self.question_data["max_tokens"] = (
max_tokens or self.max_tokens
)
task_response = await self._submit_question(self.question_data)
if self.return_output:
return task_response
task_id = task_response.get("task_id")
while True:
result = await self._get_result(task_id)
if result["status"] != "PENDING":
return result
await asyncio.sleep(poll_interval)
def _submit_question_sync(self, question_data: Dict[str, Any]) -> Dict[str, Any]:
"""Submits a question to the model synchronously."""
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json",
}
resp = requests.post(
self.completion_url,
json=question_data,
headers=headers,
)
resp.raise_for_status()
return resp.json()
def _get_result_sync(self, task_id: str) -> Dict[str, Any]:
"""Polls for the result of a task synchronously."""
url = f"{self.status_url}/{task_id}"
response = requests.get(url, timeout=600.0)
response.raise_for_status()
return response.json()
def complete(
self,
question: str,
model: Optional[str] = None,
max_tokens: Optional[int] = None,
poll_interval: float = 1.0,
) -> CompletionResponse:
self.question_data["question"] = question
self.question_data["model"] = model or self.model
self.max_tokens = self.question_data["max_tokens"] = (
max_tokens or self.max_tokens
)
task_response = self._submit_question_sync(self.question_data)
if self.return_output:
return task_response
task_id = task_response.get("task_id")
while True:
result = self._get_result_sync(task_id)
if result["status"] != "PENDING":
return CompletionResponse(
text=result.get("message", ""),
additional_kwargs={"status": result["status"]},
)
time.sleep(poll_interval)
def stream_complete(self, question: str) -> CompletionResponseGen:
raise NotImplementedError(
"MyMagicAI does not currently support streaming completion."
)
async def achat(self, question: str) -> ChatResponse:
raise NotImplementedError("MyMagicAI does not currently support chat.")
def chat(self, question: str) -> ChatResponse:
raise NotImplementedError("MyMagicAI does not currently support chat.")
async def astream_complete(self, question: str) -> CompletionResponseAsyncGen:
raise NotImplementedError("MyMagicAI does not currently support streaming.")
async def astream_chat(self, question: str) -> ChatResponseAsyncGen:
raise NotImplementedError("MyMagicAI does not currently support streaming.")
def stream_chat(self, question: str) -> ChatResponseGen:
raise NotImplementedError("MyMagicAI does not currently support chat.")
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
num_output=self.max_tokens,
model_name=self.model,
is_chat_model=False,
)
|
MyMagicAI
|
python
|
simonw__datasette
|
datasette/views/special.py
|
{
"start": 20633,
"end": 21673
}
|
class ____(BaseView):
name = "messages_debug"
has_json_alternate = False
async def get(self, request):
await self.ds.ensure_permission(action="view-instance", actor=request.actor)
return await self.render(["messages_debug.html"], request)
async def post(self, request):
await self.ds.ensure_permission(action="view-instance", actor=request.actor)
post = await request.post_vars()
message = post.get("message", "")
message_type = post.get("message_type") or "INFO"
assert message_type in ("INFO", "WARNING", "ERROR", "all")
datasette = self.ds
if message_type == "all":
datasette.add_message(request, message, datasette.INFO)
datasette.add_message(request, message, datasette.WARNING)
datasette.add_message(request, message, datasette.ERROR)
else:
datasette.add_message(request, message, getattr(datasette, message_type))
return Response.redirect(self.ds.urls.instance())
|
MessagesDebugView
|
python
|
conda__conda
|
conda/plugins/types.py
|
{
"start": 6492,
"end": 7045
}
|
class ____(CondaPlugin):
"""
Return type to use when defining a conda pre-command plugin hook.
For details on how this is used, see
:meth:`~conda.plugins.hookspec.CondaSpecs.conda_pre_commands`.
:param name: Pre-command name (e.g., ``custom_plugin_pre_commands``).
:param action: Callable which contains the code to be run.
:param run_for: Represents the command(s) this will be run on (e.g. ``install`` or ``create``).
"""
name: str
action: Callable[[str], None]
run_for: set[str]
@dataclass
|
CondaPreCommand
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/roots/query.py
|
{
"start": 7889,
"end": 52311
}
|
class ____(graphene.ObjectType):
"""The root for all queries to retrieve data from the Dagster instance."""
class Meta:
name = "Query"
version = graphene.Field(
graphene.NonNull(graphene.String),
description="Retrieve the version of Dagster running in the Dagster deployment.",
)
repositoriesOrError = graphene.Field(
graphene.NonNull(GrapheneRepositoriesOrError),
repositorySelector=graphene.Argument(GrapheneRepositorySelector),
description="Retrieve all the repositories.",
)
repositoryOrError = graphene.Field(
graphene.NonNull(GrapheneRepositoryOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
description="Retrieve a repository by its location name and repository name.",
)
workspaceOrError = graphene.Field(
graphene.NonNull(GrapheneWorkspaceOrError),
description="Retrieve the workspace and its locations.",
)
locationStatusesOrError = graphene.Field(
graphene.NonNull(GrapheneWorkspaceLocationStatusEntriesOrError),
description="Retrieve location status for workspace locations.",
)
workspaceLocationEntryOrError = graphene.Field(
GrapheneWorkspaceLocationEntryOrError,
name=graphene.Argument(graphene.NonNull(graphene.String)),
description="Retrieve a workspace entry by name.",
)
pipelineOrError = graphene.Field(
graphene.NonNull(GraphenePipelineOrError),
params=graphene.NonNull(GraphenePipelineSelector),
description="Retrieve a job by its location name, repository name, and job name.",
)
resourcesOrError = graphene.Field(
graphene.NonNull(GrapheneResourcesOrError),
pipelineSelector=graphene.NonNull(GraphenePipelineSelector),
description="Retrieve the list of resources for a given job.",
)
pipelineSnapshotOrError = graphene.Field(
graphene.NonNull(GraphenePipelineSnapshotOrError),
snapshotId=graphene.String(),
activePipelineSelector=graphene.Argument(GraphenePipelineSelector),
description=(
"Retrieve a job snapshot by its id or location name, repository name, and job name."
),
)
graphOrError = graphene.Field(
graphene.NonNull(GrapheneGraphOrError),
selector=graphene.Argument(GrapheneGraphSelector),
description="Retrieve a graph by its location name, repository name, and graph name.",
)
scheduler = graphene.Field(
graphene.NonNull(GrapheneSchedulerOrError),
description="Retrieve the name of the scheduler running in the Dagster deployment.",
)
scheduleOrError = graphene.Field(
graphene.NonNull(GrapheneScheduleOrError),
schedule_selector=graphene.NonNull(GrapheneScheduleSelector),
description="Retrieve a schedule by its location name, repository name, and schedule name.",
)
schedulesOrError = graphene.Field(
graphene.NonNull(GrapheneSchedulesOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
scheduleStatus=graphene.Argument(GrapheneInstigationStatus),
description="Retrieve all the schedules.",
)
topLevelResourceDetailsOrError = graphene.Field(
graphene.NonNull(GrapheneResourceDetailsOrError),
resourceSelector=graphene.NonNull(GrapheneResourceSelector),
description=(
"Retrieve a top level resource by its location name, repository name, and resource"
" name."
),
)
allTopLevelResourceDetailsOrError = graphene.Field(
graphene.NonNull(GrapheneResourceDetailsListOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
description="Retrieve all the top level resources.",
)
utilizedEnvVarsOrError = graphene.Field(
graphene.NonNull(GrapheneEnvVarWithConsumersListOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
description="Retrieve all the utilized environment variables for the given repo.",
)
sensorOrError = graphene.Field(
graphene.NonNull(GrapheneSensorOrError),
sensorSelector=graphene.NonNull(GrapheneSensorSelector),
description="Retrieve a sensor by its location name, repository name, and sensor name.",
)
sensorsOrError = graphene.Field(
graphene.NonNull(GrapheneSensorsOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
sensorStatus=graphene.Argument(GrapheneInstigationStatus),
description="Retrieve all the sensors.",
)
instigationStateOrError = graphene.Field(
graphene.NonNull(GrapheneInstigationStateOrError),
instigationSelector=graphene.NonNull(GrapheneInstigationSelector),
id=graphene.Argument(graphene.String),
description=(
"Retrieve the state for a schedule or sensor by its location name, repository name, and"
" schedule/sensor name."
),
)
instigationStatesOrError = graphene.Field(
graphene.NonNull(GrapheneInstigationStatesOrError),
repositoryID=graphene.NonNull(graphene.String),
description=(
"Retrieve the state for a group of instigators (schedule/sensor) by their containing repository id."
),
)
partitionSetsOrError = graphene.Field(
graphene.NonNull(GraphenePartitionSetsOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
pipelineName=graphene.NonNull(graphene.String),
description=(
"Retrieve the partition sets for a job by its location name, repository name, and job"
" name."
),
)
partitionSetOrError = graphene.Field(
graphene.NonNull(GraphenePartitionSetOrError),
repositorySelector=graphene.NonNull(GrapheneRepositorySelector),
partitionSetName=graphene.String(),
description=(
"Retrieve a partition set by its location name, repository name, and partition set"
" name."
),
)
pipelineRunsOrError = graphene.Field(
graphene.NonNull(GrapheneRunsOrError),
filter=graphene.Argument(GrapheneRunsFilter),
cursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve runs after applying a filter, cursor, and limit.",
)
pipelineRunOrError = graphene.Field(
graphene.NonNull(GrapheneRunOrError),
runId=graphene.NonNull(graphene.ID),
description="Retrieve a run by its run id.",
)
runsOrError = graphene.Field(
graphene.NonNull(GrapheneRunsOrError),
filter=graphene.Argument(GrapheneRunsFilter),
cursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve runs after applying a filter, cursor, and limit.",
)
runOrError = graphene.Field(
graphene.NonNull(GrapheneRunOrError),
runId=graphene.NonNull(graphene.ID),
description="Retrieve a run by its run id.",
)
runsFeedOrError = graphene.Field(
graphene.NonNull(GrapheneRunsFeedConnectionOrError),
limit=graphene.NonNull(graphene.Int),
cursor=graphene.String(),
view=graphene.NonNull(GrapheneRunsFeedView),
filter=graphene.Argument(GrapheneRunsFilter),
description="Retrieve entries for the Runs Feed after applying a filter, cursor and limit.",
)
runsFeedCountOrError = graphene.Field(
graphene.NonNull(GrapheneRunsFeedCountOrError),
view=graphene.NonNull(GrapheneRunsFeedView),
filter=graphene.Argument(GrapheneRunsFilter),
description="Retrieve the number of entries for the Runs Feed after applying a filter.",
)
runTagKeysOrError = graphene.Field(
GrapheneRunTagKeysOrError,
description="Retrieve the distinct tag keys from all runs.",
)
runTagsOrError = graphene.Field(
GrapheneRunTagsOrError,
tagKeys=graphene.Argument(graphene.List(graphene.NonNull(graphene.String))),
valuePrefix=graphene.String(),
limit=graphene.Int(),
description="Retrieve all the distinct key-value tags from all runs.",
)
runIdsOrError = graphene.Field(
graphene.NonNull(GrapheneRunIdsOrError),
filter=graphene.Argument(GrapheneRunsFilter),
cursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve run IDs after applying a filter, cursor, and limit.",
)
runGroupOrError = graphene.Field(
graphene.NonNull(GrapheneRunGroupOrError),
runId=graphene.NonNull(graphene.ID),
description="Retrieve a group of runs with the matching root run id.",
)
isPipelineConfigValid = graphene.Field(
graphene.NonNull(GraphenePipelineConfigValidationResult),
pipeline=graphene.Argument(graphene.NonNull(GraphenePipelineSelector)),
mode=graphene.Argument(graphene.NonNull(graphene.String)),
runConfigData=graphene.Argument(GrapheneRunConfigData),
description="Retrieve whether the run configuration is valid or invalid.",
)
executionPlanOrError = graphene.Field(
graphene.NonNull(GrapheneExecutionPlanOrError),
pipeline=graphene.Argument(graphene.NonNull(GraphenePipelineSelector)),
mode=graphene.Argument(graphene.NonNull(graphene.String)),
runConfigData=graphene.Argument(GrapheneRunConfigData),
description="Retrieve the execution plan for a job and its run configuration.",
)
runConfigSchemaOrError = graphene.Field(
graphene.NonNull(GrapheneRunConfigSchemaOrError),
args={
"selector": graphene.Argument(graphene.NonNull(GraphenePipelineSelector)),
"mode": graphene.Argument(graphene.String),
},
description="Retrieve the run configuration schema for a job.",
)
instance = graphene.Field(
graphene.NonNull(GrapheneInstance),
description="Retrieve the instance configuration for the Dagster deployment.",
)
assetsOrError = graphene.Field(
graphene.NonNull(GrapheneAssetsOrError),
prefix=graphene.List(graphene.NonNull(graphene.String)),
assetKeys=graphene.Argument(graphene.List(graphene.NonNull(GrapheneAssetKeyInput))),
cursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve all assets (both with or without a definition) after providing a list of asset keys, applying a prefix filter, cursor, and limit.",
)
assetRecordsOrError = graphene.Field(
graphene.NonNull(GrapheneAssetRecordsOrError),
prefix=graphene.List(graphene.NonNull(graphene.String)),
cursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve materialized asset records after applying a prefix filter, cursor, and limit.",
)
assetOrError = graphene.Field(
graphene.NonNull(GrapheneAssetOrError),
assetKey=graphene.Argument(graphene.NonNull(GrapheneAssetKeyInput)),
description="Retrieve an asset by asset key.",
)
assetNodes = graphene.Field(
non_null_list(GrapheneAssetNode),
group=graphene.Argument(GrapheneAssetGroupSelector),
pipeline=graphene.Argument(GraphenePipelineSelector),
assetKeys=graphene.Argument(graphene.List(graphene.NonNull(GrapheneAssetKeyInput))),
loadMaterializations=graphene.Boolean(default_value=False),
description=(
"Retrieve asset nodes (assets with a definition) after applying a filter on asset group, job, and asset keys."
),
)
assetNodeOrError = graphene.Field(
graphene.NonNull(GrapheneAssetNodeOrError),
assetKey=graphene.Argument(graphene.NonNull(GrapheneAssetKeyInput)),
description="Retrieve an asset node by asset key.",
)
assetNodeAdditionalRequiredKeys = graphene.Field(
non_null_list(GrapheneAssetKey),
assetKeys=graphene.Argument(non_null_list(GrapheneAssetKeyInput)),
description="Retrieve a list of additional asset keys that must be materialized with the provided selection (due to @multi_assets with can_subset=False constraints.)",
)
assetNodeDefinitionCollisions = graphene.Field(
non_null_list(GrapheneAssetNodeDefinitionCollision),
assetKeys=graphene.Argument(non_null_list(GrapheneAssetKeyInput)),
description=(
"Retrieve a list of asset keys where two or more repos provide an asset definition."
" Note: Assets should "
)
+ "not be defined in more than one repository - this query is used to present warnings and"
" errors in the Dagster UI.",
)
partitionBackfillOrError = graphene.Field(
graphene.NonNull(GraphenePartitionBackfillOrError),
backfillId=graphene.Argument(graphene.NonNull(graphene.String)),
description="Retrieve a backfill by backfill id.",
)
assetBackfillPreview = graphene.Field(
non_null_list(GrapheneAssetPartitions),
params=graphene.Argument(graphene.NonNull(GrapheneAssetBackfillPreviewParams)),
description="Fetch the partitions that would be targeted by a backfill, given the root partitions targeted.",
)
partitionBackfillsOrError = graphene.Field(
graphene.NonNull(GraphenePartitionBackfillsOrError),
status=graphene.Argument(GrapheneBulkActionStatus),
cursor=graphene.String(),
limit=graphene.Int(),
filters=graphene.Argument(GrapheneBulkActionsFilter),
description="Retrieve backfills after applying a status filter, cursor, and limit.",
)
permissions = graphene.Field(
non_null_list(GraphenePermission),
description="Retrieve the set of permissions for the Dagster deployment.",
)
canBulkTerminate = graphene.Field(
graphene.NonNull(graphene.Boolean),
description="Returns whether the user has permission to terminate runs in the deployment",
)
assetsLatestInfo = graphene.Field(
non_null_list(GrapheneAssetLatestInfo),
assetKeys=graphene.Argument(non_null_list(GrapheneAssetKeyInput)),
description="Retrieve the latest materializations for a set of assets by asset keys.",
)
logsForRun = graphene.Field(
graphene.NonNull(GrapheneEventConnectionOrError),
runId=graphene.NonNull(graphene.ID),
afterCursor=graphene.String(),
limit=graphene.Int(),
description="Retrieve event logs after applying a run id filter, cursor, and limit.",
)
capturedLogsMetadata = graphene.Field(
graphene.NonNull(GrapheneCapturedLogsMetadata),
logKey=graphene.Argument(non_null_list(graphene.String)),
description="Retrieve the captured log metadata for a given log key.",
)
capturedLogs = graphene.Field(
graphene.NonNull(GrapheneCapturedLogs),
logKey=graphene.Argument(non_null_list(graphene.String)),
cursor=graphene.Argument(graphene.String),
limit=graphene.Argument(graphene.Int),
description="Captured logs are the stdout/stderr logs for a given log key",
)
shouldShowNux = graphene.Field(
graphene.NonNull(graphene.Boolean),
description="Whether or not the NUX should be shown to the user",
)
test = graphene.Field(
GrapheneTestFields,
description="Provides fields for testing behavior",
)
autoMaterializeAssetEvaluationsOrError = graphene.Field(
GrapheneAutoMaterializeAssetEvaluationRecordsOrError,
assetKey=graphene.Argument(graphene.NonNull(GrapheneAssetKeyInput)),
limit=graphene.Argument(graphene.NonNull(graphene.Int)),
cursor=graphene.Argument(graphene.String),
description="Retrieve the auto materialization evaluation records for an asset.",
)
truePartitionsForAutomationConditionEvaluationNode = graphene.Field(
non_null_list(graphene.String),
assetKey=graphene.Argument(GrapheneAssetKeyInput),
evaluationId=graphene.Argument(graphene.NonNull(graphene.ID)),
nodeUniqueId=graphene.Argument(graphene.String),
description="Retrieve the partition keys which were true for a specific automation condition evaluation node.",
)
autoMaterializeEvaluationsForEvaluationId = graphene.Field(
GrapheneAutoMaterializeAssetEvaluationRecordsOrError,
evaluationId=graphene.Argument(graphene.NonNull(graphene.ID)),
description=(
"Retrieve the auto materialization evaluation records for a given evaluation ID."
),
)
assetConditionEvaluationForPartition = graphene.Field(
GrapheneAssetConditionEvaluation,
assetKey=graphene.Argument(GrapheneAssetKeyInput),
evaluationId=graphene.Argument(graphene.NonNull(graphene.ID)),
partition=graphene.Argument(graphene.NonNull(graphene.String)),
description="Retrieve the condition evaluation for an asset and partition.",
)
assetConditionEvaluationRecordsOrError = graphene.Field(
GrapheneAssetConditionEvaluationRecordsOrError,
assetKey=graphene.Argument(GrapheneAssetKeyInput),
assetCheckKey=graphene.Argument(GrapheneAssetCheckHandleInput, required=False),
limit=graphene.Argument(graphene.NonNull(graphene.Int)),
cursor=graphene.Argument(graphene.String),
description="Retrieve the condition evaluation records for an asset.",
)
assetConditionEvaluationsForEvaluationId = graphene.Field(
GrapheneAssetConditionEvaluationRecordsOrError,
evaluationId=graphene.Argument(graphene.NonNull(graphene.ID)),
description=("Retrieve the condition evaluation records for a given evaluation ID."),
)
autoMaterializeTicks = graphene.Field(
non_null_list(GrapheneInstigationTick),
dayRange=graphene.Int(),
dayOffset=graphene.Int(),
limit=graphene.Int(),
cursor=graphene.String(),
statuses=graphene.List(graphene.NonNull(GrapheneInstigationTickStatus)),
beforeTimestamp=graphene.Float(),
afterTimestamp=graphene.Float(),
description="Fetch the history of auto-materialization ticks",
)
assetCheckExecutions = graphene.Field(
non_null_list(GrapheneAssetCheckExecution),
assetKey=graphene.Argument(graphene.NonNull(GrapheneAssetKeyInput)),
checkName=graphene.Argument(graphene.NonNull(graphene.String)),
limit=graphene.NonNull(graphene.Int),
cursor=graphene.String(),
description="Retrieve the executions for a given asset check.",
)
latestDefsStateInfo = graphene.Field(
GrapheneDefsStateInfo,
description="Retrieve the latest available DefsStateInfo for the current workspace.",
)
@capture_error
def resolve_repositoriesOrError(
self,
graphene_info: ResolveInfo,
repositorySelector: Optional[GrapheneRepositorySelector] = None,
):
if repositorySelector:
return GrapheneRepositoryConnection(
nodes=[
fetch_repository(
graphene_info,
RepositorySelector.from_graphql_input(repositorySelector),
)
]
)
return fetch_repositories(graphene_info)
@capture_error
def resolve_repositoryOrError(
self, graphene_info: ResolveInfo, repositorySelector: GrapheneRepositorySelector
):
return fetch_repository(
graphene_info, RepositorySelector.from_graphql_input(repositorySelector)
)
@capture_error
def resolve_workspaceOrError(self, graphene_info: ResolveInfo):
return fetch_workspace(graphene_info.context)
@capture_error
def resolve_workspaceLocationEntryOrError(self, graphene_info: ResolveInfo, name: str):
return fetch_location_entry(graphene_info.context, name)
@capture_error
def resolve_locationStatusesOrError(self, graphene_info: ResolveInfo):
return fetch_location_statuses(graphene_info.context)
@capture_error
async def resolve_pipelineSnapshotOrError(
self,
graphene_info: ResolveInfo,
snapshotId: Optional[str] = None,
activePipelineSelector: Optional[GraphenePipelineSelector] = None,
):
if activePipelineSelector:
job_selector = pipeline_selector_from_graphql(activePipelineSelector)
if snapshotId:
return await get_job_snapshot_or_error_from_snap_or_selector(
graphene_info, job_selector, snapshotId
)
return await get_job_snapshot_or_error_from_job_selector(graphene_info, job_selector)
elif snapshotId:
return get_job_snapshot_or_error_from_snapshot_id(graphene_info, snapshotId)
else:
raise DagsterInvariantViolationError(
"Must pass snapshotId or activePipelineSelector",
)
@capture_error
def resolve_graphOrError(
self,
graphene_info: ResolveInfo,
selector: Optional[GrapheneGraphSelector] = None,
):
if selector is None:
raise DagsterInvariantViolationError(
"Must pass graph selector",
)
graph_selector = graph_selector_from_graphql(selector)
return get_graph_or_error(graphene_info, graph_selector)
def resolve_version(self, graphene_info: ResolveInfo):
return graphene_info.context.version
@capture_error
def resolve_scheduler(self, graphene_info: ResolveInfo):
return get_scheduler_or_error(graphene_info)
@capture_error
def resolve_scheduleOrError(
self, graphene_info: ResolveInfo, schedule_selector: GrapheneScheduleSelector
):
return get_schedule_or_error(
graphene_info, ScheduleSelector.from_graphql_input(schedule_selector)
)
@capture_error
def resolve_schedulesOrError(
self,
graphene_info: ResolveInfo,
repositorySelector: GrapheneRepositorySelector,
scheduleStatus: Optional[GrapheneInstigationStatus] = None,
):
if scheduleStatus == GrapheneInstigationStatus.RUNNING:
instigator_statuses = {
InstigatorStatus.RUNNING,
InstigatorStatus.DECLARED_IN_CODE,
}
elif scheduleStatus == GrapheneInstigationStatus.STOPPED:
instigator_statuses = {InstigatorStatus.STOPPED}
else:
instigator_statuses = None
return get_schedules_or_error(
graphene_info,
RepositorySelector.from_graphql_input(repositorySelector),
instigator_statuses,
)
@capture_error
def resolve_topLevelResourceDetailsOrError(self, graphene_info: ResolveInfo, resourceSelector):
return get_resource_or_error(
graphene_info, ResourceSelector.from_graphql_input(resourceSelector)
)
def resolve_allTopLevelResourceDetailsOrError(self, graphene_info: ResolveInfo, **kwargs):
return get_top_level_resources_or_error(
graphene_info,
RepositorySelector.from_graphql_input(kwargs.get("repositorySelector")),
)
@capture_error
def resolve_utilizedEnvVarsOrError(self, graphene_info: ResolveInfo, **kwargs):
return get_utilized_env_vars_or_error(
graphene_info,
RepositorySelector.from_graphql_input(kwargs.get("repositorySelector")),
)
@capture_error
def resolve_sensorOrError(
self, graphene_info: ResolveInfo, sensorSelector: GrapheneRepositorySelector
):
return get_sensor_or_error(graphene_info, SensorSelector.from_graphql_input(sensorSelector))
@capture_error
def resolve_sensorsOrError(
self,
graphene_info,
repositorySelector: GrapheneRepositorySelector,
sensorStatus: Optional[GrapheneInstigationStatus] = None,
):
if sensorStatus == GrapheneInstigationStatus.RUNNING:
instigator_statuses = {
InstigatorStatus.RUNNING,
InstigatorStatus.DECLARED_IN_CODE,
}
elif sensorStatus == GrapheneInstigationStatus.STOPPED:
instigator_statuses = {InstigatorStatus.STOPPED}
else:
instigator_statuses = None
return get_sensors_or_error(
graphene_info,
RepositorySelector.from_graphql_input(repositorySelector),
instigator_statuses,
)
@capture_error
def resolve_instigationStateOrError(
self,
graphene_info: ResolveInfo,
*,
instigationSelector: GrapheneInstigationSelector,
id: Optional[str] = None,
):
return get_instigator_state_by_selector(
graphene_info,
InstigatorSelector.from_graphql_input(instigationSelector),
CompoundID.from_string(id) if id else None,
)
@capture_error
def resolve_instigationStatesOrError(
self,
graphene_info: ResolveInfo,
repositoryID: str,
):
return get_instigation_states_by_repository_id(
graphene_info,
CompoundID.from_string(repositoryID),
)
@capture_error
async def resolve_pipelineOrError(
self, graphene_info: ResolveInfo, params: GraphenePipelineSelector
):
return GraphenePipeline(
await get_remote_job_or_raise(graphene_info, pipeline_selector_from_graphql(params))
)
@capture_error
def resolve_resourcesOrError(
self, graphene_info: ResolveInfo, pipelineSelector: GraphenePipelineSelector
) -> Sequence[GrapheneResource]:
from dagster_graphql.schema.errors import GraphenePipelineNotFoundError
job_selector = pipeline_selector_from_graphql(pipelineSelector)
if not graphene_info.context.has_job(job_selector):
raise UserFacingGraphQLError(GraphenePipelineNotFoundError(selector=job_selector))
check.invariant(
not job_selector.is_subset_selection,
"resourcesOrError only accepts non-subsetted selectors",
)
def _get_config_type(key: str):
return graphene_info.context.get_config_type(job_selector, key)
return GrapheneResourceConnection(
resources=[
GrapheneResource(_get_config_type, resource_snap)
for resource_snap in graphene_info.context.get_resources(job_selector)
]
)
def resolve_pipelineRunsOrError(
self,
_graphene_info: ResolveInfo,
filter: Optional[GrapheneRunsFilter] = None, # noqa: A002
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
selector = filter.to_selector() if filter is not None else None
return GrapheneRuns(
filters=selector,
cursor=cursor,
limit=limit,
)
async def resolve_pipelineRunOrError(self, graphene_info: ResolveInfo, runId: graphene.ID):
return await gen_run_by_id(graphene_info, runId)
def resolve_runsOrError(
self,
_graphene_info: ResolveInfo,
filter: Optional[GrapheneRunsFilter] = None, # noqa: A002
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
selector = filter.to_selector() if filter is not None else None
return GrapheneRuns(
filters=selector,
cursor=cursor,
limit=limit,
)
def resolve_runIdsOrError(
self,
_graphene_info: ResolveInfo,
filter: Optional[GrapheneRunsFilter] = None, # noqa: A002
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
selector = filter.to_selector() if filter is not None else None
return GrapheneRunIds(
filters=selector,
cursor=cursor,
limit=limit,
)
async def resolve_runOrError(self, graphene_info: ResolveInfo, runId):
return await gen_run_by_id(graphene_info, runId)
def resolve_runsFeedOrError(
self,
graphene_info: ResolveInfo,
limit: int,
view: GrapheneRunsFeedView,
cursor: Optional[str] = None,
filter: Optional[GrapheneRunsFilter] = None, # noqa: A002
):
selector = filter.to_selector() if filter is not None else None
return get_runs_feed_entries(
graphene_info=graphene_info, cursor=cursor, limit=limit, filters=selector, view=view
)
def resolve_runsFeedCountOrError(
self,
graphene_info: ResolveInfo,
view: GrapheneRunsFeedView,
filter: Optional[GrapheneRunsFilter] = None, # noqa: A002
):
selector = filter.to_selector() if filter is not None else None
return GrapheneRunsFeedCount(
get_runs_feed_count(
graphene_info,
selector,
view=view,
)
)
@capture_error
def resolve_partitionSetsOrError(
self,
graphene_info: ResolveInfo,
repositorySelector: RepositorySelector,
pipelineName: str,
):
return get_partition_sets_or_error(
graphene_info,
RepositorySelector.from_graphql_input(repositorySelector),
pipelineName,
)
@capture_error
def resolve_partitionSetOrError(
self,
graphene_info: ResolveInfo,
repositorySelector: RepositorySelector,
partitionSetName: Optional[str] = None,
):
return get_partition_set(
graphene_info,
RepositorySelector.from_graphql_input(repositorySelector),
# partitionSetName should prob be required
partitionSetName, # type: ignore
)
@capture_error
def resolve_runTagKeysOrError(self, graphene_info: ResolveInfo):
return get_run_tag_keys(graphene_info)
@capture_error
def resolve_runTagsOrError(
self,
graphene_info: ResolveInfo,
tagKeys: list[str],
valuePrefix: Optional[str] = None,
limit: Optional[int] = None,
):
return get_run_tags(graphene_info, tagKeys, valuePrefix, limit)
@capture_error
def resolve_runGroupOrError(self, graphene_info: ResolveInfo, runId):
return get_run_group(graphene_info, runId)
@capture_error
async def resolve_isPipelineConfigValid(
self,
graphene_info: ResolveInfo,
pipeline: GraphenePipelineSelector,
mode: str,
runConfigData: Optional[Any] = None, # custom scalar (GrapheneRunConfigData)
):
return await validate_pipeline_config(
graphene_info,
pipeline_selector_from_graphql(pipeline),
parse_run_config_input(runConfigData or {}, raise_on_error=False),
)
@capture_error
async def resolve_executionPlanOrError(
self,
graphene_info: ResolveInfo,
pipeline: GraphenePipelineSelector,
mode: str,
runConfigData: Optional[Any] = None, # custom scalar (GrapheneRunConfigData)
):
return await get_execution_plan(
graphene_info,
pipeline_selector_from_graphql(pipeline),
parse_run_config_input(runConfigData or {}, raise_on_error=True), # type: ignore # (possible str)
)
@capture_error
async def resolve_runConfigSchemaOrError(
self,
graphene_info: ResolveInfo,
selector: GraphenePipelineSelector,
mode: Optional[str] = None,
):
return await resolve_run_config_schema_or_error(
graphene_info, pipeline_selector_from_graphql(selector), mode
)
def resolve_instance(self, graphene_info: ResolveInfo):
return GrapheneInstance(graphene_info.context.instance)
def resolve_assetNodes(
self,
graphene_info: ResolveInfo,
loadMaterializations: bool,
group: Optional[GrapheneAssetGroupSelector] = None,
pipeline: Optional[GraphenePipelineSelector] = None,
assetKeys: Optional[Sequence[GrapheneAssetKeyInput]] = None,
) -> Sequence[GrapheneAssetNode]:
if assetKeys == []:
return []
elif not assetKeys:
use_all_asset_keys = True
resolved_asset_keys = None
else:
use_all_asset_keys = False
resolved_asset_keys = set(
AssetKey.from_graphql_input(asset_key_input) for asset_key_input in assetKeys or []
)
repo = None
if group is not None:
group_name = group.groupName
repo_sel = RepositorySelector.from_graphql_input(group)
repo_loc_entry = graphene_info.context.get_location_entry(repo_sel.location_name)
repo_loc = repo_loc_entry.code_location if repo_loc_entry else None
if not repo_loc or not repo_loc.has_repository(repo_sel.repository_name):
return []
repo = repo_loc.get_repository(repo_sel.repository_name)
remote_nodes = [
remote_node
for remote_node in repo.asset_graph.asset_nodes
if remote_node.group_name == group_name
]
elif pipeline is not None:
selector = pipeline_selector_from_graphql(pipeline)
remote_nodes = graphene_info.context.get_assets_in_job(selector)
else:
if not use_all_asset_keys and resolved_asset_keys:
remote_nodes = [
graphene_info.context.asset_graph.get(asset_key)
for asset_key in resolved_asset_keys
if graphene_info.context.asset_graph.has(asset_key)
]
else:
remote_nodes = [
remote_node for remote_node in graphene_info.context.asset_graph.asset_nodes
]
# Filter down to requested asset keys
results = [
remote_node
for remote_node in remote_nodes
if use_all_asset_keys or remote_node.key in check.not_none(resolved_asset_keys)
]
if not results:
return []
final_keys = [node.key for node in results]
AssetRecord.prepare(graphene_info.context, final_keys)
def load_asset_graph() -> RemoteAssetGraph:
if repo is not None:
return repo.asset_graph
else:
return graphene_info.context.asset_graph
nodes = [
GrapheneAssetNode(
remote_node=remote_node,
)
for remote_node in results
]
return sorted(nodes, key=lambda node: node.id)
def resolve_assetNodeOrError(self, graphene_info: ResolveInfo, assetKey: GrapheneAssetKeyInput):
asset_key_input = cast("Mapping[str, Sequence[str]]", assetKey)
return get_asset_node(graphene_info, AssetKey.from_graphql_input(asset_key_input))
@capture_error
def resolve_assetsOrError(
self,
graphene_info: ResolveInfo,
prefix: Optional[Sequence[str]] = None,
assetKeys: Optional[Sequence[GrapheneAssetKeyInput]] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
return get_assets(
graphene_info,
prefix=prefix,
cursor=cursor,
limit=limit,
asset_keys=[
AssetKey.from_graphql_input(asset_key_input) for asset_key_input in assetKeys
]
if assetKeys is not None
else None,
)
@capture_error
def resolve_assetRecordsOrError(
self,
graphene_info: ResolveInfo,
prefix: Optional[Sequence[str]] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
):
return get_asset_records(
graphene_info,
prefix=prefix,
cursor=cursor,
limit=limit,
)
def resolve_assetOrError(self, graphene_info: ResolveInfo, assetKey: GrapheneAssetKeyInput):
return get_asset(AssetKey.from_graphql_input(assetKey))
def resolve_assetNodeAdditionalRequiredKeys(
self,
graphene_info: ResolveInfo,
assetKeys: Sequence[GrapheneAssetKeyInput],
):
assert assetKeys is not None
raw_asset_keys = cast("Sequence[Mapping[str, Sequence[str]]]", assetKeys)
asset_keys = set(AssetKey.from_graphql_input(asset_key) for asset_key in raw_asset_keys)
return get_additional_required_keys(graphene_info, asset_keys)
def resolve_assetNodeDefinitionCollisions(
self,
graphene_info: ResolveInfo,
assetKeys: Sequence[GrapheneAssetKeyInput],
):
assert assetKeys is not None
raw_asset_keys = cast("Sequence[Mapping[str, Sequence[str]]]", assetKeys)
asset_keys = set(AssetKey.from_graphql_input(asset_key) for asset_key in raw_asset_keys)
return get_asset_node_definition_collisions(graphene_info, asset_keys)
@capture_error
def resolve_partitionBackfillOrError(self, graphene_info: ResolveInfo, backfillId: str):
return get_backfill(graphene_info, backfillId)
@capture_error
def resolve_partitionBackfillsOrError(
self,
graphene_info: ResolveInfo,
status: Optional[GrapheneBulkActionStatus] = None,
cursor: Optional[str] = None,
limit: Optional[int] = None,
filters: Optional[GrapheneBulkActionsFilter] = None,
):
return get_backfills(
graphene_info,
status=BulkActionStatus.from_graphql_input(status) if status else None,
cursor=cursor,
limit=limit,
filters=filters.to_selector() if filters else None,
)
def resolve_assetBackfillPreview(
self, graphene_info: ResolveInfo, params: GrapheneAssetBackfillPreviewParams
) -> Sequence[GrapheneAssetPartitions]:
return get_asset_backfill_preview(graphene_info, params)
def resolve_permissions(self, graphene_info: ResolveInfo):
permissions = graphene_info.context.permissions
return [GraphenePermission(permission, value) for permission, value in permissions.items()]
def resolve_canBulkTerminate(self, graphene_info: ResolveInfo) -> bool:
return graphene_info.context.has_permission(Permissions.TERMINATE_PIPELINE_EXECUTION)
def resolve_assetsLatestInfo(
self,
graphene_info: ResolveInfo,
assetKeys: Sequence[GrapheneAssetKeyInput],
):
asset_keys = set(AssetKey.from_graphql_input(asset_key) for asset_key in assetKeys)
return get_assets_latest_info(graphene_info, asset_keys)
@capture_error
def resolve_logsForRun(
self,
graphene_info: ResolveInfo,
runId: str,
afterCursor: Optional[str] = None,
limit: Optional[int] = None,
):
return get_logs_for_run(graphene_info, runId, afterCursor, limit)
def resolve_capturedLogsMetadata(
self, graphene_info: ResolveInfo, logKey: Sequence[str]
) -> GrapheneCapturedLogsMetadata:
return get_captured_log_metadata(graphene_info, logKey)
def resolve_capturedLogs(
self,
graphene_info: ResolveInfo,
logKey: Sequence[str],
cursor: Optional[str] = None,
limit: Optional[int] = None,
) -> GrapheneCapturedLogs:
log_data = get_compute_log_manager(graphene_info).get_log_data(
logKey, cursor=cursor, max_bytes=limit
)
return from_captured_log_data(log_data)
def resolve_shouldShowNux(self, graphene_info):
return graphene_info.context.instance.nux_enabled and not get_has_seen_nux()
def resolve_test(self, _):
return GrapheneTestFields()
def resolve_autoMaterializeAssetEvaluationsOrError(
self,
graphene_info: ResolveInfo,
assetKey: GrapheneAssetKeyInput,
limit: int,
cursor: Optional[str] = None,
):
asset_key = AssetKey.from_graphql_input(assetKey)
return fetch_auto_materialize_asset_evaluations(
graphene_info=graphene_info,
asset_key=asset_key,
cursor=cursor,
limit=limit,
)
def resolve_autoMaterializeEvaluationsForEvaluationId(
self,
graphene_info: ResolveInfo,
evaluationId: int,
):
return fetch_auto_materialize_asset_evaluations_for_evaluation_id(
graphene_info=graphene_info, evaluation_id=evaluationId
)
def resolve_assetConditionEvaluationForPartition(
self,
graphene_info: ResolveInfo,
assetKey: Optional[GrapheneAssetKeyInput],
evaluationId: str,
partition: str,
):
return fetch_asset_condition_evaluation_record_for_partition(
graphene_info=graphene_info,
graphene_asset_key=assetKey,
evaluation_id=int(evaluationId),
partition_key=partition,
)
def resolve_assetConditionEvaluationRecordsOrError(
self,
graphene_info: ResolveInfo,
assetKey: Optional[GrapheneAssetKeyInput],
limit: int,
cursor: Optional[str] = None,
assetCheckKey: Optional[GrapheneAssetCheckHandleInput] = None,
):
return fetch_asset_condition_evaluation_records_for_asset_key(
graphene_info=graphene_info,
graphene_entity_key=check.not_none(assetKey or assetCheckKey),
cursor=cursor,
limit=limit,
)
def resolve_truePartitionsForAutomationConditionEvaluationNode(
self,
graphene_info: ResolveInfo,
assetKey: Optional[GrapheneAssetKeyInput],
evaluationId: str,
nodeUniqueId: str,
):
return fetch_true_partitions_for_evaluation_node(
graphene_info=graphene_info,
graphene_entity_key=assetKey,
evaluation_id=int(evaluationId),
node_unique_id=nodeUniqueId,
)
def resolve_assetConditionEvaluationsForEvaluationId(
self, graphene_info: ResolveInfo, evaluationId: int
):
return fetch_asset_condition_evaluation_records_for_evaluation_id(
graphene_info=graphene_info, evaluation_id=evaluationId
)
def resolve_autoMaterializeTicks(
self,
graphene_info,
dayRange=None,
dayOffset=None,
limit=None,
cursor=None,
statuses=None,
beforeTimestamp=None,
afterTimestamp=None,
):
# Only valid for ticks from before auto-materialize was moved to be powered by multiple
# sensors
from dagster._daemon.asset_daemon import (
_PRE_SENSOR_AUTO_MATERIALIZE_ORIGIN_ID,
_PRE_SENSOR_AUTO_MATERIALIZE_SELECTOR_ID,
)
return get_instigation_ticks(
graphene_info=graphene_info,
instigator_type=InstigatorType.AUTO_MATERIALIZE,
instigator_origin_id=_PRE_SENSOR_AUTO_MATERIALIZE_ORIGIN_ID,
selector_id=_PRE_SENSOR_AUTO_MATERIALIZE_SELECTOR_ID,
batch_loader=None,
dayRange=dayRange,
dayOffset=dayOffset,
limit=limit,
cursor=cursor,
status_strings=statuses,
before=beforeTimestamp,
after=afterTimestamp,
)
def resolve_assetCheckExecutions(
self,
graphene_info: ResolveInfo,
assetKey: GrapheneAssetKeyInput,
checkName: str,
limit: int,
cursor: Optional[str] = None,
):
return fetch_asset_check_executions(
graphene_info.context,
asset_check_key=AssetCheckKey(
asset_key=AssetKey.from_graphql_input(assetKey), name=checkName
),
limit=limit,
cursor=cursor,
)
def resolve_latestDefsStateInfo(self, graphene_info: ResolveInfo):
defs_state_storage = graphene_info.context.instance.defs_state_storage
latest_info = (
defs_state_storage.get_latest_defs_state_info() if defs_state_storage else None
)
return GrapheneDefsStateInfo(latest_info) if latest_info else None
|
GrapheneQuery
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pynvml/pynvml.py
|
{
"start": 64158,
"end": 64335
}
|
class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('bar1Size', c_ulonglong),
]
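# Version constant for the v1 form of this BAR1 info structure.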
VgpuTypeBar1Info_v1 = 0x1000010
|
c_nvmlVgpuTypeBar1Info_v1_t
|
python
|
apache__airflow
|
airflow-core/src/airflow/executors/workloads.py
|
{
"start": 2786,
"end": 2979
}
|
class ____(BaseModel):
"""Schema for Callback with minimal required fields needed for Executors and Task SDK."""
id: uuid.UUID
fetch_type: CallbackFetchMethod
data: dict
|
Callback
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/event.py
|
{
"start": 13962,
"end": 18511
}
|
class ____(EventSerializer):
"""
Applies formatting to SQL queries in the serialized event.
"""
def __init__(self) -> None:
super().__init__()
self.formatted_sql_cache: dict[str, str] = {}
def get_attrs(self, item_list, user, **kwargs):
is_public = kwargs.pop("is_public", False)
return super().get_attrs(item_list, user, is_public=is_public)
# Various checks to ensure that we don't spend too much time formatting
def _should_skip_formatting(self, query: str):
if (
not query
or len(self.formatted_sql_cache) >= MAX_SQL_FORMAT_OPS
or len(query) > MAX_SQL_FORMAT_LENGTH
):
return True
return False
def _remove_doublequotes(self, message: str) -> str:
return SQL_DOUBLEQUOTES_REGEX.sub(r"\1", message)
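# Format a SQL string with sqlparse, memoizing results so repeated queries are only formatted once.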
def _format_sql_query(self, message: str) -> str | None:
formatted = self.formatted_sql_cache.get(message, None)
if formatted is not None:
return formatted
if self._should_skip_formatting(message):
return message
formatted = sqlparse.format(message, reindent=True, wrap_after=80)
if formatted != message:
formatted = self._remove_doublequotes(formatted)
self.formatted_sql_cache[message] = formatted
return formatted
def _format_breadcrumb_messages(
self, event_data: EventSerializerResponse, event: Event | GroupEvent, user: User
) -> EventSerializerResponse:
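# Locate the breadcrumbs entry and apply SQL formatting to messages whose category is in FORMATTED_BREADCRUMB_CATEGORIES.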
try:
breadcrumbs = next(
filter(lambda entry: entry["type"] == "breadcrumbs", event_data.get("entries", ())),
None,
)
if not breadcrumbs:
return event_data
for breadcrumb_item in breadcrumbs.get("data", {}).get("values", ()):
breadcrumb_message = breadcrumb_item.get("message")
breadcrumb_category = breadcrumb_item.get("category")
if breadcrumb_category in FORMATTED_BREADCRUMB_CATEGORIES and breadcrumb_message:
breadcrumb_item["messageFormat"] = "sql"
breadcrumb_item["messageRaw"] = breadcrumb_message
breadcrumb_item["message"] = self._format_sql_query(breadcrumb_message)
return event_data
except Exception as exc:
sentry_sdk.capture_exception(exc)
return event_data
def _get_release_info(
self, user, event, include_full_release_data: bool
) -> GroupEventReleaseSerializerResponse | None:
version = event.get_tag("sentry:release")
if not version:
return None
try:
release = Release.objects.get(
projects=event.project,
organization_id=event.project.organization_id,
version=version,
)
except Release.DoesNotExist:
return {"version": version}
if include_full_release_data:
return serialize(release, user)
else:
return serialize(release, user, GroupEventReleaseSerializer())
def _format_db_spans(
self, event_data: EventSerializerResponse, event: Event | GroupEvent, user: User
) -> EventSerializerResponse:
try:
spans = next(
filter(lambda entry: entry["type"] == "spans", event_data.get("entries", ())),
None,
)
if not spans:
return event_data
for span in spans.get("data", ()):
span_description = span.get("description")
if span.get("op") in FORMATTED_SPAN_OPS and span_description:
span["description"] = self._format_sql_query(span_description)
return event_data
except Exception as exc:
sentry_sdk.capture_exception(exc)
return event_data
def serialize(self, obj, attrs, user, **kwargs) -> SqlFormatEventSerializerResponse:
include_full_release_data = kwargs.pop("include_full_release_data", False)
result = super().serialize(obj, attrs, user, **kwargs)
with sentry_sdk.start_span(op="serialize", name="Format SQL"):
result = self._format_breadcrumb_messages(result, obj, user)
result = self._format_db_spans(result, obj, user)
release_info = self._get_release_info(user, obj, include_full_release_data)
return {**result, "release": release_info}
|
SqlFormatEventSerializer
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airlift/dagster_airlift/core/components/airflow_instance/component.py
|
{
"start": 4221,
"end": 4332
}
|
class ____(BaseModel):
name: str
auth_type: Literal["basic_auth", "mwaa"]
|
AirflowInstanceScaffolderParams
|
python
|
openai__openai-python
|
src/openai/resources/chat/completions/completions.py
|
{
"start": 2592,
"end": 81258
}
|
class ____(SyncAPIResource):
@cached_property
def messages(self) -> Messages:
return Messages(self._client)
@cached_property
def with_raw_response(self) -> CompletionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return CompletionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> CompletionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return CompletionsWithStreamingResponse(self)
def parse(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
response_format: type[ResponseFormatT] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ParsedChatCompletion[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
& returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
You can pass a pydantic model to this method and it will automatically convert the model
into a JSON schema, send it to the API and parse the response content back into the given model.
This method will also automatically parse `function` tool calls if:
- You use the `openai.pydantic_function_tool()` helper method
- You mark your tool schema with `"strict": True`
Example usage:
```py
from typing import List

from pydantic import BaseModel
from openai import OpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = OpenAI()
completion = client.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "You are a helpful math tutor."},
{"role": "user", "content": "solve 8x + 31 = 2"},
],
response_format=MathResponse,
)
message = completion.choices[0].message
if message.parsed:
print(message.parsed.steps)
print("answer: ", message.parsed.final_answer)
```
"""
chat_completion_tools = _validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "chat.completions.parse",
**(extra_headers or {}),
}
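# Parse the raw ChatCompletion returned by the API into a ParsedChatCompletion using the requested response_format.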
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
return _parse_chat_completion(
response_format=response_format,
chat_completion=raw_completion,
input_tools=chat_completion_tools,
)
return self._post(
"/chat/completions",
body=maybe_transform(
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
"safety_identifier": safety_identifier,
"seed": seed,
"service_tier": service_tier,
"stop": stop,
"store": store,
"stream": False,
"stream_options": stream_options,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
"verbosity": verbosity,
"web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
stream=False,
)
@overload
def create(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: completion_create_params.ResponseFormat | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""
**Starting a new project?** We recommend trying
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
advantage of the latest OpenAI platform features. Compare
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
---
Creates a model response for the given chat conversation. Learn more in the
[text generation](https://platform.openai.com/docs/guides/text-generation),
[vision](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio) guides.
Parameter support can differ depending on the model used to generate the
response, particularly for newer reasoning models. Parameters that are only
supported for reasoning models are noted below. For the current state of
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
types (modalities) are supported, like
[text](https://platform.openai.com/docs/guides/text-generation),
[images](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio).
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
audio: Parameters for audio output. Required when audio output is requested with
`modalities: ["audio"]`.
[Learn more](https://platform.openai.com/docs/guides/audio).
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
function_call: Deprecated in favor of `tool_choice`.
Controls which (if any) function is called by the model.
`none` means the model will not call a function and instead generates a message.
`auto` means the model can pick between generating a message or calling a
function.
Specifying a particular function via `{"name": "my_function"}` forces the model
to call that function.
`none` is the default when no functions are present. `auto` is the default if
functions are present.
functions: Deprecated in favor of `tools`.
A list of functions the model may generate JSON inputs for.
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
bias is added to the logits generated by the model prior to sampling. The exact
effect will vary per model, but values between -1 and 1 should decrease or
increase likelihood of selection; values like -100 or 100 should result in a ban
or exclusive selection of the relevant token.
logprobs: Whether to return log probabilities of the output tokens or not. If true,
returns the log probabilities of each output token returned in the `content` of
`message`.
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
completion. This value can be used to control
[costs](https://openai.com/api/pricing/) for text generated via API.
This value is now deprecated in favor of `max_completion_tokens`, and is not
compatible with
[o-series models](https://platform.openai.com/docs/guides/reasoning).
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default:
`["text"]`
The `gpt-4o-audio-preview` model can also be used to
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: An object specifying the format that the model must output.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
sample deterministically, such that repeated requests with the same `seed` and
parameters should return the same result. Determinism is not guaranteed, and you
should refer to the `system_fingerprint` response parameter to monitor changes
in the backend.
service_tier: Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
store: Whether or not to store the output of this chat completion request for use in
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
for more information, along with the
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
guide for more information on how to handle the streaming events.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tool and instead generates a message. `auto` means the model can
pick between generating a message or calling one or more tools. `required` means
the model must call one or more tools. Specifying a particular tool via
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
`none` is the default when no tools are present. `auto` is the default if tools
are present.
tools: A list of tools the model may call. You can provide either
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
or [function tools](https://platform.openai.com/docs/guides/function-calling).
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
about the
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def create(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
stream: Literal[True],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: completion_create_params.ResponseFormat | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Stream[ChatCompletionChunk]:
"""
**Starting a new project?** We recommend trying
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
advantage of the latest OpenAI platform features. Compare
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
---
Creates a model response for the given chat conversation. Learn more in the
[text generation](https://platform.openai.com/docs/guides/text-generation),
[vision](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio) guides.
Parameter support can differ depending on the model used to generate the
response, particularly for newer reasoning models. Parameters that are only
supported for reasoning models are noted below. For the current state of
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
types (modalities) are supported, like
[text](https://platform.openai.com/docs/guides/text-generation),
[images](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio).
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
for more information, along with the
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
guide for more information on how to handle the streaming events.
audio: Parameters for audio output. Required when audio output is requested with
`modalities: ["audio"]`.
[Learn more](https://platform.openai.com/docs/guides/audio).
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
function_call: Deprecated in favor of `tool_choice`.
Controls which (if any) function is called by the model.
`none` means the model will not call a function and instead generates a message.
`auto` means the model can pick between generating a message or calling a
function.
Specifying a particular function via `{"name": "my_function"}` forces the model
to call that function.
`none` is the default when no functions are present. `auto` is the default if
functions are present.
functions: Deprecated in favor of `tools`.
A list of functions the model may generate JSON inputs for.
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
bias is added to the logits generated by the model prior to sampling. The exact
effect will vary per model, but values between -1 and 1 should decrease or
increase likelihood of selection; values like -100 or 100 should result in a ban
or exclusive selection of the relevant token.
logprobs: Whether to return log probabilities of the output tokens or not. If true,
returns the log probabilities of each output token returned in the `content` of
`message`.
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
completion. This value can be used to control
[costs](https://openai.com/api/pricing/) for text generated via API.
This value is now deprecated in favor of `max_completion_tokens`, and is not
compatible with
[o-series models](https://platform.openai.com/docs/guides/reasoning).
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default:
`["text"]`
The `gpt-4o-audio-preview` model can also be used to
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: An object specifying the format that the model must output.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
sample deterministically, such that repeated requests with the same `seed` and
parameters should return the same result. Determinism is not guaranteed, and you
should refer to the `system_fingerprint` response parameter to monitor changes
in the backend.
service_tier: Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
store: Whether or not to store the output of this chat completion request for use in
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tool and instead generates a message. `auto` means the model can
pick between generating a message or calling one or more tools. `required` means
the model must call one or more tools. Specifying a particular tool via
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
`none` is the default when no tools are present. `auto` is the default if tools
are present.
tools: A list of tools the model may call. You can provide either
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
or [function tools](https://platform.openai.com/docs/guides/function-calling).
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
about the
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
def create(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
stream: bool,
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: completion_create_params.ResponseFormat | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion | Stream[ChatCompletionChunk]:
"""
**Starting a new project?** We recommend trying
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
advantage of the latest OpenAI platform features. Compare
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
---
Creates a model response for the given chat conversation. Learn more in the
[text generation](https://platform.openai.com/docs/guides/text-generation),
[vision](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio) guides.
Parameter support can differ depending on the model used to generate the
response, particularly for newer reasoning models. Parameters that are only
supported for reasoning models are noted below. For the current state of
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
types (modalities) are supported, like
[text](https://platform.openai.com/docs/guides/text-generation),
[images](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio).
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
for more information, along with the
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
guide for more information on how to handle the streaming events.
audio: Parameters for audio output. Required when audio output is requested with
`modalities: ["audio"]`.
[Learn more](https://platform.openai.com/docs/guides/audio).
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
function_call: Deprecated in favor of `tool_choice`.
Controls which (if any) function is called by the model.
`none` means the model will not call a function and instead generates a message.
`auto` means the model can pick between generating a message or calling a
function.
Specifying a particular function via `{"name": "my_function"}` forces the model
to call that function.
`none` is the default when no functions are present. `auto` is the default if
functions are present.
functions: Deprecated in favor of `tools`.
A list of functions the model may generate JSON inputs for.
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
bias is added to the logits generated by the model prior to sampling. The exact
effect will vary per model, but values between -1 and 1 should decrease or
increase likelihood of selection; values like -100 or 100 should result in a ban
or exclusive selection of the relevant token.
logprobs: Whether to return log probabilities of the output tokens or not. If true,
returns the log probabilities of each output token returned in the `content` of
`message`.
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
completion. This value can be used to control
[costs](https://openai.com/api/pricing/) for text generated via API.
This value is now deprecated in favor of `max_completion_tokens`, and is not
compatible with
[o-series models](https://platform.openai.com/docs/guides/reasoning).
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default:
`["text"]`
The `gpt-4o-audio-preview` model can also be used to
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: An object specifying the format that the model must output.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
sample deterministically, such that repeated requests with the same `seed` and
parameters should return the same result. Determinism is not guaranteed, and you
should refer to the `system_fingerprint` response parameter to monitor changes
in the backend.
service_tier: Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
store: Whether or not to store the output of this chat completion request for use in
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tool and instead generates a message. `auto` means the model can
pick between generating a message or calling one or more tools. `required` means
the model must call one or more tools. Specifying a particular tool via
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
`none` is the default when no tools are present. `auto` is the default if tools
are present.
tools: A list of tools the model may call. You can provide either
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
or [function tools](https://platform.openai.com/docs/guides/function-calling).
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
about the
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["messages", "model"], ["messages", "model", "stream"])
def create(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: completion_create_params.ResponseFormat | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion | Stream[ChatCompletionChunk]:
validate_response_format(response_format)
return self._post(
"/chat/completions",
body=maybe_transform(
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning_effort": reasoning_effort,
"response_format": response_format,
"safety_identifier": safety_identifier,
"seed": seed,
"service_tier": service_tier,
"stop": stop,
"store": store,
"stream": stream,
"stream_options": stream_options,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
"verbosity": verbosity,
"web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParamsStreaming
if stream
else completion_create_params.CompletionCreateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletion,
stream=stream or False,
stream_cls=Stream[ChatCompletionChunk],
)
def retrieve(
self,
completion_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""Get a stored chat completion.
Only Chat Completions that have been created with
the `store` parameter set to `true` will be returned.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._get(
f"/chat/completions/{completion_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletion,
)
def update(
self,
completion_id: str,
*,
metadata: Optional[Metadata],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""Modify a stored chat completion.
Only Chat Completions that have been created
with the `store` parameter set to `true` can be modified. Currently, the only
supported modification is to update the `metadata` field.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._post(
f"/chat/completions/{completion_id}",
body=maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletion,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: str | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[ChatCompletion]:
"""List stored Chat Completions.
Only Chat Completions that have been stored with
the `store` parameter set to `true` will be returned.
Args:
after: Identifier for the last chat completion from the previous pagination request.
limit: Number of Chat Completions to retrieve.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
model: The model used to generate the Chat Completions.
order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
`desc` for descending order. Defaults to `asc`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/chat/completions",
page=SyncCursorPage[ChatCompletion],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"metadata": metadata,
"model": model,
"order": order,
},
completion_list_params.CompletionListParams,
),
),
model=ChatCompletion,
)
def delete(
self,
completion_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletionDeleted:
"""Delete a stored chat completion.
Only Chat Completions that have been created
with the `store` parameter set to `true` can be deleted.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return self._delete(
f"/chat/completions/{completion_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletionDeleted,
)
def stream(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletionStreamManager[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
and automatic accumulation of each delta.
This also supports all of the parsing utilities that `.parse()` does.
Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:
```py
with client.chat.completions.stream(
model="gpt-4o-2024-08-06",
messages=[...],
) as stream:
for event in stream:
if event.type == "content.delta":
print(event.delta, flush=True, end="")
```
When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)` is an iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).
When the context manager exits, the response will be closed, however the `stream` instance is still available outside
the context manager.
"""
extra_headers = {
"X-Stainless-Helper-Method": "chat.completions.stream",
**(extra_headers or {}),
}
api_request: partial[Stream[ChatCompletionChunk]] = partial(
self.create,
messages=messages,
model=model,
audio=audio,
stream=True,
response_format=_type_to_response_format(response_format),
frequency_penalty=frequency_penalty,
function_call=function_call,
functions=functions,
logit_bias=logit_bias,
logprobs=logprobs,
max_completion_tokens=max_completion_tokens,
max_tokens=max_tokens,
metadata=metadata,
modalities=modalities,
n=n,
parallel_tool_calls=parallel_tool_calls,
prediction=prediction,
presence_penalty=presence_penalty,
prompt_cache_key=prompt_cache_key,
prompt_cache_retention=prompt_cache_retention,
reasoning_effort=reasoning_effort,
safety_identifier=safety_identifier,
seed=seed,
service_tier=service_tier,
store=store,
stop=stop,
stream_options=stream_options,
temperature=temperature,
tool_choice=tool_choice,
tools=tools,
top_logprobs=top_logprobs,
top_p=top_p,
user=user,
verbosity=verbosity,
web_search_options=web_search_options,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
)
return ChatCompletionStreamManager(
api_request,
response_format=response_format,
input_tools=tools,
)
|
Completions
|
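The `Completions` resource in the sample above wraps `/chat/completions` create/retrieve/update/list/delete plus a streaming helper. Below is a minimal usage sketch of `create` and `retrieve`, assuming the `openai` package is installed and an `OPENAI_API_KEY` environment variable is set; the model name and prompt are illustrative, not taken from the sample.

```py
# Minimal sketch, assuming the `openai` package and an OPENAI_API_KEY env var.
from openai import OpenAI

client = OpenAI()

# Non-streaming create; store=True makes the completion retrievable later.
completion = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello in one word."}],
    store=True,
)
print(completion.choices[0].message.content)

# Fetch the stored completion back by id (only works because store=True above).
fetched = client.chat.completions.retrieve(completion.id)
print(fetched.id == completion.id)
```

The `.stream()` helper in the source is the context-manager variant; its own docstring example above already covers that path.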
python
|
django__django
|
tests/test_runner/tests.py
|
{
"start": 909,
"end": 1091
}
|
class ____:
def __init__(self):
self.tests = []
def addTest(self, test):
self.tests.append(test)
def __iter__(self):
yield from self.tests
|
MySuite
|
python
|
doocs__leetcode
|
solution/0700-0799/0791.Custom Sort String/Solution.py
|
{
"start": 0,
"end": 184
}
|
class ____:
def customSortString(self, order: str, s: str) -> str:
d = {c: i for i, c in enumerate(order)}
return ''.join(sorted(s, key=lambda x: d.get(x, 0)))
|
Solution
|
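A quick check of the `customSortString` solution above; the class name is filled in from the target field and the input strings are arbitrary examples.

```py
class Solution:
    def customSortString(self, order: str, s: str) -> str:
        d = {c: i for i, c in enumerate(order)}
        return ''.join(sorted(s, key=lambda x: d.get(x, 0)))


# Characters present in `order` follow its ordering; characters absent from
# `order` fall back to rank 0 and keep their relative order (sorted is stable).
print(Solution().customSortString("cba", "abcd"))  # -> "cdba" (c before b before a)
```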
python
|
pexpect__pexpect
|
pexpect/_async_w_await.py
|
{
"start": 2156,
"end": 3802
}
|
class ____(asyncio.Protocol):
transport = None
def set_expecter(self, expecter):
self.expecter = expecter
self.fut = asyncio.Future()
def found(self, result):
if not self.fut.done():
self.fut.set_result(result)
self.transport.pause_reading()
def error(self, exc):
if not self.fut.done():
self.fut.set_exception(exc)
self.transport.pause_reading()
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
spawn = self.expecter.spawn
s = spawn._decoder.decode(data)
spawn._log(s, "read")
if self.fut.done():
spawn._before.write(s)
spawn._buffer.write(s)
return
try:
index = self.expecter.new_data(s)
if index is not None:
# Found a match
self.found(index)
except Exception as exc:
self.expecter.errored()
self.error(exc)
def eof_received(self):
# N.B. If this gets called, async will close the pipe (the spawn object)
# for us
try:
self.expecter.spawn.flag_eof = True
index = self.expecter.eof()
except EOF as exc:
self.error(exc)
else:
self.found(index)
def connection_lost(self, exc):
if isinstance(exc, OSError) and exc.errno == errno.EIO:
# We may get here without eof_received being called, e.g on Linux
self.eof_received()
elif exc is not None:
self.error(exc)
|
PatternWaiter
|
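The `PatternWaiter` entry above couples an `asyncio.Protocol` to a `Future` so a coroutine can await the first match in streamed data. A stripped-down sketch of the same idea follows, using a plain substring match in place of pexpect's expecter; the names and the hand-driven demo are mine, not from the repo.

```py
import asyncio


class SubstringWaiter(asyncio.Protocol):
    """Resolve a Future as soon as a target substring shows up in the stream."""

    def __init__(self, target: str) -> None:
        self.target = target
        self.buffer = ""
        self.fut = asyncio.Future()

    def data_received(self, data: bytes) -> None:
        if self.fut.done():
            return
        self.buffer += data.decode()
        if self.target in self.buffer:
            self.fut.set_result(self.buffer)

    def connection_lost(self, exc) -> None:
        if not self.fut.done():
            self.fut.set_exception(exc or EOFError("stream closed before match"))


async def main() -> None:
    # Drive the protocol by hand instead of attaching it to a real transport.
    waiter = SubstringWaiter("ready")
    waiter.data_received(b"booting... ")
    waiter.data_received(b"system ready\n")
    print(await waiter.fut)


asyncio.run(main())
```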
python
|
openai__openai-python
|
src/openai/types/beta/realtime/response_create_event_param.py
|
{
"start": 438,
"end": 886
}
|
class ____(TypedDict, total=False):
description: str
"""
The description of the function, including guidance on when and how to call it,
and guidance about what to tell the user when calling (if anything).
"""
name: str
"""The name of the function."""
parameters: object
"""Parameters of the function in JSON Schema."""
type: Literal["function"]
"""The type of the tool, i.e. `function`."""
|
ResponseTool
|
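TypedDicts like `ResponseTool` above are ordinary dicts at runtime; a small sketch of declaring and populating one is below. The tool definition itself is invented for the demo.

```py
from typing import Literal, TypedDict


class ResponseTool(TypedDict, total=False):
    description: str
    name: str
    parameters: object
    type: Literal["function"]


# total=False makes every key optional, so partially filled dicts type-check too.
tool: ResponseTool = {
    "type": "function",
    "name": "get_weather",  # illustrative function name
    "description": "Look up the current weather for a city.",
    "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
}
print(tool["name"])
```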
python
|
jazzband__django-model-utils
|
tests/test_models/test_status_model.py
|
{
"start": 2745,
"end": 2971
}
|
class ____(StatusModelTests):
def setUp(self) -> None:
self.model = StatusPlainTuple
self.on_hold = StatusPlainTuple.STATUS[2][0]
self.active = StatusPlainTuple.STATUS[0][0]
|
StatusModelPlainTupleTests
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py
|
{
"start": 72,
"end": 185
}
|
class ____: # B903
def __init__(self, x: float, y: float) -> None:
self.x = x
self.y = y
|
Point
|
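The `Point` fixture above is exactly the pattern the B903 fixture file exercises: a class that only stores its constructor arguments. One common fix, sketched below, is a dataclass (a NamedTuple works too).

```py
from dataclasses import dataclass


@dataclass
class Point:
    x: float
    y: float


p = Point(1.5, -2.0)
print(p)          # Point(x=1.5, y=-2.0)
print(p.x + p.y)  # -0.5
```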
python
|
nryoung__algorithms
|
algorithms/data_structures/binary_search_tree.py
|
{
"start": 432,
"end": 732
}
|
class ____(object):
"""
Implementation of a Node in a Binary Search Tree.
"""
def __init__(self, key=None, val=None, size_of_subtree=1):
self.key = key
self.val = val
self.size_of_subtree = size_of_subtree
self.left = None
self.right = None
|
Node
|
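The `Node` entry above is only the container; a small sketch of linking a few of these nodes into a binary search tree and reading them back in order is below. The insert and traversal helpers are mine, not from the repo.

```py
class Node:
    def __init__(self, key=None, val=None, size_of_subtree=1):
        self.key = key
        self.val = val
        self.size_of_subtree = size_of_subtree
        self.left = None
        self.right = None


def insert(root, key, val):
    """Recursively place (key, val) by BST ordering; returns the subtree root."""
    if root is None:
        return Node(key, val)
    if key < root.key:
        root.left = insert(root.left, key, val)
    else:
        root.right = insert(root.right, key, val)
    root.size_of_subtree += 1
    return root


def in_order(root):
    if root is None:
        return []
    return in_order(root.left) + [root.key] + in_order(root.right)


root = None
for k in (5, 2, 8, 1):
    root = insert(root, k, str(k))
print(in_order(root))  # [1, 2, 5, 8]
```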
python
|
apache__avro
|
lang/py/avro/io.py
|
{
"start": 7442,
"end": 14558
}
|
class ____:
"""Read leaf values."""
_reader: IO[bytes]
def __init__(self, reader: IO[bytes]) -> None:
"""
reader is a Python object on which we can call read, seek, and tell.
"""
self._reader = reader
@property
def reader(self) -> IO[bytes]:
return self._reader
def read(self, n: int) -> bytes:
"""
Read n bytes.
"""
if n < 0:
raise avro.errors.InvalidAvroBinaryEncoding(f"Requested {n} bytes to read, expected positive integer.")
read_bytes = self.reader.read(n)
if len(read_bytes) != n:
raise avro.errors.InvalidAvroBinaryEncoding(f"Read {len(read_bytes)} bytes, expected {n} bytes")
return read_bytes
def read_null(self) -> None:
"""
null is written as zero bytes
"""
return None
def read_boolean(self) -> bool:
"""
a boolean is written as a single byte
whose value is either 0 (false) or 1 (true).
"""
return ord(self.read(1)) == 1
def read_int(self) -> int:
"""
int and long values are written using variable-length, zig-zag coding.
"""
return self.read_long()
def read_long(self) -> int:
"""
int and long values are written using variable-length, zig-zag coding.
"""
b = ord(self.read(1))
n = b & 0x7F
shift = 7
while (b & 0x80) != 0:
b = ord(self.read(1))
n |= (b & 0x7F) << shift
shift += 7
datum = (n >> 1) ^ -(n & 1)
return datum
def read_float(self) -> float:
"""
A float is written as 4 bytes.
The float is converted into a 32-bit integer using a method equivalent to
Java's floatToRawIntBits and then encoded in little-endian format.
"""
return float(STRUCT_FLOAT.unpack(self.read(4))[0])
def read_double(self) -> float:
"""
A double is written as 8 bytes.
The double is converted into a 64-bit integer using a method equivalent to
Java's doubleToRawLongBits and then encoded in little-endian format.
"""
return float(STRUCT_DOUBLE.unpack(self.read(8))[0])
def read_decimal_from_bytes(self, precision: int, scale: int) -> decimal.Decimal:
"""
Decimal bytes are decoded as signed short, int or long depending on the
size of bytes.
"""
size = self.read_long()
return self.read_decimal_from_fixed(precision, scale, size)
def read_decimal_from_fixed(self, precision: int, scale: int, size: int) -> decimal.Decimal:
"""
Decimal is encoded as fixed. Fixed instances are encoded using the
number of bytes declared in the schema.
"""
datum = self.read(size)
unscaled_datum = 0
msb = struct.unpack("!b", datum[0:1])[0]
leftmost_bit = (msb >> 7) & 1
if leftmost_bit == 1:
modified_first_byte = ord(datum[0:1]) ^ (1 << 7)
datum = bytearray([modified_first_byte]) + datum[1:]
for offset in range(size):
unscaled_datum <<= 8
unscaled_datum += ord(datum[offset : 1 + offset])
unscaled_datum += pow(-2, (size * 8) - 1)
else:
for offset in range(size):
unscaled_datum <<= 8
unscaled_datum += ord(datum[offset : 1 + offset])
original_prec = decimal.getcontext().prec
try:
decimal.getcontext().prec = precision
scaled_datum = decimal.Decimal(unscaled_datum).scaleb(-scale)
finally:
decimal.getcontext().prec = original_prec
return scaled_datum
def read_bytes(self) -> bytes:
"""
Bytes are encoded as a long followed by that many bytes of data.
"""
return self.read(self.read_long())
def read_utf8(self) -> str:
"""
A string is encoded as a long followed by
that many bytes of UTF-8 encoded character data.
"""
return self.read_bytes().decode("utf-8")
def read_date_from_int(self) -> datetime.date:
"""
int is decoded as python date object.
int stores the number of days from
the unix epoch, 1 January 1970 (ISO calendar).
"""
days_since_epoch = self.read_int()
return datetime.date(1970, 1, 1) + datetime.timedelta(days_since_epoch)
def _build_time_object(self, value: int, scale_to_micro: int) -> datetime.time:
value = value * scale_to_micro
value, microseconds = divmod(value, 1000000)
value, seconds = divmod(value, 60)
value, minutes = divmod(value, 60)
hours = value
return datetime.time(hour=hours, minute=minutes, second=seconds, microsecond=microseconds)
def read_time_millis_from_int(self) -> datetime.time:
"""
int is decoded as python time object which represents
the number of milliseconds after midnight, 00:00:00.000.
"""
milliseconds = self.read_int()
return self._build_time_object(milliseconds, 1000)
def read_time_micros_from_long(self) -> datetime.time:
"""
long is decoded as python time object which represents
the number of microseconds after midnight, 00:00:00.000000.
"""
microseconds = self.read_long()
return self._build_time_object(microseconds, 1)
def read_timestamp_millis_from_long(self) -> datetime.datetime:
"""
long is decoded as python datetime object which represents
the number of milliseconds from the unix epoch, 1 January 1970.
"""
timestamp_millis = self.read_long()
timedelta = datetime.timedelta(microseconds=timestamp_millis * 1000)
unix_epoch_datetime = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=avro.timezones.utc)
return unix_epoch_datetime + timedelta
def read_timestamp_micros_from_long(self) -> datetime.datetime:
"""
long is decoded as python datetime object which represents
the number of microseconds from the unix epoch, 1 January 1970.
"""
timestamp_micros = self.read_long()
timedelta = datetime.timedelta(microseconds=timestamp_micros)
unix_epoch_datetime = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=avro.timezones.utc)
return unix_epoch_datetime + timedelta
def skip_null(self) -> None:
pass
def skip_boolean(self) -> None:
self.skip(1)
def skip_int(self) -> None:
self.skip_long()
def skip_long(self) -> None:
b = ord(self.read(1))
while (b & 0x80) != 0:
b = ord(self.read(1))
def skip_float(self) -> None:
self.skip(4)
def skip_double(self) -> None:
self.skip(8)
def skip_bytes(self) -> None:
self.skip(self.read_long())
def skip_utf8(self) -> None:
self.skip_bytes()
def skip(self, n: int) -> None:
self.reader.seek(self.reader.tell() + n)
|
BinaryDecoder
|
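The heart of the `BinaryDecoder` sample above is Avro's variable-length zig-zag integer coding (`read_long`). A self-contained sketch of just that part is below, reading from an `io.BytesIO` rather than the Avro reader wrapper; the encoder is included only to build round-trip test input.

```py
import io


def read_long(stream: io.BytesIO) -> int:
    """Decode one zig-zag varint: 7 data bits per byte, MSB set means 'more bytes'."""
    b = stream.read(1)[0]
    n = b & 0x7F
    shift = 7
    while b & 0x80:
        b = stream.read(1)[0]
        n |= (b & 0x7F) << shift
        shift += 7
    return (n >> 1) ^ -(n & 1)  # undo zig-zag: 0, 1, 2, 3, ... -> 0, -1, 1, -2, ...


def zig_zag_encode(value: int) -> bytes:
    """Inverse of read_long, used here only to produce test bytes."""
    n = -2 * value - 1 if value < 0 else 2 * value  # zig-zag map
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        if n:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)


for value in (0, -1, 1, 150, -300):
    assert read_long(io.BytesIO(zig_zag_encode(value))) == value
print("zig-zag round trip ok")
```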
python
|
huggingface__transformers
|
src/transformers/models/ministral/modular_ministral.py
|
{
"start": 8710,
"end": 8769
}
|
class ____(Qwen2DecoderLayer):
pass
|
MinistralDecoderLayer
|
python
|
apache__airflow
|
airflow-core/tests/unit/always/test_project_structure.py
|
{
"start": 40400,
"end": 40936
}
|
class ____(ExampleCoverageTest):
PROVIDER = "slack"
CLASS_DIRS = ProjectStructureTest.CLASS_DIRS
BASE_CLASSES = {
"airflow.providers.slack.transfers.base_sql_to_slack.BaseSqlToSlackOperator",
"airflow.providers.slack.operators.slack.SlackAPIOperator",
}
MISSING_EXAMPLES_FOR_CLASSES = set()
DEPRECATED_CLASSES = {
"airflow.providers.slack.notifications.slack_notifier.py.",
"airflow.providers.slack.transfers.sql_to_slack.SqlToSlackOperator",
}
|
TestSlackProviderProjectStructure
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 31474,
"end": 32182
}
|
class ____(BaseModel):
"""
XCom response serializer with string return type.
"""
key: Annotated[str, Field(title="Key")]
timestamp: Annotated[datetime, Field(title="Timestamp")]
logical_date: Annotated[datetime | None, Field(title="Logical Date")] = None
map_index: Annotated[int, Field(title="Map Index")]
task_id: Annotated[str, Field(title="Task Id")]
dag_id: Annotated[str, Field(title="Dag Id")]
run_id: Annotated[str, Field(title="Run Id")]
dag_display_name: Annotated[str, Field(title="Dag Display Name")]
task_display_name: Annotated[str, Field(title="Task Display Name")]
value: Annotated[str | None, Field(title="Value")] = None
|
XComResponseString
|
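The `XComResponseString` entry above leans on pydantic's `Annotated`/`Field` pattern. A trimmed-down sketch of the same pattern with a couple of fields is below; the model name and values are invented for the demo, and it assumes pydantic v2 on Python 3.10+.

```py
from datetime import datetime, timezone
from typing import Annotated

from pydantic import BaseModel, Field


class XComEntry(BaseModel):
    """Illustrative model, not the generated Airflow one."""

    key: Annotated[str, Field(title="Key")]
    timestamp: Annotated[datetime, Field(title="Timestamp")]
    value: Annotated[str | None, Field(title="Value")] = None


entry = XComEntry(key="return_value", timestamp=datetime.now(timezone.utc))
print(entry.model_dump_json())  # value defaults to null when omitted
```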
python
|
numpy__numpy
|
numpy/_typing/_nbit_base.py
|
{
"start": 2425,
"end": 2560
}
|
class ____(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
pass
@final
@set_module("numpy._typing")
|
_96Bit
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_se_linux_options.py
|
{
"start": 383,
"end": 5755
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'level': 'str',
'role': 'str',
'type': 'str',
'user': 'str'
}
attribute_map = {
'level': 'level',
'role': 'role',
'type': 'type',
'user': 'user'
}
def __init__(self, level=None, role=None, type=None, user=None, local_vars_configuration=None): # noqa: E501
"""V1SELinuxOptions - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._level = None
self._role = None
self._type = None
self._user = None
self.discriminator = None
if level is not None:
self.level = level
if role is not None:
self.role = role
if type is not None:
self.type = type
if user is not None:
self.user = user
@property
def level(self):
"""Gets the level of this V1SELinuxOptions. # noqa: E501
Level is SELinux level label that applies to the container. # noqa: E501
:return: The level of this V1SELinuxOptions. # noqa: E501
:rtype: str
"""
return self._level
@level.setter
def level(self, level):
"""Sets the level of this V1SELinuxOptions.
Level is SELinux level label that applies to the container. # noqa: E501
:param level: The level of this V1SELinuxOptions. # noqa: E501
:type: str
"""
self._level = level
@property
def role(self):
"""Gets the role of this V1SELinuxOptions. # noqa: E501
Role is a SELinux role label that applies to the container. # noqa: E501
:return: The role of this V1SELinuxOptions. # noqa: E501
:rtype: str
"""
return self._role
@role.setter
def role(self, role):
"""Sets the role of this V1SELinuxOptions.
Role is a SELinux role label that applies to the container. # noqa: E501
:param role: The role of this V1SELinuxOptions. # noqa: E501
:type: str
"""
self._role = role
@property
def type(self):
"""Gets the type of this V1SELinuxOptions. # noqa: E501
Type is a SELinux type label that applies to the container. # noqa: E501
:return: The type of this V1SELinuxOptions. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1SELinuxOptions.
Type is a SELinux type label that applies to the container. # noqa: E501
:param type: The type of this V1SELinuxOptions. # noqa: E501
:type: str
"""
self._type = type
@property
def user(self):
"""Gets the user of this V1SELinuxOptions. # noqa: E501
User is a SELinux user label that applies to the container. # noqa: E501
:return: The user of this V1SELinuxOptions. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this V1SELinuxOptions.
User is a SELinux user label that applies to the container. # noqa: E501
:param user: The user of this V1SELinuxOptions. # noqa: E501
:type: str
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SELinuxOptions):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SELinuxOptions):
return True
return self.to_dict() != other.to_dict()
|
V1SELinuxOptions
|
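Generated models like `V1SELinuxOptions` above are normally used by constructing them with keyword arguments and serialising via `to_dict()`. A short sketch, assuming the `kubernetes` Python client is installed; the field values are illustrative.

```py
# Minimal sketch, assuming the `kubernetes` package is installed.
from kubernetes import client

opts = client.V1SELinuxOptions(
    level="s0:c123,c456",
    role="object_r",
    type="svirt_sandbox_file_t",
    user="system_u",
)
print(opts.to_dict())
# {'level': 's0:c123,c456', 'role': 'object_r', 'type': 'svirt_sandbox_file_t', 'user': 'system_u'}

# Typically attached to a pod-level security context:
ctx = client.V1PodSecurityContext(se_linux_options=opts)
print(ctx.se_linux_options.level)
```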