| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
ansible__ansible
|
lib/ansible/_internal/_collection_proxy.py
|
{
"start": 103,
"end": 1154
}
|
class ____[T](_c.Sequence[T]):
"""A read-only sequence proxy."""
# DTFIX5: needs unit test coverage
__slots__ = ('__value',)
def __init__(self, value: _c.Sequence[T]) -> None:
self.__value = value
@_t.overload
def __getitem__(self, index: int) -> T: ...
@_t.overload
def __getitem__(self, index: slice) -> _c.Sequence[T]: ...
def __getitem__(self, index: int | slice) -> T | _c.Sequence[T]:
if isinstance(index, slice):
return self.__class__(self.__value[index])
return self.__value[index]
def __len__(self) -> int:
return len(self.__value)
def __contains__(self, item: object) -> bool:
return item in self.__value
def __iter__(self) -> _t.Iterator[T]:
yield from self.__value
def __reversed__(self) -> _c.Iterator[T]:
return reversed(self.__value)
def index(self, *args) -> int:
return self.__value.index(*args)
def count(self, value: object) -> int:
return self.__value.count(value)
|
SequenceProxy
|
python
|
getsentry__sentry
|
src/sentry/models/options/option.py
|
{
"start": 2226,
"end": 2439
}
|
class ____(BaseOption):
__relocation_scope__ = RelocationScope.Config
class Meta:
app_label = "sentry"
db_table = "sentry_controloption"
__repr__ = sane_repr("key", "value")
|
ControlOption
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/utils/test_credentials_provider.py
|
{
"start": 23831,
"end": 24303
}
|
class ____:
def test_get_scopes_with_default(self):
assert _get_scopes() == _DEFAULT_SCOPES
@pytest.mark.parametrize(
("scopes_str", "scopes"),
[
pytest.param("scope1", ["scope1"], id="single-scope"),
pytest.param("scope1,scope2", ["scope1", "scope2"], id="multiple-scopes"),
],
)
def test_get_scopes_with_input(self, scopes_str, scopes):
assert _get_scopes(scopes_str) == scopes
|
TestGetScopes
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_simplify/SIM115.py
|
{
"start": 5750,
"end": 6057
}
|
class ____(TestCase):
@classmethod
def setUpClass(cls):
cls.enterClassContext(open("filename"))
# OK
async def foo():
class ExampleAsyncTests(IsolatedAsyncioTestCase):
async def test_something(self):
await self.enterAsyncContext(open("filename"))
# OK
|
ExampleClassTests
|
python
|
chroma-core__chroma
|
chromadb/server/fastapi/__init__.py
|
{
"start": 5879,
"end": 74567
}
|
class ____(Server):
def __init__(self, settings: Settings):
ProductTelemetryClient.SERVER_CONTEXT = ServerContext.FASTAPI
# https://fastapi.tiangolo.com/advanced/custom-response/#use-orjsonresponse
self._app = fastapi.FastAPI(debug=True, default_response_class=ORJSONResponse)
self._system = System(settings)
self._api: ServerAPI = self._system.instance(ServerAPI)
self._extra_openapi_schemas: Dict[str, Any] = {}
self._app.openapi = self.generate_openapi
self._opentelemetry_client = self._api.require(OpenTelemetryClient)
self._capacity_limiter = CapacityLimiter(
settings.chroma_server_thread_pool_size
)
self._quota_enforcer = self._system.require(QuotaEnforcer)
self._system.start()
self._app.middleware("http")(check_http_version_middleware)
self._app.middleware("http")(catch_exceptions_middleware)
self._app.middleware("http")(add_trace_id_to_response_middleware)
self._app.add_middleware(
CORSMiddleware,
allow_headers=["*"],
allow_origins=settings.chroma_server_cors_allow_origins,
allow_methods=["*"],
)
self._app.add_exception_handler(QuotaError, self.quota_exception_handler)
self._app.add_exception_handler(
RateLimitError, self.rate_limit_exception_handler
)
self._async_rate_limit_enforcer = self._system.require(AsyncRateLimitEnforcer)
self._app.on_event("shutdown")(self.shutdown)
self.authn_provider = None
if settings.chroma_server_authn_provider:
self.authn_provider = self._system.require(ServerAuthenticationProvider)
self.authz_provider = None
if settings.chroma_server_authz_provider:
self.authz_provider = self._system.require(ServerAuthorizationProvider)
self.router = ChromaAPIRouter()
self.setup_v1_routes()
self.setup_v2_routes()
self._app.include_router(self.router)
use_route_names_as_operation_ids(self._app)
instrument_fastapi(self._app)
telemetry_client = self._system.instance(ProductTelemetryClient)
telemetry_client.capture(ServerStartEvent())
def generate_openapi(self) -> Dict[str, Any]:
"""Used instead of the default openapi() generation handler to include manually-populated schemas."""
schema: Dict[str, Any] = get_openapi(
title="Chroma",
routes=self._app.routes,
version=chromadb_version,
)
for key, value in self._extra_openapi_schemas.items():
schema["components"]["schemas"][key] = value
return schema
def get_openapi_extras_for_body_model(
self, request_model: Type[D]
) -> Dict[str, Any]:
schema = request_model.model_json_schema(
ref_template="#/components/schemas/{model}"
)
if "$defs" in schema:
for key, value in schema["$defs"].items():
self._extra_openapi_schemas[key] = value
openapi_extra = {
"requestBody": {
"content": {"application/json": {"schema": schema}},
"required": True,
}
}
return openapi_extra
def setup_v2_routes(self) -> None:
self.router.add_api_route("/api/v2", self.root, methods=["GET"])
self.router.add_api_route("/api/v2/reset", self.reset, methods=["POST"])
self.router.add_api_route("/api/v2/version", self.version, methods=["GET"])
self.router.add_api_route("/api/v2/heartbeat", self.heartbeat, methods=["GET"])
self.router.add_api_route(
"/api/v2/pre-flight-checks", self.pre_flight_checks, methods=["GET"]
)
self.router.add_api_route(
"/api/v2/auth/identity",
self.get_user_identity,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases",
self.create_database,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateDatabase),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}",
self.get_database,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}",
self.delete_database,
methods=["DELETE"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants",
self.create_tenant,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateTenant),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}",
self.get_tenant,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases",
self.list_databases,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections",
self.list_collections,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections_count",
self.count_collections,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections",
self.create_collection,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateCollection),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/add",
self.add,
methods=["POST"],
status_code=status.HTTP_201_CREATED,
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(AddEmbedding),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/update",
self.update,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(UpdateEmbedding),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/upsert",
self.upsert,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(AddEmbedding),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/get",
self.get,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(GetEmbedding),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/delete",
self.delete,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(DeleteEmbedding),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/count",
self.count,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/query",
self.get_nearest_neighbors,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(
request_model=QueryEmbedding
),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_name}",
self.get_collection,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}",
self.update_collection,
methods=["PUT"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(UpdateCollection),
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_name}",
self.delete_collection,
methods=["DELETE"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/functions/attach",
self.attach_function,
methods=["POST"],
response_model=None,
)
self.router.add_api_route(
"/api/v2/tenants/{tenant}/databases/{database_name}/collections/{collection_id}/functions/{function_name}",
self.get_attached_function,
methods=["GET"],
response_model=None,
)
def shutdown(self) -> None:
self._system.stop()
def app(self) -> fastapi.FastAPI:
return self._app
async def rate_limit_exception_handler(
self, request: Request, exc: RateLimitError
) -> ORJSONResponse:
return ORJSONResponse(
status_code=429,
content={"message": "Rate limit exceeded."},
)
def root(self) -> Dict[str, int]:
return {"nanosecond heartbeat": self._api.heartbeat()}
async def quota_exception_handler(
self, request: Request, exc: QuotaError
) -> ORJSONResponse:
return ORJSONResponse(
status_code=400,
content={"message": exc.message()},
)
async def heartbeat(self) -> Dict[str, int]:
return self.root()
async def version(self) -> str:
return self._api.get_version()
def _set_request_context(self, request: Request) -> None:
"""
Set context about the request on any components that might need it.
"""
self._quota_enforcer.set_context(context={"request": request})
@trace_method(
"auth_request",
OpenTelemetryGranularity.OPERATION,
)
@rate_limit
async def auth_request(
self,
headers: Headers,
action: AuthzAction,
tenant: Optional[str],
database: Optional[str],
collection: Optional[str],
) -> None:
return await to_thread.run_sync(
# NOTE(rescrv, iron will auth): No need to migrate because this is the utility call.
self.sync_auth_request,
*(headers, action, tenant, database, collection),
)
@trace_method(
"FastAPI.sync_auth_request",
OpenTelemetryGranularity.OPERATION,
)
def sync_auth_request(
self,
headers: Headers,
action: AuthzAction,
tenant: Optional[str],
database: Optional[str],
collection: Optional[str],
) -> None:
"""
Authenticates and authorizes the request based on the given headers
and other parameters. If the request cannot be authenticated or cannot
be authorized (with the configured providers), raises an HTTP 401.
"""
if not self.authn_provider:
add_attributes_to_current_span(
{
"tenant": tenant,
"database": database,
"collection": collection,
}
)
return
user_identity = self.authn_provider.authenticate_or_raise(dict(headers))
if not self.authz_provider:
return
authz_resource = AuthzResource(
tenant=tenant,
database=database,
collection=collection,
)
self.authz_provider.authorize_or_raise(user_identity, action, authz_resource)
add_attributes_to_current_span(
{
"tenant": tenant,
"database": database,
"collection": collection,
}
)
return
@trace_method("FastAPI.get_user_identity", OpenTelemetryGranularity.OPERATION)
async def get_user_identity(
self,
request: Request,
) -> UserIdentity:
if not self.authn_provider:
return UserIdentity(
user_id="", tenant=DEFAULT_TENANT, databases=[DEFAULT_DATABASE]
)
return cast(
UserIdentity,
await to_thread.run_sync(
lambda: cast(ServerAuthenticationProvider, self.authn_provider).authenticate_or_raise(dict(request.headers)) # type: ignore
),
)
@trace_method("FastAPI.create_database", OpenTelemetryGranularity.OPERATION)
async def create_database(
self,
request: Request,
tenant: str,
) -> None:
def process_create_database(
tenant: str, headers: Headers, raw_body: bytes
) -> None:
db = validate_model(CreateDatabase, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
headers,
AuthzAction.CREATE_DATABASE,
tenant,
db.name,
None,
)
self._set_request_context(request=request)
return self._api.create_database(db.name, tenant)
await to_thread.run_sync(
process_create_database,
tenant,
request.headers,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get_database", OpenTelemetryGranularity.OPERATION)
async def get_database(
self,
request: Request,
database_name: str,
tenant: str,
) -> Database:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.GET_DATABASE,
tenant,
database_name,
None,
)
return cast(
Database,
await to_thread.run_sync(
self._api.get_database,
database_name,
tenant,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.delete_database", OpenTelemetryGranularity.OPERATION)
async def delete_database(
self,
request: Request,
database_name: str,
tenant: str,
) -> None:
# NOTE(rescrv, iron will auth): Implemented.
self.auth_request(
request.headers,
AuthzAction.DELETE_DATABASE,
tenant,
database_name,
None,
)
await to_thread.run_sync(
self._api.delete_database,
database_name,
tenant,
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.create_tenant", OpenTelemetryGranularity.OPERATION)
async def create_tenant(
self,
request: Request,
) -> None:
def process_create_tenant(request: Request, raw_body: bytes) -> None:
tenant = validate_model(CreateTenant, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.CREATE_TENANT,
tenant.name,
None,
None,
)
return self._api.create_tenant(tenant.name)
await to_thread.run_sync(
process_create_tenant,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get_tenant", OpenTelemetryGranularity.OPERATION)
async def get_tenant(
self,
request: Request,
tenant: str,
) -> Tenant:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.GET_TENANT,
tenant,
None,
None,
)
return cast(
Tenant,
await to_thread.run_sync(
self._api.get_tenant,
tenant,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.list_databases", OpenTelemetryGranularity.OPERATION)
async def list_databases(
self,
request: Request,
tenant: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Sequence[Database]:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.LIST_DATABASES,
tenant,
None,
None,
)
return cast(
Sequence[Database],
await to_thread.run_sync(
self._api.list_databases,
limit,
offset,
tenant,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.list_collections", OpenTelemetryGranularity.OPERATION)
async def list_collections(
self,
request: Request,
tenant: str,
database_name: str,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> Sequence[CollectionModel]:
def process_list_collections(
limit: Optional[int], offset: Optional[int], tenant: str, database_name: str
) -> Sequence[CollectionModel]:
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.LIST_COLLECTIONS,
tenant,
database_name,
None,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api.list_collections(
tenant=tenant, database=database_name, limit=limit, offset=offset
)
api_collection_models = cast(
Sequence[CollectionModel],
await to_thread.run_sync(
process_list_collections,
limit,
offset,
tenant,
database_name,
limiter=self._capacity_limiter,
),
)
return api_collection_models
@trace_method("FastAPI.count_collections", OpenTelemetryGranularity.OPERATION)
async def count_collections(
self,
request: Request,
tenant: str,
database_name: str,
) -> int:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.COUNT_COLLECTIONS,
tenant,
database_name,
None,
)
add_attributes_to_current_span({"tenant": tenant})
return cast(
int,
await to_thread.run_sync(
self._api.count_collections,
tenant,
database_name,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.create_collection", OpenTelemetryGranularity.OPERATION)
async def create_collection(
self,
request: Request,
tenant: str,
database_name: str,
) -> CollectionModel:
def process_create_collection(
request: Request, tenant: str, database: str, raw_body: bytes
) -> CollectionModel:
create = validate_model(CreateCollection, orjson.loads(raw_body))
if not create.configuration:
if create.metadata:
configuration = (
create_collection_configuration_from_legacy_collection_metadata(
create.metadata
)
)
else:
configuration = None
else:
configuration = load_create_collection_configuration_from_json(
create.configuration
)
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.CREATE_COLLECTION,
tenant,
database,
create.name,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api.create_collection(
name=create.name,
configuration=configuration,
metadata=create.metadata,
get_or_create=create.get_or_create,
tenant=tenant,
database=database,
)
api_collection_model = cast(
CollectionModel,
await to_thread.run_sync(
process_create_collection,
request,
tenant,
database_name,
await request.body(),
limiter=self._capacity_limiter,
),
)
return api_collection_model
@trace_method("FastAPI.get_collection", OpenTelemetryGranularity.OPERATION)
async def get_collection(
self,
request: Request,
tenant: str,
database_name: str,
collection_name: str,
) -> CollectionModel:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.GET_COLLECTION,
tenant,
database_name,
collection_name,
)
add_attributes_to_current_span({"tenant": tenant})
api_collection_model = cast(
CollectionModel,
await to_thread.run_sync(
self._api.get_collection,
collection_name,
tenant,
database_name,
limiter=self._capacity_limiter,
),
)
return api_collection_model
@trace_method("FastAPI.update_collection", OpenTelemetryGranularity.OPERATION)
async def update_collection(
self,
tenant: str,
database_name: str,
collection_id: str,
request: Request,
) -> None:
def process_update_collection(
request: Request, collection_id: str, raw_body: bytes
) -> None:
update = validate_model(UpdateCollection, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.UPDATE_COLLECTION,
tenant,
database_name,
collection_id,
)
configuration = (
None
if not update.new_configuration
else load_update_collection_configuration_from_json(
update.new_configuration
)
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._modify(
id=_uuid(collection_id),
new_name=update.new_name,
new_metadata=update.new_metadata,
new_configuration=configuration,
tenant=tenant,
database=database_name,
)
await to_thread.run_sync(
process_update_collection,
request,
collection_id,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.delete_collection", OpenTelemetryGranularity.OPERATION)
async def delete_collection(
self,
request: Request,
collection_name: str,
tenant: str,
database_name: str,
) -> None:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.DELETE_COLLECTION,
tenant,
database_name,
collection_name,
)
add_attributes_to_current_span({"tenant": tenant})
await to_thread.run_sync(
self._api.delete_collection,
collection_name,
tenant,
database_name,
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.attach_function", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def attach_function(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
) -> Dict[str, Any]:
try:
def process_attach_function(request: Request, raw_body: bytes) -> Dict[str, Any]:
body = orjson.loads(raw_body)
# NOTE: Auth check for attaching functions
self.sync_auth_request(
request.headers,
AuthzAction.UPDATE_COLLECTION, # Using UPDATE_COLLECTION as the auth action
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
name = body.get("name")
function_id = body.get("function_id")
output_collection = body.get("output_collection")
params = body.get("params")
attached_fn = self._api.attach_function(
function_id=function_id,
name=name,
input_collection_id=_uuid(collection_id),
output_collection=output_collection,
params=params,
tenant=tenant,
database=database_name,
)
return {
"attached_function": {
"id": str(attached_fn.id),
"name": attached_fn.name,
"function_name": attached_fn.function_name,
"output_collection": attached_fn.output_collection,
"params": attached_fn.params,
}
}
raw_body = await request.body()
return await to_thread.run_sync(
process_attach_function,
request,
raw_body,
limiter=self._capacity_limiter,
)
except Exception:
raise
@trace_method("FastAPI.get_attached_function", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_attached_function(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
function_name: str,
) -> Dict[str, Any]:
# NOTE: Auth check for getting attached functions
await self.auth_request(
request.headers,
AuthzAction.GET_COLLECTION, # Using GET_COLLECTION as the auth action
tenant,
database_name,
collection_id,
)
add_attributes_to_current_span({"tenant": tenant})
attached_fn = await to_thread.run_sync(
self._api.get_attached_function,
function_name,
_uuid(collection_id),
tenant,
database_name,
limiter=self._capacity_limiter,
)
return {
"attached_function": {
"id": str(attached_fn.id),
"name": attached_fn.name,
"function_name": attached_fn.function_name,
"output_collection": attached_fn.output_collection,
"params": attached_fn.params,
}
}
@trace_method("FastAPI.add", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def add(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
) -> bool:
try:
def process_add(request: Request, raw_body: bytes) -> bool:
add = validate_model(AddEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.ADD,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._add(
collection_id=_uuid(collection_id),
ids=add.ids,
embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(add.embeddings)
if add.embeddings
else None,
),
metadatas=add.metadatas, # type: ignore
documents=add.documents, # type: ignore
uris=add.uris, # type: ignore
tenant=tenant,
database=database_name,
)
return cast(
bool,
await to_thread.run_sync(
process_add,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
except InvalidDimensionException as e:
raise HTTPException(status_code=500, detail=str(e))
@trace_method("FastAPI.update", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def update(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
) -> None:
def process_update(request: Request, raw_body: bytes) -> bool:
update = validate_model(UpdateEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.UPDATE,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._update(
collection_id=_uuid(collection_id),
ids=update.ids,
embeddings=convert_list_embeddings_to_np(update.embeddings)
if update.embeddings
else None,
metadatas=update.metadatas, # type: ignore
documents=update.documents, # type: ignore
uris=update.uris, # type: ignore
tenant=tenant,
database=database_name,
)
await to_thread.run_sync(
process_update,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.upsert", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def upsert(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
) -> None:
def process_upsert(request: Request, raw_body: bytes) -> bool:
upsert = validate_model(AddEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.UPSERT,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._upsert(
collection_id=_uuid(collection_id),
ids=upsert.ids,
embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(upsert.embeddings)
if upsert.embeddings
else None,
),
metadatas=upsert.metadatas, # type: ignore
documents=upsert.documents, # type: ignore
uris=upsert.uris, # type: ignore
tenant=tenant,
database=database_name,
)
await to_thread.run_sync(
process_upsert,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get(
self,
collection_id: str,
tenant: str,
database_name: str,
request: Request,
) -> GetResult:
def process_get(request: Request, raw_body: bytes) -> GetResult:
get = validate_model(GetEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.GET,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._get(
collection_id=_uuid(collection_id),
ids=get.ids,
where=get.where,
limit=get.limit,
offset=get.offset,
where_document=get.where_document,
include=get.include,
tenant=tenant,
database=database_name,
)
get_result = cast(
GetResult,
await to_thread.run_sync(
process_get,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
if get_result["embeddings"] is not None:
get_result["embeddings"] = [
cast(Embedding, embedding).tolist()
for embedding in get_result["embeddings"]
]
return get_result
@trace_method("FastAPI.delete", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def delete(
self,
collection_id: str,
tenant: str,
database_name: str,
request: Request,
) -> None:
def process_delete(request: Request, raw_body: bytes) -> None:
delete = validate_model(DeleteEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.DELETE,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._delete(
collection_id=_uuid(collection_id),
ids=delete.ids,
where=delete.where,
where_document=delete.where_document,
tenant=tenant,
database=database_name,
)
await to_thread.run_sync(
process_delete,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.count", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def count(
self,
request: Request,
tenant: str,
database_name: str,
collection_id: str,
) -> int:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.COUNT,
tenant,
database_name,
collection_id,
)
add_attributes_to_current_span({"tenant": tenant})
return cast(
int,
await to_thread.run_sync(
self._api._count,
_uuid(collection_id),
tenant,
database_name,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.reset", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def reset(
self,
request: Request,
) -> bool:
# NOTE(rescrv, iron will auth): Implemented.
await self.auth_request(
request.headers,
AuthzAction.RESET,
None,
None,
None,
)
return cast(
bool,
await to_thread.run_sync(
self._api.reset,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.get_nearest_neighbors", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_nearest_neighbors(
self,
tenant: str,
database_name: str,
collection_id: str,
request: Request,
) -> QueryResult:
@trace_method(
"internal.get_nearest_neighbors", OpenTelemetryGranularity.OPERATION
)
def process_query(request: Request, raw_body: bytes) -> QueryResult:
query = validate_model(QueryEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): Implemented.
self.sync_auth_request(
request.headers,
AuthzAction.QUERY,
tenant,
database_name,
collection_id,
)
self._set_request_context(request=request)
add_attributes_to_current_span({"tenant": tenant})
return self._api._query(
collection_id=_uuid(collection_id),
query_embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(query.query_embeddings)
if query.query_embeddings
else None,
),
n_results=query.n_results,
where=query.where,
where_document=query.where_document,
include=query.include,
tenant=tenant,
database=database_name,
)
nnresult = cast(
QueryResult,
await to_thread.run_sync(
process_query,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
if nnresult["embeddings"] is not None:
nnresult["embeddings"] = [
[cast(Embedding, embedding).tolist() for embedding in result]
for result in nnresult["embeddings"]
]
return nnresult
async def pre_flight_checks(self) -> Dict[str, Any]:
def process_pre_flight_checks() -> Dict[str, Any]:
return {
"max_batch_size": self._api.get_max_batch_size(),
}
return cast(
Dict[str, Any],
await to_thread.run_sync(
process_pre_flight_checks,
limiter=self._capacity_limiter,
),
)
# =========================================================================
# OLD ROUTES FOR BACKWARDS COMPATIBILITY — WILL BE REMOVED
# =========================================================================
def setup_v1_routes(self) -> None:
self.router.add_api_route("/api/v1", self.root, methods=["GET"])
self.router.add_api_route("/api/v1/reset", self.reset, methods=["POST"])
self.router.add_api_route("/api/v1/version", self.version, methods=["GET"])
self.router.add_api_route("/api/v1/heartbeat", self.heartbeat, methods=["GET"])
self.router.add_api_route(
"/api/v1/pre-flight-checks", self.pre_flight_checks, methods=["GET"]
)
self.router.add_api_route(
"/api/v1/databases",
self.create_database_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateDatabase),
)
self.router.add_api_route(
"/api/v1/databases/{database}",
self.get_database_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/tenants",
self.create_tenant_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateTenant),
)
self.router.add_api_route(
"/api/v1/tenants/{tenant}",
self.get_tenant_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/collections",
self.list_collections_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/count_collections",
self.count_collections_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/collections",
self.create_collection_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(CreateCollection),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/add",
self.add_v1,
methods=["POST"],
status_code=status.HTTP_201_CREATED,
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(AddEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/update",
self.update_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(UpdateEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/upsert",
self.upsert_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(AddEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/get",
self.get_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(GetEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/delete",
self.delete_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(DeleteEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/count",
self.count_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/query",
self.get_nearest_neighbors_v1,
methods=["POST"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(QueryEmbedding),
)
self.router.add_api_route(
"/api/v1/collections/{collection_name}",
self.get_collection_v1,
methods=["GET"],
response_model=None,
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}",
self.update_collection_v1,
methods=["PUT"],
response_model=None,
openapi_extra=self.get_openapi_extras_for_body_model(UpdateCollection),
)
self.router.add_api_route(
"/api/v1/collections/{collection_name}",
self.delete_collection_v1,
methods=["DELETE"],
response_model=None,
)
@trace_method(
"auth_and_get_tenant_and_database_for_request_v1",
OpenTelemetryGranularity.OPERATION,
)
@rate_limit
async def auth_and_get_tenant_and_database_for_request(
self,
headers: Headers,
action: AuthzAction,
tenant: Optional[str],
database: Optional[str],
collection: Optional[str],
) -> Tuple[Optional[str], Optional[str]]:
"""
Authenticates and authorizes the request based on the given headers
and other parameters. If the request cannot be authenticated or cannot
be authorized (with the configured providers), raises an HTTP 401.
If the request is authenticated and authorized, returns the tenant and
database to be used for the request. These will differ from the passed
tenant and database if and only if:
- The request is authenticated
- chroma_overwrite_singleton_tenant_database_access_from_auth = True
- The passed tenant or database are None or default_{tenant, database}
(can be overwritten separately)
- The user has access to a single tenant and/or single database.
"""
return await to_thread.run_sync(
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request,
headers,
action,
tenant,
database,
collection,
)
# NOTE(rescrv, iron will auth): v1
def sync_auth_and_get_tenant_and_database_for_request(
self,
headers: Headers,
action: AuthzAction,
tenant: Optional[str],
database: Optional[str],
collection: Optional[str],
) -> Tuple[Optional[str], Optional[str]]:
if not self.authn_provider:
add_attributes_to_current_span(
{
"tenant": tenant,
"database": database,
"collection": collection,
}
)
return (tenant, database)
user_identity = self.authn_provider.authenticate_or_raise(dict(headers))
(
new_tenant,
new_database,
) = self.authn_provider.singleton_tenant_database_if_applicable(user_identity)
if (not tenant or tenant == DEFAULT_TENANT) and new_tenant:
tenant = new_tenant
if (not database or database == DEFAULT_DATABASE) and new_database:
database = new_database
if not self.authz_provider:
return (tenant, database)
authz_resource = AuthzResource(
tenant=tenant,
database=database,
collection=collection,
)
self.authz_provider.authorize_or_raise(user_identity, action, authz_resource)
add_attributes_to_current_span(
{
"tenant": tenant,
"database": database,
"collection": collection,
}
)
return (tenant, database)
@trace_method("FastAPI.create_database_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def create_database_v1(
self,
request: Request,
tenant: str = DEFAULT_TENANT,
) -> None:
def process_create_database(
tenant: str, headers: Headers, raw_body: bytes
) -> None:
db = validate_model(CreateDatabase, orjson.loads(raw_body))
(
maybe_tenant,
maybe_database,
# NOTE(rescrv, iron will auth): v1
) = self.sync_auth_and_get_tenant_and_database_for_request(
headers,
AuthzAction.CREATE_DATABASE,
tenant,
db.name,
None,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
db.name = maybe_database
return self._api.create_database(db.name, tenant)
await to_thread.run_sync(
process_create_database,
tenant,
request.headers,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get_database_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_database_v1(
self,
request: Request,
database: str,
tenant: str = DEFAULT_TENANT,
) -> Database:
(
maybe_tenant,
maybe_database,
) = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.GET_DATABASE,
tenant,
database,
None,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
return cast(
Database,
await to_thread.run_sync(
self._api.get_database,
database,
tenant,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.create_tenant_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def create_tenant_v1(
self,
request: Request,
) -> None:
def process_create_tenant(request: Request, raw_body: bytes) -> None:
tenant = validate_model(CreateTenant, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
maybe_tenant, _ = self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.CREATE_TENANT,
tenant.name,
None,
None,
)
if maybe_tenant:
tenant.name = maybe_tenant
return self._api.create_tenant(tenant.name)
await to_thread.run_sync(
process_create_tenant,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get_tenant_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_tenant_v1(
self,
request: Request,
tenant: str,
) -> Tenant:
maybe_tenant, _ = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.GET_TENANT,
tenant,
None,
None,
)
if maybe_tenant:
tenant = maybe_tenant
return cast(
Tenant,
await to_thread.run_sync(
self._api.get_tenant,
tenant,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.list_collections_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def list_collections_v1(
self,
request: Request,
limit: Optional[int] = None,
offset: Optional[int] = None,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> Sequence[CollectionModel]:
(
maybe_tenant,
maybe_database,
) = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.LIST_COLLECTIONS,
tenant,
database,
None,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
api_collection_models = cast(
Sequence[CollectionModel],
await to_thread.run_sync(
self._api.list_collections,
limit,
offset,
tenant,
database,
limiter=self._capacity_limiter,
),
)
return api_collection_models
@trace_method("FastAPI.count_collections_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def count_collections_v1(
self,
request: Request,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> int:
(
maybe_tenant,
maybe_database,
) = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.COUNT_COLLECTIONS,
tenant,
database,
None,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
return cast(
int,
await to_thread.run_sync(
self._api.count_collections,
tenant,
database,
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.create_collection_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def create_collection_v1(
self,
request: Request,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
def process_create_collection(
request: Request, tenant: str, database: str, raw_body: bytes
) -> CollectionModel:
create = validate_model(CreateCollection, orjson.loads(raw_body))
configuration = (
CreateCollectionConfiguration()
if not create.configuration
else load_create_collection_configuration_from_json(
create.configuration
)
)
(
maybe_tenant,
maybe_database,
# NOTE(rescrv, iron will auth): v1
) = self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.CREATE_COLLECTION,
tenant,
database,
create.name,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
return self._api.create_collection(
name=create.name,
configuration=configuration,
metadata=create.metadata,
get_or_create=create.get_or_create,
tenant=tenant,
database=database,
)
api_collection_model = cast(
CollectionModel,
await to_thread.run_sync(
process_create_collection,
request,
tenant,
database,
await request.body(),
limiter=self._capacity_limiter,
),
)
return api_collection_model
@trace_method("FastAPI.get_collection_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_collection_v1(
self,
request: Request,
collection_name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> CollectionModel:
(
maybe_tenant,
maybe_database,
) = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.GET_COLLECTION,
tenant,
database,
collection_name,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
async def inner():
api_collection_model = cast(
CollectionModel,
await to_thread.run_sync(
self._api.get_collection,
collection_name,
tenant,
database,
limiter=self._capacity_limiter,
),
)
return api_collection_model
return await inner()
@trace_method("FastAPI.update_collection_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def update_collection_v1(
self,
collection_id: str,
request: Request,
) -> None:
def process_update_collection(
request: Request, collection_id: str, raw_body: bytes
) -> None:
update = validate_model(UpdateCollection, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.UPDATE_COLLECTION,
None,
None,
collection_id,
)
configuration = (
None
if not update.new_configuration
else load_update_collection_configuration_from_json(
update.new_configuration
)
)
return self._api._modify(
id=_uuid(collection_id),
new_name=update.new_name,
new_metadata=update.new_metadata,
new_configuration=configuration,
)
await to_thread.run_sync(
process_update_collection,
request,
collection_id,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.delete_collection_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def delete_collection_v1(
self,
request: Request,
collection_name: str,
tenant: str = DEFAULT_TENANT,
database: str = DEFAULT_DATABASE,
) -> None:
(
maybe_tenant,
maybe_database,
) = await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.DELETE_COLLECTION,
tenant,
database,
collection_name,
)
if maybe_tenant:
tenant = maybe_tenant
if maybe_database:
database = maybe_database
await to_thread.run_sync(
self._api.delete_collection,
collection_name,
tenant,
database,
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.add_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def add_v1(
self,
request: Request,
collection_id: str,
) -> bool:
try:
def process_add(request: Request, raw_body: bytes) -> bool:
add = validate_model(AddEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.ADD,
None,
None,
collection_id,
)
return self._api._add(
collection_id=_uuid(collection_id),
ids=add.ids,
embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(add.embeddings)
if add.embeddings
else None,
),
metadatas=add.metadatas, # type: ignore
documents=add.documents, # type: ignore
uris=add.uris, # type: ignore
)
return cast(
bool,
await to_thread.run_sync(
process_add,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
except InvalidDimensionException as e:
raise HTTPException(status_code=500, detail=str(e))
@trace_method("FastAPI.update_v1", OpenTelemetryGranularity.OPERATION)
async def update_v1(
self,
request: Request,
collection_id: str,
) -> None:
def process_update(request: Request, raw_body: bytes) -> bool:
update = validate_model(UpdateEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.UPDATE,
None,
None,
collection_id,
)
return self._api._update(
collection_id=_uuid(collection_id),
ids=update.ids,
embeddings=convert_list_embeddings_to_np(update.embeddings)
if update.embeddings
else None,
metadatas=update.metadatas, # type: ignore
documents=update.documents, # type: ignore
uris=update.uris, # type: ignore
)
await to_thread.run_sync(
process_update,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.upsert_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def upsert_v1(
self,
request: Request,
collection_id: str,
) -> None:
def process_upsert(request: Request, raw_body: bytes) -> bool:
upsert = validate_model(AddEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.UPSERT,
None,
None,
collection_id,
)
return self._api._upsert(
collection_id=_uuid(collection_id),
ids=upsert.ids,
embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(upsert.embeddings)
if upsert.embeddings
else None,
),
metadatas=upsert.metadatas, # type: ignore
documents=upsert.documents, # type: ignore
uris=upsert.uris, # type: ignore
)
await to_thread.run_sync(
process_upsert,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.get_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def get_v1(
self,
collection_id: str,
request: Request,
) -> GetResult:
def process_get(request: Request, raw_body: bytes) -> GetResult:
get = validate_model(GetEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.GET,
None,
None,
collection_id,
)
return self._api._get(
collection_id=_uuid(collection_id),
ids=get.ids,
where=get.where,
limit=get.limit,
offset=get.offset,
where_document=get.where_document,
include=get.include,
)
get_result = cast(
GetResult,
await to_thread.run_sync(
process_get,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
if get_result["embeddings"] is not None:
get_result["embeddings"] = [
cast(Embedding, embedding).tolist()
for embedding in get_result["embeddings"]
]
return get_result
@trace_method("FastAPI.delete_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def delete_v1(
self,
collection_id: str,
request: Request,
) -> None:
def process_delete(request: Request, raw_body: bytes) -> None:
delete = validate_model(DeleteEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.DELETE,
None,
None,
collection_id,
)
return self._api._delete(
collection_id=_uuid(collection_id),
ids=delete.ids,
where=delete.where,
where_document=delete.where_document,
)
await to_thread.run_sync(
process_delete,
request,
await request.body(),
limiter=self._capacity_limiter,
)
@trace_method("FastAPI.count_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def count_v1(
self,
request: Request,
collection_id: str,
) -> int:
await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.COUNT,
None,
None,
collection_id,
)
return cast(
int,
await to_thread.run_sync(
self._api._count,
_uuid(collection_id),
limiter=self._capacity_limiter,
),
)
@trace_method("FastAPI.reset_v1", OpenTelemetryGranularity.OPERATION)
@rate_limit
async def reset_v1(
self,
request: Request,
) -> bool:
await self.auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.RESET,
None,
None,
None,
)
return cast(
bool,
await to_thread.run_sync(
self._api.reset,
limiter=self._capacity_limiter,
),
)
@trace_method(
"FastAPI.get_nearest_neighbors_v1", OpenTelemetryGranularity.OPERATION
)
@rate_limit
async def get_nearest_neighbors_v1(
self,
collection_id: str,
request: Request,
) -> QueryResult:
def process_query(request: Request, raw_body: bytes) -> QueryResult:
query = validate_model(QueryEmbedding, orjson.loads(raw_body))
# NOTE(rescrv, iron will auth): v1
self.sync_auth_and_get_tenant_and_database_for_request(
request.headers,
AuthzAction.QUERY,
None,
None,
collection_id,
)
return self._api._query(
collection_id=_uuid(collection_id),
query_embeddings=cast(
Embeddings,
convert_list_embeddings_to_np(query.query_embeddings)
if query.query_embeddings
else None,
),
n_results=query.n_results,
where=query.where,
where_document=query.where_document,
include=query.include,
)
nnresult = cast(
QueryResult,
await to_thread.run_sync(
process_query,
request,
await request.body(),
limiter=self._capacity_limiter,
),
)
if nnresult["embeddings"] is not None:
nnresult["embeddings"] = [
[cast(Embedding, embedding).tolist() for embedding in result]
for result in nnresult["embeddings"]
]
return nnresult
# =========================================================================
|
FastAPI
|
python
|
pytorch__pytorch
|
test/distributed/_composable/test_composability/test_pp_composability.py
|
{
"start": 2228,
"end": 2610
}
|
class ____(torch.nn.Module):
def __init__(self, d_hid: int):
super().__init__()
self.net1 = nn.Linear(d_hid, d_hid)
self.net2 = nn.Linear(d_hid, d_hid)
self.net3 = nn.Linear(d_hid, d_hid * 2)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
return x
|
MLPModuleEven
|
python
|
openai__gym
|
gym/envs/mujoco/reacher.py
|
{
"start": 111,
"end": 2190
}
|
class ____(MuJocoPyEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 50,
}
def __init__(self, **kwargs):
utils.EzPickle.__init__(self, **kwargs)
observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64)
MuJocoPyEnv.__init__(
self, "reacher.xml", 2, observation_space=observation_space, **kwargs
)
def step(self, a):
vec = self.get_body_com("fingertip") - self.get_body_com("target")
reward_dist = -np.linalg.norm(vec)
reward_ctrl = -np.square(a).sum()
reward = reward_dist + reward_ctrl
self.do_simulation(a, self.frame_skip)
if self.render_mode == "human":
self.render()
ob = self._get_obs()
return (
ob,
reward,
False,
False,
dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl),
)
def viewer_setup(self):
assert self.viewer is not None
self.viewer.cam.trackbodyid = 0
def reset_model(self):
qpos = (
self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq)
+ self.init_qpos
)
while True:
self.goal = self.np_random.uniform(low=-0.2, high=0.2, size=2)
if np.linalg.norm(self.goal) < 0.2:
break
qpos[-2:] = self.goal
qvel = self.init_qvel + self.np_random.uniform(
low=-0.005, high=0.005, size=self.model.nv
)
qvel[-2:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
theta = self.sim.data.qpos.flat[:2]
return np.concatenate(
[
np.cos(theta),
np.sin(theta),
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat[:2],
self.get_body_com("fingertip") - self.get_body_com("target"),
]
)
|
ReacherEnv
|
python
|
numba__numba
|
numba/tests/test_ssa.py
|
{
"start": 962,
"end": 5733
}
|
class ____(SSABaseTest):
"""
Contains tests to help isolate problems in SSA
"""
def test_argument_name_reused(self):
@njit
def foo(x):
x += 1
return x
self.check_func(foo, 123)
def test_if_else_redefine(self):
@njit
def foo(x, y):
z = x * y
if x < y:
z = x
else:
z = y
return z
self.check_func(foo, 3, 2)
self.check_func(foo, 2, 3)
def test_sum_loop(self):
@njit
def foo(n):
c = 0
for i in range(n):
c += i
return c
self.check_func(foo, 0)
self.check_func(foo, 10)
def test_sum_loop_2vars(self):
@njit
def foo(n):
c = 0
d = n
for i in range(n):
c += i
d += n
return c, d
self.check_func(foo, 0)
self.check_func(foo, 10)
def test_sum_2d_loop(self):
@njit
def foo(n):
c = 0
for i in range(n):
for j in range(n):
c += j
c += i
return c
self.check_func(foo, 0)
self.check_func(foo, 10)
def check_undefined_var(self, should_warn):
@njit
def foo(n):
if n:
if n > 0:
c = 0
return c
else:
# variable c is not defined in this branch
c += 1
return c
if should_warn:
with self.assertWarns(errors.NumbaWarning) as warns:
# n=1 so we won't actually run the branch with the uninitialized
self.check_func(foo, 1)
self.assertIn("Detected uninitialized variable c",
str(warns.warning))
else:
self.check_func(foo, 1)
with self.assertRaises(UnboundLocalError):
foo.py_func(0)
def test_undefined_var(self):
with override_config('ALWAYS_WARN_UNINIT_VAR', 0):
self.check_undefined_var(should_warn=False)
with override_config('ALWAYS_WARN_UNINIT_VAR', 1):
self.check_undefined_var(should_warn=True)
def test_phi_propagation(self):
@njit
def foo(actions):
n = 1
i = 0
ct = 0
while n > 0 and i < len(actions):
n -= 1
while actions[i]:
if actions[i]:
if actions[i]:
n += 10
actions[i] -= 1
else:
if actions[i]:
n += 20
actions[i] += 1
ct += n
ct += n
return ct, n
self.check_func(foo, np.array([1, 2]))
def test_unhandled_undefined(self):
def function1(arg1, arg2, arg3, arg4, arg5):
# This function is auto-generated.
if arg1:
var1 = arg2
var2 = arg3
var3 = var2
var4 = arg1
return
else:
if arg2:
if arg4:
var5 = arg4 # noqa: F841
return
else:
var6 = var4
return
return var6
else:
if arg5:
if var1:
if arg5:
var1 = var6
return
else:
var7 = arg2 # noqa: F841
return arg2
return
else:
if var2:
arg5 = arg2
return arg1
else:
var6 = var3
return var4
return
return
else:
var8 = var1
return
return var8
var9 = var3 # noqa: F841
var10 = arg5 # noqa: F841
return var1
# The argument values is not critical for re-creating the bug
# because the bug is in compile-time.
expect = function1(2, 3, 6, 0, 7)
got = njit(function1)(2, 3, 6, 0, 7)
self.assertEqual(expect, got)
|
TestSSA
|
python
|
milvus-io__pymilvus
|
pymilvus/orm/mutation.py
|
{
"start": 619,
"end": 1961
}
|
class ____:
def __init__(self, mr: Any) -> None:
self._mr = mr
@property
def primary_keys(self):
return self._mr.primary_keys if self._mr else []
@property
def insert_count(self):
return self._mr.insert_count if self._mr else 0
@property
def delete_count(self):
return self._mr.delete_count if self._mr else 0
@property
def upsert_count(self):
return self._mr.upsert_count if self._mr else 0
@property
def timestamp(self):
return self._mr.timestamp if self._mr else 0
@property
def succ_count(self):
return self._mr.succ_count if self._mr else 0
@property
def err_count(self):
return self._mr.err_count if self._mr else 0
@property
def succ_index(self):
return self._mr.succ_index if self._mr else []
@property
def err_index(self):
return self._mr.err_index if self._mr else []
# The unit of this cost is vcu, similar to token
@property
def cost(self):
return self._mr.cost if self._mr else 0
def __str__(self) -> str:
"""
Return the information of mutation result
:return str:
The information of mutation result.
"""
return self._mr.__str__() if self._mr else ""
__repr__ = __str__
|
MutationResult
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/project.py
|
{
"start": 10454,
"end": 11373
}
|
class ____(_ProjectSerializerOptionalBaseResponse):
id: str
slug: str
name: str # TODO: add deprecation about this field (not used in app)
platform: str | None
dateCreated: datetime
isBookmarked: bool
isMember: bool
features: list[str]
firstEvent: datetime | None
firstTransactionEvent: bool
access: list[str]
hasAccess: bool
hasFeedbacks: bool
hasFlags: bool
hasMinifiedStackTrace: bool
hasMonitors: bool
hasNewFeedbacks: bool
hasProfiles: bool
hasReplays: bool
hasSessions: bool
hasInsightsHttp: bool
hasInsightsDb: bool
hasInsightsAssets: bool
hasInsightsAppStart: bool
hasInsightsScreenLoad: bool
hasInsightsVitals: bool
hasInsightsCaches: bool
hasInsightsQueues: bool
hasInsightsAgentMonitoring: bool
hasInsightsMCP: bool
hasLogs: bool
hasTraceMetrics: bool
|
ProjectSerializerBaseResponse
|
python
|
pytorch__pytorch
|
torch/distributed/fsdp/wrap.py
|
{
"start": 21606,
"end": 23154
}
|
class ____:
"""
Helper class to wrap modules based on default config args via a context manager.
See :func:`enable_wrap` for more information.
"""
in_autowrap_context: bool = False # Context flag
wrapper_cls: Optional[Callable] = None # The wrapper class
kwargs: dict[str, Any] = {} # Wrapper's args
def __init__(self, **kwargs: dict[str, Any]):
self.kwargs = kwargs
@staticmethod
def enable_autowrap_context(kwargs: Any) -> None:
if _ConfigAutoWrap.in_autowrap_context:
raise NotImplementedError(
"You are already within an autowrap context and we currently do not supported nested autowrap."
)
_ConfigAutoWrap.in_autowrap_context = True
# Get and save the wrapper cls for the context.
if "wrapper_cls" not in kwargs:
raise AssertionError(
"Expected to pass in wrapper_cls arg into _ConfigAutoWrap."
)
_ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"])
del kwargs["wrapper_cls"]
# Save the rest.
_ConfigAutoWrap.kwargs = kwargs
@staticmethod
def disable_autowrap_context() -> None:
_ConfigAutoWrap.in_autowrap_context = False
_ConfigAutoWrap.wrapper_cls = None
_ConfigAutoWrap.kwargs = {}
def __enter__(self) -> None:
self.enable_autowrap_context(self.kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.disable_autowrap_context()
|
_ConfigAutoWrap
|
python
|
openai__openai-python
|
src/openai/types/responses/input_token_count_params.py
|
{
"start": 4206,
"end": 5498
}
|
class ____(TypedDict, total=False):
format: ResponseFormatTextConfigParam
"""An object specifying the format that the model must output.
Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
ensures the model will match your supplied JSON schema. Learn more in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
The default format is `{ "type": "text" }` with no additional options.
**Not recommended for gpt-4o and newer models:**
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
"""
verbosity: Optional[Literal["low", "medium", "high"]]
"""Constrains the verbosity of the model's response.
Lower values will result in more concise responses, while higher values will
result in more verbose responses. Currently supported values are `low`,
`medium`, and `high`.
"""
ToolChoice: TypeAlias = Union[
ToolChoiceOptions,
ToolChoiceAllowedParam,
ToolChoiceTypesParam,
ToolChoiceFunctionParam,
ToolChoiceMcpParam,
ToolChoiceCustomParam,
ToolChoiceApplyPatchParam,
ToolChoiceShellParam,
]
|
Text
|
python
|
doocs__leetcode
|
solution/0100-0199/0105.Construct Binary Tree from Preorder and Inorder Traversal/Solution2.py
|
{
"start": 0,
"end": 675
}
|
class ____:
def getBinaryTrees(self, preOrder: List[int], inOrder: List[int]) -> List[TreeNode]:
def dfs(i: int, j: int, n: int) -> List[TreeNode]:
if n <= 0:
return [None]
v = preOrder[i]
ans = []
for k in d[v]:
if j <= k < j + n:
for l in dfs(i + 1, j, k - j):
for r in dfs(i + 1 + k - j, k + 1, n - 1 - (k - j)):
ans.append(TreeNode(v, l, r))
return ans
d = defaultdict(list)
for i, x in enumerate(inOrder):
d[x].append(i)
return dfs(0, 0, len(preOrder))
|
Solution
|
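Note (editor's addition): the record above enumerates every binary tree consistent with a preorder/inorder pair that may contain duplicate values. The sketch below is illustrative only; _TreeNode and _build_all are stand-in names invented here (not part of the source repository) so the snippet runs on its own.
from collections import defaultdict
from typing import List, Optional

class _TreeNode:
    # Minimal stand-in for the TreeNode type assumed by the record above.
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def _build_all(pre: List[int], ino: List[int]) -> List[Optional[_TreeNode]]:
    # Same idea as the record: for the current preorder value, try every
    # matching inorder position inside the current window [j, j + n).
    pos = defaultdict(list)
    for idx, value in enumerate(ino):
        pos[value].append(idx)
    def dfs(i: int, j: int, n: int):
        if n <= 0:
            return [None]
        value, out = pre[i], []
        for k in pos[value]:
            if j <= k < j + n:
                for left in dfs(i + 1, j, k - j):
                    for right in dfs(i + 1 + k - j, k + 1, n - 1 - (k - j)):
                        out.append(_TreeNode(value, left, right))
        return out
    return dfs(0, 0, len(pre))

if __name__ == "__main__":
    # Two equal values admit two tree shapes; distinct values admit exactly one.
    print(len(_build_all([2, 2], [2, 2])))        # 2
    print(len(_build_all([1, 2, 3], [2, 1, 3])))  # 1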
python
|
catalyst-team__catalyst
|
examples/catalyst_rl/dqn.py
|
{
"start": 550,
"end": 2424
}
|
class ____(ISampler):
def get_action(
self, env, actor: nn.Module, state: np.array, epsilon: float = -1
) -> int:
if np.random.random() < epsilon:
action = env.action_space.sample()
else:
state = torch.tensor(state[None], dtype=torch.float32)
q_values = actor(state).detach().cpu().numpy()[0]
action = np.argmax(q_values)
return int(action)
def get_trajectory(
self,
env: gym.Env,
actor: nn.Module,
device,
sampler_index: int = None,
trajectory_index: int = None,
t_max: int = 1000,
) -> Trajectory:
if sampler_index is not None:
epsilon = float(pow(0.9996, trajectory_index + 1) / (sampler_index + 1))
else:
epsilon = None
state = env.reset()
observations, actions, rewards, dones = [], [], [], []
for t in range(t_max):
action = self.get_action(env, actor, state=state, epsilon=epsilon)
next_state, reward, done, _ = env.step(action)
observations.append(state)
actions.append(action)
rewards.append(reward)
dones.append(done)
state = next_state
if done:
break
trajectory = Trajectory(observations, actions, rewards, dones)
return trajectory
def get_network(env, num_hidden: int = 128):
inner_fn = get_optimal_inner_init(nn.ReLU)
outer_fn = outer_init
network = torch.nn.Sequential(
nn.Linear(env.observation_space.shape[0], num_hidden),
nn.ReLU(),
nn.Linear(num_hidden, num_hidden),
nn.ReLU(),
)
head = nn.Linear(num_hidden, env.action_space.n)
network.apply(inner_fn)
head.apply(outer_fn)
return torch.nn.Sequential(network, head)
# Catalyst.RL
|
Sampler
|
python
|
pypa__setuptools
|
setuptools/_vendor/autocommand/autoparse.py
|
{
"start": 1396,
"end": 1464
}
|
class ____(AutocommandError):
'''Docstring error'''
|
DocstringError
|
python
|
google__jax
|
jax/_src/pallas/core.py
|
{
"start": 27556,
"end": 36627
}
|
class ____:
"""An internal canonicalized version of GridSpec.
Encodes the calling conventions of the pallas_call primitive, the kernel,
and the index maps.
The pallas_call is invoked with: ``*dynamic_grid_sizes, *index, *inputs``.
The ``index`` operands are for the scalar prefetch.
The kernel function is invoked with:
``*index, *inputs, *scratch``.
The index map functions are invoked with:
``*program_ids, *index``.
See the `check_invariants` method for a more precise specification.
"""
grid: GridMappingGrid
grid_names: tuple[Hashable, ...] | None
# Block mappings for: *inputs, *outputs
block_mappings: tuple[BlockMapping, ...]
# The inputs for tracing the index map: the tree and the flat avals
index_map_tree: tree_util.PyTreeDef
index_map_avals: tuple[jax_core.AbstractValue, ...]
# Which dimensions in `grid` are vmapped.
vmapped_dims: tuple[int, ...]
scratch_avals: tuple[jax_core.AbstractValue, ...]
num_index_operands: int
num_inputs: int
num_outputs: int
get_grid_indices: Callable | None = None
local_grid_env: Callable | None = None
# Primarily dictates how much debugging information is printed.
debug: bool = False
def check_invariants(self) -> None:
if not config.enable_checks.value: return
assert (len(self.block_mappings) == self.num_inputs + self.num_outputs), (
self.num_inputs, self.num_outputs,
self.block_mappings
)
# index_map_avals = int32[] * len(self.grid) + index_operands
assert len(self.index_map_avals) == len(self.grid) + self.num_index_operands, (
self.index_map_avals,
self.grid,
self.num_index_operands,
)
# Check that we can put together the avals and the tree.
index_map_args, index_map_kwargs = self.index_map_tree.unflatten(
self.index_map_avals)
assert not index_map_kwargs
assert len(index_map_args) >= len(self.grid)
for i in range(len(self.grid)):
index_map_arg = index_map_args[i]
assert index_map_arg.shape == (), f"index_map_arg: {index_map_arg}"
assert index_map_arg.dtype == jnp.int32, f"index_map_arg: {index_map_arg}"
assert len(self.vmapped_dims) <= len(self.grid)
for i in self.vmapped_dims:
assert 0 <= i < len(self.grid)
if self.grid_names is not None:
assert len(self.grid) == len(self.grid_names), (self.grid, self.grid_names)
for bm in self.block_mappings:
bm.check_invariants()
assert tuple(self.index_map_avals) == tuple(
bm.index_map_jaxpr.in_avals
), (
self.index_map_avals,
"|",
bm.index_map_jaxpr.in_avals,
)
def replace(self, **kwargs) -> GridMapping:
new_self = dataclasses.replace(self, **kwargs)
new_self.check_invariants()
return new_self
@property
def num_dynamic_grid_bounds(self):
return sum(b is dynamic_grid_dim for b in self.grid)
@property
def num_scratch_operands(self):
return len(self.scratch_avals)
@property
def static_grid(self) -> StaticGrid:
if self.num_dynamic_grid_bounds:
raise ValueError("Expected a grid with fully static bounds")
return self.grid # type: ignore
@contextlib.contextmanager
def trace_env(self):
if self.grid_names is None:
axis_env_ctx = contextlib.nullcontext()
else:
axis_env_ctx = jax_core.extend_axis_env_nd(
zip(self.grid_names, self.grid)
)
with tracing_grid_env(self.grid, self.vmapped_dims), axis_env_ctx:
yield
@property
def slice_index_ops(self):
"""Returns a slice object to select the index operands to a kernel.
This works on a sequence that contains *index, *ins, *outs, *scratch.
"""
return slice(0, self.num_index_operands)
@property
def slice_block_ops(self):
"""Returns a slice to select the block operands to a kernel.
The block operands are: *ins, *outs, the same for which we
have `self.block_mappings`.
This works on a sequence that contains *index, *ins, *outs, *scratch.
"""
return slice(self.num_index_operands,
self.num_index_operands + len(self.block_mappings))
@property
def slice_scratch_ops(self):
"""Returns a slice object to select the scratch operands to a kernel.
This works on a sequence that contains *index, *ins, *outs, *scratch.
"""
if self.num_scratch_operands:
return slice(-self.num_scratch_operands, None)
else:
return slice(0, 0)
@property
def in_shapes(self) -> Iterable[jax_core.ShapeDtypeStruct]:
"""The shapes of *index, *inputs."""
index_shapes = (
jax_core.ShapeDtypeStruct(ia.shape, ia.dtype)
for ia in self.index_map_avals[len(self.grid) :]
)
inputs_shapes = (
jax_core.ShapeDtypeStruct(bm.array_aval.shape, bm.array_aval.dtype)
for bm in self.block_mappings[:self.num_inputs])
return itertools.chain(index_shapes, inputs_shapes)
@property
def block_mappings_output(self) -> Iterable[BlockMapping]:
return itertools.islice(
self.block_mappings,
self.num_inputs,
self.num_inputs + self.num_outputs)
@property
def out_shapes(self) -> Iterable[jax_core.ShapeDtypeStruct]:
return tuple(
jax_core.ShapeDtypeStruct(bm.array_aval.shape, bm.array_aval.dtype)
for bm in self.block_mappings_output)
def to_lojax(self):
input_block_mappings, output_block_mappings, () = split_list(
self.block_mappings,
[self.num_inputs, self.num_inputs + self.num_outputs],
)
updated_input_block_mappings = [
lo_mapping
for bm in input_block_mappings
for lo_mapping in bm.to_lojax(
self.index_map_avals,
self.index_map_tree,
self.grid,
self.vmapped_dims,
)
]
updated_output_block_mappings = [
lo_mapping
for bm in output_block_mappings
for lo_mapping in bm.to_lojax(
self.index_map_avals,
self.index_map_tree,
self.grid,
self.vmapped_dims,
)
]
new_num_inputs = len(updated_input_block_mappings)
new_num_outputs = len(updated_output_block_mappings)
updated_scratch_avals = [
lo_aval
for aval in self.scratch_avals
for lo_aval in (aval.lo_ty() if aval.is_high else [aval])
]
updated_block_mappings = updated_input_block_mappings + updated_output_block_mappings
return self.replace(block_mappings=tuple(updated_block_mappings),
num_inputs=new_num_inputs,
num_outputs=new_num_outputs,
scratch_avals=tuple(updated_scratch_avals))
def __repr__(self):
if self.debug:
return (
f"GridMapping(grid={self.grid}, grid_names={self.grid_names}, "
f"block_mappings={self.block_mappings}, "
f"index_map_tree={self.index_map_tree}, "
f"index_map_avals={self.index_map_avals}, "
f"vmapped_dims={self.vmapped_dims}, "
f"num_index_operands={self.num_index_operands}, "
f"num_inputs={self.num_inputs}, "
f"num_outputs={self.num_outputs}, "
f"num_scratch_operands={self.num_scratch_operands}, "
f"get_grid_indices={self.get_grid_indices}, "
f"local_grid_env={self.local_grid_env}, "
f"debug={self.debug})"
)
return (
f"GridMapping(grid={self.grid}, block_mappings={self.block_mappings})"
)
def __str__(self):
return self.__repr__()
def _is_valid_grid_dim(dim: int | jax_typing.Array) -> bool:
if isinstance(dim, jax_typing.Array):
return True
return jax_core.is_dim(dim)
def _max_shape_from_aval(array_aval: jax_core.ShapedArray):
array_aval_shape = list(array_aval.shape)
for i, s in enumerate(array_aval.shape):
try:
aval = jax_core.get_aval(s)
if isinstance(aval, jax_core.DShapedArray):
array_aval_shape[i] = aval.dtype.bound
except OverflowError as e:
# Note - there are annoying cases where on 32 bit hardware,
# a flattened index space may overflow - for these cases,
# we just take the shape as is.
# In most places, this is totally sound to do.
# For ragged/jumble inputs, this will fail downstream.
return array_aval.shape
return tuple(array_aval_shape)
def _convert_block_spec_to_block_mapping(
block_spec: BlockSpec,
origin: OriginStr,
array_aval: jax_core.ShapedArray,
*,
# Inputs for the index_map
index_map_avals: Sequence[jax_core.AbstractValue],
index_map_tree: tree_util.PyTreeDef,
grid: GridMappingGrid,
vmapped_dims: tuple[int, ...],
debug: bool = False,
) -> BlockMapping:
if block_spec is no_block_spec:
block_spec = BlockSpec(None, None)
return block_spec.to_block_mapping(
origin,
array_aval,
index_map_avals=index_map_avals,
index_map_tree=index_map_tree,
grid=grid,
vmapped_dims=vmapped_dims,
debug=debug,
)
index_map_grid_aval = jax_core.ShapedArray((), jnp.int32)
|
GridMapping
|
python
|
doocs__leetcode
|
solution/2200-2299/2240.Number of Ways to Buy Pens and Pencils/Solution.py
|
{
"start": 0,
"end": 246
}
|
class ____:
def waysToBuyPensPencils(self, total: int, cost1: int, cost2: int) -> int:
ans = 0
for x in range(total // cost1 + 1):
y = (total - (x * cost1)) // cost2 + 1
ans += y
return ans
|
Solution
|
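Note (editor's addition): the record above counts (pens, pencils) purchases with a closed form per pen count. A hedged brute-force cross-check follows; the function name and the sample values (LeetCode 2240's total=20, cost1=10, cost2=5, answer 9) are illustrative additions, not part of the source repository.
def _ways_brute_force(total: int, cost1: int, cost2: int) -> int:
    # Enumerate every (pens, pencils) pair explicitly; small inputs only.
    count = 0
    for pens in range(total // cost1 + 1):
        for pencils in range(total // cost2 + 1):
            if pens * cost1 + pencils * cost2 <= total:
                count += 1
    return count

if __name__ == "__main__":
    print(_ways_brute_force(20, 10, 5))  # 9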
python
|
jina-ai__jina
|
tests/integration/stateful/test_stateful.py
|
{
"start": 521,
"end": 11841
}
|
class ____(TextDoc):
id: str
tags: Dict[str, Union[str, int]] = {}
l: List[Union[str, int]] = []
@pytest.fixture(scope='function')
def kill_all_children():
yield
from multiprocessing import active_children
children = active_children()
for p in children:
print(f' Child process {p.pid} is still active')
p.kill()
@pytest.fixture(scope='module')
def stateful_exec_docker_image_built():
import docker
client = docker.from_env()
client.images.build(
path=os.path.join(cur_dir, 'stateful_snapshot_exec/'), tag='stateful-exec'
)
client.close()
yield
time.sleep(3)
client = docker.from_env()
client.containers.prune()
def assert_is_indexed(client, search_da):
docs = client.search(
inputs=search_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
for doc in docs:
assert doc.text == f'ID {doc.id}'
def assert_all_replicas_indexed(client, search_da, num_replicas=3, key='pid'):
for query in search_da:
pids = set()
for _ in range(10):
for resp in client.search(
inputs=query, request_size=1, return_type=DocumentArray[TextDocWithId]
):
pids.add(resp.tags[key])
assert resp.text == f'ID {query.id}'
if len(pids) == num_replicas:
break
assert len(pids) == num_replicas
@pytest.mark.timeout(240)
@pytest.mark.parametrize('executor_cls', [MyStateExecutor, MyStateExecutorNoSnapshot])
@pytest.mark.parametrize('shards', [2, 1])
@pytest.mark.skipif(not docarray_v2, reason='tests support for docarray>=0.30')
def test_stateful_index_search(
executor_cls, shards, tmpdir, kill_all_children
):
replicas = 3
if shards > 1:
peer_ports = {}
for shard in range(shards):
peer_ports[shard] = [random_port() for _ in range(replicas)]
else:
peer_ports = [random_port() for _ in range(replicas)]
dep = Deployment(
uses=executor_cls,
replicas=replicas,
workspace=tmpdir,
stateful=True,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
shards=shards,
volumes=[str(tmpdir) + ':' + '/workspace'],
peer_ports=peer_ports,
polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'},
)
with dep:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)]
)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(1)]
)
dep.index(
inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
# allowing some time for the state to be replicated
time.sleep(20)
# checking against the main read replica
assert_is_indexed(dep, search_da)
assert_all_replicas_indexed(dep, search_da)
docs = dep.post(
on='/similarity',
inputs=search_da,
request_size=1,
return_type=DocumentArray[TextDocWithId],
)
for doc in docs:
assert doc.text == 'similarity'
assert len(doc.l) == len(index_da) # good merging of results
time.sleep(10)
@pytest.mark.timeout(240)
@pytest.mark.parametrize('executor_cls', [MyStateExecutor, MyStateExecutorNoSnapshot])
@pytest.mark.parametrize('shards', [2, 1])
@pytest.mark.skipif(
'GITHUB_WORKFLOW' in os.environ or not docarray_v2,
    reason='tests support for docarray>=0.30 and does not work on GitHub CI due to an issue with restarting the gRPC server',
)
def test_stateful_index_search_restore(
executor_cls, shards, tmpdir, kill_all_children
):
replicas = 3
peer_ports = {}
for shard in range(shards):
peer_ports[shard] = [random_port() for _ in range(replicas)]
dep = Deployment(
uses=executor_cls,
replicas=replicas,
workspace=tmpdir,
stateful=True,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
shards=shards,
volumes=[str(tmpdir) + ':' + '/workspace'],
peer_ports=peer_ports,
polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'},
)
with dep:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)]
)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(1)]
)
dep.index(
inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
# allowing some time for the state to be replicated
time.sleep(20)
# checking against the main read replica
assert_is_indexed(dep, search_da)
assert_all_replicas_indexed(dep, search_da)
time.sleep(10)
# test restoring
dep_restore = Deployment(
uses=executor_cls,
replicas=replicas,
workspace=tmpdir,
stateful=True,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
shards=shards,
volumes=[str(tmpdir) + ':' + '/workspace'],
peer_ports=peer_ports,
polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'},
)
with dep_restore:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)]
)
dep_restore.index(
inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
time.sleep(20)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(200)]
)
assert_all_replicas_indexed(dep_restore, search_da)
time.sleep(10)
@pytest.mark.skipif(not docarray_v2, reason='tests support for docarray>=0.30')
@pytest.mark.parametrize('shards', [2, 1])
def test_stateful_index_search_container(
shards, tmpdir, stateful_exec_docker_image_built
):
replicas = 3
peer_ports = {}
for shard in range(shards):
peer_ports[shard] = [random_port() for _ in range(replicas)]
dep = Deployment(
uses='docker://stateful-exec',
replicas=replicas,
stateful=True,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
shards=shards,
workspace='/workspace/tmp',
volumes=[str(tmpdir) + ':' + '/workspace/tmp'],
peer_ports=peer_ports,
polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'},
)
with dep:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)]
)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(100)]
)
dep.index(
inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
# allowing some time for the state to be replicated
time.sleep(20)
# checking against the main read replica
assert_is_indexed(dep, search_da)
assert_all_replicas_indexed(dep, search_da, key='num')
time.sleep(10)
dep_restore = Deployment(
uses='docker://stateful-exec',
replicas=replicas,
stateful=True,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
shards=shards,
workspace='/workspace/tmp',
volumes=[str(tmpdir) + ':' + '/workspace/tmp'],
peer_ports=peer_ports,
polling={'/index': 'ANY', '/search': 'ALL', '/similarity': 'ALL'},
)
# test restoring
with dep_restore:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)]
)
dep_restore.index(
inputs=index_da, request_size=1, return_type=DocumentArray[TextDocWithId]
)
time.sleep(20)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(200)]
)
assert_all_replicas_indexed(dep_restore, search_da, key='num')
time.sleep(10)
@pytest.mark.skipif(not docarray_v2, reason='tests support for docarray>=0.30')
@pytest.mark.parametrize('executor_cls', [MyStateExecutor])
def test_add_new_replica(executor_cls, tmpdir):
from jina.parsers import set_pod_parser
from jina.orchestrate.pods.factory import PodFactory
gateway_port = random_port()
replicas = 3
peer_ports = {}
for shard in range(1):
peer_ports[shard] = [random_port() for _ in range(replicas)]
ctx_mngr = Flow(port=gateway_port).add(
uses=executor_cls,
replicas=replicas,
workspace=tmpdir,
stateful=True,
peer_ports=peer_ports,
raft_configuration={
'snapshot_interval': 10,
'snapshot_threshold': 5,
'trailing_logs': 10,
'LogLevel': 'INFO',
},
)
with ctx_mngr:
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100)]
)
ctx_mngr.index(inputs=index_da, request_size=1)
# allowing some time for snapshots
time.sleep(30)
new_replica_port = random_port()
args = set_pod_parser().parse_args([])
args.name = 'new-replica'
args.host = args.host[0]
args.port = [new_replica_port]
args.stateful = True
args.workspace = str(tmpdir)
args.uses = executor_cls.__name__
args.replica_id = str(replicas + 1)
with PodFactory.build_pod(args):
for port in peer_ports[0]:
leader_address = f'127.0.0.1:{port}' # detect the Pods addresses of the original Flow
voter_address = f'127.0.0.1:{new_replica_port}'
from jina.serve.consensus.add_voter.call_add_voter import call_add_voter
ret = call_add_voter(leader_address, '4', voter_address)
if ret is True:
break
time.sleep(10)
index_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(100, 200)]
)
ctx_mngr.index(
inputs=index_da,
request_size=1,
return_type=DocumentArray[TextDocWithId],
)
time.sleep(20)
search_da = DocumentArray[TextDocWithId](
[TextDocWithId(id=f'{i}') for i in range(200)]
)
client = Client(port=new_replica_port)
assert_is_indexed(client, search_da=search_da)
time.sleep(10)
|
TextDocWithId
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 604823,
"end": 605152
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("RequestedReviewer", graphql_name="node")
|
RequestedReviewerEdge
|
python
|
doocs__leetcode
|
solution/1500-1599/1588.Sum of All Odd Length Subarrays/Solution2.py
|
{
"start": 0,
"end": 305
}
|
class ____:
def sumOddLengthSubarrays(self, arr: List[int]) -> int:
ans, f, g = arr[0], arr[0], 0
for i in range(1, len(arr)):
ff = g + arr[i] * (i // 2 + 1)
gg = f + arr[i] * ((i + 1) // 2)
f, g = ff, gg
ans += f
return ans
|
Solution
|
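Note (editor's addition): in the record above, f appears to accumulate the sum over odd-length subarrays ending at index i and g the sum over even-length ones, with ff/gg extending the opposite parity by arr[i]. A hedged O(n^2) cross-check follows; the function name and the LeetCode 1588 sample value (58) are illustrative additions, not part of the source repository.
from typing import List

def _sum_odd_length_subarrays_naive(arr: List[int]) -> int:
    # Sum every subarray arr[i..j] whose length (j - i + 1) is odd.
    total = 0
    for i in range(len(arr)):
        for j in range(i, len(arr), 2):
            total += sum(arr[i:j + 1])
    return total

if __name__ == "__main__":
    print(_sum_odd_length_subarrays_naive([1, 4, 2, 5, 3]))  # 58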
python
|
ray-project__ray
|
python/ray/air/util/tensor_extensions/arrow.py
|
{
"start": 24562,
"end": 25459
}
|
class ____(_BaseFixedShapeArrowTensorType):
"""Arrow ExtensionType (v2) for tensors (supporting tensors > 4Gb)."""
OFFSET_DTYPE = pa.int64()
def __init__(self, shape: Tuple[int, ...], dtype: pa.DataType):
"""
Construct the Arrow extension type for array of fixed-shaped tensors.
Args:
shape: Shape of contained tensors.
dtype: pyarrow dtype of tensor elements.
"""
super().__init__(shape, pa.large_list(dtype), "ray.data.arrow_tensor_v2")
@classmethod
def _get_deserialize_parameter(cls, storage_type, serialized):
return (serialized, storage_type.value_type)
@classmethod
def _arrow_ext_deserialize_compute(cls, serialized, value_type):
shape = tuple(_deserialize_with_fallback(serialized, "shape"))
return cls(shape, value_type)
@PublicAPI(stability="beta")
|
ArrowTensorTypeV2
|
python
|
ApeWorX__ape
|
src/ape/api/transactions.py
|
{
"start": 1434,
"end": 9034
}
|
class ____(BaseInterfaceModel):
"""
An API class representing a transaction.
Ecosystem plugins implement one or more transaction APIs
depending on which schemas they permit,
such as typed-transactions from `EIP-1559 <https://eips.ethereum.org/EIPS/eip-1559>`__.
"""
chain_id: Optional[HexInt] = Field(default=0, alias="chainId")
receiver: Optional[AddressType] = Field(default=None, alias="to")
sender: Optional[AddressType] = Field(default=None, alias="from")
gas_limit: Optional[HexInt] = Field(default=None, alias="gas")
nonce: Optional[HexInt] = None # NOTE: `Optional` only to denote using default behavior
value: HexInt = 0
data: HexBytes = HexBytes("")
type: HexInt
max_fee: Optional[HexInt] = None
max_priority_fee: Optional[HexInt] = None
# If left as None, will get set to the network's default required confirmations.
required_confirmations: Optional[HexInt] = Field(default=None, exclude=True)
signature: Optional[TransactionSignature] = Field(default=None, exclude=True)
model_config = ConfigDict(populate_by_name=True)
def __init__(self, *args, **kwargs):
raise_on_revert = kwargs.pop("raise_on_revert", True)
super().__init__(*args, **kwargs)
self._raise_on_revert = raise_on_revert
@field_validator("gas_limit", mode="before")
@classmethod
def validate_gas_limit(cls, value):
if value is None:
value = (
cls.network_manager.active_provider.network.gas_limit
if cls.network_manager.connected
else 0
)
if value == "auto" or isinstance(value, AutoGasLimit):
return None # Delegate to `ProviderAPI.estimate_gas_cost`
elif value == "max":
if not cls.network_manager.connected:
raise NetworkError("Must be connected to use 'max'.")
return cls.network_manager.active_provider.max_gas
elif isinstance(value, str) and is_hex(value):
return to_int(hexstr=value)
elif isinstance(value, str) and value.isnumeric():
return to_int(value)
return value
@property
def gas(self) -> Optional[int]:
"""
Alias for ``.gas_limit``.
"""
return self.gas_limit
@gas.setter
def gas(self, value):
self.gas_limit = self.validate_gas_limit(value)
@property
def raise_on_revert(self) -> bool:
"""
``True`` means VM-reverts should raise exceptions.
``False`` allows getting failed receipts.
"""
return self._raise_on_revert
@raise_on_revert.setter
def raise_on_revert(self, value):
self._raise_on_revert = value
@property
def total_transfer_value(self) -> int:
"""
The total amount of WEI that a transaction could use.
Useful for determining if an account balance can afford
to submit the transaction.
"""
if self.max_fee is None:
raise TransactionError("`self.max_fee` must not be None.")
return self.value + self.max_fee
@property
@abstractmethod
def txn_hash(self) -> HexBytes:
"""
The calculated hash of the transaction.
"""
# TODO: In 0.9, simply rename txn_hash to hash.
@property
def hash(self) -> HexBytes:
"""
Alias for ``self.txn_hash``.
"""
return self.txn_hash
@property
def receipt(self) -> Optional["ReceiptAPI"]:
"""
This transaction's associated published receipt, if it exists.
"""
try:
txn_hash = to_hex(self.txn_hash)
except SignatureError:
return None
try:
return self.chain_manager.get_receipt(txn_hash)
except (TransactionNotFoundError, ProviderNotConnectedError):
return None
@property
def trace(self) -> "TraceAPI":
"""
The transaction trace. Only works if this transaction was published
and you are using a provider that supports tracing.
Raises:
:class:`~ape.exceptions.APINotImplementedError`: When using a provider
that does not support tracing.
"""
return self.provider.get_transaction_trace(to_hex(self.txn_hash))
@cached_property
def _calldata_repr(self) -> "CalldataRepr":
return self.local_project.config.display.calldata
@abstractmethod
def serialize_transaction(self) -> bytes:
"""
Serialize the transaction
"""
@log_instead_of_fail(default="<TransactionAPI>")
def __repr__(self) -> str:
# NOTE: Using JSON mode for style.
data = self.model_dump(mode="json")
params = ", ".join(f"{k}={v}" for k, v in data.items())
cls_name = getattr(type(self), "__name__", TransactionAPI.__name__)
return f"<{cls_name} {params}>"
def __str__(self) -> str:
return self.to_string()
def to_string(self, calldata_repr: Optional["CalldataRepr"] = None) -> str:
"""
Get the stringified representation of the transaction.
Args:
calldata_repr (:class:`~ape.types.abi.CalldataRepr` | None): Pass "full"
to see the full calldata. Defaults to the value from the config.
Returns:
str
"""
data = self.model_dump(mode="json") # JSON mode used for style purposes.
calldata_repr = calldata_repr or self._calldata_repr
data["data"] = self._get_calldata_repr_str(calldata_repr=calldata_repr)
params = "\n ".join(f"{k}: {v}" for k, v in data.items())
cls_name = getattr(type(self), "__name__", TransactionAPI.__name__)
tx_str = f"{cls_name}:\n {params}"
# Decode the actual call so the user can see the function.
if decoded := self._decoded_call():
tx_str = f"{tx_str}\n\n\t{decoded}"
return tx_str
def _get_calldata_repr_str(self, calldata_repr: "CalldataRepr") -> str:
calldata = HexBytes(self.data)
# Elide the transaction calldata for abridged representations if the length exceeds 8
# (4 bytes for function selector and trailing 4 bytes).
return (
calldata[:4].to_0x_hex() + "..." + calldata[-4:].hex()
if calldata_repr == "abridged" and len(calldata) > 8
else calldata.to_0x_hex()
)
def _decoded_call(self) -> Optional[str]:
if not self.receiver:
return "constructor()"
if not (contract_type := self.chain_manager.contracts.get(self.receiver)):
# Unknown.
return None
try:
abi = contract_type.methods[HexBytes(self.data)[:4]]
except KeyError:
return None
ecosystem = (
self.provider.network.ecosystem
if self.network_manager.active_provider
else self.network_manager.ethereum
)
decoded_calldata = ecosystem.decode_calldata(abi, HexBytes(self.data)[4:])
# NOTE: There is no actual returndata yet, but we can show the type.
return_types = [t.canonical_type for t in abi.outputs]
if len(return_types) == 1:
return_types = return_types[0]
return prettify_function(
abi.name or "",
decoded_calldata,
returndata=return_types,
contract=contract_type.name or humanize_hexstr(self.receiver),
is_create=self.receiver is None,
depth=4,
)
|
TransactionAPI
|
python
|
fastai__fastai
|
fastai/text/models/core.py
|
{
"start": 5918,
"end": 8414
}
|
class ____(Module):
"Create a linear classifier with pooling"
def __init__(self,
dims:list, # List of hidden sizes for MLP as `int`s
ps:list, # List of dropout probabilities as `float`s
bptt:int, # Backpropagation through time
y_range:tuple=None # Tuple of (low, high) output value bounds
):
if len(ps) != len(dims)-1: raise ValueError("Number of layers and dropout values do not match.")
acts = [nn.ReLU(inplace=True)] * (len(dims) - 2) + [None]
layers = [LinBnDrop(i, o, p=p, act=a) for i,o,p,a in zip(dims[:-1], dims[1:], ps, acts)]
if y_range is not None: layers.append(SigmoidRange(*y_range))
self.layers = nn.Sequential(*layers)
self.bptt = bptt
def forward(self, input):
out,mask = input
x = masked_concat_pool(out, mask, self.bptt)
x = self.layers(x)
return x, out, out
# %% ../../../nbs/33_text.models.core.ipynb 27
def get_text_classifier(
arch:Callable, # Function or class that can generate a language model architecture
vocab_sz:int, # Size of the vocabulary
n_class:int, # Number of classes
seq_len:int=72, # Backpropagation through time
config:dict=None, # Encoder configuration dictionary
drop_mult:float=1., # Multiplicative factor to scale all dropout probabilities in `config`
lin_ftrs:list=None, # List of hidden sizes for classifier head as `int`s
ps:list=None, # List of dropout probabilities for classifier head as `float`s
pad_idx:int=1, # Padding token id
max_len:int=72*20, # Maximal output length for `SentenceEncoder`
y_range:tuple=None # Tuple of (low, high) output value bounds
):
"Create a text classifier from `arch` and its `config`, maybe `pretrained`"
meta = _model_meta[arch]
cfg = meta['config_clas'].copy()
cfg.update(ifnone(config, {}))
config = cfg
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
if lin_ftrs is None: lin_ftrs = [50]
if ps is None: ps = [0.1]*len(lin_ftrs)
layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
ps = [config.pop('output_p')] + ps
init = config.pop('init') if 'init' in config else None
encoder = SentenceEncoder(seq_len, arch(vocab_sz, **config), pad_idx=pad_idx, max_len=max_len)
model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps, bptt=seq_len, y_range=y_range))
return model if init is None else model.apply(init)
|
PoolingLinearClassifier
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_cond_format16.py
|
{
"start": 315,
"end": 1601
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format16.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format1 = workbook.add_format({"bg_color": "red"})
cell_format2 = workbook.add_format({"bg_color": "#92D050"})
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1",
{
"type": "cell",
"format": cell_format1,
"criteria": "<",
"value": 5,
"stop_if_true": False,
},
)
worksheet.conditional_format(
"A1",
{
"type": "cell",
"format": cell_format2,
"criteria": ">",
"value": 20,
"stop_if_true": True,
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 962809,
"end": 963205
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("SecurityAdvisory", graphql_name="node")
"""The item at the end of the edge."""
|
SecurityAdvisoryEdge
|
python
|
prabhupant__python-ds
|
data_structures/binary_trees/diagonal_tree.py
|
{
"start": 127,
"end": 858
}
|
class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def diagonal_print_util(root, d, diagonal_map):
if root is None:
return
try:
diagonal_map[d].append(root.val)
except:
diagonal_map[d] = [root.val]
# Increase vertical distance if left child
diagonal_print_util(root.left, d+1, diagonal_map)
# Vertical distance remains same for the right child
diagonal_print_util(root.right, d, diagonal_map)
def diagonal_print(root):
diagonal_map = dict()
diagonal_print_util(root, 0, diagonal_map)
for i in diagonal_map:
for j in diagonal_map[i]:
print(j, end=" ")
print('')
|
Node
|
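Note (editor's addition): the record above groups nodes by diagonal, where a left child moves one diagonal down and a right child stays on the same diagonal. A self-contained usage sketch follows; _DemoNode and _demo_diagonal_print are stand-ins invented here so the snippet runs without the masked class, and are not part of the source repository.
class _DemoNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def _demo_diagonal_print(root) -> None:
    # Mirrors diagonal_print_util above: left child -> diagonal d + 1,
    # right child -> same diagonal d.
    diagonals = {}
    def walk(node, d):
        if node is None:
            return
        diagonals.setdefault(d, []).append(node.val)
        walk(node.left, d + 1)
        walk(node.right, d)
    walk(root, 0)
    for d in sorted(diagonals):
        print(" ".join(str(v) for v in diagonals[d]))

if __name__ == "__main__":
    root = _DemoNode(8)
    root.left, root.right = _DemoNode(3), _DemoNode(10)
    root.left.left, root.left.right = _DemoNode(1), _DemoNode(6)
    root.right.right = _DemoNode(14)
    _demo_diagonal_print(root)  # prints "8 10 14", then "3 6", then "1"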
python
|
kamyu104__LeetCode-Solutions
|
Python/jump-game-v.py
|
{
"start": 5707,
"end": 6765
}
|
class ____(object):
def maxJumps(self, arr, d):
"""
:type arr: List[int]
:type d: int
:rtype: int
"""
left, decreasing_stk = range(len(arr)), []
for i in xrange(len(arr)):
while decreasing_stk and arr[decreasing_stk[-1]] < arr[i]:
if i - decreasing_stk[-1] <= d:
left[i] = decreasing_stk[-1]
decreasing_stk.pop()
decreasing_stk.append(i)
right, decreasing_stk = range(len(arr)), []
for i in reversed(xrange(len(arr))):
while decreasing_stk and arr[decreasing_stk[-1]] < arr[i]:
if decreasing_stk[-1] - i <= d:
right[i] = decreasing_stk[-1]
decreasing_stk.pop()
decreasing_stk.append(i)
segment_tree = SegmentTree(len(arr))
for _, i in sorted([x, i] for i, x in enumerate(arr)):
segment_tree.update(i, i, segment_tree.query(left[i], right[i]) + 1)
return segment_tree.query(0, len(arr)-1)
|
Solution3
|
python
|
sqlalchemy__sqlalchemy
|
examples/performance/bulk_updates.py
|
{
"start": 479,
"end": 1714
}
|
class ____(Base):
__tablename__ = "customer"
id = Column(Integer, Identity(), primary_key=True)
name = Column(String(255))
description = Column(String(255))
Profiler.init("bulk_updates", num=100000)
@Profiler.setup
def setup_database(dburl, echo, num):
global engine
engine = create_engine(dburl, echo=echo)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
s = Session(engine)
for chunk in range(0, num, 10000):
s.bulk_insert_mappings(
Customer,
[
{
"name": "customer name %d" % i,
"description": "customer description %d" % i,
}
for i in range(chunk, chunk + 10000)
],
)
s.commit()
@Profiler.profile
def test_orm_flush(n):
"""UPDATE statements via the ORM flush process."""
session = Session(bind=engine)
for chunk in range(0, n, 1000):
customers = (
session.query(Customer)
.filter(Customer.id.between(chunk, chunk + 1000))
.all()
)
for customer in customers:
customer.description += "updated"
session.flush()
session.commit()
|
Customer
|
python
|
joblib__joblib
|
joblib/externals/loky/process_executor.py
|
{
"start": 40479,
"end": 52348
}
|
class ____(Executor):
_at_exit = None
def __init__(
self,
max_workers=None,
job_reducers=None,
result_reducers=None,
timeout=None,
context=None,
initializer=None,
initargs=(),
env=None,
):
"""Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: int, optional (default: cpu_count())
The maximum number of processes that can be used to execute the
given calls. If None or not given then as many worker processes
will be created as the number of CPUs the current process
can use.
job_reducers, result_reducers: dict(type: reducer_func)
Custom reducer for pickling the jobs and the results from the
Executor. If only `job_reducers` is provided, `result_reducer`
will use the same reducers
timeout: int, optional (default: None)
Idle workers exit after timeout seconds. If a new job is
submitted after the timeout, the executor will start enough
new Python processes to make sure the pool of workers is full.
context: A multiprocessing context to launch the workers. This
object should provide SimpleQueue, Queue and Process.
initializer: A callable used to initialize worker processes.
initargs: A tuple of arguments to pass to the initializer.
env: A dict of environment variable to overwrite in the child
process. The environment variables are set before any module is
loaded. Note that this only works with the loky context.
"""
_check_system_limits()
if max_workers is None:
self._max_workers = cpu_count()
else:
if max_workers <= 0:
raise ValueError("max_workers must be greater than 0")
self._max_workers = max_workers
if (
sys.platform == "win32"
and self._max_workers > _MAX_WINDOWS_WORKERS
):
warnings.warn(
f"On Windows, max_workers cannot exceed {_MAX_WINDOWS_WORKERS} "
"due to limitations of the operating system."
)
self._max_workers = _MAX_WINDOWS_WORKERS
if context is None:
context = get_context()
self._context = context
self._env = env
self._initializer, self._initargs = _prepare_initializer(
initializer, initargs
)
_check_max_depth(self._context)
if result_reducers is None:
result_reducers = job_reducers
# Timeout
self._timeout = timeout
# Management thread
self._executor_manager_thread = None
# Map of pids to processes
self._processes = {}
# Internal variables of the ProcessPoolExecutor
self._processes = {}
self._queue_count = 0
self._pending_work_items = {}
self._running_work_items = []
self._work_ids = queue.Queue()
self._processes_management_lock = self._context.Lock()
self._executor_manager_thread = None
self._shutdown_lock = threading.Lock()
# _ThreadWakeup is a communication channel used to interrupt the wait
# of the main loop of executor_manager_thread from another thread (e.g.
# when calling executor.submit or executor.shutdown). We do not use the
# _result_queue to send wakeup signals to the executor_manager_thread
# as it could result in a deadlock if a worker process dies with the
# _result_queue write lock still acquired.
#
# _shutdown_lock must be locked to access _ThreadWakeup.wakeup.
self._executor_manager_thread_wakeup = _ThreadWakeup()
# Flag to hold the state of the Executor. This permits to introspect
# the Executor state even once it has been garbage collected.
self._flags = _ExecutorFlags(self._shutdown_lock)
# Finally setup the queues for interprocess communication
self._setup_queues(job_reducers, result_reducers)
mp.util.debug("ProcessPoolExecutor is setup")
def _setup_queues(self, job_reducers, result_reducers, queue_size=None):
# Make the call queue slightly larger than the number of processes to
# prevent the worker processes from idling. But don't make it too big
# because futures in the call queue cannot be cancelled.
if queue_size is None:
queue_size = 2 * self._max_workers + EXTRA_QUEUED_CALLS
self._call_queue = _SafeQueue(
max_size=queue_size,
pending_work_items=self._pending_work_items,
running_work_items=self._running_work_items,
thread_wakeup=self._executor_manager_thread_wakeup,
shutdown_lock=self._shutdown_lock,
reducers=job_reducers,
ctx=self._context,
)
# Killed worker processes can produce spurious "broken pipe"
# tracebacks in the queue's own worker thread. But we detect killed
# processes anyway, so silence the tracebacks.
self._call_queue._ignore_epipe = True
self._result_queue = SimpleQueue(
reducers=result_reducers, ctx=self._context
)
def _start_executor_manager_thread(self):
if self._executor_manager_thread is None:
mp.util.debug("_start_executor_manager_thread called")
# Start the processes so that their sentinels are known.
self._executor_manager_thread = _ExecutorManagerThread(self)
self._executor_manager_thread.start()
# register this executor in a mechanism that ensures it will wakeup
# when the interpreter is exiting.
_threads_wakeups[self._executor_manager_thread] = (
self._shutdown_lock,
self._executor_manager_thread_wakeup,
)
global process_pool_executor_at_exit
if process_pool_executor_at_exit is None:
# Ensure that the _python_exit function will be called before
# the multiprocessing.Queue._close finalizers which have an
# exitpriority of 10.
if sys.version_info < (3, 9):
process_pool_executor_at_exit = mp.util.Finalize(
None, _python_exit, exitpriority=20
)
else:
process_pool_executor_at_exit = threading._register_atexit(
_python_exit
)
def _adjust_process_count(self):
while len(self._processes) < self._max_workers:
worker_exit_lock = self._context.BoundedSemaphore(1)
args = (
self._call_queue,
self._result_queue,
self._initializer,
self._initargs,
self._processes_management_lock,
self._timeout,
worker_exit_lock,
_CURRENT_DEPTH + 1,
)
worker_exit_lock.acquire()
try:
# Try to spawn the process with some environment variable to
# overwrite but it only works with the loky context for now.
p = self._context.Process(
target=_process_worker, args=args, env=self._env
)
except TypeError:
p = self._context.Process(target=_process_worker, args=args)
p._worker_exit_lock = worker_exit_lock
p.start()
self._processes[p.pid] = p
mp.util.debug(
f"Adjusted process count to {self._max_workers}: "
f"{[(p.name, pid) for pid, p in self._processes.items()]}"
)
def _ensure_executor_running(self):
"""ensures all workers and management thread are running"""
with self._processes_management_lock:
if len(self._processes) != self._max_workers:
self._adjust_process_count()
self._start_executor_manager_thread()
def submit(self, fn, *args, **kwargs):
with self._flags.shutdown_lock:
if self._flags.broken is not None:
raise self._flags.broken
if self._flags.shutdown:
raise ShutdownExecutorError(
"cannot schedule new futures after shutdown"
)
# Cannot submit new calls once the interpreter is shutting down.
# This check avoids spawning new processes at exit.
if _global_shutdown:
raise RuntimeError(
"cannot schedule new futures after interpreter shutdown"
)
f = Future()
w = _WorkItem(f, fn, args, kwargs)
self._pending_work_items[self._queue_count] = w
self._work_ids.put(self._queue_count)
self._queue_count += 1
# Wake up queue management thread
self._executor_manager_thread_wakeup.wakeup()
self._ensure_executor_running()
return f
submit.__doc__ = Executor.submit.__doc__
def map(self, fn, *iterables, **kwargs):
"""Returns an iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
chunksize: If greater than one, the iterables will be chopped into
chunks of size chunksize and submitted to the process pool.
If set to one, the items in the list will be sent one at a
time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
timeout = kwargs.get("timeout", None)
chunksize = kwargs.get("chunksize", 1)
if chunksize < 1:
raise ValueError("chunksize must be >= 1.")
results = super().map(
partial(_process_chunk, fn),
_get_chunks(chunksize, *iterables),
timeout=timeout,
)
return _chain_from_iterable_of_lists(results)
def shutdown(self, wait=True, kill_workers=False):
mp.util.debug(f"shutting down executor {self}")
self._flags.flag_as_shutting_down(kill_workers)
executor_manager_thread = self._executor_manager_thread
executor_manager_thread_wakeup = self._executor_manager_thread_wakeup
if executor_manager_thread_wakeup is not None:
# Wake up queue management thread
with self._shutdown_lock:
self._executor_manager_thread_wakeup.wakeup()
if executor_manager_thread is not None and wait:
# This lock avoids a concurrent join if the interpreter
# is shutting down.
with _global_shutdown_lock:
executor_manager_thread.join()
_threads_wakeups.pop(executor_manager_thread, None)
# To reduce the risk of opening too many files, remove references to
# objects that use file descriptors.
self._executor_manager_thread = None
self._executor_manager_thread_wakeup = None
self._call_queue = None
self._result_queue = None
self._processes_management_lock = None
shutdown.__doc__ = Executor.shutdown.__doc__
|
ProcessPoolExecutor
|
python
|
huggingface__transformers
|
src/transformers/models/dab_detr/modeling_dab_detr.py
|
{
"start": 30636,
"end": 33371
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: DabDetrConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = DetrAttention(config)
self.self_attn_layer_norm = nn.LayerNorm(self.hidden_size)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.fc1 = nn.Linear(self.hidden_size, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.hidden_size)
self.final_layer_norm = nn.LayerNorm(self.hidden_size)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
object_queries: torch.Tensor,
output_attentions: Optional[bool] = None,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, source_len)` where padding elements are indicated by very large negative
values.
object_queries (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
object_queries=object_queries,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Modified from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrDecoderLayer with ConditionalDetr->DabDetr
|
DabDetrEncoderLayer
|
python
|
pypa__pip
|
src/pip/_vendor/rich/highlighter.py
|
{
"start": 1230,
"end": 1454
}
|
class ____(Highlighter):
"""A highlighter object that doesn't highlight.
May be used to disable highlighting entirely.
"""
def highlight(self, text: Text) -> None:
"""Nothing to do"""
|
NullHighlighter
|
python
|
joke2k__faker
|
faker/providers/internet/__init__.py
|
{
"start": 2087,
"end": 27180
}
|
class ____(BaseProvider):
safe_domain_names: ElementsType[str] = ("example.org", "example.com", "example.net")
free_email_domains: ElementsType[str] = ("gmail.com", "yahoo.com", "hotmail.com")
tlds: ElementsType[str] = (
"com",
"com",
"com",
"com",
"com",
"com",
"biz",
"info",
"net",
"org",
)
hostname_prefixes: ElementsType[str] = (
"db",
"srv",
"desktop",
"laptop",
"lt",
"email",
"web",
)
uri_pages: ElementsType[str] = (
"index",
"home",
"search",
"main",
"post",
"homepage",
"category",
"register",
"login",
"faq",
"about",
"terms",
"privacy",
"author",
)
uri_paths: ElementsType[str] = (
"app",
"main",
"wp-content",
"search",
"category",
"tag",
"categories",
"tags",
"blog",
"posts",
"list",
"explore",
)
uri_extensions: ElementsType[str] = (
".html",
".html",
".html",
".htm",
".htm",
".php",
".php",
".jsp",
".asp",
)
http_methods: ElementsType[str] = (
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
"PATCH",
)
http_assigned_codes: ElementsType[int] = (
100,
101,
102,
103,
200,
201,
202,
203,
204,
205,
206,
207,
208,
226,
300,
301,
302,
303,
304,
305,
307,
308,
400,
401,
402,
403,
404,
405,
406,
407,
408,
409,
410,
411,
412,
413,
414,
415,
416,
417,
421,
422,
423,
424,
425,
426,
428,
429,
431,
451,
500,
501,
502,
503,
504,
505,
506,
507,
508,
510,
511,
)
user_name_formats: ElementsType[str] = (
"{{last_name}}.{{first_name}}",
"{{first_name}}.{{last_name}}",
"{{first_name}}##",
"?{{last_name}}",
)
email_formats: ElementsType[str] = (
"{{user_name}}@{{domain_name}}",
"{{user_name}}@{{free_email_domain}}",
)
url_formats: ElementsType[str] = (
"www.{{domain_name}}/",
"{{domain_name}}/",
)
image_placeholder_services: ElementsType[str] = (
"https://picsum.photos/{width}/{height}",
"https://dummyimage.com/{width}x{height}",
"https://placekitten.com/{width}/{height}",
)
replacements: Tuple[Tuple[str, str], ...] = ()
def _to_ascii(self, string: str) -> str:
for search, replace in self.replacements:
string = string.replace(search, replace)
string = unidecode(string)
return string
@lowercase
def email(self, safe: bool = True, domain: Optional[str] = None) -> str:
if domain:
email = f"{self.user_name()}@{domain}"
elif safe:
email = f"{self.user_name()}@{self.safe_domain_name()}"
else:
pattern: str = self.random_element(self.email_formats)
email = "".join(self.generator.parse(pattern).split(" "))
return email
@lowercase
def safe_domain_name(self) -> str:
return self.random_element(self.safe_domain_names)
@lowercase
def safe_email(self) -> str:
return self.user_name() + "@" + self.safe_domain_name()
@lowercase
def free_email(self) -> str:
return self.user_name() + "@" + self.free_email_domain()
@lowercase
def company_email(self) -> str:
return self.user_name() + "@" + self.domain_name()
@lowercase
def free_email_domain(self) -> str:
return self.random_element(self.free_email_domains)
@lowercase
def ascii_email(self) -> str:
pattern: str = self.random_element(self.email_formats)
return self._to_ascii(
"".join(self.generator.parse(pattern).split(" ")),
)
@lowercase
def ascii_safe_email(self) -> str:
return self._to_ascii(self.user_name() + "@" + self.safe_domain_name())
@lowercase
def ascii_free_email(self) -> str:
return self._to_ascii(
self.user_name() + "@" + self.free_email_domain(),
)
@lowercase
def ascii_company_email(self) -> str:
return self._to_ascii(
self.user_name() + "@" + self.domain_name(),
)
@slugify_unicode
def user_name(self) -> str:
pattern: str = self.random_element(self.user_name_formats)
return self._to_ascii(self.bothify(self.generator.parse(pattern)).lower())
@lowercase
def hostname(self, levels: int = 1) -> str:
"""
Produce a hostname with specified number of subdomain levels.
>>> hostname()
db-01.nichols-phillips.com
>>> hostname(0)
laptop-56
>>> hostname(2)
web-12.williamson-hopkins.jackson.com
"""
hostname_prefix: str = self.random_element(self.hostname_prefixes)
hostname_prefix_first_level: str = hostname_prefix + "-" + self.numerify("##")
return (
hostname_prefix_first_level if levels < 1 else hostname_prefix_first_level + "." + self.domain_name(levels)
)
@lowercase
def domain_name(self, levels: int = 1) -> str:
"""
Produce an Internet domain name with the specified number of
subdomain levels.
>>> domain_name()
nichols-phillips.com
>>> domain_name(2)
williamson-hopkins.jackson.com
"""
if levels < 1:
raise ValueError("levels must be greater than or equal to 1")
if levels == 1:
return self.domain_word() + "." + self.tld()
return self.domain_word() + "." + self.domain_name(levels - 1)
@lowercase
@slugify_unicode
def domain_word(self) -> str:
company: str = self.generator.format("company")
company_elements: List[str] = company.split(" ")
return self._to_ascii(company_elements.pop(0))
def dga(
self,
year: Optional[int] = None,
month: Optional[int] = None,
day: Optional[int] = None,
tld: Optional[str] = None,
length: Optional[int] = None,
) -> str:
"""Generates a domain name by given date
https://en.wikipedia.org/wiki/Domain_generation_algorithm
:type year: int
:type month: int
:type day: int
:type tld: str
:type length: int
:rtype: str
"""
domain = ""
year = year or self.random_int(min=1, max=9999)
month = month or self.random_int(min=1, max=12)
day = day or self.random_int(min=1, max=30)
tld = tld or self.tld()
length = length or self.random_int(min=2, max=63)
for _ in range(length):
year = ((year ^ 8 * year) >> 11) ^ ((year & 0xFFFFFFF0) << 17)
month = ((month ^ 4 * month) >> 25) ^ 16 * (month & 0xFFFFFFF8)
day = ((day ^ (day << 13)) >> 19) ^ ((day & 0xFFFFFFFE) << 12)
domain += chr(((year ^ month ^ day) % 25) + 97)
return domain + "." + tld
def tld(self) -> str:
return self.random_element(self.tlds)
def http_method(self) -> str:
"""Returns random HTTP method
https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
:rtype: str
"""
return self.random_element(self.http_methods)
def http_status_code(self, include_unassigned: bool = True) -> int:
"""Returns random HTTP status code
https://www.rfc-editor.org/rfc/rfc9110#name-status-codes
:param include_unassigned: Whether to include status codes which have
not yet been assigned or are unused
:return: a random three digit status code
:rtype: int
:example: 404
"""
if include_unassigned:
return self.random_int(min=100, max=599)
else:
return self.random_element(self.http_assigned_codes)
def url(self, schemes: Optional[List[str]] = None) -> str:
"""
:param schemes: a list of strings to use as schemes; one will be chosen randomly.
If None, it will generate http and https urls.
Passing an empty list will result in schemeless url generation like "://domain.com".
:return: a random url string.
"""
if schemes is None:
schemes = ["http", "https"]
pattern: str = f'{self.random_element(schemes) if schemes else ""}://{self.random_element(self.url_formats)}'
return self.generator.parse(pattern)
def _get_all_networks_and_weights(self, address_class: Optional[str] = None) -> Tuple[List[IPv4Network], List[int]]:
"""
Produces a 2-tuple of valid IPv4 networks and corresponding relative weights
:param address_class: IPv4 address class (a, b, or c)
"""
# If `address_class` has an unexpected value, use the whole IPv4 pool
if address_class in _IPv4Constants._network_classes.keys():
networks_attr = f"_cached_all_class_{address_class}_networks"
all_networks = [_IPv4Constants._network_classes[address_class]] # type: ignore
else:
networks_attr = "_cached_all_networks"
all_networks = [IPv4Network("0.0.0.0/0")]
# Return cached network and weight data if available
weights_attr = f"{networks_attr}_weights"
if hasattr(self, networks_attr) and hasattr(self, weights_attr):
return getattr(self, networks_attr), getattr(self, weights_attr)
# Otherwise, compute for list of networks (excluding special networks)
all_networks = self._exclude_ipv4_networks(
all_networks,
_IPv4Constants._excluded_networks,
)
# Then compute for list of corresponding relative weights
weights = [network.num_addresses for network in all_networks]
# Then cache and return results
setattr(self, networks_attr, all_networks)
setattr(self, weights_attr, weights)
return all_networks, weights
def _get_private_networks_and_weights(
self,
address_class: Optional[str] = None,
) -> Tuple[List[IPv4Network], List[int]]:
"""
Produces a 2-tuple of valid private IPv4 networks and corresponding relative weights
:param address_class: IPv4 address class (a, b, or c)
"""
# If `address_class` has an unexpected value, choose a valid value at random
if not address_class or address_class not in _IPv4Constants._network_classes.keys():
address_class = self.ipv4_network_class()
# Return cached network and weight data if available for a specific address class
networks_attr = f"_cached_private_class_{address_class}_networks"
weights_attr = f"{networks_attr}_weights"
if hasattr(self, networks_attr) and hasattr(self, weights_attr):
return getattr(self, networks_attr), getattr(self, weights_attr)
# Otherwise, compute for list of private networks (excluding special networks)
supernet = _IPv4Constants._network_classes[address_class]
private_networks = [subnet for subnet in _IPv4Constants._private_networks if subnet.overlaps(supernet)]
private_networks = self._exclude_ipv4_networks(
private_networks,
_IPv4Constants._excluded_networks,
)
# Then compute for list of corresponding relative weights
weights = [network.num_addresses for network in private_networks]
# Then cache and return results
setattr(self, networks_attr, private_networks)
setattr(self, weights_attr, weights)
return private_networks, weights
def _get_public_networks_and_weights(
self,
address_class: Optional[str] = None,
) -> Tuple[List[IPv4Network], List[int]]:
"""
Produces a 2-tuple of valid public IPv4 networks and corresponding relative weights
:param address_class: IPv4 address class (a, b, or c)
"""
# If `address_class` has an unexpected value, choose a valid value at random
if address_class not in _IPv4Constants._network_classes.keys():
address_class = self.ipv4_network_class()
# Return cached network and weight data if available for a specific address class
networks_attr = f"_cached_public_class_{address_class}_networks"
weights_attr = f"{networks_attr}_weights"
if hasattr(self, networks_attr) and hasattr(self, weights_attr):
return getattr(self, networks_attr), getattr(self, weights_attr)
# Otherwise, compute for list of public networks (excluding private and special networks)
public_networks = [_IPv4Constants._network_classes[address_class]] # type: ignore
public_networks = self._exclude_ipv4_networks(
public_networks,
_IPv4Constants._private_networks + _IPv4Constants._excluded_networks,
)
# Then compute for list of corresponding relative weights
weights = [network.num_addresses for network in public_networks]
# Then cache and return results
setattr(self, networks_attr, public_networks)
setattr(self, weights_attr, weights)
return public_networks, weights
def _random_ipv4_address_from_subnets(
self,
subnets: List[IPv4Network],
weights: Optional[List[int]] = None,
network: bool = False,
) -> str:
"""
Produces a random IPv4 address or network with a valid CIDR
from within the given subnets using a distribution described
by weights.
:param subnets: List of IPv4Networks to choose from within
:param weights: List of weights corresponding to the individual IPv4Networks
:param network: Return a network address (CIDR notation) instead of a host address
:return: IPv4 address or network as a string
"""
if not subnets:
raise ValueError("No subnets to choose from")
# Use the provided weights when they are valid; otherwise fall back to an equal distribution
if (
isinstance(weights, list)
and len(subnets) == len(weights)
and all(isinstance(w, (float, int)) for w in weights)
):
subnet = choices_distribution(
subnets,
[float(w) for w in weights],
random=self.generator.random,
length=1,
)[0]
else:
subnet = self.generator.random.choice(subnets)
address = str(
subnet[
self.generator.random.randint(
0,
subnet.num_addresses - 1,
)
],
)
if network:
address += "/" + str(
self.generator.random.randint(
subnet.prefixlen,
subnet.max_prefixlen,
)
)
address = str(IPv4Network(address, strict=False))
return address
def _exclude_ipv4_networks(
self, networks: List[IPv4Network], networks_to_exclude: List[IPv4Network]
) -> List[IPv4Network]:
"""
Exclude the list of networks from another list of networks
and return a flat list of new networks.
:param networks: List of IPv4 networks to exclude from
:param networks_to_exclude: List of IPv4 networks to exclude
:returns: Flat list of IPv4 networks
"""
networks_to_exclude.sort(key=lambda x: x.prefixlen)
for network_to_exclude in networks_to_exclude:
def _exclude_ipv4_network(network):
"""
Exclude a single network from another single network
and return a list of networks. Network to exclude
comes from the outer scope.
:param network: Network to exclude from
:returns: Flat list of IPv4 networks after exclusion.
If exclusion fails because the networks do not
overlap, a single-element list with the
original network is returned. If it overlaps,
even partially, the network is excluded.
"""
try:
return list(network.address_exclude(network_to_exclude))
except ValueError:
# If networks overlap partially, `address_exclude`
# will fail, but the network still must not be used
# in generation.
if network.overlaps(network_to_exclude):
return []
else:
return [network]
nested_networks = list(map(_exclude_ipv4_network, networks))
networks = [item for nested in nested_networks for item in nested]
return networks
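# Worked example (standard ipaddress semantics, shown for illustration):
# excluding 10.0.0.0/8 from [0.0.0.0/0] leaves the sibling networks
# 0.0.0.0/5, 8.0.0.0/7, 11.0.0.0/8, 12.0.0.0/6, 16.0.0.0/4, 32.0.0.0/3,
# 64.0.0.0/2 and 128.0.0.0/1. If the excluded network only overlaps a
# candidate (so address_exclude() raises ValueError), that candidate is
# dropped entirely.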
def ipv4_network_class(self) -> str:
"""
Returns an IPv4 network class: 'a', 'b', or 'c'.
:returns: IPv4 network class
"""
return self.random_element("abc")
def ipv4(
self,
network: bool = False,
address_class: Optional[str] = None,
private: Optional[bool] = None,
) -> str:
"""
Returns a random IPv4 address or network with a valid CIDR.
:param network: Network address
:param address_class: IPv4 address class (a, b, or c)
:param private: True for a private address, False for a public address, None for either
:returns: IPv4
"""
if private is True:
return self.ipv4_private(address_class=address_class, network=network)
elif private is False:
return self.ipv4_public(address_class=address_class, network=network)
else:
all_networks, weights = self._get_all_networks_and_weights(address_class=address_class)
return self._random_ipv4_address_from_subnets(all_networks, weights=weights, network=network)
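# Usage sketch (outputs are illustrative only):
#
#     fake.ipv4()                    # any address, e.g. "171.174.170.81"
#     fake.ipv4(private=True)        # e.g. "10.213.44.7"
#     fake.ipv4(network=True)        # CIDR form, e.g. "198.51.100.0/24"
#     fake.ipv4(address_class="c")   # drawn from the class C range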
def ipv4_private(self, network: bool = False, address_class: Optional[str] = None) -> str:
"""
Returns a private IPv4.
:param network: Network address
:param address_class: IPv4 address class (a, b, or c)
:returns: Private IPv4
"""
private_networks, weights = self._get_private_networks_and_weights(address_class=address_class)
return self._random_ipv4_address_from_subnets(private_networks, weights=weights, network=network)
def ipv4_public(self, network: bool = False, address_class: Optional[str] = None) -> str:
"""
Returns a public IPv4 excluding private blocks.
:param network: Network address
:param address_class: IPv4 address class (a, b, or c)
:returns: Public IPv4
"""
public_networks, weights = self._get_public_networks_and_weights(address_class=address_class)
return self._random_ipv4_address_from_subnets(public_networks, weights=weights, network=network)
def ipv6(self, network: bool = False) -> str:
"""Produce a random IPv6 address or network with a valid CIDR"""
address = str(IPv6Address(self.generator.random.randint(2**IPV4LENGTH, (2**IPV6LENGTH) - 1)))
if network:
address += "/" + str(self.generator.random.randint(0, IPV6LENGTH))
address = str(IPv6Network(address, strict=False))
return address
def mac_address(self, multicast: bool = False) -> str:
"""
Returns a random MAC address.
:param multicast: Multicast address
:returns: MAC Address
"""
mac = [self.generator.random.randint(0x00, 0xFF) for _ in range(0, 5)]
if multicast is True:
mac.insert(0, self.generator.random.randrange(0x01, 0xFF, 2))
else:
mac.insert(0, self.generator.random.randrange(0x00, 0xFE, 2))
return ":".join("%02x" % x for x in mac)
def port_number(self, is_system: bool = False, is_user: bool = False, is_dynamic: bool = False) -> int:
"""Returns a network port number
https://tools.ietf.org/html/rfc6335
:param is_system: System or well-known ports
:param is_user: User or registered ports
:param is_dynamic: Dynamic / private / ephemeral ports
:rtype: int
"""
if is_system:
return self.random_int(min=0, max=1023)
elif is_user:
return self.random_int(min=1024, max=49151)
elif is_dynamic:
return self.random_int(min=49152, max=65535)
return self.random_int(min=0, max=65535)
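# The ranges mirror RFC 6335: 0-1023 system/well-known, 1024-49151
# user/registered, 49152-65535 dynamic/ephemeral. Illustrative usage:
#
#     fake.port_number()                  # anywhere in 0-65535
#     fake.port_number(is_system=True)    # e.g. 443
#     fake.port_number(is_dynamic=True)   # e.g. 52113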
def uri_page(self) -> str:
return self.random_element(self.uri_pages)
def uri_path(self, deep: Optional[int] = None) -> str:
deep = deep if deep else self.generator.random.randint(1, 3)
return "/".join(
self.random_elements(self.uri_paths, length=deep),
)
def uri_extension(self) -> str:
return self.random_element(self.uri_extensions)
def uri(self, schemes: Optional[List[str]] = None, deep: Optional[int] = None) -> str:
"""
:param schemes: a list of strings to use as schemes; one will be chosen randomly.
If None, it will generate http and https URIs.
Passing an empty list will result in schemeless URI generation like "://domain.com/index.html".
:param deep: an integer specifying how many path components the URI should have.
:return: a random URI string.
"""
if schemes is None:
schemes = ["http", "https"]
pattern: str = f'{self.random_element(schemes) if schemes else ""}://{self.random_element(self.url_formats)}'
path = self.uri_path(deep=deep)
page = self.uri_page()
extension = self.uri_extension()
return f"{self.generator.parse(pattern)}{path}{page}{extension}"
@slugify
def slug(self, value: Optional[str] = None) -> str:
"""Django algorithm"""
if value is None:
# Resolve https://github.com/joke2k/faker/issues/2103
# Always generate slug with ASCII characters, regardless of locale
ext_word_list = USLoremProvider.word_list
value = self.generator.text(20, ext_word_list=ext_word_list)
return value
def image_url(
self,
width: Optional[int] = None,
height: Optional[int] = None,
placeholder_url: Optional[str] = None,
) -> str:
"""
Returns URL to placeholder image
Example: http://placehold.it/640x480
:param width: Optional image width
:param height: Optional image height
:param placeholder_url: Optional template string of image URLs from custom
placeholder service. The string must contain ``{width}`` and ``{height}``
placeholders, e.g. ``https://example.com/{width}/{height}``.
:rtype: str
"""
width_ = width or self.random_int(max=1024)
height_ = height or self.random_int(max=1024)
if placeholder_url is None:
placeholder_url = self.random_element(self.image_placeholder_services)
return placeholder_url.format(width=width_, height=height_)
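# Usage sketch (the service URLs shown are made-up examples, not necessarily
# entries of ``image_placeholder_services``):
#
#     fake.image_url(width=640, height=480)    # e.g. "https://placekitten.com/640/480"
#     fake.image_url(placeholder_url="https://img.example.test/{width}x{height}")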
def iana_id(self) -> str:
"""Returns IANA Registrar ID
https://www.iana.org/assignments/registrar-ids/registrar-ids.xhtml
:rtype: str
"""
return str(self.random_int(min=1, max=8888888))
def ripe_id(self) -> str:
"""Returns RIPE Organization ID
https://www.ripe.net/manage-ips-and-asns/db/support/organisation-object-in-the-ripe-database
:rtype: str
"""
lex = "?" * self.random_int(min=2, max=4)
num = "%" * self.random_int(min=1, max=5)
return self.bothify(f"ORG-{lex}{num}-RIPE").upper()
def nic_handle(self, suffix: str = "FAKE") -> str:
"""Returns NIC Handle ID
https://www.apnic.net/manage-ip/using-whois/guide/person/
:rtype: str
"""
if len(suffix) < 2:
raise ValueError("suffix length must be greater than or equal to 2")
lex = "?" * self.random_int(min=2, max=4)
num = "%" * self.random_int(min=1, max=5)
return self.bothify(f"{lex}{num}-{suffix}").upper()
def nic_handles(self, count: int = 1, suffix: str = "????") -> List[str]:
"""Returns NIC Handle ID list
:rtype: list[str]
"""
return [self.nic_handle(suffix=suffix) for _ in range(count)]
|
Provider
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-pebblo/tests/test_readers_pebblo.py
|
{
"start": 346,
"end": 2663
}
|
class ____:
def __init__(self, json_data: Dict, status_code: int):
self.json_data = json_data
self.status_code = status_code
def json(self) -> Dict:
return self.json_data
@pytest.fixture()
def create_empty_file():
with open(csv_empty_file_name, "w"):
pass
yield
if os.path.exists(csv_empty_file_name):
os.remove(csv_empty_file_name)
@pytest.fixture()
def create_csv_file():
data = "column1,column2,column3\nvalue1,value2,value3\nvalue4,value5,value6\n"
with open(csv_file_name, "w") as csv_file:
csv_file.write(data)
yield
if os.path.exists(csv_file_name):
os.remove(csv_file_name)
def test_class():
names_of_base_classes = [b.__name__ for b in PebbloSafeReader.__mro__]
assert BaseReader.__name__ in names_of_base_classes
def test_empty_filebased_loader(mocker: MockerFixture, create_empty_file) -> None:
"""Test basic file based csv loader."""
mocker.patch.multiple(
"requests",
get=MockResponse(json_data={"data": ""}, status_code=200),
post=MockResponse(json_data={"data": ""}, status_code=200),
)
file_path = f"{Path().resolve()}/{csv_empty_file_name}"
# Exercise
loader = PebbloSafeReader(
CSVReader(),
"dummy_app_name",
"dummy_owner",
"dummy_description",
)
result = loader.load_data(file=Path(file_path))
# Assert
assert result[0].text == ""
assert result[0].metadata == {"filename": "test_empty.csv", "extension": ".csv"}
def test_csv_loader_load_valid_data(mocker: MockerFixture, create_csv_file) -> None:
mocker.patch.multiple(
"requests",
get=MockResponse(json_data={"data": ""}, status_code=200),
post=MockResponse(json_data={"data": ""}, status_code=200),
)
file_path = f"{Path().resolve()}/test_nominal.csv"
# Exercise
loader = PebbloSafeReader(
CSVReader(),
"dummy_app_name",
"dummy_owner",
"dummy_description",
)
result = loader.load_data(file=Path(file_path))
# Assert
assert (
result[0].text
== "column1, column2, column3\nvalue1, value2, value3\nvalue4, value5, value6"
)
assert result[0].metadata == {"filename": "test_nominal.csv", "extension": ".csv"}
|
MockResponse
|
python
|
python-markdown__markdown
|
tests/test_syntax/extensions/test_smarty.py
|
{
"start": 6380,
"end": 7615
}
|
class ____(TestCase):
default_kwargs = {
'extensions': ['smarty'],
'extension_configs': {
'smarty': {
'smart_angled_quotes': True,
'substitutions': {
'ndash': '\u2013',
'mdash': '\u2014',
'ellipsis': '\u2026',
'left-single-quote': '‚', # `sb` is not a typo!
'right-single-quote': '‘',
'left-double-quote': '„',
'right-double-quote': '“',
'left-angle-quote': '[',
'right-angle-quote': ']',
},
},
},
}
def test_custom_substitutions(self):
text = (
'<< The "Unicode char of the year 2014"\n'
"is the 'mdash': ---\n"
"Must not be confused with 'ndash' (--) ... >>"
)
html = (
'<p>[ The „Unicode char of the year 2014“\n'
'is the ‚mdash‘: \u2014\n'
'Must not be confused with ‚ndash‘ (\u2013) \u2026 ]</p>'
)
self.assertMarkdownRenders(text, html)
|
TestSmartyCustomSubstitutions
|
python
|
getsentry__sentry
|
tests/sentry/monitors/endpoints/test_project_monitor_details.py
|
{
"start": 465,
"end": 620
}
|
class ____(BaseUpdateMonitorTest, BaseProjectMonitorTest):
endpoint = "sentry-api-0-project-monitor-details"
__test__ = True
|
ProjectUpdateMonitorTest
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_users.py
|
{
"start": 45623,
"end": 48168
}
|
class ____:
def test_user_recover_account_cancel_cancels_active_account_recoveries(
self, db_request, monkeypatch
):
admin_user = UserFactory.create()
user = UserFactory.create(
totp_secret=b"aaaaabbbbbcccccddddd",
webauthn=[
WebAuthn(
label="fake", credential_id="fake", public_key="extremely fake"
)
],
recovery_codes=[
RecoveryCode(code="fake"),
],
)
account_recovery0 = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=admin_user,
summary="Account Recovery",
payload={"completed": None},
)
account_recovery0.additional = {"status": "initiated"}
account_recovery1 = user.record_observation(
request=db_request,
kind=ObservationKind.AccountRecovery,
actor=admin_user,
summary="Account Recovery",
payload={"completed": None},
)
account_recovery1.additional = {"status": "initiated"}
assert user.totp_secret is not None
assert len(user.webauthn) == 1
assert len(user.recovery_codes.all()) == 1
db_request.method = "POST"
db_request.matchdict["username"] = str(user.username)
db_request.params = {"username": user.username}
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.user = user
service = pretend.stub()
db_request.find_service = pretend.call_recorder(lambda iface, context: service)
now = datetime.datetime.now(datetime.UTC)
with freezegun.freeze_time(now):
result = views.user_recover_account_cancel(user, db_request)
assert user.totp_secret is not None
assert len(user.webauthn) == 1
assert len(user.recovery_codes.all()) == 1
assert db_request.find_service.calls == []
assert account_recovery0.additional["status"] == "cancelled"
assert account_recovery0.payload["cancelled"] == str(now)
assert account_recovery1.additional["status"] == "cancelled"
assert account_recovery1.payload["cancelled"] == str(now)
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
|
TestUserRecoverAccountCancel
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/ext/asyncio/async_sessionmaker.py
|
{
"start": 735,
"end": 894
}
|
class ____(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str]
bs: Mapped[List[B]] = relationship()
|
A
|
python
|
pypa__pipenv
|
pipenv/exceptions.py
|
{
"start": 2666,
"end": 3292
}
|
class ____(PipenvException):
def __init__(self, cmd, out="", err="", exit_code=1):
self.cmd = cmd
self.out = out
self.err = err
self.exit_code = exit_code
message = f"Error running command: {cmd}"
PipenvException.__init__(self, message)
def show(self, file=None):
console = Console(stderr=True, file=file, highlight=False)
console.print(f"[red]Error running command:[/red] [bold]$ {self.cmd}[/bold]")
if self.out:
console.print(f"OUTPUT: {self.out}")
if self.err:
console.print(f"STDERR: {self.err}")
|
PipenvCmdError
|
python
|
getsentry__sentry
|
tests/sentry/relocation/tasks/test_process.py
|
{
"start": 100679,
"end": 105022
}
|
class ____(RelocationTaskTestCase):
def setUp(self) -> None:
RelocationTaskTestCase.setUp(self)
TransactionTestCase.setUp(self)
self.relocation.step = Relocation.Step.NOTIFYING.value
self.relocation.latest_task = OrderedTask.NOTIFYING_USERS.name
self.relocation.save()
RegionImportChunk.objects.create(
import_uuid=self.relocation.uuid,
model="sentry.organization",
min_ordinal=0,
max_ordinal=0,
min_source_pk=1,
max_source_pk=1,
inserted_map={1: 1234},
inserted_identifiers={1: "testing-ab"},
)
self.imported_orgs = ["testing-ab"]
def test_success_admin_assisted_relocation(
self,
completed_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
notifying_owner(self.uuid)
assert fake_message_builder.call_count == 1
assert fake_message_builder.call_args.kwargs["type"] == "relocation.succeeded"
assert fake_message_builder.call_args.kwargs["context"]["orgs"] == self.imported_orgs
fake_message_builder.return_value.send_async.assert_called_once_with(
to=[self.owner.email, self.superuser.email]
)
assert completed_mock.call_count == 1
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.latest_notified == Relocation.EmailKind.SUCCEEDED.value
def test_success_self_serve_relocation(
self,
completed_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
self.relocation.creator_id = self.relocation.owner_id
self.relocation.save()
notifying_owner(self.uuid)
assert fake_message_builder.call_count == 1
assert fake_message_builder.call_args.kwargs["type"] == "relocation.succeeded"
assert fake_message_builder.call_args.kwargs["context"]["orgs"] == self.imported_orgs
fake_message_builder.return_value.send_async.assert_called_once_with(to=[self.owner.email])
assert completed_mock.call_count == 1
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.latest_notified == Relocation.EmailKind.SUCCEEDED.value
def test_retry_if_attempts_left(
self,
completed_mock: Mock,
fake_message_builder: Mock,
):
self.mock_message_builder(fake_message_builder)
fake_message_builder.return_value.send_async.side_effect = Exception("Test")
# An exception being raised will trigger a retry task.
with pytest.raises(Exception):
notifying_owner(self.uuid)
assert fake_message_builder.call_count == 1
assert completed_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.IN_PROGRESS.value
assert relocation.latest_notified != Relocation.EmailKind.FAILED.value
assert not relocation.failure_reason
def test_fail_if_no_attempts_left(
self,
completed_mock: Mock,
fake_message_builder: Mock,
):
self.relocation.latest_task = OrderedTask.NOTIFYING_OWNER.name
self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES
self.relocation.save()
self.mock_message_builder(fake_message_builder)
fake_message_builder.return_value.send_async.side_effect = [Exception("Test"), None]
with pytest.raises(Exception):
notifying_owner(self.uuid)
# Oh, the irony: sending the "relocation success" email failed, so we send a "relocation
# failed" email instead...
assert fake_message_builder.call_count == 2
email_types = [args.kwargs["type"] for args in fake_message_builder.call_args_list]
assert "relocation.failed" in email_types
assert "relocation.succeeded" in email_types
assert completed_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.FAILURE.value
assert relocation.latest_notified == Relocation.EmailKind.FAILED.value
assert relocation.failure_reason == ERR_NOTIFYING_INTERNAL
|
NotifyingOwnerTest
|
python
|
huggingface__transformers
|
src/transformers/models/mobilevit/modeling_mobilevit.py
|
{
"start": 10932,
"end": 11919
}
|
class ____(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int) -> None:
super().__init__()
self.attention = MobileViTAttention(config, hidden_size)
self.intermediate = MobileViTIntermediate(config, hidden_size, intermediate_size)
self.output = MobileViTOutput(config, hidden_size, intermediate_size)
self.layernorm_before = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
attention_output = self.attention(self.layernorm_before(hidden_states))
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
|
MobileViTTransformerLayer
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/test_tools.py
|
{
"start": 42306,
"end": 46822
}
|
class ____(BaseTool):
name: str = "structured_api"
args_schema: type[BaseModel] = _MockSchema
description: str = "A Structured Tool"
response_format: Literal["content_and_artifact"] = "content_and_artifact"
@override
def _run(
self,
arg1: int,
arg2: bool,
arg3: dict | None = None,
) -> tuple[str, dict]:
return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}
@tool("structured_api", response_format="content_and_artifact")
def _mock_structured_tool_with_artifact(
*, arg1: int, arg2: bool, arg3: dict | None = None
) -> tuple[str, dict]:
"""A Structured Tool."""
return f"{arg1} {arg2}", {"arg1": arg1, "arg2": arg2, "arg3": arg3}
@pytest.mark.parametrize(
"tool", [_MockStructuredToolWithRawOutput(), _mock_structured_tool_with_artifact]
)
def test_tool_call_input_tool_message_with_artifact(tool: BaseTool) -> None:
tool_call: dict = {
"name": "structured_api",
"args": {"arg1": 1, "arg2": True, "arg3": {"img": "base64string..."}},
"id": "123",
"type": "tool_call",
}
expected = ToolMessage(
"1 True", artifact=tool_call["args"], tool_call_id="123", name="structured_api"
)
actual = tool.invoke(tool_call)
assert actual == expected
tool_call.pop("type")
with pytest.raises(ValidationError):
tool.invoke(tool_call)
actual_content = tool.invoke(tool_call["args"])
assert actual_content == expected.content
def test_convert_from_runnable_dict() -> None:
# Test with typed dict input
class Args(TypedDict):
a: int
b: list[int]
def f(x: Args) -> str:
return str(x["a"] * max(x["b"]))
runnable: Runnable = RunnableLambda(f)
as_tool = runnable.as_tool()
args_schema = as_tool.args_schema
assert args_schema is not None
assert _schema(args_schema) == {
"title": "f",
"type": "object",
"properties": {
"a": {"title": "A", "type": "integer"},
"b": {"title": "B", "type": "array", "items": {"type": "integer"}},
},
"required": ["a", "b"],
}
assert as_tool.description
result = as_tool.invoke({"a": 3, "b": [1, 2]})
assert result == "6"
as_tool = runnable.as_tool(name="my tool", description="test description")
assert as_tool.name == "my tool"
assert as_tool.description == "test description"
# Dict without typed input-- must supply schema
def g(x: dict[str, Any]) -> str:
return str(x["a"] * max(x["b"]))
# Specify via args_schema:
class GSchema(BaseModel):
"""Apply a function to an integer and list of integers."""
a: int = Field(..., description="Integer")
b: list[int] = Field(..., description="List of ints")
runnable = RunnableLambda(g)
as_tool = runnable.as_tool(GSchema)
as_tool.invoke({"a": 3, "b": [1, 2]})
# Specify via arg_types:
runnable = RunnableLambda(g)
as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
result = as_tool.invoke({"a": 3, "b": [1, 2]})
assert result == "6"
# Test with config
def h(x: dict[str, Any]) -> str:
config = ensure_config()
assert config["configurable"]["foo"] == "not-bar"
return str(x["a"] * max(x["b"]))
runnable = RunnableLambda(h)
as_tool = runnable.as_tool(arg_types={"a": int, "b": list[int]})
result = as_tool.invoke(
{"a": 3, "b": [1, 2]}, config={"configurable": {"foo": "not-bar"}}
)
assert result == "6"
def test_convert_from_runnable_other() -> None:
# String input
def f(x: str) -> str:
return x + "a"
def g(x: str) -> str:
return x + "z"
runnable: Runnable = RunnableLambda(f) | g
as_tool = runnable.as_tool()
args_schema = as_tool.args_schema
assert args_schema is None
assert as_tool.description
result = as_tool.invoke("b")
assert result == "baz"
# Test with config
def h(x: str) -> str:
config = ensure_config()
assert config["configurable"]["foo"] == "not-bar"
return x + "a"
runnable = RunnableLambda(h)
as_tool = runnable.as_tool()
result = as_tool.invoke("b", config={"configurable": {"foo": "not-bar"}})
assert result == "ba"
@tool("foo", parse_docstring=True)
def injected_tool(x: int, y: Annotated[str, InjectedToolArg]) -> str:
"""Foo.
Args:
x: abc
y: 123
"""
return y
|
_MockStructuredToolWithRawOutput
|
python
|
boto__boto3
|
boto3/resources/model.py
|
{
"start": 5149,
"end": 6355
}
|
class ____:
"""
A resource response to create after performing an action.
:type definition: dict
:param definition: The JSON definition
:type resource_defs: dict
:param resource_defs: All resources defined in the service
"""
def __init__(self, definition, resource_defs):
self._definition = definition
self._resource_defs = resource_defs
#: (``string``) The name of the response resource type
self.type = definition.get('type')
#: (``string``) The JMESPath search query or ``None``
self.path = definition.get('path')
@property
def identifiers(self):
"""
A list of resource identifiers.
:type: list(:py:class:`Identifier`)
"""
identifiers = []
for item in self._definition.get('identifiers', []):
identifiers.append(Parameter(**item))
return identifiers
@property
def model(self):
"""
Get the resource model for the response resource.
:type: :py:class:`ResourceModel`
"""
return ResourceModel(
self.type, self._resource_defs[self.type], self._resource_defs
)
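# Illustrative shape of a `definition` dict this class consumes (field names
# follow the resources JSON format; the concrete values are made up):
#
#     {
#         "type": "Queue",
#         "identifiers": [{"target": "Url", "source": "response", "path": "QueueUrl"}],
#         "path": "QueueUrl",
#     }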
|
ResponseResource
|
python
|
pytorch__pytorch
|
test/inductor/test_codecache.py
|
{
"start": 7431,
"end": 67448
}
|
class ____(TestCase):
device_type = GPU_TYPE
def setUp(self):
super().setUp()
counters.clear()
DynamoCache.clear()
PrecompileContext.clear()
AOTAutogradCache.clear()
PatchCaches.setUp()
CacheArtifactManager.clear()
torch._dynamo.reset()
def tearDown(self):
super().tearDown()
PatchCaches.tearDown()
def reset(self):
AOTAutogradCache.clear()
DynamoCache.clear()
PrecompileContext.clear()
PyCodeCache.cache_clear(purge=True)
torch._dynamo.reset()
clear_caches()
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"compile_threads": 1})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.bfloat16))
@parametrize("dynamic", (False, True))
@parametrize("bundle_triton", (False, True))
@parametrize("use_static_cuda_launcher", (False, True))
@parametrize("grad", (False, True))
def test_cache_load_function(
self, device, dtype, dynamic, bundle_triton, use_static_cuda_launcher, grad
):
"""
Verify that we can populate and load functions from the cache.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires SM80 or later")
if use_static_cuda_launcher and not (device == "cuda" and bundle_triton):
raise unittest.SkipTest(
"Static cuda launcher requires cuda and triton bundling"
)
if use_static_cuda_launcher and TEST_WITH_ROCM:
raise unittest.SkipTest("Static cuda launcher doesn't work with ROCM")
grad_multiplier = 2 if grad else 1
def fn(x, y):
yy = y @ y
return x * 2 + yy.view(25)
a_orig = torch.rand(25, dtype=dtype, device=device)
b_orig = torch.rand(5, 5, dtype=dtype, device=device)
with config.patch(
bundle_triton_into_fx_graph_cache=bundle_triton,
use_static_cuda_launcher=use_static_cuda_launcher,
):
compiled_fn = torch.compile(fn, dynamic=dynamic)
a1 = a_orig.clone().requires_grad_(grad)
b1 = b_orig.clone().requires_grad_(grad)
a2 = a_orig.clone().requires_grad_(grad)
b2 = b_orig.clone().requires_grad_(grad)
# A first call should miss in the cache.
eager_result = fn(a1, b1)
compiled_result = compiled_fn(a2, b2)
self.assertEqual(eager_result, compiled_result)
if grad:
eager_result.sum().backward()
compiled_result.sum().backward()
self.assertEqual(a1.grad, a2.grad)
self.assertEqual(b1.grad, b2.grad)
self.assertEqual(
counters["inductor"]["fxgraph_cache_miss"], grad_multiplier * 1
)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
# we expect:
# .ttir
# .ttgir
# .llir
# .ptx (cuda) or .spv (xpu)
# .json
# __grp__.*.json
# optionally, we can also get
# .cubin (CUDA only)
# .source (new versions of triton only, triton-lang/triton#6992)
# to avoid depending on the device and triton version, just assert that
# we have at least 6 kernels.
save_and_read_min_artifact_count = 6
if bundle_triton and device != "cpu":
self.assertGreaterEqual(
counters["inductor"]["triton_bundler_save_kernel"],
grad_multiplier * save_and_read_min_artifact_count,
)
self.assertEqual(
counters["inductor"]["triton_bundler_read_and_emit_kernel"], 0
)
if use_static_cuda_launcher:
self.assertEqual(
counters["inductor"]["triton_bundler_save_static_autotuner"],
grad_multiplier if device == "cuda" else 0,
)
self.assertEqual(
counters["inductor"]["triton_bundler_load_static_autotuner"], 0
)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
a1 = a_orig.clone().requires_grad_(grad)
b1 = b_orig.clone().requires_grad_(grad)
a2 = a_orig.clone().requires_grad_(grad)
b2 = b_orig.clone().requires_grad_(grad)
eager_result = fn(a1, b1)
compiled_result = compiled_fn(a2, b2)
self.assertEqual(eager_result, compiled_result)
if grad:
eager_result.sum().backward()
compiled_result.sum().backward()
self.assertEqual(a1.grad, a2.grad)
self.assertEqual(b1.grad, b2.grad)
self.assertEqual(
counters["inductor"]["fxgraph_cache_miss"], grad_multiplier * 1
)
self.assertEqual(
counters["inductor"]["fxgraph_cache_hit"], grad_multiplier * 1
)
self.assertEqual(
counters["inductor"]["fxgraph_lookup_write_file"], grad_multiplier * 1
)
if bundle_triton and device != "cpu":
self.assertGreaterEqual(
counters["inductor"]["triton_bundler_save_kernel"],
grad_multiplier * save_and_read_min_artifact_count,
)
self.assertGreaterEqual(
counters["inductor"]["triton_bundler_read_and_emit_kernel"],
grad_multiplier * save_and_read_min_artifact_count,
)
if use_static_cuda_launcher:
self.assertEqual(
counters["inductor"]["triton_bundler_save_static_autotuner"],
grad_multiplier if device == "cuda" else 0,
)
self.assertEqual(
counters["inductor"]["triton_bundler_load_static_autotuner"],
grad_multiplier if device == "cuda" else 0,
)
self.reset()
a1 = a_orig.clone().requires_grad_(grad)
b1 = b_orig.clone().requires_grad_(grad)
a2 = a_orig.clone().requires_grad_(grad)
b2 = b_orig.clone().requires_grad_(grad)
eager_result = fn(a1, b1)
if grad:
eager_result.sum().backward()
with torch.compiler.config.patch({"cache_key_tag": "test"}):
compiled_result = compiled_fn(a2, b2)
if grad:
compiled_result.sum().backward()
self.assertEqual(eager_result, compiled_result)
if grad:
self.assertEqual(a1.grad, a2.grad)
self.assertEqual(b1.grad, b2.grad)
self.assertEqual(
counters["inductor"]["fxgraph_cache_miss"], grad_multiplier * 2
)
self.assertEqual(
counters["inductor"]["fxgraph_cache_hit"], grad_multiplier * 1
)
self.assertEqual(
counters["inductor"]["fxgraph_lookup_write_file"], grad_multiplier * 1
)
if bundle_triton and device != "cpu":
self.assertGreaterEqual(
counters["inductor"]["triton_bundler_save_kernel"],
grad_multiplier * save_and_read_min_artifact_count * 2,
)
self.assertGreaterEqual(
counters["inductor"]["triton_bundler_read_and_emit_kernel"],
grad_multiplier * save_and_read_min_artifact_count,
)
if use_static_cuda_launcher:
self.assertEqual(
counters["inductor"]["triton_bundler_save_static_autotuner"],
grad_multiplier * 2 if device == "cuda" else 0,
)
self.assertEqual(
counters["inductor"]["triton_bundler_load_static_autotuner"],
grad_multiplier if device == "cuda" else 0,
)
@requires_triton()
@config.patch({"fx_graph_remote_cache": True})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.bfloat16))
@parametrize("dynamic", (False, True))
@parametrize("bundle_triton", (False, True))
@parametrize("use_static_cuda_launcher", (False, True))
@config.patch(
{"compile_threads": 1}
) # Can't check globalStats if there are workers
def test_remote_cache_load_function(
self, device, dtype, dynamic, bundle_triton, use_static_cuda_launcher
):
from unittest.mock import patch
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires SM80 or later")
if use_static_cuda_launcher and not (device == "cuda" and bundle_triton):
raise unittest.SkipTest(
"Static cuda launcher requires cuda and triton bundling"
)
if use_static_cuda_launcher and TEST_WITH_ROCM:
raise unittest.SkipTest("Static cuda launcher doesn't work with ROCM")
def fn(x, y):
return (x * 2, y @ y)
a = torch.rand(25, dtype=dtype, device=device)
b = torch.rand(5, 5, dtype=dtype, device=device)
with (
config.patch(
{
"fx_graph_remote_cache": True,
"bundle_triton_into_fx_graph_cache": bundle_triton,
"use_static_cuda_launcher": use_static_cuda_launcher,
}
),
patch.dict(os.environ),
PatchCaches(),
):
os.environ.pop("TRITON_CACHE_MANAGER", None)
for _ in range(4):
with fresh_cache():
compiled_fn = torch.compile(fn, dynamic=dynamic)
self.assertEqual(fn(a, b), compiled_fn(a, b))
reset()
self.assertEqual(global_stats.fx_graph, Stats(1, 3, 1))
with torch.compiler.config.patch({"cache_key_tag": "test"}), fresh_cache():
compiled_fn = torch.compile(fn, dynamic=dynamic)
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(global_stats.fx_graph, Stats(2, 3, 2))
# Check that the cache entries seem reasonable
for k in global_stats.fx_graph.cache:
self.assertRegex(k, r"pt2:fx-graph-v1::[0-9a-z]{52}:c[0-9]+")
@requires_triton()
@config.patch(
{
"fx_graph_cache": True,
"fx_graph_remote_cache": False,
"autotune_local_cache": True,
}
)
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.bfloat16))
@parametrize("dynamic", (False, True))
@torch._functorch.config.patch({"enable_autograd_cache": False})
def test_cache_hot_load(self, device, dtype, dynamic):
"""
Verify that we can populate and hot load functions from the cache.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires SM80 or later")
def fn(x, y):
return x.sin() @ y
a = torch.rand(100, 100, dtype=dtype, device=device)
b = torch.rand(100, 100, dtype=dtype, device=device)
# Record artifacts
with fresh_cache():
compiled_fn = torch.compile(fn, dynamic=dynamic)
# A first call should miss in the cache.
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
autotune_expect = 1 if device == GPU_TYPE else 0
self.assertEqual(len(cache_info.inductor_artifacts), 1)
self.assertEqual(len(cache_info.autotune_artifacts), autotune_expect)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 0)
self.assertEqual(len(cache_info.pgo_artifacts), 0)
self.reset()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# We did not load anything, so we don't hit yet
with fresh_cache():
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
self.reset()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# Hot load and hit
with fresh_cache():
cache_info = torch.compiler.load_cache_artifacts(artifact_bytes)
self.assertEqual(len(cache_info.inductor_artifacts), 1)
self.assertEqual(len(cache_info.autotune_artifacts), autotune_expect)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 0)
self.assertEqual(len(cache_info.pgo_artifacts), 0)
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
@requires_triton()
@config.patch(
{
"fx_graph_cache": True,
"fx_graph_remote_cache": False,
"autotune_local_cache": True,
}
)
@torch._dynamo.config.patch(
{
"caching_precompile": True,
}
)
@parametrize("dynamic", (False, True))
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.bfloat16))
def test_cache_hot_load_caching_precompile(self, device, dtype, dynamic):
"""
Verify that we can populate and hot load functions from the cache.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires SM80 or later")
def fn(x, y):
return x.sin() @ y
a = torch.rand(100, 100, dtype=dtype, device=device, requires_grad=True)
b = torch.rand(100, 100, dtype=dtype, device=device, requires_grad=True)
# Record artifacts
with fresh_cache():
compiled_fn = torch.compile(fn, dynamic=dynamic)
# A first call should miss in the cache.
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
compiled_result.sum().backward()
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 1)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_miss"], 1)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_hit"], 0)
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
autotune_expect = 2 if device == GPU_TYPE else 0
self.assertEqual(len(cache_info.inductor_artifacts), 2)
self.assertEqual(len(cache_info.autotune_artifacts), autotune_expect)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 1)
self.assertEqual(len(cache_info.pgo_artifacts), 0)
self.assertEqual(len(cache_info.precompile_artifacts), 1)
self.reset()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# We did not load anything, so we don't hit yet
with fresh_cache():
eager_result = fn(a, b)
# With caching precompile, we have to call torch.compile on the function again
# to trigger the cache lookup
compiled_fn = torch.compile(fn, dynamic=dynamic)
compiled_result = compiled_fn(a, b)
compiled_result.sum().backward()
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_miss"], 2)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_hit"], 0)
self.reset()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# Hot load and hit
with fresh_cache(), torch.compiler.set_stance("fail_on_recompile"):
cache_info = torch.compiler.load_cache_artifacts(artifact_bytes)
self.assertEqual(len(cache_info.inductor_artifacts), 2)
self.assertEqual(len(cache_info.autotune_artifacts), autotune_expect)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 1)
self.assertEqual(len(cache_info.pgo_artifacts), 0)
self.assertEqual(len(cache_info.precompile_artifacts), 1)
# With caching precompile, we have to call torch.compile on the function again
# to trigger the cache lookup
compiled_fn = torch.compile(fn, dynamic=dynamic)
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
compiled_result.sum().backward()
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["aot_autograd"]["autograd_cache_miss"], 2)
self.assertEqual(counters["aot_autograd"]["autograd_cache_hit"], 0)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_miss"], 2)
self.assertEqual(counters["dynamo_cache"]["dynamo_cache_hit"], 1)
@config.patch(
{
"fx_graph_cache": True,
"fx_graph_remote_cache": False,
}
)
def test_cache_hot_load_repeat(self):
def fn(x, y):
return x @ y.sin()
compiled_fn = torch.compile(fn, dynamic=False)
a = torch.randn(4, 4)
b = torch.randn(4, 4)
a2 = torch.randn(4, 8)
b2 = torch.randn(8, 4)
with fresh_cache():
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
artifacts = torch.compiler.save_cache_artifacts()
self.assertFalse(torch.compiler._cache.CacheArtifactManager.need_serialize())
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
self.reset()
with fresh_cache():
torch.compiler.load_cache_artifacts(artifact_bytes)
eager_result = fn(a, b)
compiled_result = compiled_fn(a, b)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertFalse(torch.compiler._cache.CacheArtifactManager.need_serialize())
self.reset()
with fresh_cache():
eager_result = fn(a2, b2)
compiled_result = compiled_fn(a2, b2)
self.assertEqual(eager_result, compiled_result)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertTrue(torch.compiler._cache.CacheArtifactManager.need_serialize())
@torch._dynamo.config.patch(automatic_dynamic_local_pgo=True)
@torch._functorch.config.patch({"enable_autograd_cache": False})
@config.patch({"fx_graph_cache": True, "fx_graph_remote_cache": False})
def test_cache_hot_load_pgo(self):
"""
Verify that we can populate and hot load functions from the cache with pgo.
"""
backend = torch._dynamo.testing.CompileCounterWithBackend("inductor")
@torch.compile(backend=backend, fullgraph=True)
def f(x):
return x * 2
# Record artifacts
with torch.compiler.config.patch(job_id=self.id()), fresh_cache():
f(torch.randn(2, 3))
f(torch.randn(2, 4))
self.assertEqual(backend.frame_count, 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
self.assertEqual(len(cache_info.inductor_artifacts), 2)
self.assertEqual(len(cache_info.autotune_artifacts), 0)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 0)
self.assertEqual(len(cache_info.pgo_artifacts), 2)
self.reset()
backend.clear()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# Hot load and hit
with torch.compiler.config.patch({"job_id": self.id()}), fresh_cache():
cache_info = torch.compiler.load_cache_artifacts(artifact_bytes)
self.assertEqual(len(cache_info.inductor_artifacts), 2)
self.assertEqual(len(cache_info.autotune_artifacts), 0)
self.assertEqual(len(cache_info.aot_autograd_artifacts), 0)
self.assertEqual(len(cache_info.pgo_artifacts), 2)
f(torch.randn(2, 5))
f(torch.randn(2, 6))
self.assertEqual(backend.frame_count, 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
@torch._dynamo.config.patch(automatic_dynamic_local_pgo=True)
@torch._functorch.config.patch({"enable_autograd_cache": False})
@config.patch({"fx_graph_cache": True, "fx_graph_remote_cache": False})
def test_cache_hot_load_pgo_swap_file_names(self):
"""
Verify that we can populate and hot load functions from the cache with pgo
with file name swapping
"""
backend = torch._dynamo.testing.CompileCounterWithBackend("inductor")
@torch.compile(backend=backend, fullgraph=True)
def f(x):
return x * 2
# Record artifacts
with mock.patch(
"torch._utils_internal.get_mast_job_name_version", return_value=("foo", 5)
):
with fresh_cache():
f(torch.randn(2, 3))
f(torch.randn(2, 4))
self.assertEqual(backend.frame_count, 2)
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
self.assertEqual(len(cache_info.pgo_artifacts), 2)
self.reset()
backend.clear()
# Clean triton kernels
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
# Hot load and hit
with (
mock.patch(
"torch._utils_internal.get_mast_job_name_version",
return_value=("bar", 10),
),
fresh_cache(),
):
cache_info = torch.compiler.load_cache_artifacts(artifact_bytes)
self.assertEqual(len(cache_info.pgo_artifacts), 2)
f(torch.randn(2, 5))
f(torch.randn(2, 6))
self.assertEqual(backend.frame_count, 1)
def test_cache_hot_load_empty(self):
self.assertIsNone(torch.compiler.save_cache_artifacts())
def test_cache_hot_load_generic(self):
class CacheStub:
def __init__(self):
self.cache = {}
def lookup(self, key):
content = self.cache.get(key)
if content is None:
return None
CacheArtifactManager.record_artifact(
ArbitraryCacheArtifact.type(), key, content
)
return content
def save(self, key, content):
self.cache[key] = content
CacheArtifactManager.record_artifact(
ArbitraryCacheArtifact.type(), key, content
)
def clear(self):
self.cache.clear()
cache_stub = CacheStub()
@CacheArtifactFactory.register
class ArbitraryCacheArtifact(CacheArtifact):
@override
def populate_cache(self) -> None:
cache_stub.cache[self.key] = self.content.decode()
@override
@staticmethod
def type() -> str:
return "test"
@override
@staticmethod
def encode(content: str) -> bytes:
return content.encode()
test_cache = {"1": "foo", "2": "bar", "foo": "bar"}
for k, v in test_cache.items():
cache_stub.save(k, v)
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
artifact_bytes, cache_info = artifacts
self.assertEqual(len(cache_info.test_artifacts), 3)
cache_stub.clear()
CacheArtifactManager.clear()
cache_info = torch.compiler.load_cache_artifacts(artifact_bytes)
self.assertEqual(len(cache_info.test_artifacts), 3)
self.assertEqual(cache_stub.cache, test_cache)
CacheArtifactManager.clear()
cache_stub.lookup("foo")
artifacts = torch.compiler.save_cache_artifacts()
self.assertIsNotNone(artifacts)
_, cache_info = artifacts
self.assertEqual(len(cache_info.test_artifacts), 1)
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.float64))
@parametrize("dynamic", (False, True))
def test_cache_load_model(self, device, dtype, dynamic):
"""
Verify that we can populate and load models from the cache.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
def fn(mod, x):
mod.zero_grad()
mod(x).sum().backward()
return [p.grad for p in mod.parameters()]
compiled_fn = torch.compile(fn, dynamic=dynamic)
mod = MyModelConv2d().to(device=device, dtype=dtype)
inp = torch.randn(2, 3, 16, 32, device=device, dtype=dtype)
# The first call should see all cache misses.
counters.clear()
grads1 = compiled_fn(mod, inp)
self.assertGreater(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# The second should see all hits. (First reset so in-memory guards
# don't prevent compilation).
counters.clear()
self.reset()
grads2 = compiled_fn(mod, inp)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
# And the results should be the same.
self.assertEqual(grads1, grads2)
@largeTensorTest("64GB", device=GPU_TYPE, inductor=True)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("device", (GPU_TYPE,))
@parametrize("dtype", (torch.float16, torch.bfloat16))
def test_cache_load_with_guards_int32_bounds(self, device, dtype):
"""
Test caching the same graph, but under conditions that introduce guards
for tensor sizes < int32.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires CUDA SM80 or later")
def fn(x, y):
return (x + x, y + y)
compiled_fn = torch.compile(fn, dynamic=True)
# Iterate over different shapes, varying whether the total
# size is below or above int32. For each combination, we expect
# different guards around whether the symbolic sizes do or do
# not exceed int32.
shapes = (
((5, 6), (7, 8)),
((5, 6), (47000, 47001)),
((47000, 47001), (5, 6)),
)
for a_shape, b_shape in shapes:
a = torch.rand(a_shape, device=device, dtype=dtype)
b = torch.rand(b_shape, device=device, dtype=dtype)
# AVOID a dynamo reset here. We expect guards to have been
# added that will be violated with the new shape. We should
# see a recompilation (along with a cache miss).
counters.clear()
res1 = compiled_fn(a, b)
self.assertGreater(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# A second call should hit. (Reset here to force compilation).
counters.clear()
self.reset()
res2 = compiled_fn(a, b)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(res1, res2)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("dtype", (torch.float32, torch.bfloat16))
def test_cache_load_with_guards_static_bounds(self, device, dtype):
"""
Test caching the same graph, but under conditions that introduce guards
for static bounds.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
if device == "cuda" and dtype == torch.bfloat16 and not SM80OrLater:
raise unittest.SkipTest("requires SM80 or later")
# See lowering; for all of the pooling operators, we always guard and
# make the height/width static.
def fn(x):
return torch.nn.functional.adaptive_avg_pool2d(x, [5, 7])
compiled_fn = torch.compile(fn, dynamic=True)
# Iterate over different input shapes. Each new shape should cause
# a cache miss.
shapes = ((1, 64, 8, 9), (1, 64, 9, 10), (1, 64, 10, 11))
for shape in shapes:
x = torch.rand(shape, device=device, dtype=dtype)
# AVOID a dynamo reset here. For each cache hit, we expect guards
# to have been added that will be violated with each new shape.
# We should see a recompilation (along with a cache miss).
counters.clear()
res1 = compiled_fn(x)
self.assertGreater(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# A second call should hit.
counters.clear()
self.reset()
res2 = compiled_fn(x)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(res1, res2)
@config.patch("fx_graph_cache", True)
@torch._functorch.config.patch({"enable_autograd_cache": False})
@config.patch("fx_graph_remote_cache", False)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@requires_cuda_and_triton
def test_no_arguments_tensor_device_guards(self):
"""
Usually, when there are example inputs, the device index of the inputs
is sufficient to make sure we don't cache hit with the results from different
cuda devices.
When the compiled function takes no arguments, we still need to include the cuda
device index in the cache key.
"""
@torch.compile
def f():
y = torch.randn(3, device="cuda")
return (y,)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
result = f()
self.assertEqual(result[0].device, torch.device("cuda:0"))
self.reset()
# Should not cache hit with device guard
with torch.cuda._DeviceGuard(1):
torch.cuda.set_device(1)
result = f()
self.assertEqual(result[0].device, torch.device("cuda:1"))
@config.patch("fx_graph_cache", True)
@torch._functorch.config.patch({"enable_autograd_cache": False})
@config.patch("fx_graph_remote_cache", False)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@requires_cuda_and_triton
def test_tensor_device_guards_cpu_tensor(self):
"""
CPU tensor arguments should still cache hit
"""
@torch.compile
def f(x):
return x.sin()
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
result = f(torch.randn(3, device="cpu"))
self.assertEqual(result.device, torch.device("cpu"))
self.reset()
# Should not cache hit with device guard
with torch.cuda._DeviceGuard(1):
torch.cuda.set_device(1)
result = f(torch.randn(3, device="cpu"))
self.assertEqual(result.device, torch.device("cpu"))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("device", (GPU_TYPE, "cpu"))
def test_constant_handling(self, device):
"""
Test that different constants are recognized correctly.
"""
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
def fn1(x):
return x + torch.tensor(list(range(12)), device=device)
def fn2(x):
return x + torch.tensor(list(range(1, 13)), device=device)
a = torch.rand(12, device=device)
compiled_fn1 = torch.compile(fn1)
compiled_fn2 = torch.compile(fn2)
# A call to fn1 should miss in the cache.
self.assertEqual(fn1(a), compiled_fn1(a))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# A call to fn2 should also miss (the constant is different)
self.assertEqual(fn2(a), compiled_fn2(a))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("variant", ("v1", "v2"))
def test_auto_functionalized_caching(self, variant):
if variant == "v1":
patch = torch._inductor.config.patch(enable_auto_functionalized_v2=False)
else:
assert variant == "v2"
patch = torch._inductor.config.patch(enable_auto_functionalized_v2=True)
@torch.library.custom_op("mylib::sin_inplace", mutates_args=["x"])
def sin_inplace(x: torch.Tensor) -> None:
x.sin_()
@torch.library.custom_op("mylib::cos_inplace", mutates_args=["x"])
def cos_inplace(x: torch.Tensor) -> None:
x.cos_()
@torch.compile(fullgraph=True)
def fn(x, op):
y = torch.empty_like(x)
op(y)
return y
x = torch.randn(3)
with patch:
# A first call should miss in the cache.
fn(x, sin_inplace)
self.reset()
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
fn(x, sin_inplace)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
# A third call with different operator should have a cache miss
self.reset()
fn(x, cos_inplace)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
@requires_gpu_and_triton
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@with_tf32_off
def test_flex_attention_caching(self):
from torch.nn.attention.flex_attention import create_block_mask, flex_attention
block_mask = create_block_mask(
lambda b, h, q, kv: q >= kv, None, None, 512, 512
)
def score_mod(score, b, h, q, kv):
return score + (q - kv)
def fn(q, k, v):
return flex_attention(q, k, v, score_mod=score_mod, block_mask=block_mask)
def score_mod2(score, b, h, q, kv):
return score
def fn2(q, k, v):
return flex_attention(q, k, v, score_mod=score_mod2, block_mask=block_mask)
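        # fn and fn2 differ only in their score_mod; per the assertions below, the
        # score_mod function participates in the cache key, so fn2 should miss.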
a, b, c = (torch.randn(1, 4, 512, 64).to(GPU_TYPE) for _ in range(3))
compiled_fn = torch.compile(fn)
compiled_fn2 = torch.compile(fn2)
atol, rtol = 1e-4, 1e-4
# A first call should miss in the cache.
self.assertEqual(fn(a, b, c), compiled_fn(a, b, c), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 0)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
self.assertEqual(fn(a, b, c), compiled_fn(a, b, c), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
# A third call with different score_mod should have a cache miss
self.reset()
self.assertEqual(fn2(a, b, c), compiled_fn2(a, b, c), atol=atol, rtol=rtol)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_lookup_write_file"], 1)
@requires_gpu()
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("bundle_triton", (False, True))
def test_higher_order_op_bypass(self, bundle_triton):
"""
        Verify that we bypass the cache when we have higher-order ops
and that bundler start/end works with a cache bypass.
"""
def fn(x):
def true_fn(x: torch.Tensor):
return x.cos()
def false_fn(x: torch.Tensor):
return x.sin()
return torch.cond(x.shape[0], true_fn, false_fn, (x,))
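        # torch.cond is a higher-order op; per the assertions below, graphs containing
        # it currently bypass the FX graph cache entirely rather than miss or hit.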
with config.patch(
bundle_triton_into_fx_graph_cache=bundle_triton,
):
compiled_fn = torch.compile(fn, dynamic=True, fullgraph=True)
x = torch.randn(4, 4, device=GPU_TYPE)
compiled_fn(x)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertGreater(counters["inductor"]["fxgraph_cache_bypass"], 0)
@requires_gpu()
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("bundle_triton", (False, True))
def test_triton_higher_order_op(self, bundle_triton):
"""
        Verify that we can cache a user-defined Triton kernel higher-order op
"""
def fn(x, y):
n_elements = x.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
add_kernel[grid](x, y, x, n_elements, BLOCK_SIZE=4)
return x
def fn2(x, y):
n_elements = x.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
sub_kernel[grid](x, y, x, n_elements, BLOCK_SIZE=4)
return x
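        # fn and fn2 launch different user-defined Triton kernels (add_kernel vs
        # sub_kernel), so the assertions below expect them to be cached independently.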
with config.patch(bundle_triton_into_fx_graph_cache=bundle_triton):
compiled_fn = torch.compile(fn, fullgraph=True)
compiled_fn2 = torch.compile(fn2, fullgraph=True)
x = torch.randn(4, device=GPU_TYPE)
y = torch.randn(4, device=GPU_TYPE)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
# Clean PyCodeCache and triton kernels
PyCodeCache.cache_clear()
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
            # A call to fn2 (a different kernel) should miss. (First reset so in-memory
            # guards don't prevent compilation).
self.reset()
# Clean PyCodeCache and triton kernels
PyCodeCache.cache_clear()
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
compiled_fn2(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
@requires_gpu()
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@parametrize("bundle_triton", (False, True))
def test_triton_higher_order_op_different_configs(self, bundle_triton):
"""
        Verify that user-defined Triton kernels with
        different configs are cached separately.
"""
add_kernel1 = triton.autotune(
configs=[
triton.Config({"BLOCK_SIZE": 128}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_SIZE": 128}, num_stages=4, num_warps=4),
],
key=[],
)(add_kernel)
add_kernel2 = triton.autotune(
configs=[
triton.Config({"BLOCK_SIZE": 64}, num_stages=3, num_warps=8),
triton.Config({"BLOCK_SIZE": 64}, num_stages=4, num_warps=4),
],
key=[],
)(add_kernel)
def fn(x, y):
n_elements = x.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
add_kernel1[grid](x, y, x, n_elements)
return x
def fn2(x, y):
n_elements = x.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
add_kernel2[grid](x, y, x, n_elements)
return x
with config.patch(bundle_triton_into_fx_graph_cache=bundle_triton):
compiled_fn = torch.compile(fn, fullgraph=True)
compiled_fn2 = torch.compile(fn2, fullgraph=True)
x = torch.randn(4, device=GPU_TYPE)
y = torch.randn(4, device=GPU_TYPE)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
# Clean PyCodeCache and triton kernels
PyCodeCache.cache_clear()
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
            # A call to fn2 (different autotune configs) should miss. (First reset so
            # in-memory guards don't prevent compilation).
self.reset()
# Clean PyCodeCache and triton kernels
PyCodeCache.cache_clear()
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
compiled_fn2(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
@requires_gpu()
@requires_triton()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"compile_threads": 1})
@parametrize("bundle_triton", (False, True))
@parametrize("use_static_cuda_launcher", (False, True))
def test_triton_op(self, bundle_triton, use_static_cuda_launcher):
if use_static_cuda_launcher and TEST_WITH_ROCM:
raise unittest.SkipTest("Static cuda launcher doesn't work with ROCM")
libname = "my_cool_namespace"
opname = "my_triton_operator"
@torch._library.triton_op(f"{libname}::{opname}", mutates_args={})
def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
output = torch.empty_like(x)
n_elements = output.numel()
def grid(meta):
return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
capture_triton(add_kernel)[grid](x, y, output, n_elements, 16)
return output
def f(x, y):
return add(x, y)
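        # torch._library.triton_op registers `add` as a custom op whose implementation
        # launches a Triton kernel; capture_triton wraps add_kernel so that the launch
        # can be traced and, per the assertions below, participate in the FX graph cache.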
compile_threads = 1 if use_static_cuda_launcher else config.compile_threads
with config.patch(
bundle_triton_into_fx_graph_cache=bundle_triton,
use_static_cuda_launcher=use_static_cuda_launcher,
compile_threads=compile_threads,
):
compiled_fn = torch.compile(f, fullgraph=True)
x = torch.randn(4, device=GPU_TYPE)
y = torch.randn(4, device=GPU_TYPE)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
# A second call should hit. (First reset so in-memory guards
# don't prevent compilation).
self.reset()
# Clean PyCodeCache and triton kernels
PyCodeCache.cache_clear()
shutil.rmtree(os.path.join(cache_dir(), "triton"), ignore_errors=True)
compiled_fn(x, y)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_generated_kernel_count(self):
"""
Test that we bump the generated_kernel_count metric on a cache hit.
"""
torch._logging.set_logs(inductor_metrics=True)
def fn(x, y):
return (x * y + y,)
a = torch.rand(5, 5)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn)
metrics.reset()
self.assertEqual(metrics.generated_kernel_count, 0)
# Verify the "miss" case.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertEqual(metrics.generated_kernel_count, 1)
# Verify the "hit" case
self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(metrics.generated_kernel_count, 2)
torch._logging.set_logs()
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_inductor_counters(self):
"""
Test that we bump the inductor counters on a cache hit.
"""
def fn(a, b):
return torch.mm(a, b)
a = torch.rand(8, 32, device="cpu")
b = torch.rand(32, 8, device="cpu")
compiled_fn = torch.compile(fn)
# Verify the "miss" case.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# Verify the "hit" case.
self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_cache_clear(self):
"""
Test clearing the cache.
"""
def fn(x, y):
return (x * y,)
a = torch.rand(5, 5)
b = torch.rand(5, 5)
compiled_fn = torch.compile(fn)
# A first call should miss in the cache.
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# A second call should hit.
counters.clear()
self.reset()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
# Clear the cache; now we should miss.
counters.clear()
self.reset()
torch._inductor.codecache.FxGraphCache.clear()
self.assertEqual(fn(a, b), compiled_fn(a, b))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_cache_with_nt(self):
def gen_nt(r):
values = torch.randn(r, 16)
offsets = torch.tensor([0, 2, 3, 6, 13, r])
return torch.nested.nested_tensor_from_jagged(values, offsets)
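        # gen_nt builds a jagged NestedTensor with r values; per the assertions below,
        # inp1 (r=19) and inp2 (r=20) share a single dynamic compilation, so the first
        # pass records one cache miss and the second pass one cache hit.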
def fn(nt):
if nt.values().size(0) % 16 == 0:
return nt.sin()
return nt.cos()
inp1 = gen_nt(19)
inp2 = gen_nt(20)
counters.clear()
torch.compile(fn)(inp1)
torch.compile(fn)(inp2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.reset()
counters.clear()
torch.compile(fn)(inp1)
torch.compile(fn)(inp2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_cache_with_symint_non_arg_guard(self):
def fn(x, ref_id):
self_id = 22
if self_id == ref_id:
x = torch.mul(x, 1.0)
else:
x = torch.mul(x, 0)
return x
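        # With dynamic=True, ref_id is treated as a symbolic int that is not a tensor
        # argument, and the self_id == ref_id branch installs a guard on it; the
        # assertions below check that such graphs still cache correctly.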
x = torch.ones(2)
counters.clear()
torch.compile(fn, fullgraph=True, dynamic=True)(x, 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.reset()
counters.clear()
torch.compile(fn, fullgraph=True, dynamic=True)(x, 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
def test_cache_guard(self):
def f(x, val):
if val > 5:
return x.sin()
else:
return x.cos()
x = torch.ones(2)
a = torch.compile(f, dynamic=True)(x, 6)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.reset()
counters.clear()
b = torch.compile(f, dynamic=True)(x, 4)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.assertNotEqual(a, b)
@config.patch({"fx_graph_cache": False, "fx_graph_remote_cache": False})
@requires_cuda_and_triton
@unittest.expectedFailure # TODO: pass in optimize_mem at runtime
def test_async_compile_cache(self):
class SimpleFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return grad_output * 2
x = torch.rand([10], requires_grad=True, device="cuda")
counters.clear()
sf = SimpleFunction
out = torch.compile(sf.apply)(x)
out.sum().backward()
self.assertEqual(counters["inductor"]["async_compile_cache_miss"], 1)
self.assertEqual(counters["inductor"]["async_compile_cache_hit"], 1)
@config.patch({"fx_graph_cache": True})
def test_cache_guard_overspec(self):
b = torch.tensor([0, 2, 4, 6, 8])
@torch.compile
class MyModel(torch.nn.Module):
def forward(self, x):
return torch.isin(x, b)
model = MyModel()
counters.clear()
for i in range(1, 5):
model(torch.arange(i))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
self.reset()
counters.clear()
for i in range(1, 5):
model(torch.arange(i))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 2)
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"freezing": True})
@parametrize("device", (GPU_TYPE, "cpu"))
@parametrize("inlinable", (True, False))
def test_freezing(self, device, inlinable):
if device == GPU_TYPE and not HAS_GPU:
raise unittest.SkipTest(f"requires {GPU_TYPE}")
# For machines with mkldnn_fp16 support, weight_pack in mkldnn_fusion.py causes
# the creation of a mkldnn format tensor which the current implementation does
# not support.
if (
device == "cpu"
and torch.backends.mkldnn.is_available()
and torch.ops.mkldnn._is_mkldnn_fp16_supported()
):
raise unittest.SkipTest("mkldnn tensors unsupported")
# The shape of the frozen constant determines if it will be inlined.
shape = (4,) if inlinable else (8, 8)
class MM(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(shape))
def forward(self, x):
return x @ self.param
dtype = torch.float16
# Populate a cache entry.
mod1 = MM().to(device=device, dtype=dtype)
with torch.no_grad():
x = torch.rand(shape).to(device=device, dtype=dtype)
out0 = mod1(x)
out1 = torch.compile(mod1)(x)
self.assertEqual(out0, out1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
counters.clear()
self.reset()
# Same nn.Module, but with different parameters. In the case that the param can
# be inlined, we should consider the actual tensor value and we expect a cache
# miss (because the values are different here). If the param cannot be inlined,
# then we consider only the tensor metadata and we expect a cache hit.
mod2 = MM().to(device=device, dtype=dtype)
self.assertNotEqual(mod1.param, mod2.param)
with torch.no_grad():
x = torch.rand(shape).to(device=device, dtype=dtype)
out0 = mod2(x)
out1 = torch.compile(mod2)(x)
self.assertEqual(out0, out1)
self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
self.assertEqual(
counters["inductor"]["fxgraph_cache_miss"], 1 if inlinable else 0
)
self.assertEqual(
counters["inductor"]["fxgraph_cache_hit"], 0 if inlinable else 1
)
@instantiate_parametrized_tests
|
TestFxGraphCache
|
python
|
pytorch__pytorch
|
test/dynamo/test_aot_autograd_cache.py
|
{
"start": 84302,
"end": 84419
}
|
class ____(AOTAutogradCacheTests):
pass
@inductor_config.patch("fx_graph_cache", True)
|
AOTAutogradCacheBundledTests
|
python
|
huggingface__transformers
|
tests/models/pegasus_x/test_modeling_pegasus_x.py
|
{
"start": 35167,
"end": 36297
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (PegasusXDecoder,) if is_torch_available() else ()
is_encoder_decoder = False
def setUp(
self,
):
self.model_tester = PegasusXStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=PegasusXConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
@unittest.skip(reason="Decoder cannot keep gradients")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot keep gradients")
    def test_flex_attention_with_grads(self):
return
|
PegasusXStandaloneDecoderModelTest
|
python
|
django-extensions__django-extensions
|
tests/management/commands/test_drop_test_database.py
|
{
"start": 2420,
"end": 13646
}
|
class ____(TestCase):
"""Test for drop_test_database command."""
@patch("sys.stdout", new_callable=StringIO)
@patch("django_extensions.management.commands.drop_test_database.input")
def test_should_raise_CommandError_if_database_is_unknown(self, m_input, m_stdout):
m_input.return_value = "no"
call_command("drop_test_database")
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
@override_settings(DATABASES=SQLITE)
@patch("sys.stdout", new_callable=StringIO)
@patch("os.path.isfile")
@patch("os.unlink")
def test_sqlite3_should_unlink_primary_test_database(
self, m_unlink, m_isfile, m_stdout
):
# Indicate that no clone databases exist
m_isfile.side_effect = (True, False)
call_command("drop_test_database", "--noinput", verbosity=2)
with self.subTest("Should check for test database names until failure"):
self.assertListEqual(
m_isfile.call_args_list,
# See production code comments regarding double dots
[call("test_db.sqlite3"), call("test_db_1..sqlite3")],
)
with self.subTest("Should unlink only primary test database"):
self.assertListEqual(
m_unlink.call_args_list,
[call("test_db.sqlite3")],
)
with self.subTest("Should report successful message"):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=SQLITE)
@patch("os.path.isfile")
@patch("os.unlink")
def test_sqlite3_should_unlink_all_existing_clone_databases(
self, m_unlink, m_isfile
):
"""Test cloned test databases created via 'manage.py test --parallel'."""
# Indicate that clone databases exist up to test_db_2.sqlite3
m_isfile.side_effect = (True, True, True, False)
call_command("drop_test_database", "--noinput")
with self.subTest("Should check for test database names until failure"):
self.assertListEqual(
m_isfile.call_args_list,
[
call("test_db.sqlite3"),
# See production code comments regarding double dots
call("test_db_1..sqlite3"),
call("test_db_2..sqlite3"),
call("test_db_3..sqlite3"),
],
)
with self.subTest("Should unlink all existing test databases"):
self.assertListEqual(
m_unlink.call_args_list,
[
call("test_db.sqlite3"),
# See production code comments regarding double dots
call("test_db_1..sqlite3"),
call("test_db_2..sqlite3"),
],
)
@override_settings(DATABASES=SQLITE)
@patch("sys.stdout", new_callable=StringIO)
@patch("os.path.isfile")
@patch("os.unlink")
def test_sqlite3_should_not_print_Reset_successful_when_OSError_exception(
self, m_unlink, m_isfile, m_stdout
):
m_isfile.return_value = True
m_unlink.side_effect = OSError
call_command("drop_test_database", "--noinput", verbosity=2)
self.assertNotIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_HOST_PORT)
@patch("sys.stdout", new_callable=StringIO)
def test_mysql_should_drop_database_with_host_and_port(self, m_stdout):
m_database = MagicMock()
m_database.__spec__ = Mock()
# Indicate that no clone databases exist
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = (
1,
None,
0,
)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command("drop_test_database", "--noinput", verbosity=2)
with self.subTest(
"Should check for and remove test database names until failure"
):
exists_query = (
"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
)
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call("DROP DATABASE IF EXISTS `test_test`"),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest("Should report successful message"):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_SOCKET)
@patch("sys.stdout", new_callable=StringIO)
def test_mysql_should_drop_database_with_unix_socket(self, m_stdout):
m_database = MagicMock()
m_database.__spec__ = Mock()
# Indicate that no clone databases exist
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = (
1,
None,
0,
)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command("drop_test_database", "--noinput", verbosity=2)
with self.subTest(
"Should check for and remove test database names until failure"
):
exists_query = (
"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
)
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call("DROP DATABASE IF EXISTS `test_test`"),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest("Should report successful message"):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=MYSQL_HOST_PORT)
def test_mysql_should_drop_all_existing_clone_databases(self):
"""Test cloned test databases created via 'manage.py test --parallel'."""
m_database = MagicMock()
m_database.__spec__ = Mock()
# Indicate that clone databases exist up to test_test_2
# DROP queries return None while SELECT queries return a row count
m_database.connect.return_value.cursor.return_value.execute.side_effect = (
1,
None,
1,
None,
1,
None,
0,
)
with patch.dict("sys.modules", MySQLdb=m_database):
call_command("drop_test_database", "--noinput")
exists_query = (
"SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME="
)
self.assertListEqual(
m_database.connect.return_value.cursor.return_value.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call("DROP DATABASE IF EXISTS `test_test`"),
call(exists_query + "'test_test_1';"),
call("DROP DATABASE IF EXISTS `test_test_1`"),
call(exists_query + "'test_test_2';"),
call("DROP DATABASE IF EXISTS `test_test_2`"),
call(exists_query + "'test_test_3';"),
],
)
@override_settings(DATABASES=POSTGRES)
@patch("sys.stdout", new_callable=StringIO)
def test_postgresql_should_drop_database(self, m_stdout):
m_database = MagicMock()
m_database.__spec__ = Mock()
m_cursor = Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
# Indicate that no clone databases exist
type(m_cursor).rowcount = PropertyMock(side_effect=(1, 0))
mock_kwargs = {"psycopg2": m_database}
has_psycopg3 = importlib.util.find_spec("psycopg") is not None
if has_psycopg3:
mock_kwargs = {"psycopg": m_database}
with patch.dict("sys.modules", **mock_kwargs):
call_command("drop_test_database", "--noinput", verbosity=2)
with self.subTest(
"Should check for and remove test database names until failure"
):
exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname="
self.assertListEqual(
m_cursor.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS "test_test";'),
call(exists_query + "'test_test_1';"),
],
)
with self.subTest("Should report successful message"):
self.assertIn("Reset successful.", m_stdout.getvalue())
@override_settings(DATABASES=POSTGRES)
def test_postgresql_should_drop_all_existing_cloned_databases(self):
"""Test cloned test databases created via 'manage.py test --parallel'."""
m_database = MagicMock()
m_database.__spec__ = Mock()
m_cursor = Mock()
m_database.connect.return_value.cursor.return_value = m_cursor
# Indicate that clone databases exist up to test_test_2
type(m_cursor).rowcount = PropertyMock(side_effect=(1, 1, 1, 0))
mock_kwargs = {"psycopg2": m_database}
has_psycopg3 = importlib.util.find_spec("psycopg") is not None
if has_psycopg3:
mock_kwargs = {"psycopg": m_database}
with patch.dict("sys.modules", **mock_kwargs):
call_command("drop_test_database", "--noinput")
exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname="
self.assertListEqual(
m_cursor.execute.call_args_list,
[
call(exists_query + "'test_test';"),
call('DROP DATABASE IF EXISTS "test_test";'),
call(exists_query + "'test_test_1';"),
call('DROP DATABASE IF EXISTS "test_test_1";'),
call(exists_query + "'test_test_2';"),
call('DROP DATABASE IF EXISTS "test_test_2";'),
call(exists_query + "'test_test_3';"),
],
)
@override_settings(DATABASES=POSTGRES)
@patch("sys.stdout", new_callable=StringIO)
    def test_postgresql_should_not_print_Reset_successful_when_exception_occurred(
self, m_stdout
):
m_database = MagicMock()
m_database.__spec__ = Mock()
m_database.ProgrammingError = Exception
m_cursor = Mock()
m_cursor.execute.side_effect = m_database.ProgrammingError
m_database.connect.return_value.cursor.return_value = m_cursor
mock_kwargs = {"psycopg2": m_database}
has_psycopg3 = importlib.util.find_spec("psycopg") is not None
if has_psycopg3:
mock_kwargs = {"psycopg": m_database}
with patch.dict("sys.modules", **mock_kwargs):
call_command("drop_test_database", "--noinput", verbosity=2)
self.assertNotIn("Reset successful.", m_stdout.getvalue())
|
DropTestDatabaseTests
|
python
|
great-expectations__great_expectations
|
tests/integration/data_sources_and_expectations/test_expectation_conditions.py
|
{
"start": 10591,
"end": 14279
}
|
class ____:
"""Simple tests to ensure that pandas properly utilizes row condition from each
type of expectation (ColumnMapExpectation, ColumnPairMapExpectation, etc)
"""
@parameterize_batch_for_data_sources(
data_source_configs=[PandasDataFrameDatasourceTestConfig()],
data=DATA,
)
def test_column_aggregate_expectation_with_condition_row_condition(
self, batch_for_datasource: Batch
) -> None:
"""Test ColumnAggregateExpectation with Condition row_condition."""
row_condition = (Column("quantity") > 0) & (Column("quantity") < 3)
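        # Column("quantity") comparisons build condition objects and `&` combines them
        # into a compound row_condition, which restricts the rows the expectation is
        # evaluated against.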
expectation = gxe.ExpectColumnMinToBeBetween(
column="amount",
min_value=0.5,
max_value=1.5,
row_condition=row_condition,
condition_parser="pandas",
)
result = batch_for_datasource.validate(expectation)
assert result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PandasDataFrameDatasourceTestConfig()],
data=DATA,
)
def test_column_map_expectation_with_condition_row_condition(
self, batch_for_datasource: Batch
) -> None:
"""Test ColumnMapExpectation with Condition row_condition."""
row_condition = Column("name") == "albert"
expectation = gxe.ExpectColumnValuesToBeBetween(
column="quantity",
min_value=0.5,
max_value=1.5,
row_condition=row_condition,
condition_parser="pandas",
)
result = batch_for_datasource.validate(expectation)
assert result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PandasDataFrameDatasourceTestConfig()],
data=DATA,
)
def test_column_pair_map_expectation_with_condition_row_condition(
self, batch_for_datasource: Batch
) -> None:
"""Test ColumnPairMapExpectation with Condition row_condition."""
row_condition = Column("quantity") < 3
expectation = gxe.ExpectColumnPairValuesToBeEqual(
column_A="quantity",
column_B="quantity",
row_condition=row_condition,
condition_parser="pandas",
)
result = batch_for_datasource.validate(expectation)
assert result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PandasDataFrameDatasourceTestConfig()],
data=DATA,
)
def test_multicolumn_map_expectation_with_condition_row_condition(
self, batch_for_datasource: Batch
) -> None:
"""Test MulticolumnMapExpectation with Condition row_condition."""
row_condition = Column("quantity") > 0
expectation = gxe.ExpectCompoundColumnsToBeUnique(
column_list=["name", "quantity"],
row_condition=row_condition,
condition_parser="pandas",
)
result = batch_for_datasource.validate(expectation)
assert result.success
@parameterize_batch_for_data_sources(
data_source_configs=[PandasDataFrameDatasourceTestConfig()],
data=DATA,
)
def test_batch_expectation_with_condition_row_condition(
self, batch_for_datasource: Batch
) -> None:
"""Test BatchExpectation with Condition row_condition."""
row_condition = Column("name") == "albert"
expectation = gxe.ExpectTableRowCountToBeBetween(
min_value=1,
max_value=1,
row_condition=row_condition,
condition_parser="pandas",
)
result = batch_for_datasource.validate(expectation)
assert result.success
|
TestPandasConditionClassAcrossExpectationTypes
|
python
|
django-import-export__django-import-export
|
import_export/formats/base_formats.py
|
{
"start": 3692,
"end": 3860
}
|
class ____(TextFormat):
TABLIB_MODULE = "tablib.formats._yaml"
# See https://stackoverflow.com/questions/332129/yaml-mime-type
CONTENT_TYPE = "text/yaml"
|
YAML
|
python
|
apache__airflow
|
providers/databricks/tests/unit/databricks/hooks/test_databricks.py
|
{
"start": 9676,
"end": 50319
}
|
class ____:
"""
Tests for DatabricksHook.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=LOGIN,
password=PASSWORD,
)
)
self.hook = DatabricksHook(retry_delay=0)
def test_user_agent_string(self):
op = "DatabricksSql"
hook = DatabricksHook(retry_delay=0, caller=op)
ua_string = hook.user_agent_value
assert ua_string.endswith(f" operator/{op}")
def test_parse_host_with_proper_host(self):
host = self.hook._parse_host(HOST)
assert host == HOST
def test_parse_host_with_scheme(self):
host = self.hook._parse_host(HOST_WITH_SCHEME)
assert host == HOST
def test_init_bad_retry_limit(self):
with pytest.raises(ValueError, match="Retry limit must be greater than or equal to 1"):
DatabricksHook(retry_limit=0)
def test_do_api_call_retries_with_retryable_error(self):
hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch("airflow.providers.databricks.hooks.databricks_base.requests") as mock_requests:
with mock.patch.object(hook.log, "error") as mock_errors:
setup_mock_requests(mock_requests, exception)
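                    # setup_mock_requests is a helper defined elsewhere in this module;
                    # called without error_count it appears to make every mocked request
                    # raise `exception`, so the hook exhausts its retries below.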
with pytest.raises(AirflowException):
hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
def test_do_api_call_retries_with_too_many_requests(self):
hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
with mock.patch("airflow.providers.databricks.hooks.databricks_base.requests") as mock_requests:
with mock.patch.object(hook.log, "error") as mock_errors:
setup_mock_requests(mock_requests, requests_exceptions.HTTPError, status_code=429)
with pytest.raises(AirflowException):
hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_do_api_call_does_not_retry_with_non_retryable_error(self, mock_requests):
hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
setup_mock_requests(mock_requests, requests_exceptions.HTTPError, status_code=400)
with mock.patch.object(hook.log, "error") as mock_errors:
with pytest.raises(AirflowException):
hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
mock_errors.assert_not_called()
def test_do_api_call_succeeds_after_retrying(self):
hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch("airflow.providers.databricks.hooks.databricks_base.requests") as mock_requests:
with mock.patch.object(hook.log, "error") as mock_errors:
setup_mock_requests(
mock_requests, exception, error_count=2, response_content={"run_id": "1"}
)
response = hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == 2
assert response == {"run_id": "1"}
def test_do_api_call_custom_retry(self):
hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
for exception in [
requests_exceptions.ConnectionError,
requests_exceptions.SSLError,
requests_exceptions.Timeout,
requests_exceptions.ConnectTimeout,
requests_exceptions.HTTPError,
]:
with mock.patch("airflow.providers.databricks.hooks.databricks_base.requests") as mock_requests:
with mock.patch.object(hook.log, "error") as mock_errors:
setup_mock_requests(mock_requests, exception)
with pytest.raises(AirflowException):
hook._do_api_call(SUBMIT_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_do_api_call_patch(self, mock_requests):
mock_requests.patch.return_value.json.return_value = {"cluster_name": "new_name"}
data = {"cluster_name": "new_name"}
patched_cluster_name = self.hook._do_api_call(("PATCH", "2.1/jobs/runs/submit"), data)
assert patched_cluster_name["cluster_name"] == "new_name"
mock_requests.patch.assert_called_once_with(
submit_run_endpoint(HOST),
json={"cluster_name": "new_name"},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_create(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {"job_id": JOB_ID}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
json = {"name": "test"}
job_id = self.hook.create_job(json)
assert job_id == JOB_ID
mock_requests.post.assert_called_once_with(
create_endpoint(HOST),
json={"name": "test"},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_reset_with_no_acl(self, mock_requests):
mock_requests.codes.ok = 200
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
json = {"name": "test"}
self.hook.reset_job(JOB_ID, json)
mock_requests.post.assert_called_once_with(
reset_endpoint(HOST),
json={"job_id": JOB_ID, "new_settings": {"name": "test"}},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_reset_with_acl(self, mock_requests):
mock_requests.codes.ok = 200
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
ACCESS_CONTROL_LIST = [{"permission_level": "CAN_MANAGE", "user_name": "test_user"}]
json = {
"access_control_list": ACCESS_CONTROL_LIST,
"name": "test",
}
self.hook.reset_job(JOB_ID, json)
mock_requests.post.assert_called_once_with(
reset_endpoint(HOST),
json={
"job_id": JOB_ID,
"new_settings": json,
},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
mock_requests.patch.assert_called_once_with(
permissions_endpoint(HOST, JOB_ID),
json={"access_control_list": ACCESS_CONTROL_LIST},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_update(self, mock_requests):
mock_requests.codes.ok = 200
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
json = {"name": "test"}
self.hook.update_job(JOB_ID, json)
mock_requests.post.assert_called_once_with(
update_endpoint(HOST),
json={"job_id": JOB_ID, "new_settings": {"name": "test"}},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_submit_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {"run_id": "1"}
data = {"notebook_task": NOTEBOOK_TASK, "new_cluster": NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == "1"
mock_requests.post.assert_called_once_with(
submit_run_endpoint(HOST),
json={
"notebook_task": NOTEBOOK_TASK,
"new_cluster": NEW_CLUSTER,
},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_spark_python_submit_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {"run_id": "1"}
data = {"spark_python_task": SPARK_PYTHON_TASK, "new_cluster": NEW_CLUSTER}
run_id = self.hook.submit_run(data)
assert run_id == "1"
mock_requests.post.assert_called_once_with(
submit_run_endpoint(HOST),
json={
"spark_python_task": SPARK_PYTHON_TASK,
"new_cluster": NEW_CLUSTER,
},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_run_now(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {"run_id": "1"}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {"notebook_params": NOTEBOOK_PARAMS, "jar_params": JAR_PARAMS, "job_id": JOB_ID}
run_id = self.hook.run_now(data)
assert run_id == "1"
mock_requests.post.assert_called_once_with(
run_now_endpoint(HOST),
json={"notebook_params": NOTEBOOK_PARAMS, "jar_params": JAR_PARAMS, "job_id": JOB_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_page_url(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
run_page_url = self.hook.get_run_page_url(RUN_ID)
assert run_page_url == RUN_PAGE_URL
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={"run_id": RUN_ID},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_job_id(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
job_id = self.hook.get_job_id(RUN_ID)
assert job_id == JOB_ID
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={"run_id": RUN_ID},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_output(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_OUTPUT_RESPONSE
run_output_error = self.hook.get_run_output(RUN_ID).get("error")
assert run_output_error == ERROR_MESSAGE
mock_requests.get.assert_called_once_with(
get_run_output_endpoint(HOST),
json=None,
params={"run_id": RUN_ID},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_state(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
run_state = self.hook.get_run_state(RUN_ID)
assert run_state == RunState(LIFE_CYCLE_STATE, RESULT_STATE, STATE_MESSAGE)
mock_requests.get.assert_called_once_with(
get_run_endpoint(HOST),
json=None,
params={"run_id": RUN_ID},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_state_str(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
run_state_str = self.hook.get_run_state_str(RUN_ID)
assert run_state_str == f"State: {LIFE_CYCLE_STATE}. Result: {RESULT_STATE}. {STATE_MESSAGE}"
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_state_lifecycle(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
lifecycle_state = self.hook.get_run_state_lifecycle(RUN_ID)
assert lifecycle_state == LIFE_CYCLE_STATE
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_state_result(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
result_state = self.hook.get_run_state_result(RUN_ID)
assert result_state == RESULT_STATE
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_run_state_cycle(self, mock_requests):
mock_requests.get.return_value.json.return_value = GET_RUN_RESPONSE
state_message = self.hook.get_run_state_message(RUN_ID)
assert state_message == STATE_MESSAGE
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_cancel_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = GET_RUN_RESPONSE
self.hook.cancel_run(RUN_ID)
mock_requests.post.assert_called_once_with(
cancel_run_endpoint(HOST),
json={"run_id": RUN_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_cancel_all_runs(self, mock_requests):
mock_requests.post.return_value.json.return_value = {}
self.hook.cancel_all_runs(JOB_ID)
mock_requests.post.assert_called_once_with(
cancel_all_runs_endpoint(HOST),
json={"job_id": JOB_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_delete_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {}
self.hook.delete_run(RUN_ID)
mock_requests.post.assert_called_once_with(
delete_run_endpoint(HOST),
json={"run_id": RUN_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_repair_run(self, mock_requests):
mock_requests.post.return_value.json.return_value = {"repair_id": 734650698524280}
        json = {
"run_id": 455644833,
"rerun_tasks": ["task0", "task1"],
"latest_repair_id": 734650698524280,
"rerun_all_failed_tasks": False,
"jar_params": ["john", "doe", "35"],
"notebook_params": {"name": "john doe", "age": "35"},
"python_params": ["john doe", "35"],
"spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"],
"python_named_params": {"name": "task", "data": "dbfs:/path/to/data.json"},
"pipeline_params": {"full_refresh": True},
"sql_params": {"name": "john doe", "age": "35"},
"dbt_commands": ["dbt deps", "dbt seed", "dbt run"],
        }
self.hook.repair_run(json)
mock_requests.post.assert_called_once_with(
repair_run_endpoint(HOST),
json=json,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_negative_get_latest_repair_id(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = {
"job_id": JOB_ID,
"run_id": RUN_ID,
"state": {"life_cycle_state": "RUNNING", "result_state": "RUNNING"},
"repair_history": [
{
"type": "ORIGINAL",
"start_time": 1704528798059,
"end_time": 1704529026679,
"state": {
"life_cycle_state": "RUNNING",
"result_state": "RUNNING",
"state_message": "dummy",
"user_cancelled_or_timedout": "false",
},
"task_run_ids": [396529700633015, 1111270934390307],
}
],
}
latest_repair_id = self.hook.get_latest_repair_id(RUN_ID)
assert latest_repair_id is None
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_positive_get_latest_repair_id(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = {
"job_id": JOB_ID,
"run_id": RUN_ID,
"state": {"life_cycle_state": "RUNNING", "result_state": "RUNNING"},
"repair_history": [
{
"type": "ORIGINAL",
"start_time": 1704528798059,
"end_time": 1704529026679,
"state": {
"life_cycle_state": "TERMINATED",
"result_state": "CANCELED",
"state_message": "dummy_original",
"user_cancelled_or_timedout": "false",
},
"task_run_ids": [396529700633015, 1111270934390307],
},
{
"type": "REPAIR",
"start_time": 1704530276423,
"end_time": 1704530363736,
"state": {
"life_cycle_state": "TERMINATED",
"result_state": "CANCELED",
"state_message": "dummy_repair_1",
"user_cancelled_or_timedout": "true",
},
"id": 108607572123234,
"task_run_ids": [396529700633015, 1111270934390307],
},
{
"type": "REPAIR",
"start_time": 1704531464690,
"end_time": 1704531481590,
"state": {"life_cycle_state": "RUNNING", "result_state": "RUNNING"},
"id": 52532060060836,
"task_run_ids": [396529700633015, 1111270934390307],
},
],
}
latest_repair_id = self.hook.get_latest_repair_id(RUN_ID)
assert latest_repair_id == 52532060060836
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_cluster_state(self, mock_requests):
"""
Response example from https://docs.databricks.com/api/workspace/clusters/get
"""
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = GET_CLUSTER_RESPONSE
cluster_state = self.hook.get_cluster_state(CLUSTER_ID)
assert cluster_state == ClusterState(CLUSTER_STATE, CLUSTER_STATE_MESSAGE)
mock_requests.get.assert_called_once_with(
get_cluster_endpoint(HOST),
json=None,
params={"cluster_id": CLUSTER_ID},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_start_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.start_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
start_cluster_endpoint(HOST),
json={"cluster_id": CLUSTER_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_restart_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.restart_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
restart_cluster_endpoint(HOST),
json={"cluster_id": CLUSTER_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_terminate_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
self.hook.terminate_cluster({"cluster_id": CLUSTER_ID})
mock_requests.post.assert_called_once_with(
terminate_cluster_endpoint(HOST),
json={"cluster_id": CLUSTER_ID},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_install_libs_on_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {"cluster_id": CLUSTER_ID, "libraries": LIBRARIES}
self.hook.install(data)
mock_requests.post.assert_called_once_with(
install_endpoint(HOST),
json={"cluster_id": CLUSTER_ID, "libraries": LIBRARIES},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_uninstall_libs_on_cluster(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.post.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.post.return_value).status_code = status_code_mock
data = {"cluster_id": CLUSTER_ID, "libraries": LIBRARIES}
self.hook.uninstall(data)
mock_requests.post.assert_called_once_with(
uninstall_endpoint(HOST),
json={"cluster_id": CLUSTER_ID, "libraries": LIBRARIES},
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
def test_is_oauth_token_valid_returns_true(self):
token = {
"access_token": "my_token",
"expires_on": int(time.time()) + TOKEN_REFRESH_LEAD_TIME + 10,
"token_type": "Bearer",
}
assert self.hook._is_oauth_token_valid(token)
def test_is_oauth_token_valid_returns_false(self):
token = {
"access_token": "my_token",
"expires_on": int(time.time()),
"token_type": "Bearer",
}
assert not self.hook._is_oauth_token_valid(token)
def test_is_oauth_token_valid_raises_missing_token(self):
with pytest.raises(AirflowException):
self.hook._is_oauth_token_valid({})
@pytest.mark.parametrize(("access_token", "token_type"), [("my_token", None), ("my_token", "not bearer")])
def test_is_oauth_token_valid_raises_invalid_type(self, access_token, token_type):
with pytest.raises(AirflowException):
self.hook._is_oauth_token_valid({"access_token": access_token, "token_type": token_type})
def test_is_oauth_token_valid_raises_wrong_time_key(self):
token = {
"access_token": "my_token",
"expires_on": 0,
"token_type": "Bearer",
}
with pytest.raises(AirflowException):
self.hook._is_oauth_token_valid(token, time_key="expiration")
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_list_jobs_success_single_page(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = LIST_JOBS_RESPONSE
jobs = self.hook.list_jobs()
mock_requests.get.assert_called_once_with(
list_jobs_endpoint(HOST),
json=None,
params={"limit": 25, "page_token": "", "expand_tasks": False, "include_user_names": False},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
assert jobs == LIST_JOBS_RESPONSE["jobs"]
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_list_jobs_success_multiple_pages(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.side_effect = [
create_successful_response_mock(
{**LIST_JOBS_RESPONSE, "has_more": True, "next_page_token": "PAGETOKEN"}
),
create_successful_response_mock(LIST_JOBS_RESPONSE),
]
jobs = self.hook.list_jobs()
assert mock_requests.get.call_count == 2
first_call_args = mock_requests.method_calls[0]
assert first_call_args[1][0] == list_jobs_endpoint(HOST)
assert first_call_args[2]["params"] == {
"limit": 25,
"page_token": "",
"expand_tasks": False,
"include_user_names": False,
}
second_call_args = mock_requests.method_calls[1]
assert second_call_args[1][0] == list_jobs_endpoint(HOST)
assert second_call_args[2]["params"] == {
"limit": 25,
"page_token": "PAGETOKEN",
"expand_tasks": False,
"include_user_names": False,
}
assert len(jobs) == 2
assert jobs == LIST_JOBS_RESPONSE["jobs"] * 2
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_job_id_by_name_success(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = LIST_JOBS_RESPONSE
job_id = self.hook.find_job_id_by_name(JOB_NAME)
mock_requests.get.assert_called_once_with(
list_jobs_endpoint(HOST),
json=None,
params={
"limit": 25,
"page_token": "",
"expand_tasks": False,
"include_user_names": False,
"name": JOB_NAME,
},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
assert job_id == JOB_ID
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_job_id_by_name_not_found(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = LIST_JOBS_RESPONSE
job_name = "Non existing job"
job_id = self.hook.find_job_id_by_name(job_name)
mock_requests.get.assert_called_once_with(
list_jobs_endpoint(HOST),
json=None,
params={
"limit": 25,
"page_token": "",
"expand_tasks": False,
"include_user_names": False,
"name": job_name,
},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
assert job_id is None
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_job_id_by_name_raise_exception_with_duplicates(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = {
**LIST_JOBS_RESPONSE,
"jobs": LIST_JOBS_RESPONSE["jobs"] * 2,
}
exception_message = f"There are more than one job with name {JOB_NAME}."
with pytest.raises(AirflowException, match=exception_message):
self.hook.find_job_id_by_name(JOB_NAME)
mock_requests.get.assert_called_once_with(
list_jobs_endpoint(HOST),
json=None,
params={
"limit": 25,
"page_token": "",
"expand_tasks": False,
"include_user_names": False,
"name": JOB_NAME,
},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_pipeline_id_by_name_success(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = LIST_PIPELINES_RESPONSE
pipeline_id = self.hook.find_pipeline_id_by_name(PIPELINE_NAME)
mock_requests.get.assert_called_once_with(
list_pipelines_endpoint(HOST),
json=None,
params={"filter": f"name LIKE '{PIPELINE_NAME}'", "max_results": 25},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
assert pipeline_id == PIPELINE_ID
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_list_pipelines_success_multiple_pages(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.side_effect = [
create_successful_response_mock({**LIST_PIPELINES_RESPONSE, "next_page_token": "PAGETOKEN"}),
create_successful_response_mock(LIST_PIPELINES_RESPONSE),
]
pipelines = self.hook.list_pipelines(pipeline_name=PIPELINE_NAME)
assert mock_requests.get.call_count == 2
first_call_args = mock_requests.method_calls[0]
assert first_call_args[1][0] == list_pipelines_endpoint(HOST)
assert first_call_args[2]["params"] == {"filter": f"name LIKE '{PIPELINE_NAME}'", "max_results": 25}
second_call_args = mock_requests.method_calls[1]
assert second_call_args[1][0] == list_pipelines_endpoint(HOST)
assert second_call_args[2]["params"] == {
"filter": f"name LIKE '{PIPELINE_NAME}'",
"max_results": 25,
"page_token": "PAGETOKEN",
}
assert len(pipelines) == 2
assert pipelines == LIST_PIPELINES_RESPONSE["statuses"] * 2
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_pipeline_id_by_name_not_found(self, mock_requests):
empty_response = {"statuses": []}
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = empty_response
ne_pipeline_name = "Non existing pipeline"
pipeline_id = self.hook.find_pipeline_id_by_name(ne_pipeline_name)
mock_requests.get.assert_called_once_with(
list_pipelines_endpoint(HOST),
json=None,
params={"filter": f"name LIKE '{ne_pipeline_name}'", "max_results": 25},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
assert pipeline_id is None
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_list_pipelines_raise_exception_with_duplicates(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = {
**LIST_PIPELINES_RESPONSE,
"statuses": LIST_PIPELINES_RESPONSE["statuses"] * 2,
}
exception_message = f"There are more than one pipelines with name {PIPELINE_NAME}."
with pytest.raises(AirflowException, match=exception_message):
self.hook.find_pipeline_id_by_name(pipeline_name=PIPELINE_NAME)
mock_requests.get.assert_called_once_with(
list_pipelines_endpoint(HOST),
json=None,
params={"filter": f"name LIKE '{PIPELINE_NAME}'", "max_results": 25},
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_post_sql_statement(self, mock_requests):
mock_requests.post.return_value.json.return_value = {
"statement_id": "01f00ed2-04e2-15bd-a944-a8ae011dac69"
}
json = {
"statement": "select * from test.test;",
"warehouse_id": WAREHOUSE_ID,
"catalog": "",
"schema": "",
"parameters": {},
"wait_timeout": "0s",
}
self.hook.post_sql_statement(json)
mock_requests.post.assert_called_once_with(
sql_statements_endpoint(HOST),
json=json,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_get_sql_statement_state(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = GET_SQL_STATEMENT_RESPONSE
sql_statement_state = self.hook.get_sql_statement_state(STATEMENT_ID)
assert sql_statement_state == SQLStatementState(STATEMENT_STATE)
mock_requests.get.assert_called_once_with(
f"{sql_statements_endpoint(HOST)}/{STATEMENT_ID}",
json=None,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_cancel_sql_statement(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = GET_SQL_STATEMENT_RESPONSE
self.hook.cancel_sql_statement(STATEMENT_ID)
mock_requests.post.assert_called_once_with(
f"{sql_statements_endpoint(HOST)}/{STATEMENT_ID}/cancel",
json=None,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
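# The three SQL-statement tests above cover the submit / poll / cancel lifecycle exposed
# by the hook against the sql/statements endpoint. A hedged polling sketch (return shape
# and state values are assumptions for illustration, not the hook's documented API):
#
#     response = self.hook.post_sql_statement(json)           # submit the statement
#     statement_id = response["statement_id"]                 # assumed response shape
#     while self.hook.get_sql_statement_state(statement_id) == SQLStatementState("RUNNING"):
#         time.sleep(5)                                        # poll until a terminal state
#     # self.hook.cancel_sql_statement(statement_id)           # or stop it early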
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_connection_success(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.get.return_value.json.return_value = LIST_SPARK_VERSIONS_RESPONSE
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.get.return_value).status_code = status_code_mock
response = self.hook.test_connection()
assert response == (True, "Connection successfully tested")
mock_requests.get.assert_called_once_with(
list_spark_versions_endpoint(HOST),
json=None,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_connection_failure(self, mock_requests):
mock_requests.codes.ok = 404
mock_requests.get.side_effect = Exception("Connection Failure")
status_code_mock = mock.PropertyMock(return_value=404)
type(mock_requests.get.return_value).status_code = status_code_mock
response = self.hook.test_connection()
assert response == (False, "Connection Failure")
mock_requests.get.assert_called_once_with(
list_spark_versions_endpoint(HOST),
json=None,
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_update_job_permission(self, mock_requests):
mock_requests.codes.ok = 200
mock_requests.patch.return_value.json.return_value = {}
status_code_mock = mock.PropertyMock(return_value=200)
type(mock_requests.patch.return_value).status_code = status_code_mock
self.hook.update_job_permission(1, ACCESS_CONTROL_DICT)
mock_requests.patch.assert_called_once_with(
f"https://{HOST}/api/2.0/permissions/jobs/1",
json=utils.normalise_json_content(ACCESS_CONTROL_DICT),
params=None,
auth=HTTPBasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.db_test
|
TestDatabricksHook
|
python
|
python-openxml__python-docx
|
tests/oxml/unitdata/section.py
|
{
"start": 483,
"end": 590
}
|
class ____(BaseBuilder):
__tag__ = "w:sectPr"
__nspfxs__ = ("w",)
__attrs__ = ()
|
CT_SectPrBuilder
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dlp.py
|
{
"start": 14798,
"end": 15663
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_get_inspect_template(self, mock_hook):
mock_hook.return_value.get_inspect_template.return_value = InspectTemplate()
operator = CloudDLPGetInspectTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.get_inspect_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
|
TestCloudDLPGetInspectTemplateOperator
|
python
|
langchain-ai__langchain
|
libs/partners/ollama/langchain_ollama/chat_models.py
|
{
"start": 8855,
"end": 62934
}
|
class ____(BaseChatModel):
r"""Ollama chat model integration.
???+ note "Setup"
Install `langchain-ollama` and download any models you want to use from ollama.
```bash
ollama pull gpt-oss:20b
pip install -U langchain-ollama
```
Key init args — completion params:
model: str
Name of Ollama model to use.
reasoning: bool | None
Controls the reasoning/thinking mode for
[supported models](https://ollama.com/search?c=thinking).
- `True`: Enables reasoning mode. The model's reasoning process will be
captured and returned separately in the `additional_kwargs` of the
response message, under `reasoning_content`. The main response
content will not include the reasoning tags.
- `False`: Disables reasoning mode. The model will not perform any reasoning,
and the response will not include any reasoning content.
- `None` (Default): The model will use its default reasoning behavior. Note
however, if the model's default behavior *is* to perform reasoning, think tags
(`<think>` and `</think>`) will be present within the main response content
unless you set `reasoning` to `True`.
temperature: float
Sampling temperature. Ranges from `0.0` to `1.0`.
num_predict: int | None
Max number of tokens to generate.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from langchain_ollama import ChatOllama
model = ChatOllama(
model="gpt-oss:20b",
validate_model_on_init=True,
temperature=0.8,
num_predict=256,
# other params ...
)
```
Invoke:
```python
messages = [
("system", "You are a helpful translator. Translate the user sentence to French."),
("human", "I love programming."),
]
model.invoke(messages)
```
```python
AIMessage(content='J'adore le programmation. (Note: "programming" can also refer to the act of writing code, so if you meant that, I could translate it as "J'adore programmer". But since you didn\'t specify, I assumed you were talking about the activity itself, which is what "le programmation" usually refers to.)', response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:37:50.182604Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 3576619666, 'load_duration': 788524916, 'prompt_eval_count': 32, 'prompt_eval_duration': 128125000, 'eval_count': 71, 'eval_duration': 2656556000}, id='run-ba48f958-6402-41a5-b461-5e250a4ebd36-0')
```
Stream:
```python
for chunk in model.stream("Return the words Hello World!"):
print(chunk.text, end="")
```
```python
content='Hello' id='run-327ff5ad-45c8-49fe-965c-0a93982e9be1'
content=' World' id='run-327ff5ad-45c8-49fe-965c-0a93982e9be1'
content='!' id='run-327ff5ad-45c8-49fe-965c-0a93982e9be1'
content='' response_metadata={'model': 'llama3', 'created_at': '2024-07-04T03:39:42.274449Z', 'message': {'role': 'assistant', 'content': ''}, 'done_reason': 'stop', 'done': True, 'total_duration': 411875125, 'load_duration': 1898166, 'prompt_eval_count': 14, 'prompt_eval_duration': 297320000, 'eval_count': 4, 'eval_duration': 111099000} id='run-327ff5ad-45c8-49fe-965c-0a93982e9be1'
```
```python
stream = model.stream(messages)
full = next(stream)
for chunk in stream:
full += chunk
full
```
```python
AIMessageChunk(
content='Je adore le programmation.(Note: "programmation" is the formal way to say "programming" in French, but informally, people might use the phrase "le développement logiciel" or simply "le code")',
response_metadata={
"model": "llama3",
"created_at": "2024-07-04T03:38:54.933154Z",
"message": {"role": "assistant", "content": ""},
"done_reason": "stop",
"done": True,
"total_duration": 1977300042,
"load_duration": 1345709,
"prompt_eval_duration": 159343000,
"eval_count": 47,
"eval_duration": 1815123000,
},
id="run-3c81a3ed-3e79-4dd3-a796-04064d804890",
)
```
Async:
```python
await model.ainvoke("Hello how are you!")
```
```python
AIMessage(
content="Hi there! I'm just an AI, so I don't have feelings or emotions like humans do. But I'm functioning properly and ready to help with any questions or tasks you may have! How can I assist you today?",
response_metadata={
"model": "llama3",
"created_at": "2024-07-04T03:52:08.165478Z",
"message": {"role": "assistant", "content": ""},
"done_reason": "stop",
"done": True,
"total_duration": 2138492875,
"load_duration": 1364000,
"prompt_eval_count": 10,
"prompt_eval_duration": 297081000,
"eval_count": 47,
"eval_duration": 1838524000,
},
id="run-29c510ae-49a4-4cdd-8f23-b972bfab1c49-0",
)
```
```python
async for chunk in model.astream("Say hello world!"):
print(chunk.content)
```
```python
HEL
LO
WORLD
!
```
```python
messages = [("human", "Say hello world!"), ("human", "Say goodbye world!")]
await model.abatch(messages)
```
```python
[
AIMessage(
content="HELLO, WORLD!",
response_metadata={
"model": "llama3",
"created_at": "2024-07-04T03:55:07.315396Z",
"message": {"role": "assistant", "content": ""},
"done_reason": "stop",
"done": True,
"total_duration": 1696745458,
"load_duration": 1505000,
"prompt_eval_count": 8,
"prompt_eval_duration": 111627000,
"eval_count": 6,
"eval_duration": 185181000,
},
id="run-da6c7562-e25a-4a44-987a-2c83cd8c2686-0",
),
AIMessage(
content="It's been a blast chatting with you! Say goodbye to the world for me, and don't forget to come back and visit us again soon!",
response_metadata={
"model": "llama3",
"created_at": "2024-07-04T03:55:07.018076Z",
"message": {"role": "assistant", "content": ""},
"done_reason": "stop",
"done": True,
"total_duration": 1399391083,
"load_duration": 1187417,
"prompt_eval_count": 20,
"prompt_eval_duration": 230349000,
"eval_count": 31,
"eval_duration": 1166047000,
},
id="run-96cad530-6f3e-4cf9-86b4-e0f8abba4cdb-0",
),
]
```
JSON mode:
```python
json_model = ChatOllama(format="json")
json_model.invoke(
"Return a query for the weather in a random location and time of day with two keys: location and time_of_day. "
"Respond using JSON only."
).content
```
```python
'{"location": "Pune, India", "time_of_day": "morning"}'
```
Tool Calling:
```python
from langchain_ollama import ChatOllama
from pydantic import BaseModel, Field
class Multiply(BaseModel):
a: int = Field(..., description="First integer")
b: int = Field(..., description="Second integer")
model = ChatOllama(model="llama3.1", temperature=0).bind_tools([Multiply])
ans = model.invoke("What is 45*67")
ans.tool_calls
```
```python
[
{
"name": "Multiply",
"args": {"a": 45, "b": 67},
"id": "420c3f3b-df10-4188-945f-eb3abdb40622",
"type": "tool_call",
}
]
```
Thinking / Reasoning:
You can enable reasoning mode for models that support it by setting
the `reasoning` parameter to `True` in either the constructor or
the `invoke`/`stream` methods. This will enable the model to think
through the problem and return the reasoning process separately in the
`additional_kwargs` of the response message, under `reasoning_content`.
If `reasoning` is set to `None`, the model will use its default reasoning
behavior, and any reasoning content will *not* be captured under the
`reasoning_content` key, but will be present within the main response content
as think tags (`<think>` and `</think>`).
!!! note
This feature is only available for [models that support reasoning](https://ollama.com/search?c=thinking).
```python
from langchain_ollama import ChatOllama
model = ChatOllama(
model="deepseek-r1:8b",
validate_model_on_init=True,
reasoning=True,
)
model.invoke("how many r in the word strawberry?")
# or, on an invocation basis:
model.invoke("how many r in the word strawberry?", reasoning=True)
# or model.stream("how many r in the word strawberry?", reasoning=True)
# If not provided, the invocation will default to the ChatOllama reasoning
# param provided (None by default).
```
```python
AIMessage(content='The word "strawberry" contains **three \'r\' letters**. Here\'s a breakdown for clarity:\n\n- The spelling of "strawberry" has two parts ... be 3.\n\nTo be thorough, let\'s confirm with an online source or common knowledge.\n\nI can recall that "strawberry" has: s-t-r-a-w-b-e-r-r-y — yes, three r\'s.\n\nPerhaps it\'s misspelled by some, but standard is correct.\n\nSo I think the response should be 3.\n'}, response_metadata={'model': 'deepseek-r1:8b', 'created_at': '2025-07-08T19:33:55.891269Z', 'done': True, 'done_reason': 'stop', 'total_duration': 98232561292, 'load_duration': 28036792, 'prompt_eval_count': 10, 'prompt_eval_duration': 40171834, 'eval_count': 3615, 'eval_duration': 98163832416, 'model_name': 'deepseek-r1:8b'}, id='run--18f8269f-6a35-4a7c-826d-b89d52c753b3-0', usage_metadata={'input_tokens': 10, 'output_tokens': 3615, 'total_tokens': 3625})
```
""" # noqa: E501, pylint: disable=line-too-long
model: str
"""Model name to use."""
reasoning: bool | str | None = None
"""Controls the reasoning/thinking mode for [supported models](https://ollama.com/search?c=thinking).
- `True`: Enables reasoning mode. The model's reasoning process will be
captured and returned separately in the `additional_kwargs` of the
response message, under `reasoning_content`. The main response
content will not include the reasoning tags.
- `False`: Disables reasoning mode. The model will not perform any reasoning,
and the response will not include any reasoning content.
- `None` (Default): The model will use its default reasoning behavior. Note
however, if the model's default behavior *is* to perform reasoning, think tags
(`<think>` and `</think>`) will be present within the main response content
unless you set `reasoning` to `True`.
- `str`: e.g. `'low'`, `'medium'`, `'high'`. Enables reasoning with a custom
intensity level. Currently, this is only supported by `gpt-oss`. See the
[Ollama docs](https://github.com/ollama/ollama-python/blob/da79e987f0ac0a4986bf396f043b36ef840370bc/ollama/_types.py#L210)
for more information.
"""
validate_model_on_init: bool = False
"""Whether to validate the model exists in Ollama locally on initialization.
!!! version-added "Added in `langchain-ollama` 0.3.4"
"""
mirostat: int | None = None
"""Enable Mirostat sampling for controlling perplexity.
(Default: `0`, `0` = disabled, `1` = Mirostat, `2` = Mirostat 2.0)
"""
mirostat_eta: float | None = None
"""Influences how quickly the algorithm responds to feedback from generated text.
A lower learning rate will result in slower adjustments, while a higher learning
rate will make the algorithm more responsive.
(Default: `0.1`)
"""
mirostat_tau: float | None = None
"""Controls the balance between coherence and diversity of the output.
A lower value will result in more focused and coherent text.
(Default: `5.0`)
"""
num_ctx: int | None = None
"""Sets the size of the context window used to generate the next token.
(Default: `2048`)
"""
num_gpu: int | None = None
"""The number of GPUs to use.
On macOS it defaults to `1` to enable metal support, `0` to disable.
"""
num_thread: int | None = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance. It is recommended to
set this value to the number of physical CPU cores your system has (as opposed to
the logical number of cores).
"""
num_predict: int | None = None
"""Maximum number of tokens to predict when generating text.
(Default: `128`, `-1` = infinite generation, `-2` = fill context)
"""
repeat_last_n: int | None = None
"""Sets how far back for the model to look back to prevent repetition.
(Default: `64`, `0` = disabled, `-1` = `num_ctx`)
"""
repeat_penalty: float | None = None
"""Sets how strongly to penalize repetitions.
A higher value (e.g., `1.5`) will penalize repetitions more strongly, while a
lower value (e.g., `0.9`) will be more lenient. (Default: `1.1`)
"""
temperature: float | None = None
"""The temperature of the model.
Increasing the temperature will make the model answer more creatively.
(Default: `0.8`)
"""
seed: int | None = None
"""Sets the random number seed to use for generation.
Setting this to a specific number will make the model generate the same text for the
same prompt.
"""
stop: list[str] | None = None
"""Sets the stop tokens to use."""
tfs_z: float | None = None
"""Tail free sampling.
Used to reduce the impact of less probable tokens from the output.
A higher value (e.g., `2.0`) will reduce the impact more, while a value of `1.0`
disables this setting.
(Default: `1`)
"""
top_k: int | None = None
"""Reduces the probability of generating nonsense.
A higher value (e.g. `100`) will give more diverse answers, while a lower value
(e.g. `10`) will be more conservative.
(Default: `40`)
"""
top_p: float | None = None
"""Works together with top-k.
A higher value (e.g., `0.95`) will lead to more diverse text, while a lower value
(e.g., `0.5`) will generate more focused and conservative text.
(Default: `0.9`)
"""
format: Literal["", "json"] | JsonSchemaValue | None = None
"""Specify the format of the output (options: `'json'`, JSON schema)."""
keep_alive: int | str | None = None
"""How long the model will stay loaded into memory."""
base_url: str | None = None
"""Base url the model is hosted under.
If `None`, the Ollama client's default is used.
Supports `userinfo` auth in the format `http://username:password@localhost:11434`.
Useful if your Ollama server is behind a proxy.
!!! warning
`userinfo` is not secure and should only be used for local testing or
in secure environments. Avoid using it in production or over unsecured
networks.
!!! note
If using `userinfo`, ensure that the Ollama server is configured to
accept and validate these credentials.
!!! note
`userinfo` headers are passed to both sync and async clients.
"""
client_kwargs: dict | None = {}
"""Additional kwargs to pass to the httpx clients. Pass headers in here.
These arguments are passed to both synchronous and async clients.
Use `sync_client_kwargs` and `async_client_kwargs` to pass different arguments
to synchronous and asynchronous clients.
"""
async_client_kwargs: dict | None = {}
"""Additional kwargs to merge with `client_kwargs` before passing to httpx client.
These kwargs are unique to the async client; for shared args use `client_kwargs`.
For a full list of the params, see the [httpx documentation](https://www.python-httpx.org/api/#asyncclient).
"""
sync_client_kwargs: dict | None = {}
"""Additional kwargs to merge with `client_kwargs` before passing to httpx client.
These kwargs are unique to the sync client; for shared args use `client_kwargs`.
For a full list of the params, see the [httpx documentation](https://www.python-httpx.org/api/#client).
"""
_client: Client = PrivateAttr()
"""The client to use for making requests."""
_async_client: AsyncClient = PrivateAttr()
"""The async client to use for making requests."""
def _chat_params(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
**kwargs: Any,
) -> dict[str, Any]:
"""Assemble the parameters for a chat completion request.
Args:
messages: List of LangChain messages to send to the model.
stop: Optional list of stop tokens to use for this invocation.
**kwargs: Additional keyword arguments to include in the request.
Returns:
A dictionary of parameters to pass to the Ollama client.
"""
ollama_messages = self._convert_messages_to_ollama_messages(messages)
if self.stop is not None and stop is not None:
msg = "`stop` found in both the input and default params."
raise ValueError(msg)
if self.stop is not None:
stop = self.stop
options_dict = kwargs.pop("options", None)
if options_dict is None:
# Only include parameters that are explicitly set (not None)
options_dict = {
k: v
for k, v in {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"num_predict": self.num_predict,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"seed": self.seed,
"stop": self.stop if stop is None else stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
}.items()
if v is not None
}
params = {
"messages": ollama_messages,
"stream": kwargs.pop("stream", True),
"model": kwargs.pop("model", self.model),
"think": kwargs.pop("reasoning", self.reasoning),
"format": kwargs.pop("format", self.format),
"options": options_dict,
"keep_alive": kwargs.pop("keep_alive", self.keep_alive),
**kwargs,
}
if tools := kwargs.get("tools"):
params["tools"] = tools
return params
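# Illustration of the None-filtering above (assumed values, not a recorded trace):
#
#     model = ChatOllama(model="llama3.1", temperature=0.2, num_predict=64)
#     model._chat_params([HumanMessage("hi")])["options"]
#     # -> {"temperature": 0.2, "num_predict": 64}   (unset knobs are omitted)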
@model_validator(mode="after")
def _set_clients(self) -> Self:
"""Set clients to use for ollama."""
client_kwargs = self.client_kwargs or {}
cleaned_url, auth_headers = parse_url_with_auth(self.base_url)
merge_auth_headers(client_kwargs, auth_headers)
sync_client_kwargs = client_kwargs
if self.sync_client_kwargs:
sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs}
async_client_kwargs = client_kwargs
if self.async_client_kwargs:
async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs}
self._client = Client(host=cleaned_url, **sync_client_kwargs)
self._async_client = AsyncClient(host=cleaned_url, **async_client_kwargs)
if self.validate_model_on_init:
validate_model(self._client, self.model)
return self
def _convert_messages_to_ollama_messages(
self, messages: list[BaseMessage]
) -> Sequence[Message]:
"""Convert a BaseMessage list to list of messages for Ollama to consume.
Args:
messages: List of BaseMessage to convert.
Returns:
List of messages in Ollama format.
"""
for idx, message in enumerate(messages):
# Handle message content written in v1 format
if (
isinstance(message, AIMessage)
and message.response_metadata.get("output_version") == "v1"
):
# Unpack known v1 content to Ollama format for the request
# Most types are passed through unchanged
messages[idx] = message.model_copy(
update={
"content": _convert_from_v1_to_ollama(
cast("list[types.ContentBlock]", message.content),
message.response_metadata.get("model_provider"),
)
}
)
ollama_messages: list = []
for message in messages:
role: str
tool_call_id: str | None = None
tool_calls: list[dict[str, Any]] | None = None
if isinstance(message, HumanMessage):
role = "user"
elif isinstance(message, AIMessage):
role = "assistant"
tool_calls = (
[
_lc_tool_call_to_openai_tool_call(tool_call)
for tool_call in message.tool_calls
]
if message.tool_calls
else None
)
elif isinstance(message, SystemMessage):
role = "system"
elif isinstance(message, ChatMessage):
role = message.role
elif isinstance(message, ToolMessage):
role = "tool"
tool_call_id = message.tool_call_id
else:
msg = "Received unsupported message type for Ollama."
raise TypeError(msg)
content = ""
images = []
if isinstance(message.content, str):
content = message.content
else: # List
for content_part in message.content:
if isinstance(content_part, str):
content += f"\n{content_part}"
elif content_part.get("type") == "text":
content += f"\n{content_part['text']}"
elif content_part.get("type") == "tool_use":
continue
elif content_part.get("type") == "image_url":
image_url = None
temp_image_url = content_part.get("image_url")
if isinstance(temp_image_url, str):
image_url = temp_image_url
elif (
isinstance(temp_image_url, dict)
and "url" in temp_image_url
and isinstance(temp_image_url["url"], str)
):
image_url = temp_image_url["url"]
else:
msg = (
"Only string image_url or dict with string 'url' "
"inside content parts are supported."
)
raise ValueError(msg)
image_url_components = image_url.split(",")
# Support data:image/jpeg;base64,<image> format
# and base64 strings
if len(image_url_components) > 1:
images.append(image_url_components[1])
else:
images.append(image_url_components[0])
elif is_data_content_block(content_part):
# Handles v1 "image" type
image = _get_image_from_data_content_block(content_part)
images.append(image)
else:
msg = (
"Unsupported message content type. "
"Must either have type 'text' or type 'image_url' "
"with a string 'image_url' field."
)
raise ValueError(msg)
# Should convert to ollama.Message once role includes tool, and tool_call_id
# is in Message
msg_: dict = {
"role": role,
"content": content,
"images": images,
}
if tool_calls:
msg_["tool_calls"] = tool_calls
if tool_call_id:
msg_["tool_call_id"] = tool_call_id
ollama_messages.append(msg_)
return ollama_messages
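# Shape of the conversion above for one plain message (illustrative):
#
#     self._convert_messages_to_ollama_messages([HumanMessage("hi")])
#     # -> [{"role": "user", "content": "hi", "images": []}]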
async def _acreate_chat_stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[Mapping[str, Any] | str]:
chat_params = self._chat_params(messages, stop, **kwargs)
if chat_params["stream"]:
async for part in await self._async_client.chat(**chat_params):
yield part
else:
yield await self._async_client.chat(**chat_params)
def _create_chat_stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
**kwargs: Any,
) -> Iterator[Mapping[str, Any] | str]:
chat_params = self._chat_params(messages, stop, **kwargs)
if chat_params["stream"]:
if self._client:
yield from self._client.chat(**chat_params)
elif self._client:
yield self._client.chat(**chat_params)
def _chat_stream_with_aggregation(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
verbose: bool = False, # noqa: FBT002
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk = None
for chunk in self._iterate_over_stream(messages, stop, **kwargs):
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
msg = "No data received from Ollama stream."
raise ValueError(msg)
return final_chunk
async def _achat_stream_with_aggregation(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
verbose: bool = False, # noqa: FBT002
**kwargs: Any,
) -> ChatGenerationChunk:
final_chunk = None
async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
msg = "No data received from Ollama stream."
raise ValueError(msg)
return final_chunk
def _get_ls_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="ollama",
ls_model_name=self.model,
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_stop := stop or params.get("stop", None) or self.stop:
ls_params["ls_stop"] = ls_stop
return ls_params
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
final_chunk = self._chat_stream_with_aggregation(
messages, stop, run_manager, verbose=self.verbose, **kwargs
)
generation_info = final_chunk.generation_info
chat_generation = ChatGeneration(
message=AIMessage(
content=final_chunk.text,
usage_metadata=cast(
"AIMessageChunk", final_chunk.message
).usage_metadata,
tool_calls=cast("AIMessageChunk", final_chunk.message).tool_calls,
additional_kwargs=final_chunk.message.additional_kwargs,
),
generation_info=generation_info,
)
return ChatResult(generations=[chat_generation])
def _iterate_over_stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
reasoning = kwargs.get("reasoning", self.reasoning)
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
if not isinstance(stream_resp, str):
content = (
stream_resp["message"]["content"]
if "message" in stream_resp and "content" in stream_resp["message"]
else ""
)
# Warn and skip responses with done_reason: 'load' and empty content
# These indicate the model was loaded but no actual generation occurred
is_load_response_with_empty_content = (
stream_resp.get("done") is True
and stream_resp.get("done_reason") == "load"
and not content.strip()
)
if is_load_response_with_empty_content:
log.warning(
"Ollama returned empty response with done_reason='load'."
"This typically indicates the model was loaded but no content "
"was generated. Skipping this response."
)
continue
if stream_resp.get("done") is True:
generation_info = dict(stream_resp)
if "model" in generation_info:
generation_info["model_name"] = generation_info["model"]
generation_info["model_provider"] = "ollama"
_ = generation_info.pop("message", None)
else:
generation_info = None
additional_kwargs = {}
if (
reasoning
and "message" in stream_resp
and (thinking_content := stream_resp["message"].get("thinking"))
):
additional_kwargs["reasoning_content"] = thinking_content
chunk = ChatGenerationChunk(
message=AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
usage_metadata=_get_usage_metadata_from_generation_info(
stream_resp
),
tool_calls=_get_tool_calls_from_response(stream_resp),
),
generation_info=generation_info,
)
yield chunk
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
for chunk in self._iterate_over_stream(messages, stop, **kwargs):
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
async def _aiterate_over_stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
reasoning = kwargs.get("reasoning", self.reasoning)
async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
if not isinstance(stream_resp, str):
content = (
stream_resp["message"]["content"]
if "message" in stream_resp and "content" in stream_resp["message"]
else ""
)
# Warn and skip responses with done_reason: 'load' and empty content
# These indicate the model was loaded but no actual generation occurred
is_load_response_with_empty_content = (
stream_resp.get("done") is True
and stream_resp.get("done_reason") == "load"
and not content.strip()
)
if is_load_response_with_empty_content:
log.warning(
"Ollama returned empty response with done_reason='load'. "
"This typically indicates the model was loaded but no content "
"was generated. Skipping this response."
)
continue
if stream_resp.get("done") is True:
generation_info = dict(stream_resp)
if "model" in generation_info:
generation_info["model_name"] = generation_info["model"]
generation_info["model_provider"] = "ollama"
_ = generation_info.pop("message", None)
else:
generation_info = None
additional_kwargs = {}
if (
reasoning
and "message" in stream_resp
and (thinking_content := stream_resp["message"].get("thinking"))
):
additional_kwargs["reasoning_content"] = thinking_content
chunk = ChatGenerationChunk(
message=AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
usage_metadata=_get_usage_metadata_from_generation_info(
stream_resp
),
tool_calls=_get_tool_calls_from_response(stream_resp),
),
generation_info=generation_info,
)
yield chunk
async def _astream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
async for chunk in self._aiterate_over_stream(messages, stop, **kwargs):
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
final_chunk = await self._achat_stream_with_aggregation(
messages, stop, run_manager, verbose=self.verbose, **kwargs
)
generation_info = final_chunk.generation_info
chat_generation = ChatGeneration(
message=AIMessage(
content=final_chunk.text,
usage_metadata=cast(
"AIMessageChunk", final_chunk.message
).usage_metadata,
tool_calls=cast("AIMessageChunk", final_chunk.message).tool_calls,
additional_kwargs=final_chunk.message.additional_kwargs,
),
generation_info=generation_info,
)
return ChatResult(generations=[chat_generation])
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "chat-ollama"
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
*,
tool_choice: dict | str | Literal["auto", "any"] | bool | None = None, # noqa: PYI051, ARG002
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with OpenAI tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports any tool definition handled by
`langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: If provided, which tool for model to call. **This parameter
is currently ignored as it is not supported by Ollama.**
kwargs: Any additional parameters are passed directly to
`self.bind(**kwargs)`.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: dict | type,
*,
method: Literal["function_calling", "json_mode", "json_schema"] = "json_schema",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
r"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema.
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
method: The method for steering model generation, one of:
- `'json_schema'`:
Uses Ollama's [structured output API](https://ollama.com/blog/structured-outputs)
- `'function_calling'`:
Uses Ollama's tool-calling API
- `'json_mode'`:
Specifies `format='json'`. Note that if using JSON mode then you
must include instructions for formatting the output into the
desired schema in the model call.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
kwargs: Additional keyword args aren't supported.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
!!! warning "Behavior changed in `langchain-ollama` 0.2.2"
Added support for structured output API via `format` parameter.
!!! warning "Behavior changed in `langchain-ollama` 0.3.0"
Updated default `method` to `'json_schema'`.
??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=False`"
```python
from typing import Optional
from langchain_ollama import ChatOllama
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=...,
description="A justification for the answer.",
)
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
??? note "Example: `schema=Pydantic` class, `method='json_schema'`, `include_raw=True`"
```python
from langchain_ollama import ChatOllama
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification,
include_raw=True,
)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
??? note "Example: `schema=Pydantic` class, `method='function_calling'`, `include_raw=False`"
```python
from typing import Optional
from langchain_ollama import ChatOllama
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str | None = Field(
default=...,
description="A justification for the answer.",
)
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification,
method="function_calling",
)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
??? note "Example: `schema=TypedDict` class, `method='function_calling'`, `include_raw=False`"
```python
from typing_extensions import Annotated, TypedDict
from langchain_ollama import ChatOllama
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[str | None, None, "A justification for the answer."]
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
??? note "Example: `schema=OpenAI` function schema, `method='function_calling'`, `include_raw=False`"
```python
from langchain_ollama import ChatOllama
oai_schema = {
'name': 'AnswerWithJustification',
'description': 'An answer to the user question along with justification for the answer.',
'parameters': {
'type': 'object',
'properties': {
'answer': {'type': 'string'},
'justification': {'description': 'A justification for the answer.', 'type': 'string'}
},
'required': ['answer']
}
}
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(oai_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
??? note "Example: `schema=Pydantic` class, `method='json_mode'`, `include_raw=True`"
```python
from langchain_ollama import ChatOllama
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
model = ChatOllama(model="llama3.1", temperature=0)
structured_model = model.with_structured_output(
AnswerWithJustification, method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{\\n "answer": "They are both the same weight.",\\n "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
# 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
# 'parsing_error': None
# }
```
""" # noqa: E501
_ = kwargs.pop("strict", None)
if kwargs:
msg = f"Received unsupported arguments {kwargs}"
raise ValueError(msg)
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
msg = (
"schema must be specified when method is not 'json_mode'. "
"Received None."
)
raise ValueError(msg)
formatted_tool = convert_to_openai_tool(schema)
tool_name = formatted_tool["function"]["name"]
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
ls_structured_output_format={
"kwargs": {"method": method},
"schema": formatted_tool,
},
)
if is_pydantic_schema:
output_parser: Runnable = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True,
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_mode":
llm = self.bind(
format="json",
ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
)
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
elif method == "json_schema":
if schema is None:
msg = (
"schema must be specified when method is not 'json_mode'. "
"Received None."
)
raise ValueError(msg)
if is_pydantic_schema:
schema = cast("TypeBaseModel", schema)
if issubclass(schema, BaseModelV1):
response_format = schema.schema()
else:
response_format = schema.model_json_schema()
llm = self.bind(
format=response_format,
ls_structured_output_format={
"kwargs": {"method": method},
"schema": schema,
},
)
output_parser = PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
else:
if is_typeddict(schema):
response_format = convert_to_json_schema(schema)
if "required" not in response_format:
response_format["required"] = list(
response_format["properties"].keys()
)
else:
# is JSON schema
response_format = cast("dict", schema)
llm = self.bind(
format=response_format,
ls_structured_output_format={
"kwargs": {"method": method},
"schema": response_format,
},
)
output_parser = JsonOutputParser()
else:
msg = (
f"Unrecognized method argument. Expected one of 'function_calling', "
f"'json_schema', or 'json_mode'. Received: '{method}'"
)
raise ValueError(msg)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
|
ChatOllama
|
python
|
numpy__numpy
|
numpy/_utils/_pep440.py
|
{
"start": 4294,
"end": 7546
}
|
class ____(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return f"<LegacyVersion({str(self)!r})>"
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version setuptools prior to
# its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
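# Quick sanity checks of the legacy ordering (illustrative, not part of the module):
#
#     _legacy_cmpkey("1.0.dev1") < _legacy_cmpkey("1.0")    # dev releases sort before finals
#     _legacy_cmpkey("1.0") < _legacy_cmpkey("1.0.post1")   # post releases sort after finals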
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
|
LegacyVersion
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py
|
{
"start": 40076,
"end": 55680
}
|
class ____(Qwen2_5OmniPreTrainedModelForConditionalGeneration):
def get_llm_pos_ids_for_vision(
self,
start_idx: int,
vision_idx: int,
spatial_merge_size: int,
t_index: list[torch.Tensor],
grid_hs: list[torch.Tensor],
grid_ws: list[torch.Tensor],
):
llm_pos_ids_list = []
llm_grid_h = grid_hs[vision_idx] // spatial_merge_size
llm_grid_w = grid_ws[vision_idx] // spatial_merge_size
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten().float()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten().float()
t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().float()
_llm_pos_ids = torch.stack([t_index, h_index, w_index])
llm_pos_ids_list.append(_llm_pos_ids + start_idx)
llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1)
return llm_pos_ids
def get_rope_index(
self,
input_ids: Optional[torch.LongTensor] = None,
image_grid_thw: Optional[torch.LongTensor] = None,
video_grid_thw: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
use_audio_in_video: bool = False,
audio_seqlens: Optional[torch.LongTensor] = None,
second_per_grids: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
"""
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Temporal (Time): 3 patches, representing different segments of the video in time.
Height: 2 patches, dividing each frame vertically.
Width: 2 patches, dividing each frame horizontally.
We also have some important parameters:
fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [101, 102, 103, 104, 105]
text height position_ids: [101, 102, 103, 104, 105]
text width position_ids: [101, 102, 103, 104, 105]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
use_audio_in_video (`bool`, *optional*):
If set to `True`, use the audio in video.
audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*):
The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
"""
spatial_merge_size = self.spatial_merge_size
image_token_id = self.config.image_token_id
video_token_id = self.config.video_token_id
audio_token_id = self.config.audio_token_id
vision_start_token_id = self.config.vision_start_token_id
audio_start_token_id = self.config.audio_start_token_id
position_id_per_seconds = self.config.position_id_per_seconds
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is not None:
attention_mask = attention_mask == 1
position_ids = torch.zeros(
3,
input_ids.shape[0],
input_ids.shape[1],
dtype=torch.float,
device=input_ids.device,
)
image_idx, video_idx, audio_idx = 0, 0, 0
for i, input_ids in enumerate(total_input_ids):
if attention_mask is not None:
input_ids = input_ids[attention_mask[i]]
image_nums, video_nums, audio_nums = 0, 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
vision_tokens = input_ids[vision_start_indices + 1]
audio_nums = torch.sum(input_ids == audio_start_token_id)
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (
(vision_tokens == audio_start_token_id).sum()
if use_audio_in_video
else (vision_tokens == video_token_id).sum()
)
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums
multimodal_nums = (
image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums
)
for _ in range(multimodal_nums):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
if (image_token_id in input_tokens or video_token_id in input_tokens) and (
remain_videos > 0 or remain_images > 0
):
ed_vision_start = input_tokens.index(vision_start_token_id, st)
else:
ed_vision_start = len(input_tokens) + 1
if audio_token_id in input_tokens and remain_audios > 0:
ed_audio_start = input_tokens.index(audio_start_token_id, st)
else:
ed_audio_start = len(input_tokens) + 1
min_ed = min(ed_vision_start, ed_audio_start)
text_len = min_ed - st
if text_len != 0:
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
st_idx += text_len
# Audio in Video
if min_ed == ed_vision_start and ed_vision_start + 1 == ed_audio_start:
bos_len, eos_len = 2, 2
else:
bos_len, eos_len = 1, 1
llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
st_idx += bos_len
# Audio Only
if min_ed == ed_audio_start:
audio_len = _get_feat_extract_output_lengths(audio_seqlens[audio_idx])
llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
llm_pos_ids_list.append(llm_pos_ids)
st += int(text_len + bos_len + audio_len + eos_len)
audio_idx += 1
remain_audios -= 1
# Image Only
elif min_ed == ed_vision_start and input_ids[ed_vision_start + 1] == image_token_id:
grid_t = image_grid_thw[image_idx][0]
grid_hs = image_grid_thw[:, 1]
grid_ws = image_grid_thw[:, 2]
t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).float()
llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2)
llm_pos_ids_list.append(llm_pos_ids)
st += int(text_len + bos_len + image_len + eos_len)
image_idx += 1
remain_images -= 1
# Video Only
elif min_ed == ed_vision_start and input_ids[ed_vision_start + 1] == video_token_id:
grid_t = video_grid_thw[video_idx][0]
grid_hs = video_grid_thw[:, 1]
grid_ws = video_grid_thw[:, 2]
t_index = (
torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
).float()
llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
llm_pos_ids_list.append(llm_pos_ids)
st += int(text_len + bos_len + video_len + eos_len)
video_idx += 1
remain_videos -= 1
# Audio in Video
elif min_ed == ed_vision_start and ed_vision_start + 1 == ed_audio_start:
audio_len = _get_feat_extract_output_lengths(audio_seqlens[audio_idx])
audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
grid_t = video_grid_thw[video_idx][0]
grid_hs = video_grid_thw[:, 1]
grid_ws = video_grid_thw[:, 2]
t_index = (
torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
).float()
video_llm_pos_ids = self.get_llm_pos_ids_for_vision(
st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
)
video_data_index, audio_data_index = 0, 0
while (
video_data_index < video_llm_pos_ids.shape[-1]
and audio_data_index < audio_llm_pos_ids.shape[-1]
):
if video_llm_pos_ids[0][video_data_index] <= audio_llm_pos_ids[0][audio_data_index]:
llm_pos_ids_list.append(video_llm_pos_ids[:, video_data_index : video_data_index + 1])
video_data_index += 1
else:
llm_pos_ids_list.append(audio_llm_pos_ids[:, audio_data_index : audio_data_index + 1])
audio_data_index += 1
if video_data_index < video_llm_pos_ids.shape[-1]:
llm_pos_ids_list.append(
video_llm_pos_ids[:, video_data_index : video_llm_pos_ids.shape[-1]]
)
if audio_data_index < audio_llm_pos_ids.shape[-1]:
llm_pos_ids_list.append(
audio_llm_pos_ids[:, audio_data_index : audio_llm_pos_ids.shape[-1]]
)
video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
st += int(text_len + bos_len + audio_len + video_len + eos_len)
audio_idx += 1
video_idx += 1
remain_videos -= 1
remain_audios -= 1
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat([item.float() for item in llm_pos_ids_list], dim=1).reshape(3, -1)
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids))
mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
return position_ids, mrope_position_deltas
else:
position_ids = attention_mask.float().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True)
return position_ids, mrope_position_deltas
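# --- Minimal standalone sketch (not part of the method above) ---
# Illustrates only the text-only fallback branch, where position ids are derived
# purely from the attention mask; the mask values below are arbitrary examples.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.float().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)  # (3, batch, seq_len)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - attention_mask.sum(dim=-1, keepdim=True)
print(position_ids.shape, mrope_position_deltas.flatten().tolist())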
|
Qwen3OmniMoePreTrainedModelForConditionalGeneration
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-days-in-a-month.py
|
{
"start": 29,
"end": 316
}
|
class ____(object):
def numberOfDays(self, Y, M):
"""
:type Y: int
:type M: int
:rtype: int
"""
leap = 1 if ((Y % 4 == 0) and (Y % 100 != 0)) or (Y % 400 == 0) else 0
return (28+leap if (M == 2) else 31-(M-1)%7%2)
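# --- Hypothetical verification harness (not part of the original solution) ---
# Assumes the class above is named `Solution`, per this record's label, and
# cross-checks the closed-form month-length trick against the standard library.
import calendar

_sol = Solution()
for _year in (1900, 2000, 2019, 2020):
    for _month in range(1, 13):
        assert _sol.numberOfDays(_year, _month) == calendar.monthrange(_year, _month)[1]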
|
Solution
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/timeseries.py
|
{
"start": 5070,
"end": 5388
}
|
class ____:
# GH 7754
def setup(self):
rng3 = date_range(
start="2000-01-01 00:00:00", end="2000-01-01 10:00:00", freq="555000us"
)
self.dt_ts = Series(5, rng3, dtype="datetime64[ns]")
def time_resample(self):
self.dt_ts.resample("1s").last()
|
ResampleDatetetime64
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 115471,
"end": 116412
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"pull_request_id",
"commit_headline",
"commit_body",
"expected_head_oid",
"merge_method",
"author_email",
"client_mutation_id",
)
pull_request_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="pullRequestId"
)
commit_headline = sgqlc.types.Field(String, graphql_name="commitHeadline")
commit_body = sgqlc.types.Field(String, graphql_name="commitBody")
expected_head_oid = sgqlc.types.Field(GitObjectID, graphql_name="expectedHeadOid")
merge_method = sgqlc.types.Field(PullRequestMergeMethod, graphql_name="mergeMethod")
author_email = sgqlc.types.Field(String, graphql_name="authorEmail")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
MergePullRequestInput
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/queues.py
|
{
"start": 395,
"end": 5186
}
|
class ____(NonStrictDataModel):
"""
:param queue: ID of the queue
:type queue: str
:param dates: List of timestamps (in seconds from epoch) in ascending order.
The timestamps are separated by the requested interval. Timestamps where no
queue status change was recorded are omitted.
:type dates: Sequence[int]
:param avg_waiting_times: List of average waiting times for tasks in the queue.
The points correspond to the timestamps in the dates list. If more than one
value exists for the given interval then the maximum value is taken.
:type avg_waiting_times: Sequence[float]
:param queue_lengths: List of tasks counts in the queue. The points correspond
to the timestamps in the dates list. If more than one value exists for the
given interval then the count that corresponds to the maximum average value is
taken.
:type queue_lengths: Sequence[int]
"""
_schema = {
"properties": {
"avg_waiting_times": {
"description": "List of average waiting times for tasks in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the maximum value is taken.",
"items": {"type": "number"},
"type": ["array", "null"],
},
"dates": {
"description": "List of timestamps (in seconds from epoch) in the acceding order. The timestamps are separated by the requested interval. Timestamps where no queue status change was recorded are omitted.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"queue": {"description": "ID of the queue", "type": ["string", "null"]},
"queue_lengths": {
"description": "List of tasks counts in the queue. The points correspond to the timestamps in the dates list. If more than one value exists for the given interval then the count that corresponds to the maximum average value is taken.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
queue: Optional[str] = None,
dates: Optional[List[int]] = None,
avg_waiting_times: Optional[List[float]] = None,
queue_lengths: Optional[List[int]] = None,
**kwargs: Any
) -> None:
super(QueueMetrics, self).__init__(**kwargs)
self.queue = queue
self.dates = dates
self.avg_waiting_times = avg_waiting_times
self.queue_lengths = queue_lengths
@schema_property("queue")
def queue(self) -> Optional[str]:
return self._property_queue
@queue.setter
def queue(self, value: Optional[str]) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("dates")
def dates(self) -> Optional[List[int]]:
return self._property_dates
@dates.setter
def dates(self, value: Optional[List[int]]) -> None:
if value is None:
self._property_dates = None
return
self.assert_isinstance(value, "dates", (list, tuple))
value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value]
self.assert_isinstance(value, "dates", six.integer_types, is_array=True)
self._property_dates = value
@schema_property("avg_waiting_times")
def avg_waiting_times(self) -> Optional[List[float]]:
return self._property_avg_waiting_times
@avg_waiting_times.setter
def avg_waiting_times(self, value: Optional[List[float]]) -> None:
if value is None:
self._property_avg_waiting_times = None
return
self.assert_isinstance(value, "avg_waiting_times", (list, tuple))
self.assert_isinstance(value, "avg_waiting_times", six.integer_types + (float,), is_array=True)
self._property_avg_waiting_times = value
@schema_property("queue_lengths")
def queue_lengths(self) -> Optional[List[int]]:
return self._property_queue_lengths
@queue_lengths.setter
def queue_lengths(self, value: Optional[List[int]]) -> None:
if value is None:
self._property_queue_lengths = None
return
self.assert_isinstance(value, "queue_lengths", (list, tuple))
value = [int(v) if isinstance(v, float) and v.is_integer() else v for v in value]
self.assert_isinstance(value, "queue_lengths", six.integer_types, is_array=True)
self._property_queue_lengths = value
|
QueueMetrics
|
python
|
kamyu104__LeetCode-Solutions
|
Python/recover-the-original-array.py
|
{
"start": 52,
"end": 856
}
|
class ____(object):
def recoverArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def check(k, cnt, result):
for x in nums:
if cnt[x] == 0:
continue
if cnt[x+2*k] == 0:
return False
cnt[x] -= 1
cnt[x+2*k] -= 1
result.append(x+k)
return True
nums.sort()
cnt = collections.Counter(nums)
for i in xrange(1, len(nums)//2+1):
k = nums[i]-nums[0]
if k == 0 or k%2:
continue
k //= 2
result = []
if check(k, collections.Counter(cnt), result):
return result
return []
|
Solution
|
python
|
python-poetry__poetry
|
src/poetry/console/commands/check.py
|
{
"start": 303,
"end": 7765
}
|
class ____(Command):
name = "check"
description = (
"Validates the content of the <comment>pyproject.toml</> file and its"
" consistency with the poetry.lock file."
)
options: ClassVar[list[Option]] = [
option(
"lock",
None,
"Checks that <comment>poetry.lock</> exists for the current"
" version of <comment>pyproject.toml</>.",
),
option(
"strict",
None,
"Fail if check reports warnings.",
),
]
def _validate_classifiers(
self, project_classifiers: set[str]
) -> tuple[list[str], list[str]]:
"""Identify unrecognized and deprecated trove classifiers.
A fully-qualified classifier is a string delimited by `` :: `` separators. To
make the error message more readable we need to have visual clues to
materialize the start and end of a classifier string. That way the user can
easily copy and paste it from the messages while reducing mistakes because of
extra spaces.
We use ``!r`` (``repr()``) for classifiers and list of classifiers for
consistency. That way all strings will be rendered with the same kind of quotes
(i.e. simple tick: ``'``).
"""
from trove_classifiers import classifiers
from trove_classifiers import deprecated_classifiers
errors = []
warnings = []
unrecognized = sorted(
project_classifiers - set(classifiers) - set(deprecated_classifiers)
)
# Allow "Private ::" classifiers as recommended on PyPI and the packaging guide
# to allow users to avoid accidentally publishing private packages to PyPI.
# https://pypi.org/classifiers/
unrecognized = [u for u in unrecognized if not u.startswith("Private ::")]
if unrecognized:
errors.append(f"Unrecognized classifiers: {unrecognized!r}.")
deprecated = sorted(
project_classifiers.intersection(set(deprecated_classifiers))
)
if deprecated:
for old_classifier in deprecated:
new_classifiers = deprecated_classifiers[old_classifier]
if new_classifiers:
message = (
f"Deprecated classifier {old_classifier!r}. "
f"Must be replaced by {new_classifiers!r}."
)
else:
message = (
f"Deprecated classifier {old_classifier!r}. Must be removed."
)
warnings.append(message)
return errors, warnings
def _validate_readme(self, readme: str | list[str], poetry_file: Path) -> list[str]:
"""Check existence of referenced readme files"""
readmes = [readme] if isinstance(readme, str) else readme
errors = []
for name in readmes:
if not name:
errors.append("Declared README file is an empty string.")
elif not (poetry_file.parent / name).exists():
errors.append(f"Declared README file does not exist: {name}")
return errors
def _validate_dependencies_source(self, config: dict[str, Any]) -> list[str]:
"""Check dependencies's source are valid"""
sources = {repository.name for repository in self.poetry.pool.all_repositories}
dependency_declarations: list[
dict[str, str | dict[str, str] | list[dict[str, str]]]
] = []
# scan dependencies and group dependencies settings in pyproject.toml
if "dependencies" in config:
dependency_declarations.append(config["dependencies"])
for group in config.get("group", {}).values():
if "dependencies" in group:
dependency_declarations.append(group["dependencies"])
all_referenced_sources: set[str] = set()
for dependency_declaration in dependency_declarations:
for declaration in dependency_declaration.values():
if isinstance(declaration, list):
for item in declaration:
if "source" in item:
all_referenced_sources.add(item["source"])
elif isinstance(declaration, dict) and "source" in declaration:
all_referenced_sources.add(declaration["source"])
return [
f'Invalid source "{source}" referenced in dependencies.'
for source in sorted(all_referenced_sources - sources)
]
def handle(self) -> int:
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.factory import Factory
# Load poetry config and display errors, if any
poetry_file = self.poetry.file.path
toml_data = PyProjectTOML(poetry_file).data
check_result = Factory.validate(toml_data, strict=True)
project = toml_data.get("project", {})
poetry_config = toml_data["tool"]["poetry"]
# Validate trove classifiers
project_classifiers = set(
project.get("classifiers") or poetry_config.get("classifiers", [])
)
errors, warnings = self._validate_classifiers(project_classifiers)
check_result["errors"].extend(errors)
check_result["warnings"].extend(warnings)
readme_errors = []
# Check poetry readme
if "readme" in poetry_config:
readme_errors += self._validate_readme(poetry_config["readme"], poetry_file)
project_readme = project.get("readme")
if project_readme is not None:
if isinstance(project_readme, dict):
readme_path = project_readme.get("file")
if readme_path is not None:
readme_errors += self._validate_readme(readme_path, poetry_file)
elif isinstance(project_readme, str):
readme_errors += self._validate_readme(project_readme, poetry_file)
else:
# should not happen due to prior schema validation, but just in case
readme_errors.append(
f"Invalid format for [project.readme]: {project_readme!r}"
)
check_result["errors"].extend(readme_errors)
# Validate dependencies' sources
check_result["errors"] += self._validate_dependencies_source(poetry_config)
# Verify that lock file is consistent
if self.option("lock") and not self.poetry.locker.is_locked():
check_result["errors"] += ["poetry.lock was not found."]
if self.poetry.locker.is_locked() and not self.poetry.locker.is_fresh():
check_result["errors"] += [
"pyproject.toml changed significantly since poetry.lock was last generated. "
"Run `poetry lock` to fix the lock file."
]
return_code = 0
if check_result["errors"] or (
check_result["warnings"] and self.option("strict")
):
return_code = 1
if not check_result["errors"] and not check_result["warnings"]:
self.info("All set!")
for error in check_result["errors"]:
self.line_error(f"<error>Error: {error}</error>")
for error in check_result["warnings"]:
self.line_error(f"<warning>Warning: {error}</warning>")
return return_code
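# --- Standalone sketch of the readme-existence check performed above ---
# Simplified restatement of `_validate_readme` for illustration; the real command
# accumulates these messages into `check_result["errors"]`. Names here are hypothetical.
from pathlib import Path

def _validate_readme_sketch(readme, pyproject_path: Path) -> list[str]:
    readmes = [readme] if isinstance(readme, str) else readme
    errors = []
    for name in readmes:
        if not name:
            errors.append("Declared README file is an empty string.")
        elif not (pyproject_path.parent / name).exists():
            errors.append(f"Declared README file does not exist: {name}")
    return errors

print(_validate_readme_sketch(["README.md", ""], Path("pyproject.toml")))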
|
CheckCommand
|
python
|
pytorch__pytorch
|
test/mobile/model_test/math_ops.py
|
{
"start": 14384,
"end": 16529
}
|
class ____(torch.nn.Module):
def forward(self):
return self.blas_lapack_ops()
def blas_lapack_ops(self):
m = torch.randn(3, 3)
a = torch.randn(10, 3, 4)
b = torch.randn(10, 4, 3)
v = torch.randn(3)
return len(
torch.addbmm(m, a, b),
torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)),
torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
torch.addr(torch.zeros(3, 3), v, v),
torch.baddbmm(m, a, b),
torch.bmm(a, b),
torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),
# torch.cholesky(a), # deprecated
# torch.cholesky_inverse(torch.randn(3, 3)), # had some error
# torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
torch.dot(v, v),
# torch.linalg.eig(m), # not build with lapack
# torch.geqrf(a),
torch.ger(v, v),
torch.inner(m, m),
# torch.inverse(m),
# torch.det(m),
# torch.logdet(m),
# torch.slogdet(m),
# torch.lstsq(m, m),
# torch.linalg.lu_factor(m),
# torch.lu_solve(m, *torch.linalg.lu_factor(m)),
# torch.lu_unpack(*torch.linalg.lu_factor(m)),
torch.matmul(m, m),
torch.matrix_power(m, 2),
# torch.matrix_rank(m),
torch.matrix_exp(m),
torch.mm(m, m),
torch.mv(m, v),
# torch.orgqr(a, m),
# torch.ormqr(a, m, v),
torch.outer(v, v),
# torch.pinverse(m),
# torch.qr(a),
# torch.solve(m, m),
# torch.svd(a),
# torch.svd_lowrank(a),
# torch.pca_lowrank(a),
# torch.symeig(a), # deprecated
# torch.lobpcg(a, b), # not supported
torch.trapz(m, m),
torch.trapezoid(m, m),
torch.cumulative_trapezoid(m, m),
# torch.triangular_solve(m, m),
torch.vdot(v, v),
)
|
BlasLapackOpsModule
|
python
|
getsentry__sentry
|
tests/sentry/grouping/__init__.py
|
{
"start": 2886,
"end": 12248
}
|
class ____:
def __init__(self, inputs_dir: str, filename: str):
self.filename = filename # Necessary for test naming
with open(path.join(inputs_dir, self.filename)) as f:
self.data = json.load(f)
def _manually_save_event(
self, grouping_config: GroupingConfig, fingerprinting_config: FingerprintingConfig
) -> Event:
"""
Manually complete the steps to save an event, in such a way as to not touch postgres (which
makes it run a lot faster).
"""
mgr = EventManager(data=self.data, grouping_config=grouping_config)
mgr.normalize()
data = mgr.get_data()
# Before creating the event, manually run the parts of `EventManager.save` which are
# necessary for grouping.
normalize_stacktraces_for_grouping(data, load_grouping_config(grouping_config))
data.setdefault("fingerprint", ["{{ default }}"])
apply_server_side_fingerprinting(data, fingerprinting_config)
fingerprint_info = data.get("_fingerprint_info", {})
custom_title_template = get_path(fingerprint_info, "matched_rule", "attributes", "title")
# Technically handling custom titles happens during grouping, not before it, but we're not
# running grouping until later, and the title needs to be set before we get metadata below.
if custom_title_template:
resolved_title = expand_title_template(custom_title_template, data)
data["title"] = resolved_title
event_type = get_event_type(data)
event_metadata = event_type.get_metadata(data)
data.update(materialize_metadata(data, event_type, event_metadata))
event = eventstore.backend.create_event(project_id=1, data=data)
# Assigning the project after the fact also populates the cache, so that calls to
# `event.project` don't fail. (If the `event.project` getter can't pull the project from the
# cache it'll look in the database, but since this isn't a real project, that errors out.)
event.project = mock.Mock(id=11211231)
return event
def _save_event_with_pipeline(
self,
grouping_config: GroupingConfig,
fingerprinting_config: FingerprintingConfig,
project: Project,
) -> Event:
with (
mock.patch(
"sentry.grouping.ingest.hashing.get_grouping_config_dict_for_project",
return_value=grouping_config,
),
mock.patch(
"sentry.grouping.ingest.hashing.get_fingerprinting_config_for_project",
return_value=fingerprinting_config,
),
):
return save_new_event(self.data, project)
def create_event(
self,
config_name: str,
use_full_ingest_pipeline: bool = True,
project: Project | None = None,
) -> Event:
grouping_config = get_default_grouping_config_dict(config_name)
# Add in any extra grouping configuration from the input data
grouping_config["enhancements"] = EnhancementsConfig.from_rules_text(
self.data.get("_grouping", {}).get("enhancements", ""),
bases=EnhancementsConfig.from_base64_string(grouping_config["enhancements"]).bases,
).base64_string
fingerprinting_config = FingerprintingConfig.from_json(
{"rules": self.data.get("_fingerprinting_rules", [])},
bases=GROUPING_CONFIG_CLASSES[config_name].fingerprinting_bases,
)
if use_full_ingest_pipeline:
assert project, "'project' is required to use full pipeline"
event = self._save_event_with_pipeline(grouping_config, fingerprinting_config, project)
else:
event = self._manually_save_event(grouping_config, fingerprinting_config)
return event
def get_grouping_inputs(inputs_dir: str) -> list[GroupingInput]:
return [
GroupingInput(inputs_dir, filename)
for filename in sorted(os.listdir(inputs_dir))
if filename.endswith(".json")
]
def with_grouping_inputs(test_param_name: str, inputs_dir: str) -> pytest.MarkDecorator:
grouping_inputs = get_grouping_inputs(inputs_dir)
return pytest.mark.parametrize(
test_param_name,
grouping_inputs,
ids=lambda grouping_input: grouping_input.filename.replace("-", "_").replace(".json", ""),
)
def with_grouping_configs(config_ids: Iterable[str]) -> pytest.MarkDecorator:
if not config_ids:
return pytest.mark.skip("no configs to test")
return pytest.mark.parametrize(
"config_name", sorted(config_ids), ids=lambda config_name: config_name.replace("-", "_")
)
def get_grouping_input_snapshotter(
insta_snapshot: InstaSnapshotter,
folder_name: str,
test_name: str,
config_name: str,
grouping_input_file: str,
) -> InstaSnapshotter:
"""Create a snapshot function with the output path baked in."""
snapshot_path = path.join(
SNAPSHOTS_DIR,
folder_name,
test_name,
# Windows paths contain colons, so we have to swap out the colons in our config names
config_name.replace(":", "@"),
grouping_input_file.replace(".json", ".pysnap"),
)
# Convert from JSON to Python file formatting
snapshot_path = snapshot_path.replace("-", "_")
snapshot_function = functools.partial(insta_snapshot, reference_file=snapshot_path)
return snapshot_function
def run_as_grouping_inputs_snapshot_test(test_func: Callable[..., None]) -> Callable[..., None]:
"""
Decorator which causes a test to be run against all of the inputs in `grouping_inputs`.
Tests can be run using either the full `EventManager.save` pipeline, or a minimal (and much more
performant) save process. Using the full save process is the most realistic way to test, but
it's also slow, because it comes with the overhead of our full postgres database. Manually
cherry-picking only certain parts of the save process to run is much faster, but it's also more
likely to fall out of sync with reality.
We therefore use the full process when testing the current grouping config, and only use the
faster manual process for older configs. When testing locally, the faster process can be used
for all configs by setting `SENTRY_FAST_GROUPING_SNAPSHOTS=1` in the environment.
Basic usage:
@run_as_grouping_inputs_snapshot_test
def test_some_grouping_thing(
event: Event,
variants: dict[str, BaseVariant],
config_name: str,
create_snapshot: InstaSnapshotter,
**kwargs: Any,
)-> None:
# In this section, make any necessary assertions about the event and/or variants, and
# process them to create snapshot output
create_snapshot(output)
When the wrapped test function is called, all arguments are passed as keywords, so any which
aren't used can be absorbed into kwargs:
@run_as_grouping_inputs_snapshot_test
def test_some_grouping_thing(
variants: dict[str, BaseVariant],
create_snapshot: InstaSnapshotter,
**kwargs: Any,
)-> None:
# ...
If more mocking is necessary, it can be done alongside this decorator:
@run_as_grouping_inputs_snapshot_test
@patch("sentry.grouping.strategies.newstyle.logging.exception")
def test_variants(
mock_exception_logger: MagicMock,
event: Event,
variants: dict[str, BaseVariant],
config_name: str,
create_snapshot: InstaSnapshotter,
**kwargs: Any,
) -> None:
# ...
Note that because pytest adds in mocks as args rather than kwargs, the new mocks need to go at
the beginning of the test function's argument list (which in turn means the patching needs to go
underneath this decorator).
"""
@django_db_all
@with_grouping_inputs("grouping_input", GROUPING_INPUTS_DIR)
@with_grouping_configs(MANUAL_SAVE_CONFIGS | FULL_PIPELINE_CONFIGS)
def wrapped_test_func(
config_name: str,
grouping_input: GroupingInput,
insta_snapshot: InstaSnapshotter,
) -> None:
should_use_full_pipeline = config_name in FULL_PIPELINE_CONFIGS
project = (
Factories.create_project(Factories.create_organization())
if should_use_full_pipeline
else None
)
event = grouping_input.create_event(
config_name,
use_full_ingest_pipeline=should_use_full_pipeline,
project=project,
)
# Create a snapshot function with the output path baked in
create_snapshot = get_grouping_input_snapshotter(
insta_snapshot,
folder_name=test_func.__module__.split(".")[-1].replace("test_", ""),
test_name=test_func.__name__,
config_name=config_name,
grouping_input_file=grouping_input.filename,
)
# Run the actual test
test_func(
event=event,
variants=event.get_grouping_variants(),
config_name=config_name,
create_snapshot=create_snapshot,
)
return wrapped_test_func
|
GroupingInput
|
python
|
getsentry__sentry
|
src/sentry/api/serializers/models/groupseen.py
|
{
"start": 185,
"end": 1141
}
|
class ____(Serializer):
def get_attrs(self, item_list, user, **kwargs):
serialized_users = user_service.serialize_many(
filter=dict(user_ids=[i.user_id for i in item_list]), as_user=user
)
user_map = {}
for serialized in serialized_users:
user_map[serialized["id"]] = serialized
result = {}
for item in item_list:
user_id_str = str(item.user_id)
# Deleted users may have stale groupseen references as the "cascade deletion" is
# eventually consistent. We omit this groupseen data as it's no longer valid.
if user_id_str in user_map:
result[item] = {"user": user_map[user_id_str]}
return result
def serialize(self, obj, attrs, user, **kwargs):
data = attrs.get("user")
if data is None:
return None
data["lastSeen"] = obj.last_seen
return data
|
GroupSeenSerializer
|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_patches/_sys_intern_patch.py
|
{
"start": 160,
"end": 252
}
|
class ____(str):
"""Wrapper around `str` to test if subclasses are accepted."""
|
_CustomStr
|
python
|
django__django
|
tests/auth_tests/test_basic.py
|
{
"start": 5615,
"end": 8610
}
|
class ____(TestCase):
def test_get_user_anonymous(self):
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, AnonymousUser)
async def test_aget_user_anonymous(self):
request = HttpRequest()
request.session = await self.client.asession()
user = await aget_user(request)
self.assertIsInstance(user, AnonymousUser)
def test_get_user(self):
created_user = User.objects.create_user(
"testuser", "test@example.com", "testpw"
)
self.client.login(username="testuser", password="testpw")
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
def test_get_user_fallback_secret(self):
created_user = User.objects.create_user(
"testuser", "test@example.com", "testpw"
)
self.client.login(username="testuser", password="testpw")
request = HttpRequest()
request.session = self.client.session
prev_session_key = request.session.session_key
with override_settings(
SECRET_KEY="newsecret",
SECRET_KEY_FALLBACKS=[settings.SECRET_KEY],
):
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
self.assertNotEqual(request.session.session_key, prev_session_key)
# Remove the fallback secret.
# The session hash should be updated using the current secret.
with override_settings(SECRET_KEY="newsecret"):
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
async def test_aget_user_fallback_secret(self):
created_user = await User.objects.acreate_user(
"testuser", "test@example.com", "testpw"
)
await self.client.alogin(username="testuser", password="testpw")
request = HttpRequest()
request.session = await self.client.asession()
prev_session_key = request.session.session_key
with override_settings(
SECRET_KEY="newsecret",
SECRET_KEY_FALLBACKS=[settings.SECRET_KEY],
):
user = await aget_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
self.assertNotEqual(request.session.session_key, prev_session_key)
# Remove the fallback secret.
# The session hash should be updated using the current secret.
with override_settings(SECRET_KEY="newsecret"):
user = await aget_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
|
TestGetUser
|
python
|
tensorflow__tensorflow
|
tensorflow/python/platform/benchmark_test.py
|
{
"start": 918,
"end": 2756
}
|
class ____(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
always_print_fields_with_no_presence=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
BenchmarkTest
|
python
|
Netflix__metaflow
|
metaflow/plugins/datastores/s3_storage.py
|
{
"start": 404,
"end": 5473
}
|
class ____(DataStoreStorage):
TYPE = "s3"
@check_s3_deps
def __init__(self, root=None):
super(S3Storage, self).__init__(root)
self.s3_client = S3Client()
@classmethod
def get_datastore_root_from_config(cls, echo, create_on_absent=True):
return DATASTORE_SYSROOT_S3
def is_file(self, paths):
with S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
) as s3:
if len(paths) > 10:
s3objs = s3.info_many(paths, return_missing=True)
return [s3obj.exists for s3obj in s3objs]
else:
result = []
for path in paths:
result.append(s3.info(path, return_missing=True).exists)
return result
def info_file(self, path):
with S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
) as s3:
s3obj = s3.info(path, return_missing=True)
return s3obj.exists, s3obj.metadata
def size_file(self, path):
with S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
) as s3:
s3obj = s3.info(path, return_missing=True)
return s3obj.size
def list_content(self, paths):
strip_prefix_len = len(self.datastore_root.rstrip("/")) + 1
with S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
) as s3:
results = s3.list_paths(paths)
return [
self.list_content_result(
path=o.url[strip_prefix_len:], is_file=o.exists
)
for o in results
]
def save_bytes(self, path_and_bytes_iter, overwrite=False, len_hint=0):
def _convert():
# Output format is the same as what is needed for S3PutObject:
# key, value, path, content_type, metadata
for path, obj in path_and_bytes_iter:
if isinstance(obj, tuple):
yield path, obj[0], None, None, obj[1]
else:
yield path, obj, None, None, None
with S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
) as s3:
# HACK: The S3 datatools we rely on does not currently do a good job
# determining if uploading things in parallel is more efficient than
# serially. We use a heuristic for now where if we have a lot of
# files, we will go in parallel and if we have few files, we will
# serially upload them. This is not ideal because there is also a size
# factor and one very large file with a few other small files, for
# example, would benefit from a parallel upload.
#
# In the case of save_artifacts, currently len_hint is based on the
# total number of artifacts, not taking into account how many of them
# already exist in the CAS, i.e. it can be a gross overestimate. As a
# result, it is possible we take a latency hit by using put_many only
# for a few artifacts.
#
# A better approach would be to e.g. write all blobs to temp files
# and based on the total size and number of files use either put_files
# (which avoids re-writing the files) or s3.put sequentially.
if len_hint > 10:
# Use put_many
s3.put_many(starmap(S3PutObject, _convert()), overwrite)
else:
# Sequential upload
for key, obj, _, _, metadata in _convert():
s3.put(key, obj, overwrite=overwrite, metadata=metadata)
def load_bytes(self, paths):
if len(paths) == 0:
return CloseAfterUse(iter([]))
s3 = S3(
s3root=self.datastore_root,
tmproot=ARTIFACT_LOCALROOT,
external_client=self.s3_client,
)
def iter_results():
# We similarly do things in parallel for many files. This is again
# a hack.
if len(paths) > 10:
results = s3.get_many(paths, return_missing=True, return_info=True)
for r in results:
if r.exists:
yield r.key, r.path, r.metadata
else:
yield r.key, None, None
else:
for p in paths:
r = s3.get(p, return_missing=True, return_info=True)
if r.exists:
yield r.key, r.path, r.metadata
else:
yield r.key, None, None
return CloseAfterUse(iter_results(), closer=s3)
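# --- Illustrative input shape for save_bytes (hypothetical keys and payloads) ---
# Each element is (key, value) where value is either raw bytes or a
# (bytes, metadata) tuple, matching what `_convert` unpacks above.
example_iter = [
    ("flow/step/artifact_a", b"raw bytes, no metadata"),
    ("flow/step/artifact_b", (b"raw bytes", {"content-type": "application/octet-stream"})),
]
for _key, _obj in example_iter:
    if isinstance(_obj, tuple):
        print(_key, len(_obj[0]), _obj[1])
    else:
        print(_key, len(_obj), None)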
|
S3Storage
|
python
|
scipy__scipy
|
scipy/optimize/tests/test_linprog.py
|
{
"start": 93693,
"end": 93907
}
|
class ____(LinprogRSTests):
options = {"pivot": "bland"}
############################################
# HiGHS-Simplex-Dual Option-Specific Tests #
############################################
|
TestLinprogRSBland
|
python
|
qdrant__qdrant-client
|
qdrant_client/grpc/qdrant_pb2_grpc.py
|
{
"start": 694,
"end": 1675
}
|
class ____(object):
"""Missing associated documentation comment in .proto file."""
def HealthCheck(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_QdrantServicer_to_server(servicer, server):
rpc_method_handlers = {
'HealthCheck': grpc.unary_unary_rpc_method_handler(
servicer.HealthCheck,
request_deserializer=qdrant__pb2.HealthCheckRequest.FromString,
response_serializer=qdrant__pb2.HealthCheckReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'qdrant.Qdrant', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
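# --- Hedged wiring sketch (assumes the servicer class is named QdrantServicer, per
# this record's label, and that the generated qdrant_pb2 module is importable) ---
# Registers the servicer with a standard gRPC server; HealthCheck itself still
# raises NotImplementedError, so this only demonstrates the plumbing.
from concurrent import futures
import grpc

_server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
add_QdrantServicer_to_server(QdrantServicer(), _server)
_server.add_insecure_port("127.0.0.1:50051")
_server.start()
_server.stop(grace=None)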
|
QdrantServicer
|
python
|
apache__airflow
|
providers/sftp/tests/unit/sftp/hooks/test_sftp.py
|
{
"start": 30340,
"end": 40129
}
|
class ____:
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
@pytest.mark.asyncio
async def test_extra_dejson_fields_for_connection_building_known_hosts_none(
self, mock_get_connection, mock_connect, caplog
):
"""
Assert that connection details passed through the extra field in the Airflow connection
are properly passed when creating SFTP connection
"""
mock_get_connection.return_value = MockAirflowConnection(known_hosts="None")
hook = SFTPHookAsync()
await hook._get_conn()
expected_connection_details = {
"host": "localhost",
"port": 22,
"username": "username",
"password": "password",
"client_keys": "~/keys/my_key",
"known_hosts": None,
"passphrase": "mypassphrase",
}
mock_connect.assert_called_with(**expected_connection_details)
@pytest.mark.parametrize(
("mock_port", "mock_host_key"),
[
(22, "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFe8P8lk5HFfL/rMlcCMHQhw1cg+uZtlK5rXQk2C4pOY"),
(2222, "AAAAC3NzaC1lZDI1NTE5AAAAIFe8P8lk5HFfL/rMlcCMHQhw1cg+uZtlK5rXQk2C4pOY"),
(
2222,
"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDDsXFe87LsBA1Hfi+mtw"
"/EoQkv8bXVtfOwdMP1ETpHVsYpm5QG/7tsLlKdE8h6EoV/OFw7XQtoibNZp/l5ABjE=",
),
],
)
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("asyncssh.import_private_key")
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
@pytest.mark.asyncio
async def test_extra_dejson_fields_for_connection_with_host_key(
self,
mock_get_connection,
mock_import_private_key,
mock_connect,
mock_port,
mock_host_key,
):
"""
Assert that connection details passed through the extra field in the Airflow connection
are properly passed to paramiko client for validating given host key.
"""
mock_get_connection.return_value = MockAirflowConnectionWithHostKey(
host_key=mock_host_key, no_host_key_check=False, port=mock_port
)
hook = SFTPHookAsync()
await hook._get_conn()
assert hook.known_hosts == f"localhost {mock_host_key}".encode()
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
@pytest.mark.asyncio
async def test_extra_dejson_fields_for_connection_raises_valuerror(
self, mock_get_connection, mock_connect
):
"""
Assert that when both host_key and no_host_key_check are set, a ValueError is raised because no_host_key_check
should be unset when host_key is given and the host_key needs to be validated.
"""
host_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFe8P8lk5HFfL/rMlcCMHQhw1cg+uZtlK5rXQk2C4pOY"
mock_get_connection.return_value = MockAirflowConnectionWithHostKey(
host_key=host_key, no_host_key_check=True
)
hook = SFTPHookAsync()
with pytest.raises(ValueError, match="Host key check was skipped, but `host_key` value was given"):
await hook._get_conn()
@patch("paramiko.SSHClient.connect")
@patch("asyncssh.import_private_key")
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
@pytest.mark.asyncio
async def test_no_host_key_check_set_logs_warning(
self, mock_get_connection, mock_connect, mock_import_pkey, mock_ssh_connect, caplog
):
"""Assert that when no_host_key_check is set, a warning is logged for MITM attacks possibility."""
mock_get_connection.return_value = MockAirflowConnectionWithHostKey(no_host_key_check=True)
hook = SFTPHookAsync()
await hook._get_conn()
assert "No Host Key Verification. This won't protect against Man-In-The-Middle attacks" in caplog.text
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
@pytest.mark.asyncio
async def test_extra_dejson_fields_for_connection_building(self, mock_get_connection, mock_connect):
"""
Assert that connection details passed through the extra field in the Airflow connection
are properly passed when creating SFTP connection
"""
mock_get_connection.return_value = MockAirflowConnection()
hook = SFTPHookAsync()
await hook._get_conn()
expected_connection_details = {
"host": "localhost",
"port": 22,
"username": "username",
"password": "password",
"client_keys": "~/keys/my_key",
"known_hosts": None,
"passphrase": "mypassphrase",
}
mock_connect.assert_called_with(**expected_connection_details)
@pytest.mark.asyncio
@patch("asyncssh.connect", new_callable=AsyncMock)
@patch("asyncssh.import_private_key")
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_connection")
async def test_connection_private(self, mock_get_connection, mock_import_private_key, mock_connect):
"""
Assert that connection details with private key passed through the extra field in the Airflow connection
are properly passed when creating SFTP connection
"""
mock_get_connection.return_value = MockAirflowConnectionWithPrivate()
mock_import_private_key.return_value = "test"
hook = SFTPHookAsync()
await hook._get_conn()
expected_connection_details = {
"host": "localhost",
"port": 22,
"username": "username",
"password": "password",
"client_keys": ["test"],
"known_hosts": None,
"passphrase": "mypassphrase",
}
mock_connect.assert_called_with(**expected_connection_details)
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
@pytest.mark.asyncio
async def test_list_directory_path_does_not_exist(self, mock_hook_get_conn):
"""
Assert that AirflowException is raised when path does not exist on SFTP server
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
expected_files = None
files = await hook.list_directory(path="/path/does_not/exist/")
assert files == expected_files
mock_hook_get_conn.return_value.__aexit__.assert_called()
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
@pytest.mark.asyncio
async def test_read_directory_path_does_not_exist(self, mock_hook_get_conn):
"""
Assert that AirflowException is raised when path does not exist on SFTP server
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
expected_files = None
files = await hook.read_directory(path="/path/does_not/exist/")
assert files == expected_files
mock_hook_get_conn.return_value.__aexit__.assert_called()
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
@pytest.mark.asyncio
async def test_list_directory_path_has_files(self, mock_hook_get_conn):
"""
Assert that file list is returned when path exists on SFTP server
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
expected_files = ["..", ".", "file"]
files = await hook.list_directory(path="/path/exists/")
assert sorted(files) == sorted(expected_files)
mock_hook_get_conn.return_value.__aexit__.assert_called()
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
@pytest.mark.asyncio
async def test_get_file_by_pattern_with_match(self, mock_hook_get_conn):
"""
Assert that filename is returned when file pattern is matched on SFTP server
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
files = await hook.get_files_and_attrs_by_pattern(path="/path/exists/", fnmatch_pattern="file")
assert len(files) == 1
assert files[0].filename == "file"
mock_hook_get_conn.return_value.__aexit__.assert_called()
@pytest.mark.asyncio
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
async def test_get_mod_time(self, mock_hook_get_conn):
"""
Assert that the file attributes are fetched and the modified time of the file is returned
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
mod_time = await hook.get_mod_time("/path/exists/file")
expected_value = datetime.datetime.fromtimestamp(1667302566).strftime("%Y%m%d%H%M%S")
assert mod_time == expected_value
@pytest.mark.asyncio
@patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync._get_conn")
async def test_get_mod_time_exception(self, mock_hook_get_conn):
"""
Assert that get_mod_time raise exception when file does not exist
"""
mock_hook_get_conn.return_value.__aenter__.return_value = MockSSHClient()
hook = SFTPHookAsync()
with pytest.raises(AirflowException) as exc:
await hook.get_mod_time("/path/does_not/exist/")
assert str(exc.value) == "No files matching"
|
TestSFTPHookAsync
|
python
|
celery__celery
|
t/unit/tasks/test_canvas.py
|
{
"start": 3063,
"end": 3249
}
|
class ____(chain):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.subtask_type = "chain_subclass"
@Signature.register_type()
|
chain_subclass
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 48758,
"end": 49269
}
|
class ____(VOTableSpecWarning):
"""
The ``timeorigin`` attribute on the ``TIMESYS`` element must be
either a floating point literal specifying a valid Julian Date,
or, for convenience, the string "MJD-origin" (standing for 2400000.5)
or the string "JD-origin" (standing for 0).
**References**: `1.4
<http://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC21>`__
"""
message_template = "Invalid timeorigin attribute '{}'"
default_args = ("x",)
|
E23
|
python
|
PyCQA__pylint
|
tests/functional/g/generic_alias/generic_alias_typing.py
|
{
"start": 2713,
"end": 2770
}
|
class ____(typing.OrderedDict):
pass
|
DerivedOrderedDict2
|
python
|
mlflow__mlflow
|
tests/pyfunc/test_scoring_server.py
|
{
"start": 3092,
"end": 3745
}
|
class ____(PythonModel):
# Example model that takes "prompt" as model input
def predict(self, context, model_input, params=None):
if isinstance(model_input, pd.DataFrame):
model_input = model_input.to_dict(orient="records")[0]
ret = model_input["prompt"]
return {
"choices": [
{
"index": 0,
"text": ret,
"finish_reason": "stop",
}
],
# Echo model input and params for testing purposes
"model_input": model_input,
"params": params,
}
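# --- Hypothetical direct invocation of the example model above (class name taken
# from this record's label) ---
# Outside the MLflow scoring server there is no context object, so None is passed;
# the input and params mirror what the server would hand to `predict`.
import pandas as pd

_model = MyCompletionsLLM()
_result = _model.predict(None, pd.DataFrame([{"prompt": "hello"}]), params={"temperature": 0.1})
print(_result["choices"][0]["text"])  # -> "hello"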
|
MyCompletionsLLM
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/type/directives.py
|
{
"start": 273,
"end": 1165
}
|
class ____(object):
# Operations
QUERY = 'QUERY'
MUTATION = 'MUTATION'
SUBSCRIPTION = 'SUBSCRIPTION'
FIELD = 'FIELD'
FRAGMENT_DEFINITION = 'FRAGMENT_DEFINITION'
FRAGMENT_SPREAD = 'FRAGMENT_SPREAD'
INLINE_FRAGMENT = 'INLINE_FRAGMENT'
# Schema Definitions
SCHEMA = 'SCHEMA'
SCALAR = 'SCALAR'
OBJECT = 'OBJECT'
FIELD_DEFINITION = 'FIELD_DEFINITION'
ARGUMENT_DEFINITION = 'ARGUMENT_DEFINITION'
INTERFACE = 'INTERFACE'
UNION = 'UNION'
ENUM = 'ENUM'
ENUM_VALUE = 'ENUM_VALUE'
INPUT_OBJECT = 'INPUT_OBJECT'
INPUT_FIELD_DEFINITION = 'INPUT_FIELD_DEFINITION'
OPERATION_LOCATIONS = [
QUERY,
MUTATION,
SUBSCRIPTION
]
FRAGMENT_LOCATIONS = [
FRAGMENT_DEFINITION,
FRAGMENT_SPREAD,
INLINE_FRAGMENT
]
FIELD_LOCATIONS = [
FIELD
]
|
DirectiveLocation
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/embedding_ops_test.py
|
{
"start": 1233,
"end": 3478
}
|
class ____(test_util.TensorFlowTestCase):
def testEmbeddingLookupOnUninitializedVariableDoesSparseRead(self):
x = resource_variable_ops.UninitializedVariable(
trainable=True, shape=[3, 3], dtype=dtypes.float32)
@def_function.function(input_signature=[])
def _init():
return x.assign(np.zeros([3, 3]))
@def_function.function(input_signature=[])
def _call():
return embedding_ops.embedding_lookup_v2(x, [0])
self.assertAllClose(self.evaluate(_init()), np.zeros([3, 3]))
concrete_call = _call.get_concrete_function()
self.assertAllClose(self.evaluate(concrete_call()), [[0., 0., 0.]])
resource_gather_node = []
read_var_node = []
graph = concrete_call.graph.as_graph_def()
for n in graph.node:
if n.op == "ResourceGather":
resource_gather_node.append(n)
if n.op == "ReadVariableOp":
read_var_node.append(n)
for f in graph.library.function:
for n in f.node_def:
if n.op == "ResourceGather":
resource_gather_node.append(n)
if n.op == "ReadVariableOp":
read_var_node.append(n)
# There should be a single ResourceGather, but no ReadVariableOp
# (dense read).
self.assertLen(resource_gather_node, 1)
self.assertLen(read_var_node, 0)
def testEmbeddingLookupGradientsHaveKnownShape(self):
x = resource_variable_ops.ResourceVariable(
initial_value=np.zeros([3, 3]),
trainable=True,
shape=[3, 3],
dtype=dtypes.float32)
@def_function.function(input_signature=[])
def _init():
return x.assign(np.zeros([3, 3]))
@def_function.function(input_signature=[])
def _call():
with gradients.GradientTape() as tape:
y = embedding_ops.embedding_lookup_v2(x, [0])
loss = math_ops.reduce_sum(y)
grads = tape.gradient(loss, x)
self.assertAllEqual(grads.shape, [3, 3])
return ops.convert_to_tensor(grads)
self.assertAllClose(self.evaluate(_init()), np.zeros([3, 3]))
concrete_call = _call.get_concrete_function()
self.assertAllClose(
self.evaluate(concrete_call()),
[[1., 1., 1.], [0., 0., 0.], [0., 0., 0.]])
if __name__ == "__main__":
googletest.main()
|
EmbeddingLookupTest
|
python
|
django-haystack__django-haystack
|
test_haystack/test_fields.py
|
{
"start": 22691,
"end": 23312
}
|
class ____(TestCase):
def test_init(self):
try:
foo = FacetMultiValueField(model_attr="foo")
foo_exact = FacetMultiValueField(facet_for="bar")
except:
self.fail()
self.assertEqual(foo.facet_for, None)
self.assertEqual(foo_exact.null, True)
self.assertEqual(foo_exact.facet_for, "bar")
def test_prepare(self):
mock = MockModel()
mock.user = "daniel"
mock.sites = [1, 3, 4]
sites = FacetMultiValueField(model_attr="sites")
self.assertEqual(sites.prepare(mock), [1, 3, 4])
|
FacetMultiValueFieldTestCase
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/descriptors.py
|
{
"start": 20879,
"end": 21119
}
|
class ____(AOTInput):
"""The world token which is threaded through side-effectful operations"""
idx: int
def expr(self) -> str:
return f"__forward_token{self.idx}"
@dataclasses.dataclass(frozen=True)
|
ForwardTokenAOTInput
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_collectors.py
|
{
"start": 7114,
"end": 7310
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'dns']
valid_subsets = ['dns']
fact_namespace = 'ansible_dns'
collector_class = DnsFactCollector
|
TestDnsFacts
|
python
|
PyCQA__pylint
|
doc/data/messages/m/multiple-constructor-doc/bad.py
|
{
"start": 0,
"end": 354
}
|
class ____: # [multiple-constructor-doc]
"""Represents a point in the xy-coordinate plane.
:param x: coordinate
:param y: coordinate
"""
def __init__(self, x, y):
"""Represents a point in the xy-coordinate plane.
:param x: coordinate
:param y: coordinate
"""
self.x = x
self.y = y
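# --- Sketch of how the warning is typically resolved (illustrative only) ---
# Keep the constructor documentation in a single place, here on __init__ alone.
class PointFixed:
    def __init__(self, x, y):
        """Represents a point in the xy-coordinate plane.

        :param x: coordinate
        :param y: coordinate
        """
        self.x = x
        self.y = y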
|
Point
|
python
|
kubernetes-client__python
|
kubernetes/base/config/kube_config_test.py
|
{
"start": 13942,
"end": 15803
}
|
class ____:
FILE_KEYS = ["ssl_ca_cert", "key_file", "cert_file"]
IGNORE_KEYS = ["refresh_api_key_hook"]
def __init__(self, token=None, **kwargs):
self.api_key = {}
# Provided by the OpenAPI-generated Configuration class
self.refresh_api_key_hook = None
if token:
self.api_key['authorization'] = token
self.__dict__.update(kwargs)
def __eq__(self, other):
if len(self.__dict__) != len(other.__dict__):
return
for k, v in self.__dict__.items():
if k in self.IGNORE_KEYS:
continue
if k not in other.__dict__:
return
if k in self.FILE_KEYS:
if v and other.__dict__[k]:
try:
with open(v) as f1, open(other.__dict__[k]) as f2:
if f1.read() != f2.read():
return
except OSError:
# fall back to only compare filenames in case we are
# testing the passing of filenames to the config
if other.__dict__[k] != v:
return
else:
if other.__dict__[k] != v:
return
else:
if other.__dict__[k] != v:
return
return True
def __repr__(self):
rep = "\n"
for k, v in self.__dict__.items():
val = v
if k in self.FILE_KEYS:
try:
with open(v) as f:
val = "FILE: %s" % str.decode(f.read())
except OSError as e:
val = "ERROR: %s" % str(e)
rep += "\t%s: %s\n" % (k, val)
return "Config(%s\n)" % rep
|
FakeConfig
|
python
|
pytorch__pytorch
|
torch/ao/quantization/fx/fuse_handler.py
|
{
"start": 723,
"end": 1302
}
|
class ____(ABC):
"""Base handler class for the fusion patterns"""
@abstractmethod
def __init__(self, node: Node):
pass
@abstractmethod
def fuse(
self,
load_arg: Callable,
named_modules: dict[str, torch.nn.Module],
fused_graph: Graph,
root_node: Node,
extra_inputs: list[Any],
matched_node_pattern: NodePattern,
fuse_custom_config: FuseCustomConfig,
fuser_method_mapping: dict[Pattern, torch.nn.Sequential | Callable],
is_qat: bool,
) -> Node:
pass
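# --- Minimal illustrative subclass (hypothetical; base class name taken from this
# record's label, and real handlers fuse matched module patterns) ---
# Shows the shape of a concrete handler; it simply copies the root node into the
# fused graph instead of performing any real fusion.
class NoopFuseHandler(FuseHandler):
    def __init__(self, node):
        pass

    def fuse(self, load_arg, named_modules, fused_graph, root_node, extra_inputs,
             matched_node_pattern, fuse_custom_config, fuser_method_mapping, is_qat):
        return fused_graph.node_copy(root_node, load_arg)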
|
FuseHandler
|
python
|
langchain-ai__langchain
|
libs/langchain/langchain_classic/evaluation/schema.py
|
{
"start": 3503,
"end": 5345
}
|
class ____:
"""Mixin for checking evaluation arguments."""
@property
def requires_reference(self) -> bool:
"""Whether this evaluator requires a reference label."""
return False
@property
def requires_input(self) -> bool:
"""Whether this evaluator requires an input string."""
return False
@property
def _skip_input_warning(self) -> str:
"""Warning to show when input is ignored."""
return f"Ignoring input in {self.__class__.__name__}, as it is not expected."
@property
def _skip_reference_warning(self) -> str:
"""Warning to show when reference is ignored."""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
)
def _check_evaluation_args(
self,
reference: str | None = None,
input_: str | None = None,
) -> None:
"""Check if the evaluation arguments are valid.
Args:
reference: The reference label.
input_: The input string.
Raises:
ValueError: If the evaluator requires an input string but none is provided,
or if the evaluator requires a reference label but none is provided.
"""
if self.requires_input and input_ is None:
msg = f"{self.__class__.__name__} requires an input string."
raise ValueError(msg)
if input_ is not None and not self.requires_input:
warn(self._skip_input_warning, stacklevel=3)
if self.requires_reference and reference is None:
msg = f"{self.__class__.__name__} requires a reference string."
raise ValueError(msg)
if reference is not None and not self.requires_reference:
warn(self._skip_reference_warning, stacklevel=3)
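# --- Toy evaluator using the mixin above (hypothetical class; mixin name taken
# from this record's label) ---
# It requires a reference label, so _check_evaluation_args raises if none is given.
class ExactMatchEvaluator(_EvalArgsMixin):
    @property
    def requires_reference(self) -> bool:
        return True

    def evaluate(self, prediction: str, reference: str | None = None) -> bool:
        self._check_evaluation_args(reference=reference, input_=None)
        return prediction == reference

print(ExactMatchEvaluator().evaluate("42", reference="42"))  # True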
|
_EvalArgsMixin
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/commands/ui_commands.py
|
{
"start": 3821,
"end": 4224
}
|
class ____(NamedTuple):
"""
Summary of missing and extra translation keys for a file, per locale.
Attributes:
missing_keys: A dictionary mapping locale codes to lists of missing translation keys.
extra_keys: A dictionary mapping locale codes to lists of extra translation keys.
"""
missing_keys: dict[str, list[str]]
extra_keys: dict[str, list[str]]
|
LocaleSummary
|
python
|
huggingface__transformers
|
src/transformers/models/minimax/modular_minimax.py
|
{
"start": 21665,
"end": 21720
}
|
class ____(MixtralTopKRouter):
pass
|
MiniMaxTopKRouter
|
python
|
ray-project__ray
|
release/llm_tests/benchmark/load_test.py
|
{
"start": 13600,
"end": 15188
}
|
class ____(BaseProvider):
DEFAULT_MODEL_NAME = "ensemble"
def get_url(self):
assert not self.parsed_options.chat, "Chat is not supported"
stream_suffix = "_stream" if self.parsed_options.stream else ""
return f"/v2/models/{self.model}/generate{stream_suffix}"
def format_payload(self, prompt, max_tokens, images):
assert images is None, "images are not supported"
assert self.parsed_options.n == 1, "n > 1 is not supported"
data = {
"text_input": prompt,
"max_tokens": max_tokens,
"stream": self.parsed_options.stream,
"temperature": self.parsed_options.temperature,
# for whatever reason these has to be provided
"bad_words": "",
"stop_words": "",
}
assert self.parsed_options.logprobs is None, "logprobs are not supported"
return data
def parse_output_json(self, data, prompt):
text = data["text_output"]
if not self.parsed_options.stream:
# Triton returns the original prompt in the output, cut it off
text = text.removeprefix("<s> ")
if text.startswith(prompt):
# HF tokenizers get confused by the leading space
text = text[len(prompt) :].removeprefix(" ")
else:
print("WARNING: prompt not found in the output")
return ChunkMetadata(
text=text,
logprob_tokens=None,
usage_tokens=None,
prompt_usage_tokens=None,
)
|
TritonGenerateProvider
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_ma.py
|
{
"start": 7430,
"end": 8474
}
|
class ____(Benchmark):
param_names = ['mtype', 'msize']
params = [['np', 'np.ma'],
['small', 'big']]
def setup(self, mtype, msize):
# Small arrays
xs = np.random.uniform(-1, 1, 6).reshape(2, 3)
ys = np.random.uniform(-1, 1, 6).reshape(2, 3)
m1 = [[True, False, False], [False, False, True]]
m2 = [[True, False, True], [False, False, True]]
self.nmxs = np.ma.array(xs, mask=m1)
self.nmys = np.ma.array(ys, mask=m2)
# Big arrays
xl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100)
yl = np.random.uniform(-1, 1, 100 * 100).reshape(100, 100)
maskx = xl > 0.8
masky = yl < -0.8
self.nmxl = np.ma.array(xl, mask=maskx)
self.nmyl = np.ma.array(yl, mask=masky)
def time_where(self, mtype, msize):
fun = eval(f"{mtype}.where")
if msize == 'small':
fun(self.nmxs > 2, self.nmxs, self.nmys)
elif msize == 'big':
fun(self.nmxl > 2, self.nmxl, self.nmyl)
|
Where
|
python
|
ApeWorX__ape
|
tests/functional/test_accounts.py
|
{
"start": 2078,
"end": 2153
}
|
class ____(EIP712Type):
addr: "address" # type: ignore # noqa: F821
|
Baz
|
python
|
jazzband__tablib
|
src/tablib/exceptions.py
|
{
"start": 0,
"end": 71
}
|
class ____(Exception):
"""Tablib common exception."""
|
TablibException
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol1.py
|
{
"start": 2044,
"end": 2082
}
|
class ____(Protocol[_B]): ...
|
ProtoBase2
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalarmath.py
|
{
"start": 3270,
"end": 5390
}
|
class ____(TestCase):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(
dtype=dt, type="binary", max_size=sz
):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(
np.square(inp2), np.multiply(inp2, inp2), err_msg=msg
)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(
np.reciprocal(inp2), np.divide(1, inp2), err_msg=msg
)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
@xpassIfTorchDynamo_np # (reason="pytorch does not have .view")
def test_lower_align(self):
# check data that is not aligned to element size
        # i.e. doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
|
TestBaseMath
|
python
|
scipy__scipy
|
scipy/io/_harwell_boeing/hb.py
|
{
"start": 12957,
"end": 14964
}
|
class ____:
"""Class to hold the matrix type."""
# q2f* translates qualified names to Fortran character
_q2f_type = {
"real": "R",
"complex": "C",
"pattern": "P",
"integer": "I",
}
_q2f_structure = {
"symmetric": "S",
"unsymmetric": "U",
"hermitian": "H",
"skewsymmetric": "Z",
"rectangular": "R"
}
_q2f_storage = {
"assembled": "A",
"elemental": "E",
}
_f2q_type = {j: i for i, j in _q2f_type.items()}
_f2q_structure = {j: i for i, j in _q2f_structure.items()}
_f2q_storage = {j: i for i, j in _q2f_storage.items()}
@classmethod
def from_fortran(cls, fmt):
if not len(fmt) == 3:
raise ValueError("Fortran format for matrix type should be 3 "
"characters long")
try:
value_type = cls._f2q_type[fmt[0]]
structure = cls._f2q_structure[fmt[1]]
storage = cls._f2q_storage[fmt[2]]
return cls(value_type, structure, storage)
except KeyError as e:
raise ValueError(f"Unrecognized format {fmt}") from e
def __init__(self, value_type, structure, storage="assembled"):
self.value_type = value_type
self.structure = structure
self.storage = storage
if value_type not in self._q2f_type:
raise ValueError(f"Unrecognized type {value_type}")
if structure not in self._q2f_structure:
raise ValueError(f"Unrecognized structure {structure}")
if storage not in self._q2f_storage:
raise ValueError(f"Unrecognized storage {storage}")
@property
def fortran_format(self):
return self._q2f_type[self.value_type] + \
self._q2f_structure[self.structure] + \
self._q2f_storage[self.storage]
def __repr__(self):
return f"HBMatrixType({self.value_type}, {self.structure}, {self.storage})"
|
HBMatrixType
|
python
|
docker__docker-py
|
docker/errors.py
|
{
"start": 4240,
"end": 4659
}
|
class ____(DockerException):
pass
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
|
ImageLoadError
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/batch.py
|
{
"start": 1103,
"end": 2724
}
|
class ____(AwsBaseWaiterTrigger):
"""
Checks for the status of a submitted job_id to AWS Batch until it reaches a failure or a success state.
:param job_id: the job ID, to poll for job completion or not
:param region_name: AWS region name to use
Override the region_name in connection (if provided)
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
:param waiter_delay: polling period in seconds to check for the status of the job
:param waiter_max_attempts: The maximum number of attempts to be made.
"""
def __init__(
self,
job_id: str | None,
region_name: str | None = None,
aws_conn_id: str | None = "aws_default",
waiter_delay: int = 5,
waiter_max_attempts: int = 720,
):
super().__init__(
serialized_fields={"job_id": job_id},
waiter_name="batch_job_complete",
waiter_args={"jobs": [job_id]},
failure_message=f"Failure while running batch job {job_id}",
status_message=f"Batch job {job_id} not ready yet",
status_queries=["jobs[].status", "computeEnvironments[].statusReason"],
return_key="job_id",
return_value=job_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
region_name=region_name,
)
def hook(self) -> AwsGenericHook:
return BatchClientHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
|
BatchJobTrigger
|
python
|
google__pytype
|
pytype/pytd/pytd.py
|
{
"start": 8378,
"end": 9329
}
|
class ____(Node):
"""Represents an individual signature of a function.
For overloaded functions, this is one specific combination of parameters.
For non-overloaded functions, there is a 1:1 correspondence between function
and signature.
Attributes:
params: The list of parameters for this function definition.
starargs: Name of the "*" parameter. The "args" in "*args".
    starstarargs: Name of the "**" parameter. The "kw" in "**kw".
return_type: The return type of this function.
exceptions: List of exceptions for this function definition.
template: names for bindings for bounded types in params/return_type
"""
params: tuple[Parameter, ...]
starargs: Parameter | None
starstarargs: Parameter | None
return_type: TypeU
exceptions: tuple[TypeU, ...]
template: tuple[TemplateItem, ...]
@property
def has_optional(self):
return self.starargs is not None or self.starstarargs is not None
|
Signature
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 116991,
"end": 117122
}
|
class ____:
xlSpeakByColumns = 1 # from enum XlSpeakDirection
xlSpeakByRows = 0 # from enum XlSpeakDirection
|
SpeakDirection
|
python
|
pydantic__pydantic
|
pydantic/types.py
|
{
"start": 84059,
"end": 85792
}
|
class ____(BaseModel):
base64_bytes: Base64Bytes
# Initialize the model with base64 data
m = Model(base64_bytes=b'VGhpcyBpcyB0aGUgd2F5')
# Access decoded value
print(m.base64_bytes)
#> b'This is the way'
# Serialize into the base64 form
print(m.model_dump())
#> {'base64_bytes': b'VGhpcyBpcyB0aGUgd2F5'}
# Validate base64 data
try:
print(Model(base64_bytes=b'undecodable').base64_bytes)
except ValidationError as e:
print(e)
'''
1 validation error for Model
base64_bytes
Base64 decoding error: 'Incorrect padding' [type=base64_decode, input_value=b'undecodable', input_type=bytes]
'''
```
"""
Base64Str = Annotated[str, EncodedStr(encoder=Base64Encoder)]
"""A str type that is encoded and decoded using the standard (non-URL-safe) base64 encoder.
Note:
Under the hood, `Base64Str` uses the standard library `base64.b64encode` and `base64.b64decode` functions.
As a result, attempting to decode url-safe base64 data using the `Base64Str` type may fail or produce an incorrect
decoding.
Warning:
In versions of Pydantic prior to v2.10, `Base64Str` used [`base64.encodebytes`][base64.encodebytes]
and [`base64.decodebytes`][base64.decodebytes] functions. According to the [base64 documentation](https://docs.python.org/3/library/base64.html),
these methods are considered legacy implementation, and thus, Pydantic v2.10+ now uses the modern
[`base64.b64encode`][base64.b64encode] and [`base64.b64decode`][base64.b64decode] functions.
See the [`Base64Bytes`][pydantic.types.Base64Bytes] type for more information on how to
replicate the old behavior with the legacy encoders / decoders.
```python
from pydantic import Base64Str, BaseModel, ValidationError
|
Model
|
python
|
getsentry__sentry
|
src/sentry/rules/conditions/event_attribute.py
|
{
"start": 7956,
"end": 8465
}
|
class ____(AttributeHandler):
minimum_path_length = 1
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
path.pop(0)
value = event.data.get("extra", {})
while path:
bit = path.pop(0)
value = value.get(bit)
if not value:
return []
if isinstance(value, (list, tuple)):
return list(value)
return [value]
@attribute_registry.register("exception")
|
ExtraAttributeHandler
|
python
|
coleifer__peewee
|
playhouse/test_utils.py
|
{
"start": 1122,
"end": 1854
}
|
class ____(count_queries):
def __init__(self, expected, only_select=False):
super(assert_query_count, self).__init__(only_select=only_select)
self.expected = expected
def __call__(self, f):
@wraps(f)
def decorated(*args, **kwds):
with self:
ret = f(*args, **kwds)
self._assert_count()
return ret
return decorated
def _assert_count(self):
error_msg = '%s != %s' % (self.count, self.expected)
assert self.count == self.expected, error_msg
def __exit__(self, exc_type, exc_val, exc_tb):
super(assert_query_count, self).__exit__(exc_type, exc_val, exc_tb)
self._assert_count()
|
assert_query_count
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/django/toystore/models.py
|
{
"start": 2621,
"end": 2705
}
|
class ____(models.Model):
customish = CustomishField(default="b")
|
CustomishDefault
|