| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
python
|
ethereum__web3.py
|
web3/_utils/rpc_abi.py
|
{
"start": 409,
"end": 8557
}
|
class ____:
# admin
admin_addPeer = RPCEndpoint("admin_addPeer")
admin_datadir = RPCEndpoint("admin_datadir")
admin_nodeInfo = RPCEndpoint("admin_nodeInfo")
admin_peers = RPCEndpoint("admin_peers")
admin_startHTTP = RPCEndpoint("admin_startHTTP")
admin_startWS = RPCEndpoint("admin_startWS")
admin_stopHTTP = RPCEndpoint("admin_stopHTTP")
admin_stopWS = RPCEndpoint("admin_stopWS")
# deprecated
admin_startRPC = RPCEndpoint("admin_startRPC")
admin_stopRPC = RPCEndpoint("admin_stopRPC")
# eth
eth_accounts = RPCEndpoint("eth_accounts")
eth_blobBaseFee = RPCEndpoint("eth_blobBaseFee")
eth_blockNumber = RPCEndpoint("eth_blockNumber")
eth_call = RPCEndpoint("eth_call")
eth_simulateV1 = RPCEndpoint("eth_simulateV1")
eth_createAccessList = RPCEndpoint("eth_createAccessList")
eth_chainId = RPCEndpoint("eth_chainId")
eth_estimateGas = RPCEndpoint("eth_estimateGas")
eth_feeHistory = RPCEndpoint("eth_feeHistory")
eth_maxPriorityFeePerGas = RPCEndpoint("eth_maxPriorityFeePerGas")
eth_gasPrice = RPCEndpoint("eth_gasPrice")
eth_getBalance = RPCEndpoint("eth_getBalance")
eth_getBlockByHash = RPCEndpoint("eth_getBlockByHash")
eth_getBlockByNumber = RPCEndpoint("eth_getBlockByNumber")
eth_getBlockReceipts = RPCEndpoint("eth_getBlockReceipts")
eth_getBlockTransactionCountByHash = RPCEndpoint(
"eth_getBlockTransactionCountByHash"
)
eth_getBlockTransactionCountByNumber = RPCEndpoint(
"eth_getBlockTransactionCountByNumber"
)
eth_getCode = RPCEndpoint("eth_getCode")
eth_getFilterChanges = RPCEndpoint("eth_getFilterChanges")
eth_getFilterLogs = RPCEndpoint("eth_getFilterLogs")
eth_getLogs = RPCEndpoint("eth_getLogs")
eth_getProof = RPCEndpoint("eth_getProof")
eth_getRawTransactionByHash = RPCEndpoint("eth_getRawTransactionByHash")
eth_getStorageAt = RPCEndpoint("eth_getStorageAt")
eth_getTransactionByBlockHashAndIndex = RPCEndpoint(
"eth_getTransactionByBlockHashAndIndex"
)
eth_getTransactionByBlockNumberAndIndex = RPCEndpoint(
"eth_getTransactionByBlockNumberAndIndex"
)
eth_getRawTransactionByBlockHashAndIndex = RPCEndpoint(
"eth_getRawTransactionByBlockHashAndIndex"
)
eth_getRawTransactionByBlockNumberAndIndex = RPCEndpoint(
"eth_getRawTransactionByBlockNumberAndIndex"
)
eth_getTransactionByHash = RPCEndpoint("eth_getTransactionByHash")
eth_getTransactionCount = RPCEndpoint("eth_getTransactionCount")
eth_getTransactionReceipt = RPCEndpoint("eth_getTransactionReceipt")
eth_getUncleByBlockHashAndIndex = RPCEndpoint("eth_getUncleByBlockHashAndIndex")
eth_getUncleByBlockNumberAndIndex = RPCEndpoint("eth_getUncleByBlockNumberAndIndex")
eth_getUncleCountByBlockHash = RPCEndpoint("eth_getUncleCountByBlockHash")
eth_getUncleCountByBlockNumber = RPCEndpoint("eth_getUncleCountByBlockNumber")
eth_getWork = RPCEndpoint("eth_getWork")
eth_newBlockFilter = RPCEndpoint("eth_newBlockFilter")
eth_newFilter = RPCEndpoint("eth_newFilter")
eth_newPendingTransactionFilter = RPCEndpoint("eth_newPendingTransactionFilter")
eth_protocolVersion = RPCEndpoint("eth_protocolVersion")
eth_sendRawTransaction = RPCEndpoint("eth_sendRawTransaction")
eth_sendTransaction = RPCEndpoint("eth_sendTransaction")
eth_sign = RPCEndpoint("eth_sign")
eth_signTransaction = RPCEndpoint("eth_signTransaction")
eth_signTypedData = RPCEndpoint("eth_signTypedData")
eth_submitHashrate = RPCEndpoint("eth_submitHashrate")
eth_submitWork = RPCEndpoint("eth_submitWork")
eth_syncing = RPCEndpoint("eth_syncing")
eth_uninstallFilter = RPCEndpoint("eth_uninstallFilter")
eth_subscribe = RPCEndpoint("eth_subscribe")
eth_unsubscribe = RPCEndpoint("eth_unsubscribe")
# evm
evm_mine = RPCEndpoint("evm_mine")
evm_reset = RPCEndpoint("evm_reset")
evm_revert = RPCEndpoint("evm_revert")
evm_snapshot = RPCEndpoint("evm_snapshot")
# net
net_listening = RPCEndpoint("net_listening")
net_peerCount = RPCEndpoint("net_peerCount")
net_version = RPCEndpoint("net_version")
# testing
testing_timeTravel = RPCEndpoint("testing_timeTravel")
# trace
trace_block = RPCEndpoint("trace_block")
trace_call = RPCEndpoint("trace_call")
trace_filter = RPCEndpoint("trace_filter")
trace_rawTransaction = RPCEndpoint("trace_rawTransaction")
trace_replayBlockTransactions = RPCEndpoint("trace_replayBlockTransactions")
trace_replayTransaction = RPCEndpoint("trace_replayTransaction")
trace_transaction = RPCEndpoint("trace_transaction")
# txpool
txpool_content = RPCEndpoint("txpool_content")
txpool_inspect = RPCEndpoint("txpool_inspect")
txpool_status = RPCEndpoint("txpool_status")
# web3
web3_clientVersion = RPCEndpoint("web3_clientVersion")
# debug
debug_traceTransaction = RPCEndpoint("debug_traceTransaction")
TRANSACTION_PARAMS_ABIS = {
"data": "bytes",
"from": "address",
"gas": "uint",
"gasPrice": "uint",
"maxFeePerBlobGas": "uint",
"maxFeePerGas": "uint",
"maxPriorityFeePerGas": "uint",
"nonce": "uint",
"to": "address",
"value": "uint",
"chainId": "uint",
}
FILTER_PARAMS_ABIS = {
"to": "address",
"address": "address[]",
}
TRACE_FILTER_PARAM_ABIS = {
"fromBlock": "uint",
"toBlock": "uint",
"fromAddress": "address[]",
"toAddress": "address[]",
"after": "int",
"count": "int",
}
RPC_ABIS: dict[str, Sequence[Any] | dict[str, str]] = {
# eth
"eth_call": TRANSACTION_PARAMS_ABIS,
"eth_createAccessList": TRANSACTION_PARAMS_ABIS,
"eth_estimateGas": TRANSACTION_PARAMS_ABIS,
"eth_getBalance": ["address", None],
"eth_getBlockByHash": ["bytes32", "bool"],
"eth_getBlockTransactionCountByHash": ["bytes32"],
"eth_getCode": ["address", None],
"eth_getLogs": FILTER_PARAMS_ABIS,
"eth_getRawTransactionByHash": ["bytes32"],
"eth_getStorageAt": ["address", "uint", None],
"eth_getProof": ["address", "uint[]", None],
"eth_getTransactionByBlockHashAndIndex": ["bytes32", "uint"],
"eth_getTransactionByHash": ["bytes32"],
"eth_getTransactionCount": ["address", None],
"eth_getTransactionReceipt": ["bytes32"],
"eth_getRawTransactionByBlockHashAndIndex": ["bytes32", "uint"],
"eth_getUncleCountByBlockHash": ["bytes32"],
"eth_newFilter": FILTER_PARAMS_ABIS,
"eth_sendRawTransaction": ["bytes"],
"eth_sendTransaction": TRANSACTION_PARAMS_ABIS,
"eth_signTransaction": TRANSACTION_PARAMS_ABIS,
"eth_sign": ["address", "bytes"],
"eth_signTypedData": ["address", None],
"eth_submitHashrate": ["uint", "bytes32"],
"eth_submitWork": ["bytes8", "bytes32", "bytes32"],
"trace_call": TRANSACTION_PARAMS_ABIS,
"trace_filter": TRACE_FILTER_PARAM_ABIS,
}
@curry
def apply_abi_formatters_to_dict(
normalizers: Sequence[Callable[[TypeStr, Any], tuple[TypeStr, Any]]],
abi_dict: dict[str, Any],
data: dict[Any, Any],
) -> dict[Any, Any]:
fields = list(abi_dict.keys() & data.keys())
formatted_values = map_abi_data(
normalizers,
(abi_dict[field] for field in fields),
(data[field] for field in fields),
)
formatted_dict = data.copy()
formatted_dict.update(zip(fields, formatted_values))
return formatted_dict
@to_dict
def abi_request_formatters(
normalizers: Sequence[Callable[[TypeStr, Any], tuple[TypeStr, Any]]],
abis: dict[RPCEndpoint, Any],
) -> Iterable[tuple[RPCEndpoint, Callable[..., Any]]]:
for method, abi_types in abis.items():
if isinstance(abi_types, list):
yield method, map_abi_data(normalizers, abi_types)
elif isinstance(abi_types, dict):
single_dict_formatter = apply_abi_formatters_to_dict(normalizers, abi_types)
yield method, apply_formatter_at_index(single_dict_formatter, 0)
else:
raise Web3TypeError(
f"ABI definitions must be a list or dictionary, got {abi_types!r}"
)
|
RPC
|
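`abi_request_formatters` above picks a formatter per RPC method: positional formatting for list-style ABI definitions, key-wise formatting of the first parameter for dict-style ones. A minimal self-contained sketch of that selection logic (the `normalize` and `build_formatter` helpers are made-up stand-ins for illustration, not web3.py's API):

```python
from typing import Any, Callable

# Hypothetical, simplified normalizer: lower-cases anything declared as an address.
def normalize(abi_type, value: Any) -> Any:
    return value.lower() if abi_type == "address" and isinstance(value, str) else value

def build_formatter(abi_types) -> Callable[[list], Any]:
    # List -> positional formatting; dict -> format only the matching keys of the first
    # (transaction-dict) parameter, mirroring the list/dict branch in abi_request_formatters.
    if isinstance(abi_types, list):
        return lambda params: [normalize(t, v) if t else v for t, v in zip(abi_types, params)]
    if isinstance(abi_types, dict):
        def fmt(params):
            tx = dict(params[0])
            tx.update({k: normalize(abi_types[k], v) for k, v in tx.items() if k in abi_types})
            return [tx, *params[1:]]
        return fmt
    raise TypeError(f"ABI definitions must be a list or dictionary, got {abi_types!r}")

eth_get_balance = build_formatter(["address", None])
print(eth_get_balance(["0xABCDEF0000000000000000000000000000000000", "latest"]))

eth_call = build_formatter({"to": "address", "from": "address"})
print(eth_call([{"to": "0xABCDEF0000000000000000000000000000000000", "data": "0x"}, "latest"]))
```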
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/cloud_sql.py
|
{
"start": 32571,
"end": 58562
}
|
class ____(BaseHook):
"""
Serves DB connection configuration for Google Cloud SQL (Connections of *gcpcloudsqldb://* type).
The hook is a "meta" one. It does not perform an actual connection.
It is there to retrieve all the parameters configured in gcpcloudsql:// connection,
start/stop Cloud SQL Proxy if needed, dynamically generate Postgres or MySQL
connection in the database and return an actual Postgres or MySQL hook.
The returned Postgres/MySQL hooks are using direct connection or Cloud SQL
Proxy socket/TCP as configured.
Main parameters of the hook are retrieved from the standard URI components:
* **user** - User name to authenticate to the database (from login of the URI).
* **password** - Password to authenticate to the database (from password of the URI).
* **public_ip** - IP to connect to for public connection (from host of the URI).
* **public_port** - Port to connect to for public connection (from port of the URI).
* **database** - Database to connect to (from schema of the URI).
* **sql_proxy_binary_path** - Optional path to Cloud SQL Proxy binary. If the binary
is not specified or the binary is not present, it is automatically downloaded.
Remaining parameters are retrieved from the extras (URI query parameters):
* **project_id** - Optional, Google Cloud project where the Cloud SQL
instance exists. If missing, the default project id passed to the hook is used.
* **instance** - Name of the instance of the Cloud SQL database instance.
* **location** - The location of the Cloud SQL instance (for example europe-west1).
* **database_type** - The type of the database instance (MySQL or Postgres).
* **use_proxy** - (default False) Whether SQL proxy should be used to connect to Cloud
SQL DB.
* **use_ssl** - (default False) Whether SSL should be used to connect to Cloud SQL DB.
You cannot use proxy and SSL together.
* **use_iam** - (default False) Whether IAM should be used to connect to Cloud SQL DB.
When using IAM, the password field should be an empty string.
* **sql_proxy_use_tcp** - (default False) If set to true, TCP is used to connect via
proxy, otherwise UNIX sockets are used.
* **sql_proxy_version** - Specific version of the proxy to download (for example
v1.13). If not specified, the latest version is downloaded.
* **sslcert** - Path to client certificate to authenticate when SSL is used.
* **sslkey** - Path to client private key to authenticate when SSL is used.
* **sslrootcert** - Path to server's certificate to authenticate when SSL is used.
:param gcp_cloudsql_conn_id: URL of the connection
:param gcp_conn_id: The connection ID used to connect to Google Cloud for
cloud-sql-proxy authentication.
:param default_gcp_project_id: Default project id used if project_id not specified
in the connection URL
:param ssl_cert: Optional. Path to client certificate to authenticate when SSL is used. Overrides the
connection field ``sslcert``.
:param ssl_key: Optional. Path to client private key to authenticate when SSL is used. Overrides the
connection field ``sslkey``.
:param ssl_root_cert: Optional. Path to server's certificate to authenticate when SSL is used. Overrides
the connection field ``sslrootcert``.
:param ssl_secret_id: Optional. ID of the secret in Google Cloud Secret Manager that stores SSL
certificate in the format below:
{'sslcert': '',
'sslkey': '',
'sslrootcert': ''}
Overrides the connection fields ``sslcert``, ``sslkey``, ``sslrootcert``.
Note that according to the Secret Manager requirements, the mentioned dict should be saved as a
string, and encoded with base64.
Note that this parameter is incompatible with parameters ``ssl_cert``, ``ssl_key``, ``ssl_root_cert``.
"""
conn_name_attr = "gcp_cloudsql_conn_id"
default_conn_name = "google_cloud_sqldb_default"
conn_type = "gcpcloudsqldb"
hook_name = "Google Cloud SQL Database"
def __init__(
self,
gcp_cloudsql_conn_id: str = "google_cloud_sql_default",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
default_gcp_project_id: str | None = None,
sql_proxy_binary_path: str | None = None,
ssl_cert: str | None = None,
ssl_key: str | None = None,
ssl_root_cert: str | None = None,
ssl_secret_id: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.gcp_conn_id = gcp_conn_id
self.gcp_cloudsql_conn_id = gcp_cloudsql_conn_id
self.impersonation_chain = impersonation_chain
self.cloudsql_connection = self.get_connection(self.gcp_cloudsql_conn_id)
self.extras = self.cloudsql_connection.extra_dejson
self.project_id = self.extras.get("project_id", default_gcp_project_id)
self.instance = self.extras.get("instance")
self.database = self.cloudsql_connection.schema
self.location = self.extras.get("location")
self.database_type = self.extras.get("database_type")
self.use_proxy = self._get_bool(self.extras.get("use_proxy", "False"))
self.use_ssl = self._get_bool(self.extras.get("use_ssl", "False"))
self.use_iam = self._get_bool(self.extras.get("use_iam", "False"))
self.sql_proxy_use_tcp = self._get_bool(self.extras.get("sql_proxy_use_tcp", "False"))
self.sql_proxy_version = self.extras.get("sql_proxy_version")
self.sql_proxy_binary_path = sql_proxy_binary_path
if self.use_iam:
self.user = self._get_iam_db_login()
self.password = self._generate_login_token(service_account=self.cloudsql_connection.login)
else:
self.user = cast("str", self.cloudsql_connection.login)
self.password = cast("str", self.cloudsql_connection.password)
self.public_ip = self.cloudsql_connection.host
self.public_port = self.cloudsql_connection.port
self.ssl_cert = ssl_cert
self.ssl_key = ssl_key
self.ssl_root_cert = ssl_root_cert
self.ssl_secret_id = ssl_secret_id
self._ssl_cert_temp_files: dict[str, _TemporaryFileWrapper] = {}
# Port and socket path and db_hook are automatically generated
self.sql_proxy_tcp_port = None
self.sql_proxy_unique_path: str | None = None
self.db_hook: BaseHook | None = None
self.reserved_tcp_socket: socket.socket | None = None
# Generated based on clock + clock sequence. Unique per host (!).
# This is important as different hosts share the database
self.db_conn_id = str(uuid.uuid1())
self._validate_inputs()
@property
def sslcert(self) -> str | None:
return self._get_ssl_temporary_file_path(cert_name="sslcert", cert_path=self.ssl_cert)
@property
def sslkey(self) -> str | None:
return self._get_ssl_temporary_file_path(cert_name="sslkey", cert_path=self.ssl_key)
@property
def sslrootcert(self) -> str | None:
return self._get_ssl_temporary_file_path(cert_name="sslrootcert", cert_path=self.ssl_root_cert)
def _get_ssl_temporary_file_path(self, cert_name: str, cert_path: str | None) -> str | None:
cert_value = self._get_cert_from_secret(cert_name)
original_cert_path = cert_path or self.extras.get(cert_name)
if cert_value or original_cert_path:
if cert_name not in self._ssl_cert_temp_files:
return self._set_temporary_ssl_file(
cert_name=cert_name, cert_path=original_cert_path, cert_value=cert_value
)
return self._ssl_cert_temp_files[cert_name].name
return None
def _get_cert_from_secret(self, cert_name: str) -> str | None:
if not self.ssl_secret_id:
return None
secret_hook = GoogleCloudSecretManagerHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
secret: AccessSecretVersionResponse = secret_hook.access_secret(
project_id=self.project_id,
secret_id=self.ssl_secret_id,
)
secret_data = json.loads(base64.b64decode(secret.payload.data))
if cert_name in secret_data:
return secret_data[cert_name]
raise AirflowException(
"Invalid secret format. Expected dictionary with keys: `sslcert`, `sslkey`, `sslrootcert`"
)
def _set_temporary_ssl_file(
self, cert_name: str, cert_path: str | None = None, cert_value: str | None = None
) -> str | None:
"""
Save the certificate as a temporary file.
This method was implemented in order to overcome psql connection error caused by excessive file
permissions: "private key file "..." has group or world access; file must have permissions
u=rw (0600) or less if owned by the current user, or permissions u=rw,g=r (0640) or less if owned
by root". NamedTemporaryFile enforces using exactly one of create/read/write/append mode so the
created file obtains least required permissions "-rw-------" that satisfies the rules.
:param cert_name: Required. Name of the certificate (one of sslcert, sslkey, sslrootcert).
:param cert_path: Optional. Path to the certificate.
:param cert_value: Optional. The certificate content.
:returns: The path to the temporary certificate file.
"""
if all([cert_path, cert_value]):
raise AirflowException(
"Both parameters were specified: `cert_path`, `cert_value`. Please use only one of them."
)
if not any([cert_path, cert_value]):
self.log.info("Neither cert path and cert value provided. Nothing to save.")
return None
certs_folder = "/tmp/certs/"
Path(certs_folder).mkdir(parents=True, exist_ok=True)
_temp_file = NamedTemporaryFile(mode="w+b", prefix=certs_folder)
if cert_path:
with open(cert_path, "rb") as cert_file:
_temp_file.write(cert_file.read())
elif cert_value:
_temp_file.write(cert_value.encode("ascii"))
_temp_file.flush()
self._ssl_cert_temp_files[cert_name] = _temp_file
self.log.info("Copied the certificate '%s' into a temporary file '%s'", cert_name, _temp_file.name)
return _temp_file.name
@staticmethod
def _get_bool(val: Any) -> bool:
if val == "False" or val is False:
return False
return True
@staticmethod
def _check_ssl_file(file_to_check, name) -> None:
if not file_to_check:
raise AirflowException(f"SSL connections requires {name} to be set")
if not os.path.isfile(file_to_check):
raise AirflowException(f"The {file_to_check} must be a readable file")
def _validate_inputs(self) -> None:
if self.project_id == "":
raise AirflowException("The required extra 'project_id' is empty")
if not self.location:
raise AirflowException("The required extra 'location' is empty or None")
if not self.instance:
raise AirflowException("The required extra 'instance' is empty or None")
if self.database_type not in CLOUD_SQL_VALID_DATABASE_TYPES:
raise AirflowException(
f"Invalid database type '{self.database_type}'. "
f"Must be one of {CLOUD_SQL_VALID_DATABASE_TYPES}"
)
if self.use_proxy and self.use_ssl:
raise AirflowException(
"Cloud SQL Proxy does not support SSL connections."
" SSL is not needed as Cloud SQL Proxy "
"provides encryption on its own"
)
if any([self.ssl_key, self.ssl_cert, self.ssl_root_cert]) and self.ssl_secret_id:
raise AirflowException(
"Invalid SSL settings. Please use either all of parameters ['ssl_cert', 'ssl_cert', "
"'ssl_root_cert'] or a single parameter 'ssl_secret_id'."
)
if any([self.ssl_key, self.ssl_cert, self.ssl_root_cert]):
field_names = ["ssl_key", "ssl_cert", "ssl_root_cert"]
if missed_values := [field for field in field_names if not getattr(self, field)]:
s = "s are" if len(missed_values) > 1 else "is"
missed_values_str = ", ".join(f for f in missed_values)
raise AirflowException(f"Invalid SSL settings. Parameter{s} missing: {missed_values_str}")
def validate_ssl_certs(self) -> None:
"""
SSL certificates validator.
:return: None
"""
if self.use_ssl:
self._check_ssl_file(self.sslcert, "sslcert")
self._check_ssl_file(self.sslkey, "sslkey")
self._check_ssl_file(self.sslrootcert, "sslrootcert")
def validate_socket_path_length(self) -> None:
"""
Validate sockets path length.
:return: None or raises AirflowException
"""
if self.use_proxy and not self.sql_proxy_use_tcp:
if self.database_type == "postgres":
suffix = "/.s.PGSQL.5432"
else:
suffix = ""
expected_path = (
f"{self._generate_unique_path()}/{self.project_id}:{self.instance}:{self.database}{suffix}"
)
if len(expected_path) > UNIX_PATH_MAX:
self.log.info("Too long (%s) path: %s", len(expected_path), expected_path)
raise AirflowException(
f"The UNIX socket path length cannot exceed {UNIX_PATH_MAX} characters on Linux system. "
"Either use shorter instance/database name or switch to TCP connection. "
f"The socket path for Cloud SQL proxy is now:{expected_path}"
)
@staticmethod
def _generate_unique_path() -> str:
"""
Generate a unique path.
We don't use mkdtemp here since it can generate paths close to 60
characters. We append project/location/instance to the path, and Postgres
then appends its own prefix, making the resulting path exceed the
100 character length limit of a socket path. This generates a
shorter path: ``${tempdir()}[8 random characters]``.
"""
random.seed()
while True:
candidate = os.path.join(
gettempdir(), "".join(random.choices(string.ascii_lowercase + string.digits, k=8))
)
if not os.path.exists(candidate):
return candidate
@staticmethod
def _quote(value) -> str | None:
return quote_plus(value) if value else None
def _reserve_port(self):
if self.use_proxy:
if self.sql_proxy_use_tcp:
if not self.sql_proxy_tcp_port:
self.reserve_free_tcp_port()
if not self.sql_proxy_unique_path:
self.sql_proxy_unique_path = self._generate_unique_path()
def _generate_connection_uri(self) -> str:
self._reserve_port()
if not self.database_type:
raise ValueError("The database_type should be set")
if not self.user:
raise AirflowException("The login parameter needs to be set in connection")
if not self.public_ip:
raise AirflowException("The location parameter needs to be set in connection")
if not self.password:
raise AirflowException("The password parameter needs to be set in connection")
if not self.database:
raise AirflowException("The database parameter needs to be set in connection")
database_uris = CONNECTION_URIS[self.database_type]
ssl_spec = None
socket_path = None
if self.use_proxy:
proxy_uris = database_uris["proxy"]
if self.sql_proxy_use_tcp:
format_string = proxy_uris["tcp"]
else:
format_string = proxy_uris["socket"]
socket_path = f"{self.sql_proxy_unique_path}/{self._get_instance_socket_name()}"
else:
public_uris = database_uris["public"]
if self.use_ssl:
format_string = public_uris["ssl"]
ssl_spec = {"cert": self.sslcert, "key": self.sslkey, "ca": self.sslrootcert}
else:
format_string = public_uris["non-ssl"]
connection_uri = format_string.format(
user=quote_plus(self.user) if self.user else "",
password=quote_plus(self.password) if self.password else "",
database=quote_plus(self.database) if self.database else "",
public_ip=self.public_ip,
public_port=self.public_port,
proxy_port=self.sql_proxy_tcp_port,
socket_path=self._quote(socket_path),
ssl_spec=self._quote(json.dumps(ssl_spec)) if ssl_spec else "",
client_cert_file=self._quote(self.sslcert) if self.sslcert else "",
client_key_file=self._quote(self.sslkey) if self.sslcert else "",
server_ca_file=self._quote(self.sslrootcert if self.sslcert else ""),
)
self.log.info(
"DB connection URI %s",
connection_uri.replace(
quote_plus(self.password) if self.password else "PASSWORD", "XXXXXXXXXXXX"
),
)
return connection_uri
def _get_instance_socket_name(self) -> str:
if self.project_id is None:
raise ValueError("The project_id should not be none")
return self.project_id + ":" + self.location + ":" + self.instance
def _get_sqlproxy_instance_specification(self) -> str:
instance_specification = self._get_instance_socket_name()
if self.sql_proxy_use_tcp:
instance_specification += f"=tcp:{self.sql_proxy_tcp_port}"
return instance_specification
def _generate_connection_parameters(self) -> dict:
self._reserve_port()
if not self.database_type:
raise ValueError("The database_type should be set")
if not self.user:
raise AirflowException("The login parameter needs to be set in connection")
if not self.public_ip:
raise AirflowException("The location parameter needs to be set in connection")
if not self.password:
raise AirflowException("The password parameter needs to be set in connection")
if not self.database:
raise AirflowException("The database parameter needs to be set in connection")
connection_parameters = {}
connection_parameters["conn_type"] = self.database_type
connection_parameters["login"] = self.user
connection_parameters["password"] = self.password
connection_parameters["schema"] = self.database
connection_parameters["extra"] = {}
database_uris = CONNECTION_URIS[self.database_type]
if self.use_proxy:
proxy_uris = database_uris["proxy"]
if self.sql_proxy_use_tcp:
connection_parameters["host"] = "127.0.0.1"
connection_parameters["port"] = self.sql_proxy_tcp_port
else:
socket_path = f"{self.sql_proxy_unique_path}/{self._get_instance_socket_name()}"
if "localhost" in proxy_uris["socket"]:
connection_parameters["host"] = "localhost"
connection_parameters["extra"].update({"unix_socket": socket_path})
else:
connection_parameters["host"] = socket_path
else:
public_uris = database_uris["public"]
if self.use_ssl:
connection_parameters["host"] = self.public_ip
connection_parameters["port"] = self.public_port
if "ssl_spec" in public_uris["ssl"]:
connection_parameters["extra"].update(
{
"ssl": json.dumps(
{"cert": self.sslcert, "key": self.sslkey, "ca": self.sslrootcert}
)
}
)
else:
connection_parameters["extra"].update(
{
"sslmode": "verify-ca",
"sslcert": self.sslcert,
"sslkey": self.sslkey,
"sslrootcert": self.sslrootcert,
}
)
else:
connection_parameters["host"] = self.public_ip
connection_parameters["port"] = self.public_port
if connection_parameters.get("extra"):
connection_parameters["extra"] = json.dumps(connection_parameters["extra"])
return connection_parameters
def create_connection(self) -> Connection:
"""
Create a connection.
Connection ID will be randomly generated according to whether it uses
proxy, TCP, UNIX sockets, SSL.
"""
if AIRFLOW_V_3_1_PLUS:
kwargs = self._generate_connection_parameters()
else:
kwargs = {"uri": self._generate_connection_uri()}
connection = Connection(conn_id=self.db_conn_id, **kwargs)
self.log.info("Creating connection %s", self.db_conn_id)
return connection
def get_sqlproxy_runner(self) -> CloudSqlProxyRunner:
"""
Retrieve Cloud SQL Proxy runner.
It is used to manage the proxy lifecycle per task.
:return: The Cloud SQL Proxy runner.
"""
if not self.use_proxy:
raise ValueError("Proxy runner can only be retrieved in case of use_proxy = True")
if not self.sql_proxy_unique_path:
raise ValueError("The sql_proxy_unique_path should be set")
if self.project_id is None:
raise ValueError("The project_id should not be None")
return CloudSqlProxyRunner(
path_prefix=self.sql_proxy_unique_path,
instance_specification=self._get_sqlproxy_instance_specification(),
project_id=self.project_id,
sql_proxy_version=self.sql_proxy_version,
sql_proxy_binary_path=self.sql_proxy_binary_path,
gcp_conn_id=self.gcp_conn_id,
)
def get_database_hook(self, connection: Connection) -> DbApiHook:
"""
Retrieve database hook.
This is the actual Postgres or MySQL database hook that uses proxy or
connects directly to the Google Cloud SQL database.
"""
if self.database_type == "postgres":
from airflow.providers.postgres.hooks.postgres import PostgresHook
db_hook: DbApiHook = PostgresHook(connection=connection, database=self.database)
else:
from airflow.providers.mysql.hooks.mysql import MySqlHook
db_hook = MySqlHook(connection=connection, schema=self.database)
self.db_hook = db_hook
return db_hook
def cleanup_database_hook(self) -> None:
"""Clean up database hook after it was used."""
from airflow.providers.postgres.hooks.postgres import PostgresHook
if self.database_type == "postgres":
if not self.db_hook:
raise ValueError("The db_hook should be set")
if not isinstance(self.db_hook, PostgresHook):
raise ValueError(f"The db_hook should be PostgresHook and is {type(self.db_hook)}")
conn = getattr(self.db_hook, "conn", None)
if conn and hasattr(conn, "notices") and conn.notices:
for output in conn.notices:
self.log.info(output)
def reserve_free_tcp_port(self) -> None:
"""Reserve free TCP port to be used by Cloud SQL Proxy."""
self.reserved_tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.reserved_tcp_socket.bind(("127.0.0.1", 0))
self.sql_proxy_tcp_port = self.reserved_tcp_socket.getsockname()[1]
def free_reserved_port(self) -> None:
"""
Free TCP port.
Makes it immediately ready to be used by Cloud SQL Proxy.
"""
if self.reserved_tcp_socket:
self.reserved_tcp_socket.close()
self.reserved_tcp_socket = None
def _get_iam_db_login(self) -> str:
"""Get an IAM login for Cloud SQL database."""
if not self.cloudsql_connection.login:
raise AirflowException("The login parameter needs to be set in connection")
if self.database_type == "postgres":
return self.cloudsql_connection.login.split(".gserviceaccount.com")[0]
return self.cloudsql_connection.login.split("@")[0]
def _generate_login_token(self, service_account) -> str:
"""Generate an IAM login token for Cloud SQL and return the token."""
cmd = ["gcloud", "sql", "generate-login-token", f"--impersonate-service-account={service_account}"]
self.log.info("Executing command: %s", " ".join(shlex.quote(c) for c in cmd))
cloud_sql_hook = CloudSQLHook(api_version="v1", gcp_conn_id=self.gcp_conn_id)
with cloud_sql_hook.provide_authorized_gcloud():
proc = subprocess.run(cmd, check=False, capture_output=True)
if proc.returncode != 0:
stderr_last_20_lines = "\n".join(proc.stderr.decode().strip().splitlines()[-20:])
raise AirflowException(
f"Process exited with non-zero exit code. Exit code: {proc.returncode}. Error Details: "
f"{stderr_last_20_lines}"
)
auth_token = proc.stdout.decode().strip()
return auth_token
|
CloudSQLDatabaseHook
|
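A hedged sketch of how a `gcpcloudsql://` connection carrying the extras documented in the docstring might be wired up and consumed; the connection id, project, instance and database names are invented, and running it assumes Airflow with the Google (and MySQL) providers installed:

```python
# Illustrative only: connection id, project, instance and database names are made up.
# The URI components and extras mirror the docstring above (user/password/host/port/schema
# plus database_type, project_id, location, instance, use_proxy, use_ssl query parameters).
import os

os.environ["AIRFLOW_CONN_MY_CLOUDSQL_DB"] = (
    "gcpcloudsql://dbuser:dbpassword@203.0.113.10:3306/mydb"
    "?database_type=mysql"
    "&project_id=example-project"
    "&location=europe-west1"
    "&instance=example-instance"
    "&use_proxy=False"
    "&use_ssl=False"
)

from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook

hook = CloudSQLDatabaseHook(gcp_cloudsql_conn_id="my_cloudsql_db")
connection = hook.create_connection()          # dynamically generated Postgres/MySQL connection
db_hook = hook.get_database_hook(connection)   # the actual MySqlHook/PostgresHook doing the work
```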
python
|
huggingface__transformers
|
src/transformers/models/vilt/configuration_vilt.py
|
{
"start": 781,
"end": 6857
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ViltModel`]. It is used to instantiate a ViLT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ViLT
[dandelin/vilt-b32-mlm](https://huggingface.co/dandelin/vilt-b32-mlm) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the text part of the model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`ViltModel`].
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ViltModel`]. This is used when encoding
text.
modality_type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the modalities passed when calling [`ViltModel`]. This is used after concatenating the
embeddings of the text and image modalities.
max_position_embeddings (`int`, *optional*, defaults to 40):
The maximum sequence length that this model might ever be used with.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 384):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
max_image_length (`int`, *optional*, defaults to -1):
The maximum number of patches to take as input for the Transformer encoder. If set to a positive integer,
the encoder will sample `max_image_length` patches at maximum. If set to -1, will not be taken into
account.
num_images (`int`, *optional*, defaults to -1):
The number of images to use for natural language visual reasoning. If set to a positive integer, will be
used by [`ViltForImagesAndTextClassification`] for defining the classifier head.
Example:
```python
>>> from transformers import ViltModel, ViltConfig
>>> # Initializing a ViLT dandelin/vilt-b32-mlm style configuration
>>> configuration = ViltConfig()
>>> # Initializing a model from the dandelin/vilt-b32-mlm style configuration
>>> model = ViltModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vilt"
def __init__(
self,
vocab_size=30522,
type_vocab_size=2,
modality_type_vocab_size=2,
max_position_embeddings=40,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
image_size=384,
patch_size=32,
num_channels=3,
qkv_bias=True,
max_image_length=-1,
tie_word_embeddings=False,
num_images=-1,
**kwargs,
):
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
self.vocab_size = vocab_size
self.type_vocab_size = type_vocab_size
self.modality_type_vocab_size = modality_type_vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.max_image_length = max_image_length
self.num_images = num_images
self.tie_encoder_decoder = True
__all__ = ["ViltConfig"]
|
ViltConfig
|
python
|
pytest-dev__pytest-cov
|
src/pytest_cov/plugin.py
|
{
"start": 16205,
"end": 17374
}
|
class ____:
cov_controller: 'CovController'
def __init__(self, cov_controller):
self.cov_controller = cov_controller
def pytest_runtest_setup(self, item):
self.switch_context(item, 'setup')
def pytest_runtest_teardown(self, item):
self.switch_context(item, 'teardown')
def pytest_runtest_call(self, item):
self.switch_context(item, 'run')
def switch_context(self, item, when):
if self.cov_controller.started:
self.cov_controller.cov.switch_context(f'{item.nodeid}|{when}')
@pytest.fixture
def no_cover():
"""A pytest fixture to disable coverage."""
@pytest.fixture
def cov(request):
"""A pytest fixture to provide access to the underlying coverage object."""
# Check with hasplugin to avoid getplugin exception in older pytest.
if request.config.pluginmanager.hasplugin('_cov'):
plugin = request.config.pluginmanager.getplugin('_cov')
if plugin.cov_controller:
return plugin.cov_controller.cov
return None
def pytest_configure(config):
config.addinivalue_line('markers', 'no_cover: disable coverage for this test.')
|
TestContextPlugin
|
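The `no_cover` marker and the `cov` fixture defined above are the user-facing entry points of this plugin. A short usage sketch, assuming pytest-cov is installed and the session runs with `--cov`:

```python
import pytest


@pytest.mark.no_cover
def test_skipped_by_coverage():
    # Excluded from coverage measurement via the marker registered in pytest_configure.
    assert 1 + 1 == 2


def test_can_inspect_coverage_object(cov):
    # `cov` is the underlying coverage.Coverage object, or None when coverage is not active.
    if cov is not None:
        assert hasattr(cov, "switch_context")
```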
python
|
pytorch__pytorch
|
torch/fx/experimental/sym_node.py
|
{
"start": 1573,
"end": 21341
}
|
class ____:
"""
This is a type erased SymInt/SymFloat which we use to do actual operations.
End users don't touch this. Magic methods are NOT defined on this object.
"""
# Note [optimized_summation]: indicates that SymNode is an Add expression of the form
# a + b + c + d... etc where all terms are unique symbols. This allows us to do some optimizations
# for common patterns see _optimized_add.
# The unfortunate reason we have this here is because sympy sets __slots__ = () for add expression,
# so we cannot add the attribute directly to the sympy expression. Furthermore, we cannot use it as
# a weak dictionary key either! So instead, we attach the attribute here to the SymNode.
_optimized_summation: bool = False
def __init__(
self,
expr,
shape_env,
pytype,
hint: Optional[Union[int, float, bool]],
constant=None,
fx_node=None,
optimized_summation=False,
):
self._expr = expr
self.shape_env = shape_env
self.pytype = pytype
self._optimized_summation = optimized_summation
# What's the difference between hint and constant?
#
# - A constant is known to be invariant across invocations of the model;
# it will always be this value. We only really know this when we
# encounter an honest-to-goodness literal (when wrapping it into
# a SymNode, we set constant.) Most of the time, constant is None
#
# - A hint is a *particular* value from the particular run we are
# tracing, but it may vary the next time around. It's useful to
# keep this around, as if we need a concrete value from a SymNode,
# we will return the hint and guard on the expression that produced
# it giving the same hint next time around. The hint is not
# guaranteed to be set either: if you have an unbacked SymNode,
# there won't be any hint; it was the result of some tensor-dependent
# computation, but we don't know what it actually is because we
# haven't actually run the tensor computation.
#
# If _hint is None, we will query maybe_evaluate_static(compute_hint=True)
# in hopes that we've learned enough about the unbacked symints to
# discharge the hint; otherwise, you're likely to just error out.
#
# (A previous version of this system had some optimizations to only
# recompute when it was possible we had learned enough about the
# unbacked symint that a hint was now possible, but as we added more
# potential refinements to unbacked symints this got harder to keep
# in sync, so we've deleted it for now.)
def compute_hint():
from torch.fx.experimental.symbolic_shapes import has_free_unbacked_symbols
# This occasionally gets exercised by, e.g.,
# convert_shape_to_symint. It's just a nicety so you don't HAVE
# to have a correct hint on hand when making a SymNode.
# Don't attempt to compute for unbacked, this can be quite
# expensive.
if has_free_unbacked_symbols(self.expr):
return None
hint = self.shape_env._maybe_evaluate_static(self.expr, compute_hint=True)
if hint is not None:
hint = self.pytype(hint) if not isinstance(hint, SymTypes) else hint
return hint
if hint is not None:
assert type(hint) is pytype or type(hint) is _to_symtype(pytype), (
"Cannot create SymNode of type "
f"{pytype} with incompatible hint of type {type(hint)}"
)
if self.shape_env and self.shape_env._translation_validation_enabled:
# This is technically not TV, but this assert is expensive so
# let's only do it when we're already doing expensive things
computed_hint = compute_hint()
assert hint == computed_hint, (
f"{hint} != {computed_hint} (for {self.expr})"
)
else:
hint = compute_hint()
self._hint = hint
self.constant: Optional[Union[int, float, bool]] = constant
# Record the FX node of the current node if we are doing translation
# validation. They will be used for building the input assertions for
# the translation validation problem.
tx_validation_en = (
self.shape_env and self.shape_env._translation_validation_enabled
)
self.fx_node = tx_validation_en and fx_node
def with_shape_env(self, shape_env: ShapeEnv) -> SymNode:
return SymNode(
self._expr, shape_env, self.pytype, self._hint, self.constant, self.fx_node
)
def _value_eq(self, other: SymNode) -> bool:
# Purposely don't include the shape_env in the eq.
return (
self._expr == other._expr
and self.pytype == other.pytype
and self._hint == other._hint
and self.constant == other.constant
and self.fx_node == other.fx_node
)
def _value_hash(self) -> int:
# Purposely don't include the shape_env in the hash.
return hash((self._expr, self.pytype, self._hint, self.constant, self.fx_node))
@property
def expr(self):
return self.shape_env.replace(self._expr)
@property
def hint(self):
return self._hint
def has_hint(self):
return self._hint is not None
def require_hint(self, fallback=None):
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
if self._hint is None:
if fallback is not None:
# Say we have some expr like 2*u0 + s0
# The hint will be None, since the expr contains at least 1 unbacked.
# We will:
# - replace every backed free symbol with its corresponding hint
# - replace every unbacked free symbol with the fallback
# - regenerate the expression with those symbol replacements
# Note: this is not really complete either, since right now
# this logic does not take into account any value ranges
# for the unbacked symints, we may need to beef it up at some point.
unbacked_symbols = free_unbacked_symbols(self.expr)
replacements = {
s: 4096 if s in unbacked_symbols else self.shape_env.var_to_val[s]
for s in self.expr.free_symbols
}
return self.expr.xreplace(replacements)
# NB: we expect this to raise
return self.shape_env.size_hint(self.expr)
return self._hint
def maybe_as_int(self):
if self.expr.is_number:
return int(self.expr)
else:
return None
# NB: This does conversions, not sure if this is good or not
def maybe_as_float(self):
import sympy
if isinstance(self.expr, sympy.Float):
return float(self.expr)
else:
return None
def maybe_as_bool(self):
import sympy
if self.expr is sympy.true:
return True
elif self.expr is sympy.false:
return False
else:
return None
def is_int(self):
return self.pytype is int
def is_float(self):
return self.pytype is float
def is_bool(self):
return self.pytype is bool
def is_nested_int(self):
# Unbacked SymInts cannot be nested int today
return (
self._hint is not None
and isinstance(self._hint, SymInt)
and self._hint.node.is_nested_int()
)
def wrap_int(self, num):
assert type(num) is int
import sympy
return SymNode(
sympy.Integer(num), self.shape_env, int, num, constant=num, fx_node=num
)
def wrap_float(self, num):
assert type(num) is float
import sympy
return SymNode(
sympy.Float(num), self.shape_env, float, num, constant=num, fx_node=num
)
def wrap_bool(self, num):
assert type(num) is bool
import sympy
return SymNode(
sympy.true if num else sympy.false,
self.shape_env,
bool,
num,
constant=num,
fx_node=num,
)
def clone(self):
return self
def str(self):
return f"{self.expr}"
def __str__(self):
return self.str()
def __repr__(self):
rep = [
f"SymNode({self._expr}, shape_env={self.shape_env}, pytype={self.pytype}",
]
if self._hint is not None:
rep.append(f"hint={self._hint}")
if self.constant is not None:
rep.append(f"constant={self.constant}")
if self.fx_node is not None:
rep.append(f"fx_node={self.fx_node}")
return ", ".join(rep) + ")"
def _graph_repr(self) -> builtins.str:
# Representation used by GraphModule to create a pythonic version of a graph
return self.str()
# These methods call the metaprogrammed methods, they're hand written
# here so we get good stack traces
def abs(self) -> SymNode:
return self._abs() # type: ignore[attr-defined]
def pos(self) -> SymNode:
return self._pos() # type: ignore[attr-defined]
def round(self, ndigits=None) -> SymNode:
return self._round(ndigits) # type: ignore[attr-defined]
def trunc(self) -> SymNode:
return self._trunc() # type: ignore[attr-defined]
def add(self, other) -> SymNode:
return self._add(other) # type: ignore[attr-defined]
def sub(self, other) -> SymNode:
return self._sub(other) # type: ignore[attr-defined]
def mul(self, other) -> SymNode:
return self._mul(other) # type: ignore[attr-defined]
def mod(self, other) -> SymNode:
return self._mod(other) # type: ignore[attr-defined]
def float_pow(self, other) -> SymNode:
return self._float_pow(other) # type: ignore[attr-defined]
def pow_by_natural(self, other) -> SymNode:
return self._pow_by_natural(other) # type: ignore[attr-defined]
def and_(self, other) -> SymNode:
return self._and_(other) # type: ignore[attr-defined]
def or_(self, other) -> SymNode:
return self._or_(other) # type: ignore[attr-defined]
def float_truediv(self, other) -> SymNode:
return self._float_truediv(other) # type: ignore[attr-defined]
def int_truediv(self, other) -> SymNode:
return self._int_truediv(other) # type: ignore[attr-defined]
def int_floordiv(self, other) -> SymNode:
return self._int_floordiv(other) # type: ignore[attr-defined]
def lshift(self, other) -> SymNode:
return self._lshift(other) # type: ignore[attr-defined]
def rshift(self, other) -> SymNode:
return self._rshift(other) # type: ignore[attr-defined]
def sym_not(self) -> SymNode: # noqa: F811
return self._sym_not() # type: ignore[attr-defined]
def eq(self, other) -> SymNode:
return self._eq(other) # type: ignore[attr-defined]
def ne(self, other) -> SymNode:
return self._ne(other) # type: ignore[attr-defined]
def gt(self, other) -> SymNode:
return self._gt(other) # type: ignore[attr-defined]
def lt(self, other) -> SymNode:
return self._lt(other) # type: ignore[attr-defined]
def le(self, other) -> SymNode:
return self._le(other) # type: ignore[attr-defined]
def ge(self, other) -> SymNode:
return self._ge(other) # type: ignore[attr-defined]
def floor(self) -> SymNode:
return self._floor() # type: ignore[attr-defined]
def is_integer(self) -> SymNode:
return self._is_integer() # type: ignore[attr-defined]
def sym_float(self) -> SymNode: # noqa: F811
return self._sym_float() # type: ignore[attr-defined]
def sym_int(self) -> SymNode:
return self._sym_int() # type: ignore[attr-defined]
def ceil(self) -> SymNode:
return self._ceil() # type: ignore[attr-defined]
def neg(self) -> SymNode:
return self._neg() # type: ignore[attr-defined]
def sym_min(self, other) -> SymNode: # noqa: F811
return self._sym_min(other) # type: ignore[attr-defined]
def sym_max(self, other) -> SymNode: # noqa: F811
return self._sym_max(other) # type: ignore[attr-defined]
def sym_ite(self, then_val, else_val) -> SymNode:
return self._sym_ite(then_val, else_val) # type: ignore[attr-defined]
def is_contiguous(self, sizes, strides) -> SymNode:
return self._is_contiguous(sizes, strides) # type: ignore[attr-defined]
def is_channels_last_contiguous_2d(self, sizes, strides) -> SymNode:
return self._is_channels_last_contiguous_2d(sizes, strides) # type: ignore[attr-defined]
def is_channels_last_contiguous_3d(self, sizes, strides) -> SymNode:
return self._is_channels_last_contiguous_3d(sizes, strides) # type: ignore[attr-defined]
def is_channels_last_strides_2d(self, sizes, strides) -> SymNode:
return self._is_channels_last_strides_2d(sizes, strides) # type: ignore[attr-defined]
def is_channels_last_strides_3d(self, sizes, strides) -> SymNode:
return self._is_channels_last_strides_3d(sizes, strides) # type: ignore[attr-defined]
def is_non_overlapping_and_dense_indicator(self, sizes, strides) -> SymNode:
return self._is_non_overlapping_and_dense_indicator(sizes, strides) # type: ignore[attr-defined]
# Make C++ happy
def sym_or(self, other):
return self.or_(other)
def sym_and(self, other):
return self.and_(other)
# Integer bitwise ops
def bitwise_and(self, other):
return self._bitwise_and(other) # type: ignore[attr-defined]
def bitwise_or(self, other):
return self._bitwise_or(other) # type: ignore[attr-defined]
def bitwise_xor(self, other):
return self._bitwise_xor(other) # type: ignore[attr-defined]
# There is no int_truediv available from C++
def truediv(self, other):
return self.float_truediv(other)
def floordiv(self, other) -> SymNode:
return self.int_floordiv(other)
# We didn't bind integer pow in C++
def pow(self, other):
return self.float_pow(other)
def is_non_overlapping_and_dense(self, sizes, strides):
return self.is_non_overlapping_and_dense_indicator(sizes, strides).eq(
to_node(self, 1)
) # type: ignore[attr-defined]
def int_(self):
return self.guard_int("", 0) # NB: uses Python backtrace
# This one is currently done by hand, but if we add other variadic
# functions consider factoring it out to be metaprogrammed too. Note that
# some load bearing logic is directly in torch.sym_sum
def sym_sum(self, args) -> SymNode:
import sympy
# Inner impl
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
if get_proxy_mode():
return to_node(
self,
handle_sym_dispatch(
torch.sym_sum,
(tuple(wrap_node(a) for a in args),),
{},
),
)
exprs = [a.expr for a in args]
out = sympy.Add(*exprs)
size_hints = []
out_hint = None
for a in args:
if a.hint is None:
break
size_hints.append(a.hint)
else:
out_hint = sum(size_hints)
fx_node, _ = self.shape_env._create_fx_call_function(
torch.sym_sum, (tuple(a.fx_node for a in args),)
)
# NB: Only for integers!
return SymNode(out, self.shape_env, int, out_hint, fx_node=fx_node)
def evaluate(self, size_oblivious=False):
return self.shape_env.evaluate_sym_node(self, size_oblivious)
# You can manually trigger a guard with this function
def guard_int(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.evaluate()
try:
return int(r)
except Exception:
log.warning("Failed to convert to int: %s", r)
raise
def guard_float(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.evaluate()
try:
return float(r)
except Exception:
log.warning("Failed to convert to float: %s", r)
raise
def guard_bool(self, file, line):
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.evaluate()
try:
return bool(r)
except Exception:
log.warning("Failed to convert to bool: %s", r)
raise
def expect_true(self, file, line):
from torch.fx.experimental.symbolic_shapes import free_unbacked_symbols
if (
self.has_hint()
and not free_unbacked_symbols(self.expr)
and not self.shape_env.prefer_deferred_runtime_asserts_over_guards
):
# OK to generate guards
return self.guard_bool(file, line)
# Generate a deferred runtime assert (this might actually end up doing
# a regular guard if we can!)
# TODO: file/line here is very important, because the assert has been
# deferred so you can't backtrace easily
return self.shape_env.guard_or_defer_runtime_assert(
self.expr, f"{file}:{line}", fx_node=self.fx_node
)
def statically_known_true(self, file, line):
from torch.fx.experimental.symbolic_shapes import statically_known_true
assert self.is_bool()
return statically_known_true(SymBool(self))
def guard_size_oblivious(self, file, line):
"""
Like guard_bool, but if we encounter unbacked symbols, if those symbols
are size-like, we will treat them as >= 2 for the purposes of the analysis.
This CHANGES the runtime semantics, but all size-oblivious sites have been
audited to ensure that the runtime semantics don't change in a material way.
Acceptable runtime semantic changes are, e.g., squeeze() no longer dropping
an unbacked one size, or a tensor reporting as non-contiguous even if it's
contiguous if it would have been reported contiguous due to being empty.
"""
# TODO: use the file/line for some useful diagnostic on why a
# guard occurred
r = self.evaluate(size_oblivious=True)
try:
return bool(r)
except Exception:
log.warning("Failed to convert to bool: %s", r)
raise
def guard_or_false(self, file, line):
from torch.fx.experimental.symbolic_shapes import guard_or_false
assert self.is_bool()
return guard_or_false(SymBool(self))
def guard_or_true(self, file, line):
from torch.fx.experimental.symbolic_shapes import guard_or_true
assert self.is_bool()
return guard_or_true(SymBool(self))
def bool_(self):
return self.guard_bool("", 0)
def is_symbolic(self):
return True
def nested_int(self):
return None
def is_constant(self):
return False
|
SymNode
|
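The hint/constant notes above concern values seen during dynamic-shape tracing. A hedged sketch of where such symbolic sizes surface from the public API (assumes a recent PyTorch with `torch.compile`; exact recompilation behaviour depends on the dynamo configuration):

```python
import torch


@torch.compile(dynamic=True)
def scale_by_length(x: torch.Tensor) -> torch.Tensor:
    n = x.shape[0]      # during tracing this is a SymInt; its .node is the type-erased SymNode
    return x * n        # arithmetic on it routes through SymNode methods such as mul()


print(scale_by_length(torch.ones(3)))
print(scale_by_length(torch.ones(7)))  # typically reuses the dynamic graph rather than recompiling
```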
python
|
pandas-dev__pandas
|
pandas/io/sql.py
|
{
"start": 94689,
"end": 104641
}
|
class ____(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con) -> None:
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, sql: str | Select | TextClause, params=None):
from sqlite3 import Error
if not isinstance(sql, str):
raise TypeError("Query must be a string unless using sqlalchemy.")
args = [] if params is None else [params]
cur = self.con.cursor()
try:
cur.execute(sql, *args)
return cur
except Error as exc:
try:
self.con.rollback()
except Error as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {sql}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor,
chunksize: int,
columns,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> Generator[DataFrame]:
"""Return generator through chunked result set"""
has_read_data = False
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
if not has_read_data:
result = DataFrame.from_records(
[], columns=columns, coerce_float=coerce_float
)
if dtype:
result = result.astype(dtype)
yield result
break
has_read_data = True
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
dtype_backend=dtype_backend,
)
def read_query(
self,
sql,
index_col=None,
coerce_float: bool = True,
parse_dates=None,
params=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
dtype_backend: DtypeBackend | Literal["numpy"] = "numpy",
) -> DataFrame | Iterator[DataFrame]:
cursor = self.execute(sql, params)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
dtype_backend=dtype_backend,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
dtype=dtype,
dtype_backend=dtype_backend,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name: str,
if_exists: str = "fail",
index: bool = True,
index_label=None,
schema=None,
chunksize: int | None = None,
dtype: DtypeArg | None = None,
method: Literal["multi"] | Callable | None = None,
engine: str = "auto",
**engine_kwargs,
) -> int | None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append', 'delete_rows'}, default 'fail'
fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
delete_rows: If a table exists, delete all records and insert data.
index : bool, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
"""
if dtype:
if not is_dict_like(dtype):
# error: Value expression in dictionary comprehension has incompatible
# type "Union[ExtensionDtype, str, dtype[Any], Type[object],
# Dict[Hashable, Union[ExtensionDtype, Union[str, dtype[Any]],
# Type[str], Type[float], Type[int], Type[complex], Type[bool],
# Type[object]]]]"; expected type "Union[ExtensionDtype, str,
# dtype[Any], Type[object]]"
dtype = dict.fromkeys(frame, dtype) # type: ignore[arg-type]
else:
dtype = cast(dict, dtype)
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
return table.insert(chunksize, method)
def has_table(self, name: str, schema: str | None = None) -> bool:
wld = "?"
query = f"""
SELECT
name
FROM
sqlite_master
WHERE
type IN ('table', 'view')
AND name={wld};
"""
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name: str, schema: str | None = None) -> None:
return None # not supported in fallback mode
def drop_table(self, name: str, schema: str | None = None) -> None:
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql).close()
def delete_rows(self, name: str, schema: str | None = None) -> None:
delete_sql = f"DELETE FROM {_get_valid_sqlite_name(name)}"
if self.has_table(name, schema):
self.execute(delete_sql).close()
def _create_sql_schema(
self,
frame,
table_name: str,
keys=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
) -> str:
table = SQLiteTable(
table_name,
self,
frame=frame,
index=False,
keys=keys,
dtype=dtype,
schema=schema,
)
return str(table.sql_schema())
def get_schema(
frame,
name: str,
keys=None,
con=None,
dtype: DtypeArg | None = None,
schema: str | None = None,
) -> str:
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : str
name of SQL table
keys : string or sequence, default: None
columns to use as primary key
con: ADBC Connection, SQLAlchemy connectable, sqlite3 connection, default: None
ADBC provides high performance I/O with native type support, where available.
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
schema : str, default: None
Optionally specify the schema to be used when creating the table.
"""
with pandasSQL_builder(con=con) as pandas_sql:
return pandas_sql._create_sql_schema(
frame, name, keys=keys, dtype=dtype, schema=schema
)
|
SQLiteDatabase
|
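The row above is pandas' sqlite3 fallback path (SQLiteDatabase/SQLiteTable), used when no SQLAlchemy engine is given. A minimal sketch of exercising it through the public to_sql/read_sql API, assuming only pandas and the standard library:
import sqlite3
import pandas as pd

# A plain DBAPI2 sqlite3 connection routes pandas through the fallback shown above.
conn = sqlite3.connect(":memory:")
df = pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})
df.to_sql("items", conn, index=False, if_exists="replace")
print(pd.read_sql("SELECT id, name FROM items", conn))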
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/types/beta/beta_tool_bash_20241022_param.py
|
{
"start": 355,
"end": 1044
}
|
class ____(TypedDict, total=False):
name: Required[Literal["bash"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["bash_20241022"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
input_examples: Iterable[Dict[str, object]]
strict: bool
|
BetaToolBash20241022Param
|
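A hedged sketch of constructing the tool parameter defined above; the import path mirrors the file path listed in this row, and only `type` and `name` are marked Required in the TypedDict:
from anthropic.types.beta.beta_tool_bash_20241022_param import BetaToolBash20241022Param

# Only the two Required[...] keys are mandatory; the remaining fields are optional.
bash_tool: BetaToolBash20241022Param = {
    "type": "bash_20241022",
    "name": "bash",
}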
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 650533,
"end": 651085
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("fragment", "highlights", "property")
fragment = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="fragment")
highlights = sgqlc.types.Field(
sgqlc.types.non_null(
sgqlc.types.list_of(sgqlc.types.non_null("TextMatchHighlight"))
),
graphql_name="highlights",
)
property = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="property")
|
TextMatch
|
python
|
Lightning-AI__lightning
|
src/lightning/fabric/accelerators/mps.py
|
{
"start": 868,
"end": 3063
}
|
class ____(Accelerator):
"""Accelerator for Metal Apple Silicon GPU devices.
.. warning:: Use of this accelerator beyond import and instantiation is experimental.
"""
@override
def setup_device(self, device: torch.device) -> None:
"""
Raises:
ValueError:
If the selected device is not MPS.
"""
if device.type != "mps":
raise ValueError(f"Device should be MPS, got {device} instead.")
@override
def teardown(self) -> None:
pass
@staticmethod
@override
def parse_devices(devices: Union[int, str, list[int]]) -> Optional[list[int]]:
"""Accelerator device parsing logic."""
from lightning.fabric.utilities.device_parser import _parse_gpu_ids
return _parse_gpu_ids(devices, include_mps=True)
@staticmethod
@override
def get_parallel_devices(devices: Union[int, str, list[int]]) -> list[torch.device]:
"""Gets parallel devices for the Accelerator."""
parsed_devices = MPSAccelerator.parse_devices(devices)
assert parsed_devices is not None
return [torch.device("mps", i) for i in range(len(parsed_devices))]
@staticmethod
@override
def auto_device_count() -> int:
"""Get the devices when set to auto."""
return 1
@staticmethod
@override
@lru_cache(1)
def is_available() -> bool:
"""MPS is only available on a machine with the ARM-based Apple Silicon processors."""
mps_disabled = os.getenv("DISABLE_MPS", "0") == "1"
return not mps_disabled and torch.backends.mps.is_available() and platform.processor() in ("arm", "arm64")
@staticmethod
@override
def name() -> str:
return "mps"
@classmethod
@override
def register_accelerators(cls, accelerator_registry: _AcceleratorRegistry) -> None:
accelerator_registry.register(
cls.name(),
cls,
description=cls.__name__,
)
def _get_all_available_mps_gpus() -> list[int]:
"""
Returns:
A list of all available MPS GPUs
"""
return [0] if MPSAccelerator.is_available() else []
|
MPSAccelerator
|
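A small usage sketch for the accelerator above; the import path follows the file path in this row, and the guard avoids calling device parsing on machines without MPS:
from lightning.fabric.accelerators.mps import MPSAccelerator

if MPSAccelerator.is_available():
    # parse_devices/get_parallel_devices resolve an int into MPS torch devices.
    print(MPSAccelerator.get_parallel_devices(1))  # [device(type='mps', index=0)]
else:
    print("No ARM-based Apple Silicon GPU detected")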
python
|
getsentry__sentry
|
src/sentry/features/manager.py
|
{
"start": 17225,
"end": 18429
}
|
class ____:
"""
A batch of objects to be checked for a feature flag.
An instance of this class encapsulates a call to
``FeatureManager.has_for_batch``. The objects (such as projects) have a
common parent organization.
"""
def __init__(
self,
manager: RegisteredFeatureManager,
name: str,
organization: Organization,
objects: Iterable[Project],
actor: User | RpcUser | AnonymousUser | None,
) -> None:
self._manager = manager
self.feature_name = name
self.organization = organization
self.objects = objects
self.actor = actor
def get_feature_objects(self) -> dict[Project, Feature]:
"""
Iterate over individual Feature objects.
This is a fallback mode for applying a FeatureHandler that doesn't
support checking the entire batch at once.
"""
cls = self._manager._get_feature_class(self.feature_name)
return {obj: cls(self.feature_name, obj) for obj in self.objects}
@property
def subject(self) -> Organization | User | RpcUser | AnonymousUser | None:
return self.organization or self.actor
|
FeatureCheckBatch
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1117173,
"end": 1118171
}
|
class ____(sgqlc.types.Type, Node):
"""Represents a 'connected' event on a given issue or pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "is_cross_repository", "source", "subject")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository")
"""Reference originated in a different repository."""
source = sgqlc.types.Field(sgqlc.types.non_null("ReferencedSubject"), graphql_name="source")
"""Issue or pull request that made the reference."""
subject = sgqlc.types.Field(sgqlc.types.non_null("ReferencedSubject"), graphql_name="subject")
"""Issue or pull request which was connected."""
|
ConnectedEvent
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modeling_emu3.py
|
{
"start": 10110,
"end": 11908
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: Emu3Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Emu3Attention(config=config, layer_idx=layer_idx)
self.mlp = Emu3MLP(config)
self.input_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Emu3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.dropout = nn.Dropout(config.attention_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
|
Emu3DecoderLayer
|
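The layer above follows the standard pre-norm residual wiring (norm, sublayer, dropout, residual add). A toy, self-contained PyTorch sketch of that pattern, with a linear stand-in for attention so it runs without the Emu3 modules:
import torch
from torch import nn

class ToyPreNormBlock(nn.Module):
    def __init__(self, hidden_size: int, dropout: float = 0.1):
        super().__init__()
        self.norm1 = nn.LayerNorm(hidden_size)
        self.norm2 = nn.LayerNorm(hidden_size)
        self.mixer = nn.Linear(hidden_size, hidden_size)  # stand-in for self-attention
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, 4 * hidden_size),
            nn.SiLU(),
            nn.Linear(4 * hidden_size, hidden_size),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # norm -> sublayer -> dropout -> residual add, twice per block
        x = x + self.dropout(self.mixer(self.norm1(x)))
        x = x + self.dropout(self.mlp(self.norm2(x)))
        return x

print(ToyPreNormBlock(16)(torch.randn(2, 5, 16)).shape)  # torch.Size([2, 5, 16])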
python
|
kamyu104__LeetCode-Solutions
|
Python/bold-words-in-string.py
|
{
"start": 1272,
"end": 1948
}
|
class ____(object):
def boldWords(self, words, S):
"""
:type words: List[str]
:type S: str
:rtype: str
"""
lookup = [0] * len(S)
for d in words:
pos = S.find(d)
while pos != -1:
lookup[pos:pos+len(d)] = [1] * len(d)
pos = S.find(d, pos+1)
result = []
for i in xrange(len(S)):
if lookup[i] and (i == 0 or not lookup[i-1]):
result.append("<b>")
result.append(S[i])
if lookup[i] and (i == len(S)-1 or not lookup[i+1]):
result.append("</b>")
return "".join(result)
|
Solution2
|
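The solution above targets Python 2 (xrange). A Python 3 port of the same interval-marking idea, checked against the standard example for this problem:
def bold_words(words, s):
    # Mark every character covered by any occurrence of any word.
    lookup = [False] * len(s)
    for w in words:
        pos = s.find(w)
        while pos != -1:
            for i in range(pos, pos + len(w)):
                lookup[i] = True
            pos = s.find(w, pos + 1)
    # Emit <b>/</b> at the boundaries of each marked run.
    result = []
    for i in range(len(s)):
        if lookup[i] and (i == 0 or not lookup[i - 1]):
            result.append("<b>")
        result.append(s[i])
        if lookup[i] and (i == len(s) - 1 or not lookup[i + 1]):
            result.append("</b>")
    return "".join(result)

print(bold_words(["ab", "bc"], "aabcd"))  # a<b>abc</b>d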
python
|
mwaskom__seaborn
|
seaborn/categorical.py
|
{
"start": 113939,
"end": 120940
}
|
class ____:
"""Modifies a scatterplot artist to show a beeswarm plot."""
def __init__(self, orient="x", width=0.8, warn_thresh=.05):
self.orient = orient
self.width = width
self.warn_thresh = warn_thresh
def __call__(self, points, center):
"""Swarm `points`, a PathCollection, around the `center` position."""
# Convert from point size (area) to diameter
ax = points.axes
dpi = ax.figure.dpi
# Get the original positions of the points
orig_xy_data = points.get_offsets()
# Reset the categorical positions to the center line
cat_idx = 1 if self.orient == "y" else 0
orig_xy_data[:, cat_idx] = center
# Transform the data coordinates to point coordinates.
# We'll figure out the swarm positions in the latter
# and then convert back to data coordinates and replot
orig_x_data, orig_y_data = orig_xy_data.T
orig_xy = ax.transData.transform(orig_xy_data)
# Order the variables so that x is the categorical axis
if self.orient == "y":
orig_xy = orig_xy[:, [1, 0]]
# Add a column with each point's radius
sizes = points.get_sizes()
if sizes.size == 1:
sizes = np.repeat(sizes, orig_xy.shape[0])
edge = points.get_linewidth().item()
radii = (np.sqrt(sizes) + edge) / 2 * (dpi / 72)
orig_xy = np.c_[orig_xy, radii]
# Sort along the value axis to facilitate the beeswarm
sorter = np.argsort(orig_xy[:, 1])
orig_xyr = orig_xy[sorter]
# Adjust points along the categorical axis to prevent overlaps
new_xyr = np.empty_like(orig_xyr)
new_xyr[sorter] = self.beeswarm(orig_xyr)
# Transform the point coordinates back to data coordinates
if self.orient == "y":
new_xy = new_xyr[:, [1, 0]]
else:
new_xy = new_xyr[:, :2]
new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T
# Add gutters
t_fwd, t_inv = _get_transform_functions(ax, self.orient)
if self.orient == "y":
self.add_gutters(new_y_data, center, t_fwd, t_inv)
else:
self.add_gutters(new_x_data, center, t_fwd, t_inv)
# Reposition the points so they do not overlap
if self.orient == "y":
points.set_offsets(np.c_[orig_x_data, new_y_data])
else:
points.set_offsets(np.c_[new_x_data, orig_y_data])
def beeswarm(self, orig_xyr):
"""Adjust x position of points to avoid overlaps."""
# In this method, `x` is always the categorical axis
# Center of the swarm, in point coordinates
midline = orig_xyr[0, 0]
# Start the swarm with the first point
swarm = np.atleast_2d(orig_xyr[0])
# Loop over the remaining points
for xyr_i in orig_xyr[1:]:
# Find the points in the swarm that could possibly
# overlap with the point we are currently placing
neighbors = self.could_overlap(xyr_i, swarm)
# Find positions that would be valid individually
# with respect to each of the swarm neighbors
candidates = self.position_candidates(xyr_i, neighbors)
# Sort candidates by their centrality
offsets = np.abs(candidates[:, 0] - midline)
candidates = candidates[np.argsort(offsets)]
# Find the first candidate that does not overlap any neighbors
new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)
# Place it into the swarm
swarm = np.vstack([swarm, new_xyr_i])
return swarm
def could_overlap(self, xyr_i, swarm):
"""Return a list of all swarm points that could overlap with target."""
# Because we work backwards through the swarm and can short-circuit,
# the for-loop is faster than vectorization
_, y_i, r_i = xyr_i
neighbors = []
for xyr_j in reversed(swarm):
_, y_j, r_j = xyr_j
if (y_i - y_j) < (r_i + r_j):
neighbors.append(xyr_j)
else:
break
return np.array(neighbors)[::-1]
def position_candidates(self, xyr_i, neighbors):
"""Return a list of coordinates that might be valid by adjusting x."""
candidates = [xyr_i]
x_i, y_i, r_i = xyr_i
left_first = True
for x_j, y_j, r_j in neighbors:
dy = y_i - y_j
dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05
cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)
if left_first:
new_candidates = [cl, cr]
else:
new_candidates = [cr, cl]
candidates.extend(new_candidates)
left_first = not left_first
return np.array(candidates)
def first_non_overlapping_candidate(self, candidates, neighbors):
"""Find the first candidate that does not overlap with the swarm."""
# If we have no neighbors, all candidates are good.
if len(neighbors) == 0:
return candidates[0]
neighbors_x = neighbors[:, 0]
neighbors_y = neighbors[:, 1]
neighbors_r = neighbors[:, 2]
for xyr_i in candidates:
x_i, y_i, r_i = xyr_i
dx = neighbors_x - x_i
dy = neighbors_y - y_i
sq_distances = np.square(dx) + np.square(dy)
sep_needed = np.square(neighbors_r + r_i)
# Good candidate does not overlap any of neighbors which means that
# squared distance between candidate and any of the neighbors has
# to be at least square of the summed radii
good_candidate = np.all(sq_distances >= sep_needed)
if good_candidate:
return xyr_i
raise RuntimeError(
"No non-overlapping candidates found. This should not happen."
)
def add_gutters(self, points, center, trans_fwd, trans_inv):
"""Stop points from extending beyond their territory."""
half_width = self.width / 2
low_gutter = trans_inv(trans_fwd(center) - half_width)
off_low = points < low_gutter
if off_low.any():
points[off_low] = low_gutter
high_gutter = trans_inv(trans_fwd(center) + half_width)
off_high = points > high_gutter
if off_high.any():
points[off_high] = high_gutter
gutter_prop = (off_high + off_low).sum() / len(points)
if gutter_prop > self.warn_thresh:
msg = (
"{:.1%} of the points cannot be placed; you may want "
"to decrease the size of the markers or use stripplot."
).format(gutter_prop)
warnings.warn(msg, UserWarning)
return points
BoxPlotArtists = namedtuple("BoxPlotArtists", "box median whiskers caps fliers mean")
|
Beeswarm
|
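The Beeswarm helper above is what seaborn's swarmplot applies to each categorical group. A minimal sketch that triggers it through the public API (loading the sample dataset needs network access the first time):
import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")
# Each day's points are passed through the Beeswarm adjustment shown above.
sns.swarmplot(data=tips, x="day", y="total_bill", size=3)
plt.show()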
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/tasks/builds.py
|
{
"start": 9871,
"end": 43381
}
|
class ____(SyncRepositoryMixin, Task):
"""
The main entry point for updating documentation.
It handles all of the logic around whether a project is imported, was
created or a webhook is received. Then it will sync the repository and
build all the documentation formats and upload them to the storage.
"""
name = __name__ + ".update_docs_task"
autoretry_for = (BuildMaxConcurrencyError,)
max_retries = settings.RTD_BUILDS_MAX_RETRIES
default_retry_delay = settings.RTD_BUILDS_RETRY_DELAY
retry_backoff = False
# Expected exceptions that will be logged as info only and not retried.
# These exceptions are not sent to Sentry either because we are using
# ``SENTRY_CELERY_IGNORE_EXPECTED=True``.
#
# All exceptions generated by a user misconfiguration should be listed
# here. Actually, every subclass of ``BuildUserError``.
throws = (
ConfigError,
BuildCancelled,
BuildUserError,
RepositoryError,
MkDocsYAMLParseError,
ProjectConfigurationError,
BuildMaxConcurrencyError,
SoftTimeLimitExceeded,
)
# Do not send notifications on failure builds for these exceptions.
exceptions_without_notifications = (
BuildCancelled.CANCELLED_BY_USER,
BuildCancelled.SKIPPED_EXIT_CODE_183,
BuildAppError.BUILDS_DISABLED,
BuildMaxConcurrencyError.LIMIT_REACHED,
)
# Do not send external build status on failure builds for these exceptions.
exceptions_without_external_build_status = (BuildMaxConcurrencyError.LIMIT_REACHED,)
acks_late = True
track_started = True
# These values have to be dynamic based on project
time_limit = None
soft_time_limit = None
Request = BuildRequest
def _setup_sigterm(self):
def sigterm_received(*args, **kwargs):
log.warning("SIGTERM received. Waiting for build to stop gracefully after it finishes.")
def sigint_received(*args, **kwargs):
log.warning("SIGINT received. Canceling the build running.")
# Only allow cancelling the build if it's not already uploading the files.
# This is to protect our users from ending up with half of the documentation uploaded.
# TODO: remove this condition once we implement "Atomic Uploads"
if self.data.build.get("state") == BUILD_STATE_UPLOADING:
log.warning('Ignoring cancelling the build at "Uploading" state.')
return
raise BuildCancelled(message_id=BuildCancelled.CANCELLED_BY_USER)
# Do not send the SIGTERM signal to children (pip is automatically killed when
# receives SIGTERM and make the build to fail one command and stop build)
signal.signal(signal.SIGTERM, sigterm_received)
signal.signal(signal.SIGINT, sigint_received)
def _check_concurrency_limit(self):
try:
response = self.data.api_client.build.concurrent.get(
project__slug=self.data.project.slug
)
concurrency_limit_reached = response.get("limit_reached", False)
max_concurrent_builds = response.get(
"max_concurrent",
settings.RTD_MAX_CONCURRENT_BUILDS,
)
except Exception:
log.exception(
"Error while hitting/parsing API for concurrent limit checks from builder.",
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
)
concurrency_limit_reached = False
max_concurrent_builds = settings.RTD_MAX_CONCURRENT_BUILDS
if concurrency_limit_reached:
# By calling ``retry`` Celery will raise an exception and call ``on_retry``.
# NOTE: autoretry_for doesn't work with exceptions raised from before_start,
# it only works if they are raised from the run/execute method.
log.info("Concurrency limit reached, retrying task.")
self.retry(
exc=BuildMaxConcurrencyError(
BuildMaxConcurrencyError.LIMIT_REACHED,
format_values={
"limit": max_concurrent_builds,
},
)
)
def _check_project_disabled(self):
if self.data.project.skip:
log.warning("Project build skipped.")
raise BuildAppError(BuildAppError.BUILDS_DISABLED)
def before_start(self, task_id, args, kwargs):
# Create the object to store all the task-related data
self.data = TaskData()
# Comes from the signature of the task and they are the only
# required arguments.
self.data.version_pk, self.data.build_pk = args
structlog.contextvars.bind_contextvars(build_id=self.data.build_pk)
log.info("Running task.", name=self.name)
self.data.start_time = timezone.now()
self.data.environment_class = DockerBuildEnvironment
if not settings.DOCKER_ENABLE:
# TODO: delete LocalBuildEnvironment since it's not supported
# anymore and we are not using it
self.data.environment_class = LocalBuildEnvironment
self.data.api_client = setup_api(kwargs["build_api_key"])
self.data.build = self.get_build(self.data.build_pk)
self.data.version = self.get_version(self.data.version_pk)
self.data.project = self.data.version.project
# Save the builder instance's name into the build object
self.data.build["builder"] = socket.gethostname()
# Reset any previous build error reported to the user
self.data.build["error"] = ""
# Also note there are builds that are triggered without a commit
# because they just build the latest commit for that version
self.data.build_commit = kwargs.get("build_commit")
self.data.build_director = BuildDirector(
data=self.data,
)
structlog.contextvars.bind_contextvars(
# NOTE: ``self.data.build`` is just a regular dict, not an APIBuild :'(
builder=self.data.build["builder"],
commit=self.data.build_commit,
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
)
# Log a warning if the task took more than 10 minutes to be retried
if self.data.build["task_executed_at"]:
task_executed_at = datetime.datetime.fromisoformat(self.data.build["task_executed_at"])
delta = timezone.now() - task_executed_at
if delta > timezone.timedelta(minutes=10):
log.warning(
"This task waited more than 10 minutes to be retried.",
delta_minutes=round(delta.seconds / 60, 1),
)
# Save when the task was executed by a builder
self.data.build["task_executed_at"] = timezone.now()
# Enable scale-in protection on this instance
#
# TODO: move this to the beginning of this method
# once we don't need to rely on `self.data.project`.
if self.data.project.has_feature(Feature.SCALE_IN_PROTECTION):
set_builder_scale_in_protection.delay(
build_id=self.data.build_pk,
builder=socket.gethostname(),
protected_from_scale_in=True,
)
if self.data.project.has_feature(Feature.BUILD_FULL_CLEAN):
# Clean DOCROOT path completely to avoid conflicts with other projects
clean_build()
else:
# Clean the build paths for this version to avoid conflicts with previous run
clean_build(self.data.version)
# NOTE: this is never called. I didn't find anything in the logs, so we
# can probably remove it
self._setup_sigterm()
self._check_project_disabled()
self._check_concurrency_limit()
self._reset_build()
def _reset_build(self):
# Always reset the build before starting.
# We used to only reset it when it has at least one command executed already.
# However, with the introduction of the new notification system,
# it could have a notification attached (e.g. Max concurrency build)
# that needs to be removed from the build.
# See https://github.com/readthedocs/readthedocs.org/issues/11131
log.info("Resetting build.")
self.data.api_client.build(self.data.build["id"]).reset.post()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Celery handler to be executed when a task fails.
Updates build data, adds tasks to send build notifications.
.. note::
Since the task has failed, some attributes from the `self.data`
object may not be defined.
"""
log.info("Task failed.")
if not self.data.build:
# NOTE: use `self.data.build_id` (passed to the task) instead
# `self.data.build` (retrieved from the API) because it's not present,
# probably due the API failed when retrieving it.
#
# So, we create the `self.data.build` with the minimum required data.
self.data.build = {
"id": self.data.build_pk,
}
# Known errors in our application code (e.g. we couldn't connect to
# Docker API). Report a generic message to the user.
if isinstance(exc, BuildAppError):
message_id = exc.message_id
# Known errors in the user's project (e.g. invalid config file, invalid
# repository, command failed, etc). Report the error back to the user
# by creating a notification attached to the build
# Otherwise, use a notification with a generic message.
elif isinstance(exc, BuildUserError):
if hasattr(exc, "message_id") and exc.message_id is not None:
message_id = exc.message_id
else:
message_id = BuildUserError.GENERIC
# Set build state as cancelled if the user cancelled the build
if isinstance(exc, BuildCancelled):
self.data.build["state"] = BUILD_STATE_CANCELLED
elif isinstance(exc, SoftTimeLimitExceeded):
log.info("Soft time limit exceeded.")
message_id = BuildUserError.BUILD_TIME_OUT
else:
# We don't know what happened in the build. Log the exception and
# report a generic notification to the user.
# Note we are using `log.error(exc_info=...)` instead of `log.exception`
# because this is not executed inside a try/except block.
log.error("Build failed with unhandled exception.", exc_info=exc)
message_id = BuildAppError.GENERIC_WITH_BUILD_ID
# Grab the format values from the exception in case it contains
format_values = exc.format_values if hasattr(exc, "format_values") else None
# Attach the notification to the build, only when ``BuildDirector`` is available.
# It may happen that the director is not created because the API failed to retrieve
# required data to initialize it on ``before_start``.
if self.data.build_director:
self.data.build_director.attach_notification(
attached_to=f"build/{self.data.build['id']}",
message_id=message_id,
format_values=format_values,
)
else:
log.warning(
"We couldn't attach a notification to the build since it failed on an early stage."
)
# Send notifications for unhandled errors
if message_id not in self.exceptions_without_notifications:
self.send_notifications(
self.data.version_pk,
self.data.build["id"],
event=WebHookEvent.BUILD_FAILED,
)
# NOTE: why wouldn't we have `self.data.build_commit` here?
# This attribute is set when we get it after cloning the repository
#
# Oh, I think this is to differentiate a task triggered with
# `Build.commit` from one triggered just with the `Version` to build
# the _latest_ commit of it
if (
self.data.build_commit
and message_id not in self.exceptions_without_external_build_status
):
version_type = None
if self.data.version:
version_type = self.data.version.type
status = BUILD_STATUS_FAILURE
if message_id == BuildCancelled.SKIPPED_EXIT_CODE_183:
# The build was skipped by returning the magic exit code,
# marked as CANCELLED, but communicated to GitHub as successful.
# This is because the PR has to be available for merging when the build
# was skipped on purpose.
status = BUILD_STATUS_SUCCESS
send_external_build_status(
version_type=version_type,
build_pk=self.data.build["id"],
commit=self.data.build_commit,
status=status,
)
# Update build object
self.data.build["success"] = False
def get_valid_artifact_types(self):
"""
Return a list of all the valid artifact types for this build.
It performs the following checks on each output format type path:
- it exists
- it is a directory
- does not contain more than 1 file (only PDF, HTMLZip, ePUB)
- it contains an "index.html" file at its root directory (only HTML)
TODO: remove the limitation of only 1 file.
Add support for multiple PDF files in the output directory and
grab them by using glob syntax among other files that could be garbage.
"""
valid_artifacts = []
for artifact_type in ARTIFACT_TYPES:
artifact_directory = self.data.project.artifact_path(
version=self.data.version.slug,
type_=artifact_type,
)
if artifact_type == "html":
index_html_filepath = os.path.join(artifact_directory, "index.html")
if not os.path.exists(index_html_filepath):
log.info(
"Failing the build. "
"HTML output does not contain an 'index.html' at its root directory.",
index_html=index_html_filepath,
)
raise BuildUserError(BuildUserError.BUILD_OUTPUT_HTML_NO_INDEX_FILE)
if not os.path.exists(artifact_directory):
# There is no output directory.
# Skip this format.
continue
if not os.path.isdir(artifact_directory):
log.debug(
"The output path is not a directory.",
output_format=artifact_type,
)
raise BuildUserError(
BuildUserError.BUILD_OUTPUT_IS_NOT_A_DIRECTORY,
format_values={
"artifact_type": artifact_type,
},
)
# Check if there are multiple files on artifact directories.
# These output formats do not support multiple files yet.
# In case multiple files are found, the upload for this format is not performed.
if artifact_type in ARTIFACT_TYPES_WITHOUT_MULTIPLE_FILES_SUPPORT:
list_dir = os.listdir(artifact_directory)
artifact_format_files = len(list_dir)
if artifact_format_files > 1:
log.debug(
"Multiple files are not supported for this format. "
"Skipping this output format.",
output_format=artifact_type,
)
raise BuildUserError(
BuildUserError.BUILD_OUTPUT_HAS_MULTIPLE_FILES,
format_values={
"artifact_type": artifact_type,
},
)
if artifact_format_files == 0:
raise BuildUserError(
BuildUserError.BUILD_OUTPUT_HAS_0_FILES,
format_values={
"artifact_type": artifact_type,
},
)
# Rename file as "<project_slug>-<version_slug>.<artifact_type>",
# which is the filename that Proxito serves for offline formats.
filename = list_dir[0]
_, extension = filename.rsplit(".")
path = Path(artifact_directory) / filename
destination = Path(artifact_directory) / f"{self.data.project.slug}.{extension}"
assert_path_is_inside_docroot(path)
assert_path_is_inside_docroot(destination)
shutil.move(path, destination)
# If all the conditions were met, the artifact is valid
valid_artifacts.append(artifact_type)
return valid_artifacts
def on_success(self, retval, task_id, args, kwargs):
valid_artifacts = self.get_valid_artifact_types()
# NOTE: we are updating the db version instance *only* when the build produced HTML output
# TODO: remove this condition and *always* update the DB Version instance
if "html" in valid_artifacts:
try:
self.data.api_client.version(self.data.version.pk).patch(
{
"built": True,
"documentation_type": self.data.version.documentation_type,
"has_pdf": "pdf" in valid_artifacts,
"has_epub": "epub" in valid_artifacts,
"has_htmlzip": "htmlzip" in valid_artifacts,
"build_data": self.data.version.build_data,
"addons": self.data.version.addons,
}
)
except HttpClientError:
# NOTE: I think we should fail the build if we cannot update
# the version at this point. Otherwise, we will have inconsistent data
log.exception(
"Updating version db object failed. "
'Files are synced in the storage, but "Version" object is not updated',
)
# Index search data
index_build.delay(build_id=self.data.build["id"])
# Check if the project is spam
if "readthedocsext.spamfighting" in settings.INSTALLED_APPS:
from readthedocsext.spamfighting.tasks import ( # noqa
spam_check_after_build_complete,
)
spam_check_after_build_complete.delay(build_id=self.data.build["id"])
if not self.data.project.has_valid_clone:
self.set_valid_clone()
self.send_notifications(
self.data.version.pk,
self.data.build["id"],
event=WebHookEvent.BUILD_PASSED,
)
if self.data.build_commit:
send_external_build_status(
version_type=self.data.version.type,
build_pk=self.data.build["id"],
commit=self.data.build_commit,
status=BUILD_STATUS_SUCCESS,
)
# Update build object
self.data.build["success"] = True
def on_retry(self, exc, task_id, args, kwargs, einfo):
"""
Celery helper called when the task is retried.
This happens when any of the exceptions defined in ``autoretry_for``
argument is raised or when ``self.retry`` is called from inside the
task.
See https://docs.celeryproject.org/en/master/userguide/tasks.html#retrying
"""
log.info("Retrying this task.")
if isinstance(exc, BuildMaxConcurrencyError):
log.warning(
"Delaying tasks due to concurrency limit.",
project_slug=self.data.project.slug,
version_slug=self.data.version.slug,
)
# Grab the format values from the exception in case it contains
format_values = exc.format_values if hasattr(exc, "format_values") else None
self.data.build_director.attach_notification(
attached_to=f"build/{self.data.build['id']}",
message_id=BuildMaxConcurrencyError.LIMIT_REACHED,
format_values=format_values,
)
# Always update the build on retry
self.update_build(state=BUILD_STATE_TRIGGERED)
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
Celery handler to be executed after a task runs.
.. note::
This handler is called even if the task has failed,
so some attributes from the `self.data` object may not be defined.
"""
# Update build object
self.data.build["length"] = (timezone.now() - self.data.start_time).seconds
build_state = None
# The state key might not be defined
# prior to finishing the task.
if self.data.build.get("state") not in BUILD_FINAL_STATES:
build_state = BUILD_STATE_FINISHED
self.update_build(build_state)
self.save_build_data()
# Be defensive with the signal, so if a listener fails we still clean up
try:
build_complete.send(sender=Build, build=self.data.build)
except Exception:
log.exception("Error during build_complete", exc_info=True)
if self.data.version:
clean_build(self.data.version)
try:
self.data.api_client.revoke.post()
except Exception:
log.exception("Failed to revoke build api key.", exc_info=True)
# Disable scale-in protection on this instance
if self.data.project.has_feature(Feature.SCALE_IN_PROTECTION):
set_builder_scale_in_protection.delay(
build_id=self.data.build_pk,
builder=socket.gethostname(),
protected_from_scale_in=False,
)
log.info(
"Build finished.",
length=self.data.build["length"],
success=self.data.build["success"],
)
def update_build(self, state=None):
if state:
self.data.build["state"] = state
# Attempt to stop unicode errors on build reporting
# for key, val in list(self.data.build.items()):
# if isinstance(val, bytes):
# self.data.build[key] = val.decode('utf-8', 'ignore')
try:
self.data.api_client.build(self.data.build["id"]).patch(self.data.build)
except Exception:
# NOTE: we are updating the "Build" object on each `state`.
# Only if the last update fails, there may be some inconsistency
# between the "Build" object in our db and the reality.
#
# The `state` argument will help us to track this more and understand
# at what state our updates are failing and decide what to do.
log.exception("Error while updating the build object.", state=state)
def execute(self):
# Cloning
self.update_build(state=BUILD_STATE_CLONING)
# TODO: remove the ``create_vcs_environment`` hack. Ideally, this should be
# handled inside the ``BuildDirector`` but we can't use ``with
# self.vcs_environment`` twice because it kills the container on
# ``__exit__``
self.data.build_director.create_vcs_environment()
with self.data.build_director.vcs_environment:
self.data.build_director.setup_vcs()
# Sync tags/branches from VCS repository into Read the Docs'
# `Version` objects in the database. This method runs commands
# (e.g. "hg tags") inside the VCS environment, so it requires to be
# inside the `with` statement
self.sync_versions(self.data.build_director.vcs_repository)
# TODO: remove the ``create_build_environment`` hack. Ideally, this should be
# handled inside the ``BuildDirector`` but we can't use ``with
# self.build_environment`` twice because it kills the container on
# ``__exit__``
self.data.build_director.create_build_environment()
with self.data.build_director.build_environment:
try:
if getattr(self.data.config.build, "commands", False):
self.update_build(state=BUILD_STATE_INSTALLING)
self.data.build_director.install_build_tools()
self.update_build(state=BUILD_STATE_BUILDING)
self.data.build_director.run_build_commands()
else:
# Installing
self.update_build(state=BUILD_STATE_INSTALLING)
self.data.build_director.setup_environment()
# Building
self.update_build(state=BUILD_STATE_BUILDING)
self.data.build_director.build()
finally:
self.data.build_director.check_old_output_directory()
self.data.build_data = self.collect_build_data()
# At this point, the user's build already succeeded.
# However, we cannot use `.on_success()` because we still have to upload the artifacts;
# which could fail, and we want to detect that and handle it properly at `.on_failure()`
# Store build artifacts to storage (local or cloud storage)
self.store_build_artifacts()
def collect_build_data(self):
"""
Collect data from the current build.
The data is collected from inside the container,
so this must be called before killing the container.
"""
try:
return BuildDataCollector(self.data.build_director.build_environment).collect()
except Exception:
log.exception("Error while collecting build data")
def save_build_data(self):
"""
Save the data collected from the build after it has ended.
This must be called after the build has finished updating its state,
otherwise some attributes like ``length`` won't be available.
"""
try:
if self.data.build_data:
save_build_data.delay(
build_id=self.data.build_pk,
data=self.data.build_data,
)
except Exception:
log.exception("Error while saving build data")
def get_build(self, build_pk):
"""
Retrieve build object from API.
:param build_pk: Build primary key
"""
build = {}
if build_pk:
build = self.data.api_client.build(build_pk).get()
private_keys = [
"project",
"version",
"resource_uri",
"absolute_uri",
]
# TODO: try to use the same technique than for ``APIProject``.
return {key: val for key, val in build.items() if key not in private_keys}
# NOTE: this can be just updated on `self.data.build['']` and sent once the
# build has finished to reduce API calls.
def set_valid_clone(self):
"""Mark on the project that it has been cloned properly."""
self.data.api_client.project(self.data.project.pk).patch({"has_valid_clone": True})
self.data.project.has_valid_clone = True
self.data.version.project.has_valid_clone = True
def store_build_artifacts(self):
"""
Save build artifacts to "storage" using Django's storage API.
The storage could be local filesystem storage OR cloud blob storage
such as S3, Azure storage or Google Cloud Storage.
Remove build artifacts of types not included in this build (PDF, ePub, zip only).
"""
time_before_store_build_artifacts = timezone.now()
log.info("Writing build artifacts to media storage")
self.update_build(state=BUILD_STATE_UPLOADING)
valid_artifacts = self.get_valid_artifact_types()
structlog.contextvars.bind_contextvars(artifacts=valid_artifacts)
types_to_copy = []
types_to_delete = []
build_media_storage = get_storage(
project=self.data.project,
build_id=self.data.build["id"],
api_client=self.data.api_client,
storage_type=StorageType.build_media,
)
for artifact_type in ARTIFACT_TYPES:
if artifact_type in valid_artifacts:
types_to_copy.append(artifact_type)
# Never delete HTML nor JSON (search index)
elif artifact_type not in UNDELETABLE_ARTIFACT_TYPES:
types_to_delete.append(artifact_type)
# Upload formats
for media_type in types_to_copy:
from_path = self.data.project.artifact_path(
version=self.data.version.slug,
type_=media_type,
)
to_path = self.data.project.get_storage_path(
type_=media_type,
version_slug=self.data.version.slug,
include_file=False,
version_type=self.data.version.type,
)
self._log_directory_size(from_path, media_type)
try:
build_media_storage.rclone_sync_directory(from_path, to_path)
except Exception as exc:
# NOTE: the exceptions reported so far are:
# - botocore.exceptions:HTTPClientError
# - botocore.exceptions:ClientError
# - readthedocs.doc_builder.exceptions:BuildCancelled
log.exception(
"Error copying to storage",
media_type=media_type,
from_path=from_path,
to_path=to_path,
)
# Re-raise the exception to fail the build and handle it
# automatically at `on_failure`.
# It will clearly communicate the error to the user.
raise BuildAppError(
BuildAppError.UPLOAD_FAILED,
exception_message="Error uploading files to the storage.",
) from exc
# Delete formats
for media_type in types_to_delete:
media_path = self.data.version.project.get_storage_path(
type_=media_type,
version_slug=self.data.version.slug,
include_file=False,
version_type=self.data.version.type,
)
try:
build_media_storage.delete_directory(media_path)
except Exception as exc:
# NOTE: I didn't find any log line for this case yet
log.exception(
"Error deleting files from storage",
media_type=media_type,
media_path=media_path,
)
# Re-raise the exception to fail the build and handle it
# automatically at `on_failure`.
# It will clearly communicate the error to the user.
raise BuildAppError(
BuildAppError.GENERIC_WITH_BUILD_ID,
exception_message="Error deleting files from storage.",
) from exc
log.info(
"Store build artifacts finished.",
time=(timezone.now() - time_before_store_build_artifacts).seconds,
)
def _log_directory_size(self, directory, media_type):
try:
output = subprocess.check_output(["du", "--summarize", "-m", "--", directory])
# The output is something like: "5\t/path/to/directory".
directory_size = int(output.decode().split()[0])
log.info(
"Build artifacts directory size.",
directory=directory,
size=directory_size, # Size in mega bytes
media_type=media_type,
)
except Exception:
log.info(
"Error getting build artifacts directory size.",
exc_info=True,
)
def send_notifications(self, version_pk, build_pk, event):
"""Send notifications to all subscribers of `event`."""
# Try to infer the version type if we can
# before creating a task.
if not self.data.version or self.data.version.type != EXTERNAL:
build_tasks.send_build_notifications.delay(
version_pk=version_pk,
build_pk=build_pk,
event=event,
)
@app.task(
base=UpdateDocsTask,
bind=True,
ignore_result=True,
)
def update_docs_task(self, version_id, build_id, *, build_api_key, build_commit=None, **kwargs):
# In case we pass more arguments than expected, log them and ignore them,
# so we don't break builds while we deploy a change that requires an extra argument.
if kwargs:
log.warning("Extra arguments passed to update_docs_task", arguments=kwargs)
self.execute()
|
UpdateDocsTask
|
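UpdateDocsTask leans on Celery's class-based task hooks (before_start, on_failure, on_retry, on_success, after_return). A generic, self-contained sketch of that pattern; the app, broker and task body here are hypothetical, only the hook names mirror the real task:
from celery import Celery, Task

app = Celery("sketch", broker="memory://")  # hypothetical app and broker

class LifecycleTask(Task):
    def before_start(self, task_id, args, kwargs):
        print("set up per-task state", task_id)

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        print("task failed:", exc)

    def on_retry(self, exc, task_id, args, kwargs, einfo):
        print("task retrying:", exc)

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        print("runs even after failures:", status)

@app.task(base=LifecycleTask, bind=True)
def build_docs(self, version_id, build_id):
    print("building", version_id, build_id)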
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/common/tests/pt_backward_test.py
|
{
"start": 288,
"end": 818
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(add_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
AddBenchmark
|
python
|
django-guardian__django-guardian
|
example_project/articles/migrations/0001_initial.py
|
{
"start": 157,
"end": 2916
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
("auth", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Article",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("title", models.CharField(max_length=64, verbose_name="title")),
("slug", models.SlugField(max_length=64)),
("content", models.TextField(verbose_name="content")),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
],
options={
"get_latest_by": "created_at",
"default_permissions": ("add", "change", "delete"),
"permissions": (("view_article", "Can view article"),),
},
),
migrations.CreateModel(
name="ArticleGroupObjectPermission",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"content_object",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="articles.Article"),
),
("group", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.Group")),
("permission", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.Permission")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ArticleUserObjectPermission",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
(
"content_object",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="articles.Article"),
),
("permission", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.Permission")),
("user", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
"abstract": False,
},
),
migrations.AlterUniqueTogether(
name="articleuserobjectpermission",
unique_together={("user", "permission", "content_object")},
),
migrations.AlterUniqueTogether(
name="articlegroupobjectpermission",
unique_together={("group", "permission", "content_object")},
),
]
|
Migration
|
python
|
sanic-org__sanic
|
sanic/pages/directory_page.py
|
{
"start": 123,
"end": 256
}
|
class ____(TypedDict):
"""Type for file info."""
icon: str
file_name: str
file_access: str
file_size: str
|
FileInfo
|
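Constructing the TypedDict above; the import path mirrors the file path in this row and the values are purely illustrative:
from sanic.pages.directory_page import FileInfo

entry: FileInfo = {
    "icon": "📁",  # illustrative values only
    "file_name": "docs",
    "file_access": "2024-01-01 12:00",
    "file_size": "4.0 KiB",
}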
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/image/test_image_class01.py
|
{
"start": 291,
"end": 1629
}
|
class ____(unittest.TestCase):
"""
Test the properties of an Image object.
"""
def test_image_properties01(self):
"""Test the Image class properties."""
image = Image("xlsxwriter/test/comparison/images/red.png")
self.assertEqual(image.image_type, "PNG")
self.assertEqual(image.width, 32)
self.assertEqual(image.height, 32)
self.assertEqual(image.x_dpi, 96)
self.assertEqual(image.y_dpi, 96)
def test_image_properties02(self):
"""Test the Image class properties."""
with open("xlsxwriter/test/comparison/images/red.png", "rb") as image_file:
image_data = BytesIO(image_file.read())
image = Image(image_data)
self.assertEqual(image.image_type, "PNG")
self.assertEqual(image.width, 32)
self.assertEqual(image.height, 32)
self.assertEqual(image.x_dpi, 96)
self.assertEqual(image.y_dpi, 96)
def test_image_properties03(self):
"""Test the Image class properties."""
image = Image("xlsxwriter/test/comparison/images/red_64x20.png")
self.assertEqual(image.image_type, "PNG")
self.assertEqual(image.width, 64)
self.assertEqual(image.height, 20)
self.assertEqual(image.x_dpi, 96)
self.assertEqual(image.y_dpi, 96)
|
TestImageProperties
|
python
|
kamyu104__LeetCode-Solutions
|
Python/tallest-billboard.py
|
{
"start": 66,
"end": 660
}
|
class ____(object):
def tallestBillboard(self, rods):
"""
:type rods: List[int]
:rtype: int
"""
def dp(A):
lookup = collections.defaultdict(int)
lookup[0] = 0
for x in A:
for d, y in lookup.items():
lookup[d+x] = max(lookup[d+x], y)
lookup[abs(d-x)] = max(lookup[abs(d-x)], y + min(d, x))
return lookup
left, right = dp(rods[:len(rods)//2]), dp(rods[len(rods)//2:])
return max(left[d]+right[d]+d for d in left if d in right)
|
Solution
|
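The inner loop above mutates `lookup` while iterating `lookup.items()`, which works under Python 2 (items() returns a copy) but raises RuntimeError under Python 3. A Python 3-safe rewrite of the same meet-in-the-middle DP, iterating over a snapshot of the table:
import collections

def tallest_billboard(rods):
    def dp(a):
        # lookup maps height difference -> best shorter-side height
        lookup = collections.defaultdict(int)
        lookup[0] = 0
        for x in a:
            for d, y in list(lookup.items()):  # snapshot so in-place updates are safe
                lookup[d + x] = max(lookup[d + x], y)
                lookup[abs(d - x)] = max(lookup[abs(d - x)], y + min(d, x))
        return lookup

    left, right = dp(rods[: len(rods) // 2]), dp(rods[len(rods) // 2 :])
    return max(left[d] + right[d] + d for d in left if d in right)

print(tallest_billboard([1, 2, 3, 6]))  # 6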
python
|
astropy__astropy
|
astropy/time/tests/test_basic.py
|
{
"start": 53637,
"end": 54455
}
|
class ____:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r"bad day \(JD computed\)") as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.0])
|
TestSofaErrors
|
python
|
huggingface__transformers
|
src/transformers/models/ministral/modeling_ministral.py
|
{
"start": 9756,
"end": 11622
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: MinistralConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MinistralAttention(config=config, layer_idx=layer_idx)
self.mlp = MinistralMLP(config)
self.input_layernorm = MinistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MinistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.attention_type = config.layer_types[layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
MinistralDecoderLayer
|
python
|
kamyu104__LeetCode-Solutions
|
Python/best-position-for-a-service-centre.py
|
{
"start": 1588,
"end": 2803
}
|
class ____(object):
def getMinDistSum(self, positions):
"""
:type positions: List[List[int]]
:rtype: float
"""
DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)]
EPS = 1e-6
def dist(positions, p):
return sum(((p[0]-x)**2 + (p[1]-y)**2)**0.5 for x, y in positions)
median = [0.0, 0.0]
median[0] = float(sum(x for x, _ in positions))/len(positions)
median[1] = float(sum(y for _, y in positions))/len(positions)
result = dist(positions, median)
delta = float(max(max(positions, key=lambda x: x[0])[0],
max(positions, key=lambda x: x[1])[1])) - \
float(min(min(positions, key=lambda x: x[0])[0],
min(positions, key=lambda x: x[1])[1]))
while delta > EPS:
for dx, dy in DIRECTIONS:
new_median = [median[0] + delta*dx, median[1] + delta*dy]
nd = dist(positions, new_median)
if nd < result:
result = nd
median = new_median
break
else:
delta /= 2.0
return result
|
Solution2
|
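A quick usage check for the local search above, using the classic four-point diamond whose optimum total distance is 4.0 (centre at (1, 1)); the class name comes from the target column of this row:
# Assumes the Solution2 class above is already defined in scope.
print(Solution2().getMinDistSum([[0, 1], [1, 0], [1, 2], [2, 1]]))  # ~4.0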
python
|
pytorch__pytorch
|
torch/_inductor/codegen/memory_planning.py
|
{
"start": 6376,
"end": 6928
}
|
class ____(MemorySplitProtocol):
"""
Helper to assist in caching get_live_ranges, get_size_hint, and
get_symbolic_size.
"""
def allocate(self, block: Allocation, is_last: bool):
is_allocated = self._allocate(block, is_last)
if is_allocated:
self.clear_cache()
return is_allocated
def clear_cache(self):
self.get_live_ranges.clear_cache(self)
self.get_size_hint.clear_cache(self)
self.get_symbolic_size.clear_cache(self)
@dataclasses.dataclass
|
ClearCacheOnAllocateMixin
|
python
|
pypa__setuptools
|
setuptools/tests/test_develop.py
|
{
"start": 960,
"end": 3072
}
|
class ____:
@staticmethod
def install_develop(src_dir, target):
develop_cmd = [
sys.executable,
'setup.py',
'develop',
'--install-dir',
str(target),
]
with src_dir.as_cwd():
with paths_on_pythonpath([str(target)]):
subprocess.check_call(develop_cmd)
@pytest.mark.skipif(
bool(os.environ.get("APPVEYOR")),
reason="https://github.com/pypa/setuptools/issues/851",
)
@pytest.mark.skipif(
platform.python_implementation() == 'PyPy',
reason="https://github.com/pypa/setuptools/issues/1202",
)
@pytest.mark.uses_network
def test_namespace_package_importable(self, tmpdir):
"""
Installing two packages sharing the same namespace, one installed
naturally using pip or `--single-version-externally-managed`
and the other installed using `develop` should leave the namespace
intact and both packages reachable by import.
"""
pkg_A = namespaces.build_namespace_package(tmpdir, 'myns.pkgA')
pkg_B = namespaces.build_namespace_package(tmpdir, 'myns.pkgB')
target = tmpdir / 'packages'
# use pip to install to the target directory
install_cmd = [
sys.executable,
'-m',
'pip',
'install',
str(pkg_A),
'-t',
str(target),
]
subprocess.check_call(install_cmd)
self.install_develop(pkg_B, target)
namespaces.make_site_dir(target)
try_import = [
sys.executable,
'-c',
'import myns.pkgA; import myns.pkgB',
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(try_import)
# additionally ensure that pkg_resources import works
pkg_resources_imp = [
sys.executable,
'-c',
'import pkg_resources',
]
with paths_on_pythonpath([str(target)]):
subprocess.check_call(pkg_resources_imp)
|
TestNamespaces
|
python
|
spyder-ide__spyder
|
spyder/plugins/remoteclient/widgets/connectionstatus.py
|
{
"start": 3080,
"end": 12724
}
|
class ____(
QWidget,
SpyderFontsMixin,
SvgToScaledPixmap,
SpyderConfigurationAccessor,
):
CONF_SECTION = "remoteclient"
def __init__(self, parent, host_id):
super().__init__(parent)
# Attributes
self.host_id = host_id
self.status = ConnectionStatus.Inactive
# Widgets
self._connection_label = QLabel(self)
self._status_label = QLabel(self)
self._user_label = QLabel(self)
self._message_label = QLabel(self)
self._message_label.setWordWrap(True)
self._image_label = QLabel(self)
self._log_label = QLabel(_("Connection messages"))
self._log_widget = SimpleCodeEditor(self)
self._copy_logs_button = QPushButton(_("Copy messages"))
# Initial settings
self._set_initial_text_in_labels()
self._set_stylesheet()
self._log_widget.setMaximumBlockCount(MAX_CLIENT_MESSAGES)
self._log_widget.setReadOnly(True)
self._log_widget.setMinimumHeight(210 if MAC else 230)
self._log_widget.setPlaceholderText(_("No logs to show"))
self._copy_logs_button.setEnabled(False)
self._copy_logs_button.clicked.connect(self._copy_logs)
# Info layout
info_layout = QVBoxLayout()
info_layout.setSpacing(0)
info_layout.setContentsMargins(0, 0, 0, 0)
info_layout.addWidget(self._connection_label)
info_layout.addWidget(self._status_label)
info_layout.addWidget(self._user_label)
info_layout.addSpacing(4 * AppStyle.MarginSize)
info_layout.addWidget(self._message_label)
info_layout.addStretch()
# This is necessary to align the image at the top with the info
# widgets on the left
image_layout = QVBoxLayout()
image_layout.setContentsMargins(0, 2 * AppStyle.MarginSize, 0, 0)
image_layout.addWidget(self._image_label)
# Top layout
top_layout = QHBoxLayout()
top_layout.addLayout(info_layout)
top_layout.setStretchFactor(info_layout, 2)
top_layout.addStretch()
top_layout.addLayout(image_layout)
# Bottom layout
bottom_layout = QVBoxLayout()
bottom_layout.setSpacing(0)
bottom_layout.setContentsMargins(0, 0, 0, 0)
bottom_layout.addWidget(self._log_label)
bottom_layout.addWidget(self._log_widget)
copy_layout = QHBoxLayout()
copy_layout.addStretch()
copy_layout.addWidget(self._copy_logs_button)
bottom_layout.addSpacing(2 * AppStyle.MarginSize)
bottom_layout.addLayout(copy_layout)
# Final layout
layout = QVBoxLayout()
layout.setContentsMargins(
# Match left margin of info widgets
4 * AppStyle.MarginSize,
0,
# Match right margin of info widgets
3 * AppStyle.MarginSize,
# Prevent the copy logs button from being clipped
4 * AppStyle.MarginSize
)
layout.addLayout(top_layout)
layout.addSpacing(6 * AppStyle.MarginSize)
layout.addLayout(bottom_layout)
self.setLayout(layout)
# ---- Public API
# -------------------------------------------------------------------------
def update_status(self, info: ConnectionInfo):
"""Update graphical elements related to the connection status."""
self.status = info["status"]
message = info["message"]
self._set_icon(self.status)
self._set_text_in_labels(self.status)
self._message_label.setText(message)
def update_info(self):
self._set_text_in_labels(self.status)
def add_log(self, log: RemoteClientLog):
"""Add a new log message to the log widget."""
if not self._copy_logs_button.isEnabled():
self._copy_logs_button.setEnabled(True)
formatted_log = (
# Message
f"<p>{LOG_LEVEL_TO_FMT_STRING[log['level']]} {log['message']}</p>"
# Small vertical space to separate logs
f"<div style='font-size: 2pt;font-weight: normal;'><p></p></div>"
)
# Move cursor so that new logs are always shown at the end
self._log_widget.moveCursor(QTextCursor.End)
self._log_widget.appendHtml(formatted_log)
self._log_widget.moveCursor(QTextCursor.End)
def add_logs(self, logs: Iterable):
"""Add saved log messages to the log widget."""
for log in logs:
self.add_log(log)
# ---- Private API
# -------------------------------------------------------------------------
def _set_stylesheet(self):
"""Set stylesheet for elements in this widget."""
# -- Style of important labels
font_size = self.get_font(SpyderFontType.Interface).pointSize()
important_labels_css = qstylizer.style.StyleSheet()
important_labels_css.QLabel.setValues(
fontSize=f"{font_size + 1}pt",
)
# Remove automatic indent added by Qt
important_labels_css.setValues(**{'qproperty-indent': '0'})
for label in [self._connection_label, self._message_label]:
label.setStyleSheet(important_labels_css.toString())
# -- Style of other info labels
other_info_labels_css = qstylizer.style.StyleSheet()
other_info_labels_css.setValues(
marginLeft=f"{9 * AppStyle.MarginSize}px"
)
for label in [self._status_label, self._user_label]:
label.setStyleSheet(other_info_labels_css.toString())
# -- Style of log widgets
log_label_css = qstylizer.style.StyleSheet()
log_label_css.QLabel.setValues(
# Increase padding (the default one is too small).
padding=f"{2 * AppStyle.MarginSize}px",
# Make it a bit different from a default QPushButton to not drag
# the same amount of attention to it.
backgroundColor=SpyderPalette.COLOR_BACKGROUND_3,
# Remove bottom rounded borders
borderBottomLeftRadius='0px',
borderBottomRightRadius='0px',
# This is necessary to align the label to the text above it
marginLeft="2px",
)
self._log_label.setStyleSheet(log_label_css.toString())
self._log_widget.css.QPlainTextEdit.setValues(
# Remove these borders to make it appear attached to the top label
borderTop="0px",
borderTopLeftRadius='0px',
borderTopRightRadius='0px',
# Match border color with the top label one and avoid changing
# that color when the widget is given focus
borderLeft=f"1px solid {SpyderPalette.COLOR_BACKGROUND_3}",
borderRight=f"1px solid {SpyderPalette.COLOR_BACKGROUND_3}",
borderBottom=f"1px solid {SpyderPalette.COLOR_BACKGROUND_3}",
# This is necessary to align the widget to the top label
marginLeft="2px",
# Increase padding a bit to make text look better
paddingLeft="6px",
paddingRight="6px",
paddingTop="6px",
# No need to have this due to the scrollbar
paddingBottom="0px",
)
self._log_widget.setStyleSheet(self._log_widget.css.toString())
def _set_initial_text_in_labels(self):
status = self.get_conf(
f"{self.host_id}/status", default=ConnectionStatus.Inactive
)
self._set_text_in_labels(status)
self._set_icon(status)
message = self.get_conf(f"{self.host_id}/status_message", default="")
if not message:
# This can only happen at startup or if the connection has never
# been used
message = _("The connection hasn't been used")
self._message_label.setText(message)
def _set_text_in_labels(self, status):
color = STATUS_TO_COLOR[status]
localized_status = STATUS_TO_TRANSLATION_STRINGS[status]
address, username = self._get_address_and_username()
self._connection_label.setText(
_('Connection to: <span style="color:{}">{}</span>').format(
color, address
)
)
self._user_label.setText(
_("Username: {}").format(username) if username else ""
)
self._status_label.setText(
_('Status: <span style="color:{}">{}</span>').format(
color, localized_status
)
)
def _set_icon(self, status):
pixmap = self.svg_to_scaled_pixmap(STATUS_TO_ICON[status], rescale=1)
self._image_label.setPixmap(pixmap)
@property
def _auth_method(self):
"""Get authentication method."""
return self.get_conf(f"{self.host_id}/auth_method")
def _copy_logs(self, clicked):
"""Copy log messages to clipboard."""
text = self._log_widget.toPlainText()
QApplication.clipboard().setText(text)
def _get_address_and_username(self):
if self._auth_method == AuthenticationMethod.JupyterHub:
address = self.get_conf(f"{self.host_id}/url")
username = ""
# TODO: Address this for configfile login
elif self._auth_method != AuthenticationMethod.ConfigFile:
address = self.get_conf(
f"{self.host_id}/{self._auth_method}/address"
)
username = self.get_conf(
f"{self.host_id}/{self._auth_method}/username"
)
else:
address = ""
username = ""
return (address, username)
|
ConnectionStatusWidget
|
python
|
scrapy__scrapy
|
scrapy/exporters.py
|
{
"start": 5855,
"end": 8234
}
|
class ____(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
self.item_element = kwargs.pop("item_element", "item")
self.root_element = kwargs.pop("root_element", "items")
super().__init__(**kwargs)
if not self.encoding:
self.encoding = "utf-8"
self.xg = XMLGenerator(file, encoding=self.encoding)
def _beautify_newline(self, new_item: bool = False) -> None:
if self.indent is not None and (self.indent > 0 or new_item):
self.xg.characters("\n")
def _beautify_indent(self, depth: int = 1) -> None:
if self.indent:
self.xg.characters(" " * self.indent * depth)
def start_exporting(self) -> None:
self.xg.startDocument()
self.xg.startElement(self.root_element, AttributesImpl({}))
self._beautify_newline(new_item=True)
def export_item(self, item: Any) -> None:
self._beautify_indent(depth=1)
self.xg.startElement(self.item_element, AttributesImpl({}))
self._beautify_newline()
for name, value in self._get_serialized_fields(item, default_value=""):
self._export_xml_field(name, value, depth=2)
self._beautify_indent(depth=1)
self.xg.endElement(self.item_element)
self._beautify_newline(new_item=True)
def finish_exporting(self) -> None:
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name: str, serialized_value: Any, depth: int) -> None:
self._beautify_indent(depth=depth)
self.xg.startElement(name, AttributesImpl({}))
if hasattr(serialized_value, "items"):
self._beautify_newline()
for subname, value in serialized_value.items():
self._export_xml_field(subname, value, depth=depth + 1)
self._beautify_indent(depth=depth)
elif is_listlike(serialized_value):
self._beautify_newline()
for value in serialized_value:
self._export_xml_field("value", value, depth=depth + 1)
self._beautify_indent(depth=depth)
elif isinstance(serialized_value, str):
self.xg.characters(serialized_value)
else:
self.xg.characters(str(serialized_value))
self.xg.endElement(name)
self._beautify_newline()
|
XmlItemExporter
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/worksheet/test_cond_format07.py
|
{
"start": 345,
"end": 4138
}
|
class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A4",
{
"type": "text",
"criteria": "containing",
"value": "foo",
},
)
worksheet.conditional_format(
"A1:A4",
{
"type": "text",
"criteria": "not containing",
"value": "foo",
"format": None,
},
)
worksheet.conditional_format(
"A1:A4",
{
"type": "text",
"criteria": "begins with",
"value": "b",
"format": None,
},
)
worksheet.conditional_format(
"A1:A4",
{
"type": "text",
"criteria": "ends with",
"value": "b",
"format": None,
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="containsText" priority="1" operator="containsText" text="foo">
<formula>NOT(ISERROR(SEARCH("foo",A1)))</formula>
</cfRule>
<cfRule type="notContainsText" priority="2" operator="notContains" text="foo">
<formula>ISERROR(SEARCH("foo",A1))</formula>
</cfRule>
<cfRule type="beginsWith" priority="3" operator="beginsWith" text="b">
<formula>LEFT(A1,1)="b"</formula>
</cfRule>
<cfRule type="endsWith" priority="4" operator="endsWith" text="b">
<formula>RIGHT(A1,1)="b"</formula>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
|
TestAssembleWorksheet
|
python
|
PrefectHQ__prefect
|
src/prefect/_internal/lazy.py
|
{
"start": 139,
"end": 1702
}
|
class ____(Generic[K, V]):
"""
A dictionary-like object that defers loading its contents until first access.
Useful for module-level registries that import heavy dependencies. The loader
function is called once on first access, and the result is cached.
Example:
>>> def load_plugins() -> dict[str, type]:
... from heavy_module import PluginA, PluginB
... return {"a": PluginA, "b": PluginB}
>>>
>>> plugins: LazyDict[str, type] = LazyDict(load_plugins)
>>> # Nothing loaded yet
>>> plugins.get("a") # Triggers load, returns PluginA
>>> plugins.get("b") # Uses cached result, returns PluginB
"""
def __init__(self, loader_func: Callable[[], dict[K, V]]) -> None:
self._loader = loader_func
self._cache: dict[K, V] | None = None
def _ensure_loaded(self) -> dict[K, V]:
"""Load the dictionary contents if not already loaded."""
if self._cache is None:
self._cache = self._loader()
return self._cache
def get(self, key: K, default: V | None = None) -> V | None:
"""Get an item from the lazy dict, loading if necessary."""
return self._ensure_loaded().get(key, default)
def __contains__(self, key: K) -> bool:
"""Check if key is in the lazy dict, loading if necessary."""
return key in self._ensure_loaded()
def __getitem__(self, key: K) -> V:
"""Get an item from the lazy dict, loading if necessary."""
return self._ensure_loaded()[key]
|
LazyDict
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_shuffle.py
|
{
"start": 35452,
"end": 37630
}
|
class ____(SetIndex):
"""Shuffles the DataFrame according to its new divisions.
Simplifies the Expression to blockwise pre-processing, shuffle and
blockwise post-processing expressions.
Parameters
----------
frame: Expr
Frame-like expression where the index is set.
_other: Expr | Scalar
Either a Series-like expression to use as Index or a scalar defining the column.
drop: bool
Whether to drop the old column.
new_divisions: int
Divisions of the resulting expression.
"""
_parameters = [
"frame",
"_other",
"drop",
"npartitions",
"ascending",
"upsample",
"user_divisions",
"shuffle_method",
"options", # Shuffle method options
]
def _lower(self):
divisions = self.other._meta._constructor(self._divisions())
partitions = _SetPartitionsPreSetIndex(self.other, divisions)
assigned = Assign(self.frame, "_partitions", partitions)
if isinstance(self._other, Expr):
assigned = Assign(assigned, "_index", self._other)
shuffled = Shuffle(
assigned,
"_partitions",
npartitions_out=len(self._divisions()) - 1,
ignore_index=True,
method=self.shuffle_method,
options=self.options,
)
shuffled = Projection(
shuffled, [c for c in assigned.columns if c != "_partitions"]
)
if isinstance(self._other, Expr):
drop, set_name = True, "_index"
else:
drop, set_name = self.drop, self.other._meta.name
lru_key = (
self.other._name,
self._npartitions_input,
self.ascending,
128e6,
self.upsample,
)
computed_divisions = divisions_lru.get(lru_key)
index_set = _SetIndexPost(
shuffled,
self.other._meta.name,
drop,
set_name,
self.frame._meta.columns.dtype,
computed_divisions,
self.user_divisions,
)
return SortIndexBlockwise(index_set)
|
SetPartition
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/axisline_style.py
|
{
"start": 250,
"end": 3715
}
|
class ____:
class SimpleArrow(FancyArrowPatch):
"""The artist class that will be returned for SimpleArrow style."""
_ARROW_STYLE = "->"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale):
self._axis_artist = axis_artist
self._line_transform = transform
self._line_path = line_path
self._line_mutation_scale = line_mutation_scale
FancyArrowPatch.__init__(self,
path=self._line_path,
arrowstyle=self._ARROW_STYLE,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=line_mutation_scale,
mutation_aspect=None,
transform=IdentityTransform(),
)
def set_line_mutation_scale(self, scale):
self.set_mutation_scale(scale*self._line_mutation_scale)
def _extend_path(self, path, mutation_size=10):
"""
Extend the path to make room for drawing the arrow.
"""
(x0, y0), (x1, y1) = path.vertices[-2:]
theta = math.atan2(y1 - y0, x1 - x0)
x2 = x1 + math.cos(theta) * mutation_size
y2 = y1 + math.sin(theta) * mutation_size
if path.codes is None:
return Path(np.concatenate([path.vertices, [[x2, y2]]]))
else:
return Path(np.concatenate([path.vertices, [[x2, y2]]]),
np.concatenate([path.codes, [Path.LINETO]]))
def set_path(self, path):
self._line_path = path
def draw(self, renderer):
"""
Draw the axis line.
1) Transform the path to display coordinates.
2) Extend the path to make room for the arrow.
3) Update the path of the FancyArrowPatch.
4) Draw.
"""
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() # line_mutation_scale()
extended_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extended_path
FancyArrowPatch.draw(self, renderer)
def get_window_extent(self, renderer=None):
path_in_disp = self._line_transform.transform_path(self._line_path)
mutation_size = self.get_mutation_scale() # line_mutation_scale()
extended_path = self._extend_path(path_in_disp,
mutation_size=mutation_size)
self._path_original = extended_path
return FancyArrowPatch.get_window_extent(self, renderer)
class FilledArrow(SimpleArrow):
"""The artist class that will be returned for FilledArrow style."""
_ARROW_STYLE = "-|>"
def __init__(self, axis_artist, line_path, transform,
line_mutation_scale, facecolor):
super().__init__(axis_artist, line_path, transform,
line_mutation_scale)
self.set_facecolor(facecolor)
|
_FancyAxislineStyle
|
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_katz_centrality.py
|
{
"start": 8207,
"end": 9958
}
|
class ____:
@classmethod
def setup_class(cls):
G = nx.DiGraph()
edges = [
(1, 2),
(1, 3),
(2, 4),
(3, 2),
(3, 5),
(4, 2),
(4, 5),
(4, 6),
(5, 6),
(5, 7),
(5, 8),
(6, 8),
(7, 1),
(7, 5),
(7, 8),
(8, 6),
(8, 7),
]
G.add_edges_from(edges, weight=2.0)
cls.G = G.reverse()
cls.G.alpha = 0.1
cls.G.evc = [
0.3289589783189635,
0.2832077296243516,
0.3425906003685471,
0.3970420865198392,
0.41074871061646284,
0.272257430756461,
0.4201989685435462,
0.34229059218038554,
]
H = nx.DiGraph(edges)
cls.H = G.reverse()
cls.H.alpha = 0.1
cls.H.evc = [
0.3289589783189635,
0.2832077296243516,
0.3425906003685471,
0.3970420865198392,
0.41074871061646284,
0.272257430756461,
0.4201989685435462,
0.34229059218038554,
]
def test_katz_centrality_weighted(self):
G = self.G
alpha = self.G.alpha
p = nx.katz_centrality(G, alpha, weight="weight")
for a, b in zip(list(p.values()), self.G.evc):
assert a == pytest.approx(b, abs=1e-7)
def test_katz_centrality_unweighted(self):
H = self.H
alpha = self.H.alpha
p = nx.katz_centrality(H, alpha, weight="weight")
for a, b in zip(list(p.values()), self.H.evc):
assert a == pytest.approx(b, abs=1e-7)
|
TestKatzCentralityDirected
|
python
|
huggingface__transformers
|
src/transformers/models/reformer/modeling_reformer.py
|
{
"start": 62907,
"end": 63727
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = ReformerFeedForwardDense(config)
self.output = ReformerFeedForwardOutput(config)
def forward(self, attention_output):
return apply_chunking_to_forward(
self.forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
def forward_chunk(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dense(hidden_states)
return self.output(hidden_states)
|
ChunkReformerFeedForward
|
python
|
pallets__werkzeug
|
src/werkzeug/_reloader.py
|
{
"start": 9207,
"end": 9837
}
|
class ____(ReloaderLoop):
name = "stat"
def __enter__(self) -> ReloaderLoop:
self.mtimes: dict[str, float] = {}
return super().__enter__()
def run_step(self) -> None:
for name in _find_stat_paths(self.extra_files, self.exclude_patterns):
try:
mtime = os.stat(name).st_mtime
except OSError:
continue
old_time = self.mtimes.get(name)
if old_time is None:
self.mtimes[name] = mtime
continue
if mtime > old_time:
self.trigger_reload(name)
|
StatReloaderLoop
|
python
|
apache__airflow
|
providers/standard/tests/unit/standard/operators/test_branch_operator.py
|
{
"start": 1977,
"end": 2103
}
|
class ____(BaseBranchOperator):
def choose_branch(self, context):
return ["branch_1", "branch_2"]
|
ChooseBranchOneTwo
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 17194,
"end": 17308
}
|
class ____(Person):
code = models.CharField(max_length=20)
class Meta:
ordering = ["name"]
|
Employee
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/autoVariance4.py
|
{
"start": 298,
"end": 539
}
|
class ____[T](Parent_Invariant[T]):
pass
# This should generate an error.
a1: ShouldBeInvariant[int] = ShouldBeInvariant[float]()
# This should generate an error.
a2: ShouldBeInvariant[float] = ShouldBeInvariant[int]()
|
ShouldBeInvariant
|
python
|
pytorch__pytorch
|
test/test_functionalization_of_rng_ops.py
|
{
"start": 10980,
"end": 11575
}
|
class ____(TestCase):
@dtypes(torch.float32)
@patch.object(torch._functorch.config, "functionalize_rng_ops", True)
def test_on_cpu(self, dtype, device):
def fn(x):
a = torch.rand_like(x) * x
a = torch.rand_like(x) * a
return a
x = torch.rand(10, device=device, dtype=dtype)
aot_fn = aot_function(fn, nop)
with self.assertRaises(RuntimeError):
aot_fn(x)
only_for = ("cpu",)
instantiate_device_type_tests(NegativeTest, globals(), only_for=only_for)
if __name__ == "__main__":
run_tests()
|
NegativeTest
|
python
|
django-compressor__django-compressor
|
compressor/management/commands/mtime_cache.py
|
{
"start": 198,
"end": 3856
}
|
class ____(BaseCommand):
help = "Add or remove all mtime values from the cache"
def add_arguments(self, parser):
parser.add_argument(
"-i",
"--ignore",
action="append",
default=[],
dest="ignore_patterns",
metavar="PATTERN",
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more.",
),
parser.add_argument(
"--no-default-ignore",
action="store_false",
dest="use_default_ignore_patterns",
default=True,
help="Don't ignore the common private glob-style patterns 'CVS', "
"'.*' and '*~'.",
),
parser.add_argument(
"--follow-links",
dest="follow_links",
action="store_true",
help="Follow symlinks when traversing the COMPRESS_ROOT "
"(which defaults to STATIC_ROOT). Be aware that using this "
"can lead to infinite recursion if a link points to a parent "
"directory of itself.",
),
parser.add_argument(
"-c", "--clean", dest="clean", action="store_true", help="Remove all items"
),
parser.add_argument(
"-a", "--add", dest="add", action="store_true", help="Add all items"
),
def is_ignored(self, path):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
for pattern in self.ignore_patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def handle(self, **options):
ignore_patterns = options["ignore_patterns"]
if options["use_default_ignore_patterns"]:
ignore_patterns += ["CVS", ".*", "*~"]
options["ignore_patterns"] = ignore_patterns
self.ignore_patterns = ignore_patterns
if (options["add"] and options["clean"]) or (
not options["add"] and not options["clean"]
):
raise CommandError('Please specify either "--add" or "--clean"')
if not settings.COMPRESS_MTIME_DELAY:
raise CommandError(
"mtime caching is currently disabled. Please "
"set the COMPRESS_MTIME_DELAY setting to a number of seconds."
)
files_to_add = set()
keys_to_delete = set()
for root, dirs, files in os.walk(
settings.COMPRESS_ROOT, followlinks=options["follow_links"]
):
for dir_ in dirs:
if self.is_ignored(dir_):
dirs.remove(dir_)
for filename in files:
common = "".join(root.split(settings.COMPRESS_ROOT))
if common.startswith(os.sep):
common = common[len(os.sep) :]
if self.is_ignored(os.path.join(common, filename)):
continue
filename = os.path.join(root, filename)
keys_to_delete.add(get_mtime_cachekey(filename))
if options["add"]:
files_to_add.add(filename)
if keys_to_delete:
cache.delete_many(list(keys_to_delete))
self.stdout.write(
"Deleted mtimes of %d files from the cache." % len(keys_to_delete)
)
if files_to_add:
for filename in files_to_add:
get_mtime(filename)
self.stdout.write("Added mtimes of %d files to cache." % len(files_to_add))
|
Command
|
python
|
PrefectHQ__prefect
|
src/prefect/server/events/clients.py
|
{
"start": 7327,
"end": 8650
}
|
class ____:
_http_client: PrefectHttpxAsyncClient
def __init__(self, additional_headers: dict[str, str] = {}):
from prefect.server.api.server import create_app
# create_app caches application instances, and invoking it with no arguments
# will point it to the currently running server instance
api_app = create_app()
self._http_client = PrefectHttpxAsyncClient(
transport=httpx.ASGITransport(app=api_app, raise_app_exceptions=False),
headers={**additional_headers},
base_url="http://prefect-in-memory/api",
enable_csrf_support=False,
raise_on_all_errors=False,
)
async def __aenter__(self) -> Self:
await self._http_client.__aenter__()
return self
async def __aexit__(self, *args: Any) -> None:
await self._http_client.__aexit__(*args)
async def pause_automation(self, automation_id: UUID) -> httpx.Response:
return await self._http_client.patch(
f"/automations/{automation_id}", json={"enabled": False}
)
async def resume_automation(self, automation_id: UUID) -> httpx.Response:
return await self._http_client.patch(
f"/automations/{automation_id}", json={"enabled": True}
)
|
PrefectServerEventsAPIClient
|
python
|
django__django
|
tests/generic_views/test_detail.py
|
{
"start": 465,
"end": 9547
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.artist1 = Artist.objects.create(name="Rene Magritte")
cls.author1 = Author.objects.create(
name="Roberto Bolaño", slug="roberto-bolano"
)
cls.author2 = Author.objects.create(
name="Scott Rosenberg", slug="scott-rosenberg"
)
cls.book1 = Book.objects.create(
name="2066", slug="2066", pages=800, pubdate=datetime.date(2008, 10, 1)
)
cls.book1.authors.add(cls.author1)
cls.book2 = Book.objects.create(
name="Dreaming in Code",
slug="dreaming-in-code",
pages=300,
pubdate=datetime.date(2006, 5, 1),
)
cls.page1 = Page.objects.create(
content="I was once bitten by a moose.",
template="generic_views/page_template.html",
)
def test_simple_object(self):
res = self.client.get("/detail/obj/")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], {"foo": "bar"})
self.assertIsInstance(res.context["view"], View)
self.assertTemplateUsed(res, "generic_views/detail.html")
def test_detail_by_pk(self):
res = self.client.get("/detail/author/%s/" % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_missing_object(self):
res = self.client.get("/detail/author/500/")
self.assertEqual(res.status_code, 404)
def test_detail_object_does_not_exist(self):
with self.assertRaises(ObjectDoesNotExist):
self.client.get("/detail/doesnotexist/1/")
def test_detail_by_custom_pk(self):
res = self.client.get("/detail/author/bycustompk/%s/" % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_slug(self):
res = self.client.get("/detail/author/byslug/scott-rosenberg/")
self.assertEqual(res.status_code, 200)
self.assertEqual(
res.context["object"], Author.objects.get(slug="scott-rosenberg")
)
self.assertEqual(
res.context["author"], Author.objects.get(slug="scott-rosenberg")
)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_custom_slug(self):
res = self.client.get("/detail/author/bycustomslug/scott-rosenberg/")
self.assertEqual(res.status_code, 200)
self.assertEqual(
res.context["object"], Author.objects.get(slug="scott-rosenberg")
)
self.assertEqual(
res.context["author"], Author.objects.get(slug="scott-rosenberg")
)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_pk_ignore_slug(self):
res = self.client.get(
"/detail/author/bypkignoreslug/%s-roberto-bolano/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_pk_ignore_slug_mismatch(self):
res = self.client.get(
"/detail/author/bypkignoreslug/%s-scott-rosenberg/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_pk_and_slug(self):
res = self.client.get(
"/detail/author/bypkandslug/%s-roberto-bolano/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_detail_by_pk_and_slug_mismatch_404(self):
res = self.client.get(
"/detail/author/bypkandslug/%s-scott-rosenberg/" % self.author1.pk
)
self.assertEqual(res.status_code, 404)
def test_verbose_name(self):
res = self.client.get("/detail/artist/%s/" % self.artist1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.artist1)
self.assertEqual(res.context["artist"], self.artist1)
self.assertTemplateUsed(res, "generic_views/artist_detail.html")
def test_template_name(self):
res = self.client.get("/detail/author/%s/template_name/" % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/about.html")
def test_template_name_suffix(self):
res = self.client.get(
"/detail/author/%s/template_name_suffix/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["author"], self.author1)
self.assertTemplateUsed(res, "generic_views/author_view.html")
def test_template_name_field(self):
res = self.client.get("/detail/page/%s/field/" % self.page1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.page1)
self.assertEqual(res.context["page"], self.page1)
self.assertTemplateUsed(res, "generic_views/page_template.html")
def test_context_object_name(self):
res = self.client.get(
"/detail/author/%s/context_object_name/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertEqual(res.context["thingy"], self.author1)
self.assertNotIn("author", res.context)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_duplicated_context_object_name(self):
res = self.client.get(
"/detail/author/%s/dupe_context_object_name/" % self.author1.pk
)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"], self.author1)
self.assertNotIn("author", res.context)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_custom_detail(self):
"""
AuthorCustomDetail overrides get() and ensures that
SingleObjectMixin.get_context_object_name() always uses the obj
parameter instead of self.object.
"""
res = self.client.get("/detail/author/%s/custom_detail/" % self.author1.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["custom_author"], self.author1)
self.assertNotIn("author", res.context)
self.assertNotIn("object", res.context)
self.assertTemplateUsed(res, "generic_views/author_detail.html")
def test_deferred_queryset_template_name(self):
class FormContext(SingleObjectTemplateResponseMixin):
request = RequestFactory().get("/")
model = Author
object = Author.objects.defer("name").get(pk=self.author1.pk)
self.assertEqual(
FormContext().get_template_names()[0], "generic_views/author_detail.html"
)
def test_deferred_queryset_context_object_name(self):
class FormContext(ModelFormMixin):
request = RequestFactory().get("/")
model = Author
object = Author.objects.defer("name").get(pk=self.author1.pk)
fields = ("name",)
form_context_data = FormContext().get_context_data()
self.assertEqual(form_context_data["object"], self.author1)
self.assertEqual(form_context_data["author"], self.author1)
def test_invalid_url(self):
with self.assertRaises(AttributeError):
self.client.get("/detail/author/invalid/url/")
def test_invalid_queryset(self):
msg = (
"AuthorDetail is missing a QuerySet. Define AuthorDetail.model, "
"AuthorDetail.queryset, or override AuthorDetail.get_queryset()."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get("/detail/author/invalid/qs/")
def test_non_model_object_with_meta(self):
res = self.client.get("/detail/nonmodel/1/")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context["object"].id, "non_model_1")
|
DetailViewTest
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/envs/mujoco/walker2d_v4.py
|
{
"start": 266,
"end": 4824
}
|
class ____(MujocoEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": 125,
}
def __init__(
self,
forward_reward_weight=1.0,
ctrl_cost_weight=1e-3,
healthy_reward=1.0,
terminate_when_unhealthy=True,
healthy_z_range=(0.8, 2.0),
healthy_angle_range=(-1.0, 1.0),
reset_noise_scale=5e-3,
exclude_current_positions_from_observation=True,
**kwargs,
):
utils.EzPickle.__init__(
self,
forward_reward_weight,
ctrl_cost_weight,
healthy_reward,
terminate_when_unhealthy,
healthy_z_range,
healthy_angle_range,
reset_noise_scale,
exclude_current_positions_from_observation,
**kwargs,
)
self._forward_reward_weight = forward_reward_weight
self._ctrl_cost_weight = ctrl_cost_weight
self._healthy_reward = healthy_reward
self._terminate_when_unhealthy = terminate_when_unhealthy
self._healthy_z_range = healthy_z_range
self._healthy_angle_range = healthy_angle_range
self._reset_noise_scale = reset_noise_scale
self._exclude_current_positions_from_observation = (
exclude_current_positions_from_observation
)
if exclude_current_positions_from_observation:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(17,), dtype=np.float64
)
else:
observation_space = Box(
low=-np.inf, high=np.inf, shape=(18,), dtype=np.float64
)
MujocoEnv.__init__(
self,
"walker2d.xml",
4,
observation_space=observation_space,
default_camera_config=DEFAULT_CAMERA_CONFIG,
**kwargs,
)
@property
def healthy_reward(self):
return (
float(self.is_healthy or self._terminate_when_unhealthy)
* self._healthy_reward
)
def control_cost(self, action):
control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
return control_cost
@property
def is_healthy(self):
z, angle = self.data.qpos[1:3]
min_z, max_z = self._healthy_z_range
min_angle, max_angle = self._healthy_angle_range
healthy_z = min_z < z < max_z
healthy_angle = min_angle < angle < max_angle
is_healthy = healthy_z and healthy_angle
return is_healthy
@property
def terminated(self):
terminated = not self.is_healthy if self._terminate_when_unhealthy else False
return terminated
def _get_obs(self):
position = self.data.qpos.flat.copy()
velocity = np.clip(self.data.qvel.flat.copy(), -10, 10)
if self._exclude_current_positions_from_observation:
position = position[1:]
observation = np.concatenate((position, velocity)).ravel()
return observation
def step(self, action):
x_position_before = self.data.qpos[0]
self.do_simulation(action, self.frame_skip)
x_position_after = self.data.qpos[0]
x_velocity = (x_position_after - x_position_before) / self.dt
ctrl_cost = self.control_cost(action)
forward_reward = self._forward_reward_weight * x_velocity
healthy_reward = self.healthy_reward
rewards = forward_reward + healthy_reward
costs = ctrl_cost
observation = self._get_obs()
reward = rewards - costs
terminated = self.terminated
info = {
"x_position": x_position_after,
"x_velocity": x_velocity,
}
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return observation, reward, terminated, False, info
def reset_model(self):
noise_low = -self._reset_noise_scale
noise_high = self._reset_noise_scale
qpos = self.init_qpos + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nq
)
qvel = self.init_qvel + self.np_random.uniform(
low=noise_low, high=noise_high, size=self.model.nv
)
self.set_state(qpos, qvel)
observation = self._get_obs()
return observation
|
Walker2dEnv
|
python
|
huggingface__transformers
|
src/transformers/models/superpoint/modeling_superpoint.py
|
{
"start": 3155,
"end": 4667
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Relative (x, y) coordinates of predicted keypoints in a given image.
scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
Scores of predicted keypoints.
descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
Descriptors of predicted keypoints.
mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in keypoints, scores and descriptors are keypoint information.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
"""
loss: Optional[torch.FloatTensor] = None
keypoints: Optional[torch.IntTensor] = None
scores: Optional[torch.FloatTensor] = None
descriptors: Optional[torch.FloatTensor] = None
mask: Optional[torch.BoolTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
|
SuperPointKeypointDescriptionOutput
|
python
|
boto__boto3
|
boto3/exceptions.py
|
{
"start": 1764,
"end": 2618
}
|
class ____(
Boto3Error, botocore.exceptions.DataNotFoundError
):
"""Raised when you attempt to create a resource that does not exist."""
def __init__(self, service_name, available_services, has_low_level_client):
msg = (
"The '{}' resource does not exist.\n"
"The available resources are:\n"
" - {}\n".format(
service_name, '\n - '.join(available_services)
)
)
if has_low_level_client:
msg = (
f"{msg}\nConsider using a boto3.client('{service_name}') "
f"instead of a resource for '{service_name}'"
)
# Not using super because we don't want the DataNotFoundError
# to be called, as it has a different __init__ signature.
Boto3Error.__init__(self, msg)
|
ResourceNotExistsError
|
python
|
graphql-python__graphene
|
graphene/types/tests/test_schema.py
|
{
"start": 290,
"end": 1621
}
|
class ____(ObjectType):
inner = Field(MyOtherType)
def test_schema():
schema = Schema(Query)
graphql_schema = schema.graphql_schema
assert isinstance(graphql_schema, GraphQLSchema)
query_type = graphql_schema.query_type
assert isinstance(query_type, GraphQLObjectType)
assert query_type.name == "Query"
assert query_type.graphene_type is Query
def test_schema_get_type():
schema = Schema(Query)
assert schema.Query == Query
assert schema.MyOtherType == MyOtherType
def test_schema_get_type_error():
schema = Schema(Query)
with raises(AttributeError) as exc_info:
schema.X
assert str(exc_info.value) == 'Type "X" not found in the Schema'
def test_schema_str():
schema = Schema(Query)
assert (
str(schema).strip()
== dedent(
"""
type Query {
inner: MyOtherType
}
type MyOtherType {
field: String
}
"""
).strip()
)
def test_schema_introspect():
schema = Schema(Query)
assert "__schema" in schema.introspect()
def test_schema_requires_query_type():
schema = Schema()
result = schema.execute("query {}")
assert len(result.errors) == 1
error = result.errors[0]
assert error.message == "Query root type must be provided."
|
Query
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-nodes-equal-to-sum-of-descendants.py
|
{
"start": 159,
"end": 1113
}
|
class ____(object):
def equalToDescendants(self, root):
"""
:type root: Optional[TreeNode]
:rtype: int
"""
def iter_dfs(node):
result = 0
stk = [(1, [node, [0]])]
while stk:
step, args = stk.pop()
if step == 1:
node, ret = args
if not node:
continue
ret1, ret2 = [0], [0]
stk.append((2, [node, ret1, ret2, ret]))
stk.append((1, [node.right, ret2]))
stk.append((1, [node.left, ret1]))
elif step == 2:
node, ret1, ret2, ret = args
if node.val == ret1[0]+ret2[0]:
result += 1
ret[0] = ret1[0]+ret2[0]+node.val
return result
return iter_dfs(root)
# Time: O(n)
# Space: O(h)
|
Solution
|
python
|
doocs__leetcode
|
solution/0800-0899/0850.Rectangle Area II/Solution.py
|
{
"start": 101,
"end": 1230
}
|
class ____:
def __init__(self, nums):
n = len(nums) - 1
self.nums = nums
self.tr = [Node() for _ in range(n << 2)]
self.build(1, 0, n - 1)
def build(self, u, l, r):
self.tr[u].l, self.tr[u].r = l, r
if l != r:
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
def modify(self, u, l, r, k):
if self.tr[u].l >= l and self.tr[u].r <= r:
self.tr[u].cnt += k
else:
mid = (self.tr[u].l + self.tr[u].r) >> 1
if l <= mid:
self.modify(u << 1, l, r, k)
if r > mid:
self.modify(u << 1 | 1, l, r, k)
self.pushup(u)
def pushup(self, u):
if self.tr[u].cnt:
self.tr[u].length = self.nums[self.tr[u].r + 1] - self.nums[self.tr[u].l]
elif self.tr[u].l == self.tr[u].r:
self.tr[u].length = 0
else:
self.tr[u].length = self.tr[u << 1].length + self.tr[u << 1 | 1].length
@property
def length(self):
return self.tr[1].length
|
SegmentTree
|
python
|
pytest-dev__pytest
|
src/_pytest/warning_types.py
|
{
"start": 1873,
"end": 2097
}
|
class ____(PytestWarning):
"""
Warning emitted when a test function returns a value other than ``None``.
See :ref:`return-not-none` for details.
"""
__module__ = "pytest"
@final
|
PytestReturnNotNoneWarning
|
python
|
doocs__leetcode
|
solution/0100-0199/0162.Find Peak Element/Solution.py
|
{
"start": 0,
"end": 315
}
|
class ____:
def findPeakElement(self, nums: List[int]) -> int:
left, right = 0, len(nums) - 1
while left < right:
mid = (left + right) >> 1
if nums[mid] > nums[mid + 1]:
right = mid
else:
left = mid + 1
return left
|
Solution
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1047450,
"end": 1048012
}
|
class ____(VegaLiteSchema):
"""
RowColLayoutAlign schema wrapper.
Parameters
----------
column : :class:`LayoutAlign`, Literal['all', 'each', 'none']
row : :class:`LayoutAlign`, Literal['all', 'each', 'none']
"""
_schema = {"$ref": "#/definitions/RowCol<LayoutAlign>"}
def __init__(
self,
column: Optional[SchemaBase | LayoutAlign_T] = Undefined,
row: Optional[SchemaBase | LayoutAlign_T] = Undefined,
**kwds,
):
super().__init__(column=column, row=row, **kwds)
|
RowColLayoutAlign
|
python
|
pytorch__pytorch
|
torch/_guards.py
|
{
"start": 23186,
"end": 24192
}
|
class ____:
@abstractmethod
def add_dynamo_installed_submodule(self, fn_id: int, identifier: str) -> None: ...
@abstractmethod
def get_dynamo_installed_submodules(self, fn_id: int) -> list[str]: ...
@abstractmethod
def add_autograd_key_entry(self, identifier: str, key: Callable) -> None: ...
@abstractmethod
def get_autograd_key_entry(self, identifier: str) -> Optional[Callable]: ...
@abstractmethod
def add_proxy_dispatch_entry(self, identifier: str, key: Callable) -> None: ...
@abstractmethod
def get_proxy_dispatch_entry(self, identifier: str) -> Optional[Callable]: ...
@abstractmethod
def add_lazy_bwd_entry(
self,
identifier: str,
tangent_metadata: tuple[object],
gmod: torch.fx.GraphModule,
) -> int: ...
@abstractmethod
def get_lazy_bwd_entry(
self, identifier: str, tangent_metadata: tuple[object]
) -> tuple[Optional[torch.fx.GraphModule], Optional[int]]: ...
|
HopSubgraphCache
|
python
|
redis__redis-py
|
redis/asyncio/multidb/command_executor.py
|
{
"start": 1136,
"end": 3562
}
|
class ____(CommandExecutor):
@property
@abstractmethod
def databases(self) -> Databases:
"""Returns a list of databases."""
pass
@property
@abstractmethod
def failure_detectors(self) -> List[AsyncFailureDetector]:
"""Returns a list of failure detectors."""
pass
@abstractmethod
def add_failure_detector(self, failure_detector: AsyncFailureDetector) -> None:
"""Adds a new failure detector to the list of failure detectors."""
pass
@property
@abstractmethod
def active_database(self) -> Optional[AsyncDatabase]:
"""Returns currently active database."""
pass
@abstractmethod
async def set_active_database(self, database: AsyncDatabase) -> None:
"""Sets the currently active database."""
pass
@property
@abstractmethod
def active_pubsub(self) -> Optional[PubSub]:
"""Returns currently active pubsub."""
pass
@active_pubsub.setter
@abstractmethod
def active_pubsub(self, pubsub: PubSub) -> None:
"""Sets currently active pubsub."""
pass
@property
@abstractmethod
def failover_strategy_executor(self) -> FailoverStrategyExecutor:
"""Returns failover strategy executor."""
pass
@property
@abstractmethod
def command_retry(self) -> Retry:
"""Returns command retry object."""
pass
@abstractmethod
async def pubsub(self, **kwargs):
"""Initializes a PubSub object on a currently active database"""
pass
@abstractmethod
async def execute_command(self, *args, **options):
"""Executes a command and returns the result."""
pass
@abstractmethod
async def execute_pipeline(self, command_stack: tuple):
"""Executes a stack of commands in pipeline."""
pass
@abstractmethod
async def execute_transaction(
self, transaction: Callable[[Pipeline], None], *watches, **options
):
"""Executes a transaction block wrapped in callback."""
pass
@abstractmethod
async def execute_pubsub_method(self, method_name: str, *args, **kwargs):
"""Executes a given method on active pub/sub."""
pass
@abstractmethod
async def execute_pubsub_run(self, sleep_time: float, **kwargs) -> Any:
"""Executes pub/sub run in a thread."""
pass
|
AsyncCommandExecutor
|
python
|
joke2k__faker
|
faker/providers/currency/th_TH/__init__.py
|
{
"start": 111,
"end": 6238
}
|
class ____(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "ดีแรห์ม สหรัฐอาหรับเอมิเรตส์"),
("AFN", "อัฟกานิ"),
("ALL", "เลค"),
("AMD", "ดีแรห์ม อาร์เมเนีย"),
("ANG", "กิลเดอร์ เนเธอร์แลนด์แอนทิลลิส"),
("AOA", "ควันซา"),
("ARS", "เปโซ อาร์เจนตินา"),
("AUD", "ดอลลาร์ ออสเตรเลีย"),
("AWG", "กิลเดอร์ อารูบา"),
("AZN", "มานาท อาเซอร์ไบจาน"),
("BAM", "มาร์ค บอสเนียและเฮอร์เซโกวีนา"),
("BBD", "ดอลลาร์ บาร์เบโดส"),
("BDT", "ตากา"),
("BGN", "เลฟ บัลแกเรีย"),
("BHD", "ดีนาร์ บาห์เรน"),
("BIF", "ฟรังก์ บุรุนดี"),
("BMD", "ดอลลาร์ เบอร์มิวดา"),
("BND", "ดอลลาร์ บรูไนดารุสซาลาม"),
("BOB", "โบลีเวียโน"),
("BRL", "เรียล บราซิล"),
("BSD", "ดอลลาร์ บาฮามาส"),
("BTN", "เองกัลทรัม"),
("BWP", "พูลา"),
("BYR", "รูเบิล เบลารุส"),
("BZD", "ดอลลาร์ เบลีซ"),
("CAD", "ดอลลาร์ แคนาดา"),
("CDF", "ฟรังก์ คองโก"),
("CHF", "ฟรังก์ สวิส"),
("CLF", "ฟันด์ โค้ด ยูนิแดด ดี โฟเมนโต"),
("CLP", "เปโซ ชิลี"),
("CNY", "หยวนเหรินหมินปี้"),
("COP", "เปโซ โคลอมเบีย"),
("COU", "ยูนิแดด ดี วาโล เรียล"),
("CRC", "โคโลน คอสตาริกา"),
("CUC", "แปลงสภาพเปโซ"),
("CUP", "เปโซ คิวบา"),
("CVE", "เอสคูโด เคปเวิร์ด"),
("CZK", "คราวน์ เช็ก"),
("DJF", "ฟรังก์ จิบูตี"),
("DKK", "โครน เดนมาร์ก"),
("DOP", "เปโซ สาธารณรัฐโดมินิกัน"),
("DZD", "ดีนาร์ แอลจีเรีย"),
("EGP", "ปอนด์ อียิปต์"),
("ERN", "นาคฟา"),
("ETB", "เปอร์ เอธิโอเปีย"),
("EUR", "ยูโร"),
("FJD", "ดอลลาร์ ฟิจิ"),
("FKP", "ปอนด์ หมู่เกาะฟอล์กแลนด์"),
("GBP", "ปอนด์สเตอลิง"),
("GEL", "ลารี"),
("GHS", "เซดี กานา"),
("GIP", "ปอนด์ ยิบรอลตาร์"),
("GMD", "ดาราซี"),
("GNF", "ฟรังก์ กินี"),
("GTQ", "เก็ตซาล"),
("GYD", "ดอลลาร์ กายอานา"),
("HKD", "ดอลลาร์ ฮ่องกง"),
("HNL", "เลมพีรา ฮอนดูรัส"),
("HRK", "คูนา"),
("HTG", "กอร์ด"),
("HUF", "ฟอรินท์"),
("IDR", "รูเปีย"),
("ILS", "เชคเกิล อิสราเอล"),
("INR", "รูปี อินเดีย"),
("IQD", "ดีนาร์ อิรัก"),
("IRR", "เรียล อิหร่าน"),
("ISK", "โครนา ไอซ์แลนด์"),
("JMD", "ดอลลาร์ จาเมกา"),
("JOD", "ดอลลาร์ จอร์แดน"),
("JPY", "เยน"),
("KES", "ชิลลิง เคนยา"),
("KGS", "ซอม"),
("KHR", "เรียล กัมพูชา"),
("KMF", "ฟรังก์ คอโมโรส"),
("KPW", "วอน เกาหลีเหนือ"),
("KRW", "วอน เกาหลีใต้"),
("KWD", "ดีนาร์ คูเวต"),
("KYD", "ดอลลาร์ หมู่เกาะเคย์แมน"),
("KZT", "เทงเก"),
("LAK", "กีบ"),
("LBP", "ปอนด์ เลบานอน"),
("LKR", "รูปี ศรีลังกา"),
("LRD", "ดอลลาร์ ไลบีเรีย"),
("LSL", "โลตี"),
("LTL", "ลีทาส ลิทัวเนีย"),
("LVL", "ลัตส์ ลัตเวีย"),
("LYD", "ดีนาร์ ลิเบีย"),
("MAD", "ดีแรห์ม โมร็อกโก"),
("MDL", "ลิว มอลโดวา"),
("MGA", "อเรียรี่ มาดากัสการ์"),
("MKD", "ดีนาร์ มาซิโดเนีย"),
("MMK", "จัต"),
("MNT", "ทูกริค"),
("MOP", "พาทากา"),
("MRO", "อูกุยยา / อูกียา"),
("MUR", "รูปี มอริเชียส"),
("MVR", "รูฟียา"),
("MWK", "ควาซา มาลาวี"),
("MXN", "เปโซ เม็กซิโก"),
("MYR", "ริงกิต มาเลเซีย"),
("MZN", "เมททิคัล โมซัมบิก"),
("NAD", "ดอลลาร์ นามิเบีย"),
("NGN", "ไนรา"),
("NIO", "คอร์โดบา"),
("NOK", "โครน นอร์เวย์"),
("NPR", "รูปี เนปาล"),
("NZD", "ดอลลาร์ นิวซีแลนด์"),
("OMR", "เรียล โอมาน"),
("PAB", "บัลโบอา"),
("PEN", "ซัล เปรู"),
("PGK", "คีนา"),
("PHP", "เปโซ ฟิลิปปินส์"),
("PKR", "รูปี ปากีสถาน"),
("PLN", "สล็อตตี"),
("PYG", "กวารานี"),
("QAR", "เรียล กาตาร์"),
("RON", "ลิว โรมาเนีย"),
("RSD", "ดีนาร์ เซอร์เบีย"),
("RUB", "รูเบิล รัสเซีย"),
("RWF", "ฟรังก์ รวันดา"),
("SAR", "ริยัล ซาอุดีอาระเบีย"),
("SBD", "ดอลลาร์ หมู่เกาะโซโลมอน"),
("SCR", "รูปี เซเชลส์"),
("SDG", "ปอนด์ ซูดาน"),
("SEK", "โครนา สวีเดน"),
("SGD", "ดอลลาร์ สิงคโปร์"),
("SHP", "ปอนด์ เซนต์เฮเลนา"),
("SLL", "ลีโอน"),
("SOS", "ชิลลิง โซมาเลีย"),
("SRD", "ดอลลาร์ ซูรินาเม"),
("SSP", "ปอนด์ เซาท์ซูดาน"),
("STD", "โดบรา"),
("SVC", "โคโลน เอลซัลวาดอร์"),
("SYP", "ปอนด์ ซีเรีย"),
("SZL", "ลิลอนเกนี"),
("THB", "บาท"),
("TJS", "โซโมนิ"),
("TMT", "มานาท เติร์กเมนิสถานใหม่"),
("TND", "ดีนาร์ ตูนิเซีย"),
("TOP", "พาอานกา"),
("TRY", "ลีร์ ตุรกี"),
("TTD", "ดอลลาร์ ตรินิแดดและโตเบโก"),
("TWD", "ดอลลาร์ ไต้หวัน"),
("TZS", "ชิลลิง แทนซาเนีย"),
("UAH", "รีฟเนีย"),
("UGX", "ชิลลิง ยูกันดา"),
("USD", "ดอลลาร์ สหรัฐอเมริกา"),
("USN", "ดอลลาร์ สหรัฐอเมริกา เน็กซ์เดย์ฟัน"),
("UYI", "อุรุกวัย เปโซ เอ็น อุนดิดาเดซ อินเด็กซาดาซ"),
("UYU", "เปโซ อุรุกวัย"),
("UZS", "โซม อุซเบกิสถาน"),
("VEF", "โบลิวาร์"),
("VND", "ดอง"),
("VUV", "วาตู"),
("WST", "ทาลา"),
("XAF", "ฟรังก์ ซีเอฟเอ บีอีเอซี"),
("XAG", "เงิน"),
("XAU", "ทองคำ"),
("XBA", "อียูอาร์ซีโอ"),
("XBB", "อีเอ็มยู 6"),
("XBC", "บัญชี อียู 9"),
("XBD", "บัญชี อียู 17"),
("XCD", "ดอลลาร์ คาริบเบียลตะวันออก"),
("XDR", "สิทธิพิเศษถอนเงิน (กองทุนการเงินระหว่างประเทศ)"),
("XEU", "อี ซี ยู"),
("XFU", "ยูไอซี ฟรังก์"),
("XOF", "ฟรังก์ ซีเอฟเอ บีซีอีเอโอ"),
("XPD", "พัลเลเดียม"),
("XPF", "ฟรังก์ ซีเอฟพี"),
("XPT", "แพลตตินัม"),
("XSU", "ซูเคร"),
("XUA", "เอดีบี"),
("XXX", "ธุรกรรมที่ไม่มีเงินสกุลใดเกี่ยวข้อง"),
("YER", "เรียล เยเมน"),
("ZAR", "แรนด์"),
("ZMW", "ควาซา แซมเบีย"),
("ZWL", "ดอลลาร์ ซิมบับเว"),
)
|
Provider
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 63949,
"end": 64753
}
|
class ____(nn.Module):
def __init__(self, config: Qwen3OmniMoeThinkerConfig):
super().__init__()
self.experts = Qwen3OmniMoeThinkerTextExperts(config)
self.router = Qwen3OmniMoeThinkerTextTopKRouter(config)
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states_reshaped = hidden_states.view(-1, hidden_dim)
routing_weights, selected_experts = self.router(hidden_states_reshaped)
final_hidden_states = self.experts(hidden_states_reshaped, selected_experts, routing_weights)
return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
@use_kernel_forward_from_hub("RMSNorm")
|
Qwen3OmniMoeThinkerTextSparseMoeBlock
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/req/req_install.py
|
{
"start": 2455,
"end": 35788
}
|
class ____:
"""
Represents something that may be installed later on, may have information
about where to fetch the relevant requirement and also contains logic for
installing the said requirement.
"""
def __init__(
self,
req: Optional[Requirement],
comes_from: Optional[Union[str, "InstallRequirement"]],
editable: bool = False,
link: Optional[Link] = None,
markers: Optional[Marker] = None,
use_pep517: Optional[bool] = None,
isolated: bool = False,
*,
global_options: Optional[List[str]] = None,
hash_options: Optional[Dict[str, List[str]]] = None,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
constraint: bool = False,
extras: Collection[str] = (),
user_supplied: bool = False,
permit_editable_wheels: bool = False,
) -> None:
assert req is None or isinstance(req, Requirement), req
self.req = req
self.comes_from = comes_from
self.constraint = constraint
self.editable = editable
self.permit_editable_wheels = permit_editable_wheels
# source_dir is the local directory where the linked requirement is
# located, or unpacked. In case unpacking is needed, creating and
# populating source_dir is done by the RequirementPreparer. Note this
# is not necessarily the directory where pyproject.toml or setup.py is
# located - that one is obtained via unpacked_source_directory.
self.source_dir: Optional[str] = None
if self.editable:
assert link
if link.is_file:
self.source_dir = os.path.normpath(os.path.abspath(link.file_path))
# original_link is the direct URL that was provided by the user for the
# requirement, either directly or via a constraints file.
if link is None and req and req.url:
# PEP 508 URL requirement
link = Link(req.url)
self.link = self.original_link = link
# When this InstallRequirement is a wheel obtained from the cache of locally
# built wheels, this is the source link corresponding to the cache entry, which
# was used to download and build the cached wheel.
self.cached_wheel_source_link: Optional[Link] = None
# Information about the location of the artifact that was downloaded. This
# property is guaranteed to be set in resolver results.
self.download_info: Optional[DirectUrl] = None
# Path to any downloaded or already-existing package.
self.local_file_path: Optional[str] = None
if self.link and self.link.is_file:
self.local_file_path = self.link.file_path
if extras:
self.extras = extras
elif req:
self.extras = req.extras
else:
self.extras = set()
if markers is None and req:
markers = req.marker
self.markers = markers
# This holds the Distribution object if this requirement is already installed.
self.satisfied_by: Optional[BaseDistribution] = None
# Whether the installation process should try to uninstall an existing
# distribution before installing this requirement.
self.should_reinstall = False
# Temporary build location
self._temp_build_dir: Optional[TempDirectory] = None
# Set to True after successful installation
self.install_succeeded: Optional[bool] = None
# Supplied options
self.global_options = global_options if global_options else []
self.hash_options = hash_options if hash_options else {}
self.config_settings = config_settings
# Set to True after successful preparation of this requirement
self.prepared = False
# User supplied requirement are explicitly requested for installation
# by the user via CLI arguments or requirements files, as opposed to,
# e.g. dependencies, extras or constraints.
self.user_supplied = user_supplied
self.isolated = isolated
self.build_env: BuildEnvironment = NoOpBuildEnvironment()
# For PEP 517, the directory where we request the project metadata
# gets stored. We need this to pass to build_wheel, so the backend
# can ensure that the wheel matches the metadata (see the PEP for
# details).
self.metadata_directory: Optional[str] = None
# The static build requirements (from pyproject.toml)
self.pyproject_requires: Optional[List[str]] = None
# Build requirements that we will check are available
self.requirements_to_check: List[str] = []
# The PEP 517 backend we should use to build the project
self.pep517_backend: Optional[BuildBackendHookCaller] = None
# Are we using PEP 517 for this requirement?
# After pyproject.toml has been loaded, the only valid values are True
# and False. Before loading, None is valid (meaning "use the default").
# Setting an explicit value before loading pyproject.toml is supported,
# but after loading this flag should be treated as read only.
self.use_pep517 = use_pep517
# If config settings are provided, enforce PEP 517.
if self.config_settings:
if self.use_pep517 is False:
logger.warning(
"--no-use-pep517 ignored for %s "
"because --config-settings are specified.",
self,
)
self.use_pep517 = True
# This requirement needs more preparation before it can be built
self.needs_more_preparation = False
# This requirement needs to be unpacked before it can be installed.
self._archive_source: Optional[Path] = None
def __str__(self) -> str:
if self.req:
s = redact_auth_from_requirement(self.req)
if self.link:
s += f" from {redact_auth_from_url(self.link.url)}"
elif self.link:
s = redact_auth_from_url(self.link.url)
else:
s = "<InstallRequirement>"
if self.satisfied_by is not None:
if self.satisfied_by.location is not None:
location = display_path(self.satisfied_by.location)
else:
location = "<memory>"
s += f" in {location}"
if self.comes_from:
if isinstance(self.comes_from, str):
comes_from: Optional[str] = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += f" (from {comes_from})"
return s
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} object: "
f"{str(self)} editable={self.editable!r}>"
)
def format_debug(self) -> str:
"""An un-tested helper for getting state, for debugging."""
attributes = vars(self)
names = sorted(attributes)
state = (f"{attr}={attributes[attr]!r}" for attr in sorted(names))
return "<{name} object: {{{state}}}>".format(
name=self.__class__.__name__,
state=", ".join(state),
)
# Things that are valid for all kinds of requirements?
@property
def name(self) -> Optional[str]:
if self.req is None:
return None
return self.req.name
@functools.cached_property
def supports_pyproject_editable(self) -> bool:
if not self.use_pep517:
return False
assert self.pep517_backend
with self.build_env:
runner = runner_with_spinner_message(
"Checking if build backend supports build_editable"
)
with self.pep517_backend.subprocess_runner(runner):
return "build_editable" in self.pep517_backend._supported_features()
@property
def specifier(self) -> SpecifierSet:
assert self.req is not None
return self.req.specifier
@property
def is_direct(self) -> bool:
"""Whether this requirement was specified as a direct URL."""
return self.original_link is not None
@property
def is_pinned(self) -> bool:
"""Return whether I am pinned to an exact version.
For example, some-package==1.2 is pinned; some-package>1.2 is not.
"""
assert self.req is not None
specifiers = self.req.specifier
return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:
if not extras_requested:
# Provide an extra to safely evaluate the markers
# without matching any extra
extras_requested = ("",)
if self.markers is not None:
return any(
self.markers.evaluate({"extra": extra}) for extra in extras_requested
)
else:
return True
@property
def has_hash_options(self) -> bool:
"""Return whether any known-good hashes are specified as options.
These activate --require-hashes mode; hashes specified as part of a
URL do not.
"""
return bool(self.hash_options)
def hashes(self, trust_internet: bool = True) -> Hashes:
"""Return a hash-comparer that considers my option- and URL-based
hashes to be known-good.
Hashes in URLs--ones embedded in the requirements file, not ones
downloaded from an index server--are almost peers with ones from
flags. They satisfy --require-hashes (whether it was implicitly or
explicitly activated) but do not activate it. md5 and sha224 are not
allowed in flags, which should nudge people toward good algos. We
always OR all hashes together, even ones from URLs.
:param trust_internet: Whether to trust URL-based (#md5=...) hashes
downloaded from the internet, as by populate_link()
"""
good_hashes = self.hash_options.copy()
if trust_internet:
link = self.link
elif self.is_direct and self.user_supplied:
link = self.original_link
else:
link = None
if link and link.hash:
assert link.hash_name is not None
good_hashes.setdefault(link.hash_name, []).append(link.hash)
return Hashes(good_hashes)
def from_path(self) -> Optional[str]:
"""Format a nice indicator to show where this "comes from" """
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
comes_from: Optional[str]
if isinstance(self.comes_from, str):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += "->" + comes_from
return s
def ensure_build_location(
self, build_dir: str, autodelete: bool, parallel_builds: bool
) -> str:
assert build_dir is not None
if self._temp_build_dir is not None:
assert self._temp_build_dir.path
return self._temp_build_dir.path
if self.req is None:
# Some systems have /tmp as a symlink which confuses custom
# builds (such as numpy). Thus, we ensure that the real path
# is returned.
self._temp_build_dir = TempDirectory(
kind=tempdir_kinds.REQ_BUILD, globally_managed=True
)
return self._temp_build_dir.path
# This is the only remaining place where we manually determine the path
# for the temporary directory. It is only needed for editables where
# it is the value of the --src option.
# When parallel builds are enabled, add a UUID to the build directory
# name so multiple builds do not interfere with each other.
dir_name: str = canonicalize_name(self.req.name)
if parallel_builds:
dir_name = f"{dir_name}_{uuid.uuid4().hex}"
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
logger.debug("Creating directory %s", build_dir)
os.makedirs(build_dir)
actual_build_dir = os.path.join(build_dir, dir_name)
# `None` indicates that we respect the globally-configured deletion
# settings, which is what we actually want when auto-deleting.
delete_arg = None if autodelete else False
return TempDirectory(
path=actual_build_dir,
delete=delete_arg,
kind=tempdir_kinds.REQ_BUILD,
globally_managed=True,
).path
def _set_requirement(self) -> None:
"""Set requirement after generating metadata."""
assert self.req is None
assert self.metadata is not None
assert self.source_dir is not None
# Construct a Requirement object from the generated metadata
if isinstance(parse_version(self.metadata["Version"]), Version):
op = "=="
else:
op = "==="
self.req = get_requirement(
"".join(
[
self.metadata["Name"],
op,
self.metadata["Version"],
]
)
)
def warn_on_mismatching_name(self) -> None:
assert self.req is not None
metadata_name = canonicalize_name(self.metadata["Name"])
if canonicalize_name(self.req.name) == metadata_name:
# Everything is fine.
return
# If we're here, there's a mismatch. Log a warning about it.
logger.warning(
"Generating metadata for package %s "
"produced metadata for project name %s. Fix your "
"#egg=%s fragments.",
self.name,
metadata_name,
self.name,
)
self.req = get_requirement(metadata_name)
def check_if_exists(self, use_user_site: bool) -> None:
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.should_reinstall appropriately.
"""
if self.req is None:
return
existing_dist = get_default_environment().get_distribution(self.req.name)
if not existing_dist:
return
version_compatible = self.req.specifier.contains(
existing_dist.version,
prereleases=True,
)
if not version_compatible:
self.satisfied_by = None
if use_user_site:
if existing_dist.in_usersite:
self.should_reinstall = True
elif running_under_virtualenv() and existing_dist.in_site_packages:
raise InstallationError(
f"Will not install to the user site because it will "
f"lack sys.path precedence to {existing_dist.raw_name} "
f"in {existing_dist.location}"
)
else:
self.should_reinstall = True
else:
if self.editable:
self.should_reinstall = True
# when installing editables, nothing pre-existing should ever
# satisfy
self.satisfied_by = None
else:
self.satisfied_by = existing_dist
# Things valid for wheels
@property
def is_wheel(self) -> bool:
if not self.link:
return False
return self.link.is_wheel
@property
def is_wheel_from_cache(self) -> bool:
# When True, it means that this InstallRequirement is a local wheel file in the
# cache of locally built wheels.
return self.cached_wheel_source_link is not None
# Things valid for sdists
@property
def unpacked_source_directory(self) -> str:
assert self.source_dir, f"No source dir for {self}"
return os.path.join(
self.source_dir, self.link and self.link.subdirectory_fragment or ""
)
@property
def setup_py_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
setup_py = os.path.join(self.unpacked_source_directory, "setup.py")
return setup_py
@property
def setup_cfg_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")
return setup_cfg
@property
def pyproject_toml_path(self) -> str:
assert self.source_dir, f"No source dir for {self}"
return make_pyproject_path(self.unpacked_source_directory)
def load_pyproject_toml(self) -> None:
"""Load the pyproject.toml file.
After calling this routine, all of the attributes related to PEP 517
processing for this requirement have been set. In particular, the
use_pep517 attribute can be used to determine whether we should
follow the PEP 517 or legacy (setup.py) code path.
"""
pyproject_toml_data = load_pyproject_toml(
self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)
)
if pyproject_toml_data is None:
assert not self.config_settings
self.use_pep517 = False
return
self.use_pep517 = True
requires, backend, check, backend_path = pyproject_toml_data
self.requirements_to_check = check
self.pyproject_requires = requires
self.pep517_backend = ConfiguredBuildBackendHookCaller(
self,
self.unpacked_source_directory,
backend,
backend_path=backend_path,
)
def isolated_editable_sanity_check(self) -> None:
"""Check that an editable requirement if valid for use with PEP 517/518.
This verifies that an editable that has a pyproject.toml either supports PEP 660
or as a setup.py or a setup.cfg
"""
if (
self.editable
and self.use_pep517
and not self.supports_pyproject_editable
and not os.path.isfile(self.setup_py_path)
and not os.path.isfile(self.setup_cfg_path)
):
raise InstallationError(
f"Project {self} has a 'pyproject.toml' and its build "
f"backend is missing the 'build_editable' hook. Since it does not "
f"have a 'setup.py' nor a 'setup.cfg', "
f"it cannot be installed in editable mode. "
f"Consider using a build backend that supports PEP 660."
)
def prepare_metadata(self) -> None:
"""Ensure that project metadata is available.
Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.
Under legacy processing, call setup.py egg-info.
"""
assert self.source_dir, f"No source dir for {self}"
details = self.name or f"from {self.link}"
if self.use_pep517:
assert self.pep517_backend is not None
if (
self.editable
and self.permit_editable_wheels
and self.supports_pyproject_editable
):
self.metadata_directory = generate_editable_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata(
build_env=self.build_env,
backend=self.pep517_backend,
details=details,
)
else:
self.metadata_directory = generate_metadata_legacy(
build_env=self.build_env,
setup_py_path=self.setup_py_path,
source_dir=self.unpacked_source_directory,
isolated=self.isolated,
details=details,
)
# Act on the newly generated metadata, based on the name and version.
if not self.name:
self._set_requirement()
else:
self.warn_on_mismatching_name()
self.assert_source_matches_version()
@property
def metadata(self) -> Any:
if not hasattr(self, "_metadata"):
self._metadata = self.get_dist().metadata
return self._metadata
def get_dist(self) -> BaseDistribution:
if self.metadata_directory:
return get_directory_distribution(self.metadata_directory)
elif self.local_file_path and self.is_wheel:
assert self.req is not None
return get_wheel_distribution(
FilesystemWheel(self.local_file_path),
canonicalize_name(self.req.name),
)
raise AssertionError(
f"InstallRequirement {self} has no metadata directory and no wheel: "
f"can't make a distribution."
)
def assert_source_matches_version(self) -> None:
assert self.source_dir, f"No source dir for {self}"
version = self.metadata["version"]
if self.req and self.req.specifier and version not in self.req.specifier:
logger.warning(
"Requested %s, but installing version %s",
self,
version,
)
else:
logger.debug(
"Source in %s has version %s, which satisfies requirement %s",
display_path(self.source_dir),
version,
self,
)
# For both source distributions and editables
def ensure_has_source_dir(
self,
parent_dir: str,
autodelete: bool = False,
parallel_builds: bool = False,
) -> None:
"""Ensure that a source_dir is set.
This will create a temporary build dir if the name of the requirement
isn't known yet.
:param parent_dir: The ideal pip parent_dir for the source_dir.
Generally src_dir for editables and build_dir for sdists.
:return: self.source_dir
"""
if self.source_dir is None:
self.source_dir = self.ensure_build_location(
parent_dir,
autodelete=autodelete,
parallel_builds=parallel_builds,
)
def needs_unpacked_archive(self, archive_source: Path) -> None:
assert self._archive_source is None
self._archive_source = archive_source
def ensure_pristine_source_checkout(self) -> None:
"""Ensure the source directory has not yet been built in."""
assert self.source_dir is not None
if self._archive_source is not None:
unpack_file(str(self._archive_source), self.source_dir)
elif is_installable_dir(self.source_dir):
# If a checkout exists, it's unwise to keep going.
# version inconsistencies are logged later, but do not fail
# the installation.
raise PreviousBuildDirError(
f"pip can't proceed with requirements '{self}' due to a "
f"pre-existing build directory ({self.source_dir}). This is likely "
"due to a previous installation that failed . pip is "
"being responsible and not assuming it can delete this. "
"Please delete it and try again."
)
# For editable installations
def update_editable(self) -> None:
if not self.link:
logger.debug(
"Cannot update repository at %s; repository location is unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.link.scheme == "file":
# Static paths don't get updated
return
vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
# Editable requirements are validated in Requirement constructors.
# So here, if it's neither a path nor a valid VCS URL, it's a bug.
assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
hidden_url = hide_url(self.link.url)
vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
# Top-level Actions
def uninstall(
self, auto_confirm: bool = False, verbose: bool = False
) -> Optional[UninstallPathSet]:
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
assert self.req
dist = get_default_environment().get_distribution(self.req.name)
if not dist:
logger.warning("Skipping %s as it is not installed.", self.name)
return None
logger.info("Found existing installation: %s", dist)
uninstalled_pathset = UninstallPathSet.from_dist(dist)
uninstalled_pathset.remove(auto_confirm, verbose)
return uninstalled_pathset
def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
def _clean_zip_name(name: str, prefix: str) -> str:
assert name.startswith(
prefix + os.path.sep
), f"name {name!r} doesn't start with prefix {prefix!r}"
name = name[len(prefix) + 1 :]
name = name.replace(os.path.sep, "/")
return name
assert self.req is not None
path = os.path.join(parentdir, path)
name = _clean_zip_name(path, rootdir)
return self.req.name + "/" + name
def archive(self, build_dir: Optional[str]) -> None:
"""Saves archive to provided build_dir.
Used for saving downloaded VCS requirements as part of `pip download`.
"""
assert self.source_dir
if build_dir is None:
return
create_archive = True
archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
f"The file {display_path(archive_path)} exists. (i)gnore, (w)ipe, "
"(b)ackup, (a)bort ",
("i", "w", "b", "a"),
)
if response == "i":
create_archive = False
elif response == "w":
logger.warning("Deleting %s", display_path(archive_path))
os.remove(archive_path)
elif response == "b":
dest_file = backup_dir(archive_path)
logger.warning(
"Backing up %s to %s",
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
elif response == "a":
sys.exit(-1)
if not create_archive:
return
zip_output = zipfile.ZipFile(
archive_path,
"w",
zipfile.ZIP_DEFLATED,
allowZip64=True,
)
with zip_output:
dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))
for dirpath, dirnames, filenames in os.walk(dir):
for dirname in dirnames:
dir_arcname = self._get_archive_name(
dirname,
parentdir=dirpath,
rootdir=dir,
)
zipdir = zipfile.ZipInfo(dir_arcname + "/")
zipdir.external_attr = 0x1ED << 16 # 0o755
zip_output.writestr(zipdir, "")
for filename in filenames:
file_arcname = self._get_archive_name(
filename,
parentdir=dirpath,
rootdir=dir,
)
filename = os.path.join(dirpath, filename)
zip_output.write(filename, file_arcname)
logger.info("Saved %s", display_path(archive_path))
def install(
self,
global_options: Optional[Sequence[str]] = None,
root: Optional[str] = None,
home: Optional[str] = None,
prefix: Optional[str] = None,
warn_script_location: bool = True,
use_user_site: bool = False,
pycompile: bool = True,
) -> None:
assert self.req is not None
scheme = get_scheme(
self.req.name,
user=use_user_site,
home=home,
root=root,
isolated=self.isolated,
prefix=prefix,
)
if self.editable and not self.is_wheel:
deprecated(
reason=(
f"Legacy editable install of {self} (setup.py develop) "
"is deprecated."
),
replacement=(
"to add a pyproject.toml or enable --use-pep517, "
"and use setuptools >= 64. "
"If the resulting installation is not behaving as expected, "
"try using --config-settings editable_mode=compat. "
"Please consult the setuptools documentation for more information"
),
gone_in="25.0",
issue=11457,
)
if self.config_settings:
logger.warning(
"--config-settings ignored for legacy editable install of %s. "
"Consider upgrading to a version of setuptools "
"that supports PEP 660 (>= 64).",
self,
)
install_editable_legacy(
global_options=global_options if global_options is not None else [],
prefix=prefix,
home=home,
use_user_site=use_user_site,
name=self.req.name,
setup_py_path=self.setup_py_path,
isolated=self.isolated,
build_env=self.build_env,
unpacked_source_directory=self.unpacked_source_directory,
)
self.install_succeeded = True
return
assert self.is_wheel
assert self.local_file_path
install_wheel(
self.req.name,
self.local_file_path,
scheme=scheme,
req_description=str(self.req),
pycompile=pycompile,
warn_script_location=warn_script_location,
direct_url=self.download_info if self.is_direct else None,
requested=self.user_supplied,
)
self.install_succeeded = True
def check_invalid_constraint_type(req: InstallRequirement) -> str:
# Check for unsupported forms
problem = ""
if not req.name:
problem = "Unnamed requirements are not allowed as constraints"
elif req.editable:
problem = "Editable requirements are not allowed as constraints"
elif req.extras:
problem = "Constraints cannot have extras"
if problem:
deprecated(
reason=(
"Constraints are only allowed to take the form of a package "
"name and a version specifier. Other forms were originally "
"permitted as an accident of the implementation, but were "
"undocumented. The new implementation of the resolver no "
"longer supports these forms."
),
replacement="replacing the constraint with a requirement",
# No plan yet for when the new resolver becomes default
gone_in=None,
issue=8210,
)
return problem
def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:
if getattr(options, option, None):
return True
for req in reqs:
if getattr(req, option, None):
return True
return False
def check_legacy_setup_py_options(
options: Values,
reqs: List[InstallRequirement],
) -> None:
has_build_options = _has_option(options, reqs, "build_options")
has_global_options = _has_option(options, reqs, "global_options")
if has_build_options or has_global_options:
deprecated(
reason="--build-option and --global-option are deprecated.",
issue=11859,
replacement="to use --config-settings",
gone_in="25.0",
)
logger.warning(
"Implying --no-binary=:all: due to the presence of "
"--build-option / --global-option. "
)
options.format_control.disallow_binaries()
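# A standalone sketch (not pip's API) of the pinning rule that the `is_pinned`
# property above applies, written against `packaging` directly; the helper name
# `_sketch_is_pinned` is made up for illustration.
from packaging.requirements import Requirement

def _sketch_is_pinned(req_str: str) -> bool:
    specifiers = Requirement(req_str).specifier
    return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}

assert _sketch_is_pinned("some-package==1.2")       # pinned
assert not _sketch_is_pinned("some-package>1.2")    # a range, not a pin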
|
InstallRequirement
|
python
|
getsentry__sentry
|
src/sentry/replays/post_process.py
|
{
"start": 1049,
"end": 1257
}
|
class ____(TypedDict, total=False):
channel: str | None
runtime_version: str | None
update_id: str | None
@extend_schema_serializer(exclude_fields=["info_ids", "warning_ids"])
|
OTAUpdatesResponseType
|
python
|
getsentry__sentry
|
src/sentry/issues/escalating/escalating_issues_alg.py
|
{
"start": 354,
"end": 4907
}
|
class ____:
std_multiplier: int = 5
min_spike_multiplier: int = 5
max_spike_multiplier: int = 8
min_bursty_multiplier: int = 2
max_bursty_multiplier: int = 5
standard_version = ThresholdVariables()
def generate_issue_forecast(
data: GroupCount, start_time: datetime, alg_params: ThresholdVariables = standard_version
) -> list[IssueForecast]:
"""
Calculates daily issue spike limits, given an input dataset from snuba.
For issues with at least 14 days of history, we combine a weighted average of the last
7 days of hourly data with the observed variance over that time interval. We double the
weight if historical observation falls on the same day of week to incorporate daily seasonality.
The overall multiplier is calibrated to 5 standard deviations, although it is
truncated to [5, 8] to avoid poor results in a timeseries with very high
or low variance.
    In addition, we also calculate the cv (coefficient of variation) of the timeseries over the past week, which is the ratio of the
standard deviation over the average. This is to get an understanding of how high or low the variance
is relative to the data. The CV is then placed into an exponential equation that outputs
a multiplier inversely related to how high the cv is. The multiplier is bounded between 2 and 5. The
ceilings for the next week are all the same - which is the maximum number of events in an hour over the
past week multiplied by this multiplier. This calculation is to account for bursty issues or those that
have a very high variance.
The final spike limit for each hour is set to the max of the bursty limit bound or the calculated limit.
:param data: Dict of Snuba query results - hourly data over past 7 days
:param start_time: datetime indicating the first hour to calc spike protection for
:param alg_params: Threshold Variables dataclass with different ceiling versions
:return output: Dict containing a list of spike protection values
"""
# output list of dictionaries
output: list[IssueForecast] = []
input_dates = [datetime.strptime(x, "%Y-%m-%dT%H:%M:%S%f%z") for x in data["intervals"]]
output_dates = [start_time + timedelta(days=x) for x in range(14)]
ts_data = data["data"]
# if data is empty return empty output
if len(ts_data) == 0 or len(input_dates) == 0:
return output
ts_max = max(ts_data)
# if we have less than a week's worth of data (new issue),
# set the threshold to 10x the max of the dataset to account for
# how the pattern of the issue will change over the first week
if len(ts_data) < 168:
for output_ts in output_dates:
output.append(
{"forecasted_date": output_ts.strftime("%Y-%m-%d"), "forecasted_value": ts_max * 10}
)
return output
# gather stats from the timeseries - average, standard dev
ts_avg = statistics.mean(ts_data)
ts_std_dev = statistics.stdev(ts_data)
# calculate cv to identify how high/low variance is
ts_cv = ts_std_dev / ts_avg
# multiplier determined by exponential equation - bounded between [2,5]
regression_multiplier = min(
max(alg_params.min_bursty_multiplier, 5 * ((math.e) ** (-0.65 * ts_cv))),
alg_params.max_bursty_multiplier,
)
# first ceiling calculation
limit_v1 = ts_max * regression_multiplier
# This second multiplier corresponds to 5 standard deviations above the avg ts value
ts_multiplier = min(
max(
(ts_avg + (alg_params.std_multiplier * ts_std_dev)) / ts_avg,
alg_params.min_spike_multiplier,
),
alg_params.max_spike_multiplier,
)
# Default upper limit is the truncated multiplier * avg value
baseline = ts_multiplier * ts_avg
for output_ts in output_dates:
# Calculate weights (based on day of week)
weights = [(1 + (input_ts.weekday() == output_ts.weekday())) for input_ts in input_dates]
# Calculate weighted avg
numerator = sum([datum * weight for datum, weight in zip(ts_data, weights)])
wavg_limit = numerator / sum(weights)
# second ceiling calculation
limit_v2 = wavg_limit + baseline
# final limit is max of the two calculations
forecast: IssueForecast = {
"forecasted_date": output_ts.strftime("%Y-%m-%d"),
"forecasted_value": int(max(limit_v1, limit_v2)),
}
output.append(forecast)
return output
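# A compressed, runnable sketch of the multiplier bounds described in the
# docstring above, on made-up hourly counts (168 values = one week). The real
# function additionally folds in a day-of-week weighted average before taking
# the final max; nothing here comes from Snuba.
import math
import statistics

ts_data = [10, 12, 8, 11, 9, 40, 10] * 24            # pretend hourly event counts
ts_avg = statistics.mean(ts_data)
ts_std_dev = statistics.stdev(ts_data)
ts_cv = ts_std_dev / ts_avg
bursty_multiplier = min(max(2, 5 * math.e ** (-0.65 * ts_cv)), 5)       # bounded [2, 5]
spike_multiplier = min(max((ts_avg + 5 * ts_std_dev) / ts_avg, 5), 8)   # bounded [5, 8]
limit = max(max(ts_data) * bursty_multiplier, spike_multiplier * ts_avg)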
|
ThresholdVariables
|
python
|
ray-project__ray
|
python/ray/llm/_internal/batch/stages/sglang_engine_stage.py
|
{
"start": 631,
"end": 1273
}
|
class ____(BaseModel):
"""A request to the SGLang engine."""
# The request ID for the LLM engine (unique per replica).
request_id: int
# The index of the request in the batch.
idx_in_batch: int
# The input prompt.
prompt: Optional[str]
# Alternative to text. Specify the input as token IDs instead of text.
prompt_token_ids: Optional[List[int]]
# The sampling parameters (more details can be seen in https://docs.sglang.ai/backend/sampling_params.html).
params: Optional[Dict[str, Any]]
class Config:
validate_assignment = True
arbitrary_types_allowed = True
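# Construction sketch for the request model above, assuming it is exposed under
# the name given after this block; every field value is illustrative.
request = SGLangEngineRequest(
    request_id=0,
    idx_in_batch=0,
    prompt="Summarize the following text ...",
    prompt_token_ids=None,
    params={"temperature": 0.7, "max_new_tokens": 64},
)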
|
SGLangEngineRequest
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_groupby.py
|
{
"start": 19421,
"end": 21207
}
|
class ____(Reduction, GroupByBase):
_chunk_cls = GroupByChunk
def _tune_down(self):
if self.operand("split_out") is None:
return self.substitute_parameters(
{
"split_out": functools.partial(
_adjust_split_out_for_group_keys, by=self.by
)
}
)
@property
def split_out(self):
if self.operand("split_out") is None:
return 1
return super().split_out
@property
def _chunk_cls_args(self):
return self.by
@functools.cached_property
def _meta_chunk(self):
meta = meta_nonempty(self.frame._meta)
return self.chunk(meta, *self._by_meta, **self.chunk_kwargs)
def _divisions(self):
if self.sort:
return (None, None)
split_out = self.split_out
if split_out is True:
split_out = self.frame.npartitions
return (None,) * (split_out + 1)
def _simplify_up(self, parent, dependents):
return groupby_projection(self, parent, dependents)
@functools.cached_property
def combine_kwargs(self):
return {"levels": self.levels, "observed": self.observed, "dropna": self.dropna}
@functools.cached_property
def chunk_kwargs(self):
return {"observed": self.observed, "dropna": self.dropna}
@functools.cached_property
def aggregate_kwargs(self):
return {
"levels": self.levels,
"sort": self.sort,
"observed": self.observed,
"dropna": self.dropna,
}
def _var_combine(g, levels, sort=False, observed=False, dropna=True):
return g.groupby(level=levels, sort=sort, observed=observed, dropna=dropna).sum()
|
GroupByReduction
|
python
|
weaviate__weaviate-python-client
|
weaviate/rbac/models.py
|
{
"start": 20542,
"end": 21275
}
|
class ____:
@staticmethod
def oidc(
*,
group: Union[str, Sequence[str]],
read: bool = False,
assign_and_revoke: bool = False,
) -> PermissionsCreateType:
permissions: List[_Permission] = []
if isinstance(group, str):
group = [group]
for g in group:
permission = _GroupsPermission(group=g, group_type="oidc", actions=set())
if read:
permission.actions.add(GroupAction.READ)
if assign_and_revoke:
permission.actions.add(GroupAction.ASSIGN_AND_REVOKE)
if len(permission.actions) > 0:
permissions.append(permission)
return permissions
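# Usage sketch, assuming the builder above is exposed as GroupsPermissions
# (the name given after this block); the group names are placeholders.
permissions = GroupsPermissions.oidc(
    group=["data-readers", "admins"],
    read=True,
    assign_and_revoke=False,
)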
|
GroupsPermissions
|
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_at.py
|
{
"start": 4239,
"end": 7213
}
|
class ____:
# TODO: De-duplicate/parametrize
# test_at_series_raises_key_error2, test_at_frame_raises_key_error2
def test_at_series_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=[3, 2, 1])
result = indexer_al(ser)[1]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(ser)["a"]
def test_at_frame_raises_key_error(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({0: [1, 2, 3]}, index=[3, 2, 1])
result = indexer_al(df)[1, 0]
assert result == 3
with pytest.raises(KeyError, match="a"):
indexer_al(df)["a", 0]
with pytest.raises(KeyError, match="a"):
indexer_al(df)[1, "a"]
def test_at_series_raises_key_error2(self, indexer_al):
# at should not fallback
# GH#7814
# GH#31724 .at should match .loc
ser = Series([1, 2, 3], index=list("abc"))
result = indexer_al(ser)["a"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(ser)[0]
def test_at_frame_raises_key_error2(self, indexer_al):
# GH#31724 .at should match .loc
df = DataFrame({"A": [1, 2, 3]}, index=list("abc"))
result = indexer_al(df)["a", "A"]
assert result == 1
with pytest.raises(KeyError, match="^0$"):
indexer_al(df)["a", 0]
def test_at_frame_multiple_columns(self):
# GH#48296 - at shouldn't modify multiple columns
df = DataFrame({"a": [1, 2], "b": [3, 4]})
new_row = [6, 7]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at[5] = new_row
def test_at_getitem_mixed_index_no_fallback(self):
# GH#19860
ser = Series([1, 2, 3, 4, 5], index=["a", "b", "c", 1, 2])
with pytest.raises(KeyError, match="^0$"):
ser.at[0]
with pytest.raises(KeyError, match="^4$"):
ser.at[4]
def test_at_categorical_integers(self):
# CategoricalIndex with integer categories that don't happen to match
# the Categorical's codes
ci = CategoricalIndex([3, 4])
arr = np.arange(4).reshape(2, 2)
frame = DataFrame(arr, index=ci)
for df in [frame, frame.T]:
for key in [0, 1]:
with pytest.raises(KeyError, match=str(key)):
df.at[key, key]
def test_at_applied_for_rows(self):
# GH#48729 .at should raise InvalidIndexError when assigning rows
df = DataFrame(index=["a"], columns=["col1", "col2"])
new_row = [123, 15]
with pytest.raises(
InvalidIndexError,
match=f"You can only assign a scalar value not a \\{type(new_row)}",
):
df.at["a"] = new_row
|
TestAtErrors
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/models.py
|
{
"start": 13760,
"end": 16970
}
|
class ____(models.Model):
"""
An AccessToken instance represents the actual access token to
access user's resources, as in :rfc:`5`.
Fields:
    * :attr:`user` The Django user representing resources' owner
    * :attr:`source_refresh_token` If from a refresh, the consumed RefreshToken
* :attr:`token` Access token
* :attr:`application` Application instance
* :attr:`expires` Date and time of token expiration, in DateTime format
* :attr:`scope` Allowed scopes
"""
id = models.BigAutoField(primary_key=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="%(app_label)s_%(class)s",
)
source_refresh_token = models.OneToOneField(
# unique=True implied by the OneToOneField
oauth2_settings.REFRESH_TOKEN_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
related_name="refreshed_access_token",
)
token = models.TextField()
token_checksum = TokenChecksumField(
max_length=64,
blank=False,
unique=True,
db_index=True,
)
id_token = models.OneToOneField(
oauth2_settings.ID_TOKEN_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
related_name="access_token",
)
application = models.ForeignKey(
oauth2_settings.APPLICATION_MODEL,
on_delete=models.CASCADE,
blank=True,
null=True,
)
expires = models.DateTimeField()
scope = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def is_valid(self, scopes=None):
"""
Checks if the access token is valid.
:param scopes: An iterable containing the scopes to check or None
"""
return not self.is_expired() and self.allow_scopes(scopes)
def is_expired(self):
"""
Check token expiration with timezone awareness
"""
if not self.expires:
return True
return timezone.now() >= self.expires
def allow_scopes(self, scopes):
"""
Check if the token allows the provided scopes
:param scopes: An iterable containing the scopes to check
"""
if not scopes:
return True
provided_scopes = set(self.scope.split())
resource_scopes = set(scopes)
return resource_scopes.issubset(provided_scopes)
def revoke(self):
"""
        Convenience method to keep the tokens' interface uniform; for now
simply remove this token from the database in order to revoke it.
"""
self.delete()
@property
def scopes(self):
"""
Returns a dictionary of allowed scope names (as keys) with their descriptions (as values)
"""
all_scopes = get_scopes_backend().get_all_scopes()
token_scopes = self.scope.split()
return {name: desc for name, desc in all_scopes.items() if name in token_scopes}
def __str__(self):
return self.token
class Meta:
abstract = True
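# Standalone sketch of the subset check performed by allow_scopes() above:
# the token's space-separated `scope` field must cover every requested scope.
granted = set("read write".split())
assert {"read"}.issubset(granted)                 # allowed
assert not {"read", "admin"}.issubset(granted)    # "admin" was never granted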
|
AbstractAccessToken
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/memberAccess6.py
|
{
"start": 627,
"end": 1062
}
|
class ____(ParentA):
attr1: Column[str] = Column(str)
attr2 = Column(str)
ChildA.attr1
ChildA().attr1
ChildA.attr2
ChildA().attr2
foo = ChildA()
# This should generate an error because bar is declared as containing a
# Column[str], which doesn't provide a __set__ method.
foo.attr1 = ""
# This should generate an error because baz's inferred type is
# Column[str], which doesn't provide a __set__ method.
foo.attr2 = ""
|
ChildA
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/py_numpy/package.py
|
{
"start": 289,
"end": 717
}
|
class ____(Package, PythonExtension):
"""A package which extends python, depends on C and C++, and has a pure build dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/py-numpy-1.0.tar.gz"
version("2.3.4", md5="00000000000000000000000000000120")
extends("python")
depends_on("c", type="build")
depends_on("cxx", type="build")
depends_on("cmake", type="build")
|
PyNumpy
|
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/google_embedding_function.py
|
{
"start": 9321,
"end": 14381
}
|
class ____(EmbeddingFunction[Documents]):
"""To use this EmbeddingFunction, you must have the google.generativeai Python package installed and have a Google API key."""
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "models/embedding-001",
task_type: str = "RETRIEVAL_DOCUMENT",
api_key_env_var: str = "CHROMA_GOOGLE_GENAI_API_KEY",
):
"""
Initialize the GoogleGenerativeAiEmbeddingFunction.
Args:
api_key_env_var (str, optional): Environment variable name that contains your API key for the Google Generative AI API.
Defaults to "CHROMA_GOOGLE_GENAI_API_KEY".
model_name (str, optional): The name of the model to use for text embeddings.
Defaults to "models/embedding-001".
task_type (str, optional): The task type for the embeddings.
Use "RETRIEVAL_DOCUMENT" for embedding documents and "RETRIEVAL_QUERY" for embedding queries.
Defaults to "RETRIEVAL_DOCUMENT".
"""
try:
import google.generativeai as genai
except ImportError:
raise ValueError(
"The Google Generative AI python package is not installed. Please install it with `pip install google-generativeai`"
)
if api_key is not None:
warnings.warn(
"Direct api_key configuration will not be persisted. "
"Please use environment variables via api_key_env_var for persistent storage.",
DeprecationWarning,
)
if os.getenv("GOOGLE_API_KEY") is not None:
self.api_key_env_var = "GOOGLE_API_KEY"
else:
self.api_key_env_var = api_key_env_var
self.api_key = api_key or os.getenv(self.api_key_env_var)
if not self.api_key:
raise ValueError(
f"The {self.api_key_env_var} environment variable is not set."
)
self.model_name = model_name
self.task_type = task_type
genai.configure(api_key=self.api_key)
self._genai = genai
def __call__(self, input: Documents) -> Embeddings:
"""
Generate embeddings for the given documents.
Args:
input: Documents or images to generate embeddings for.
Returns:
Embeddings for the documents.
"""
# Google Generative AI only works with text documents
if not all(isinstance(item, str) for item in input):
raise ValueError(
"Google Generative AI only supports text documents, not images"
)
embeddings_list: List[npt.NDArray[np.float32]] = []
for text in input:
embedding_result = self._genai.embed_content(
model=self.model_name,
content=text,
task_type=self.task_type,
)
embeddings_list.append(
np.array(embedding_result["embedding"], dtype=np.float32)
)
# Convert to the expected Embeddings type (List[Vector])
return cast(Embeddings, embeddings_list)
@staticmethod
def name() -> str:
return "google_generative_ai"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(config: Dict[str, Any]) -> "EmbeddingFunction[Documents]":
api_key_env_var = config.get("api_key_env_var")
model_name = config.get("model_name")
task_type = config.get("task_type")
if api_key_env_var is None or model_name is None or task_type is None:
assert False, "This code should not be reached"
return GoogleGenerativeAiEmbeddingFunction(
api_key_env_var=api_key_env_var, model_name=model_name, task_type=task_type
)
def get_config(self) -> Dict[str, Any]:
return {
"api_key_env_var": self.api_key_env_var,
"model_name": self.model_name,
"task_type": self.task_type,
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "model_name" in new_config:
raise ValueError(
"The model name cannot be changed after the embedding function has been initialized."
)
if "task_type" in new_config:
raise ValueError(
"The task type cannot be changed after the embedding function has been initialized."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "google_generative_ai")
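# Usage sketch; it assumes `google-generativeai` is installed and an API key is
# available via CHROMA_GOOGLE_GENAI_API_KEY (or GOOGLE_API_KEY), as resolved by
# __init__ above. The documents are placeholders.
ef = GoogleGenerativeAiEmbeddingFunction(model_name="models/embedding-001")
embeddings = ef(["first document", "second document"])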
|
GoogleGenerativeAiEmbeddingFunction
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/solids.py
|
{
"start": 20095,
"end": 20265
}
|
class ____(graphene.ObjectType):
nodes = non_null_list(GrapheneRunStepStats)
class Meta:
name = "SolidStepStatsConnection"
|
GrapheneSolidStepStatsConnection
|
python
|
getsentry__sentry
|
tests/sentry/feedback/__init__.py
|
{
"start": 1626,
"end": 1861
}
|
class ____:
def __init__(self, status: int, json_data: dict):
self.status = status
self.json_data = json_data
self.data = json.dumps(json_data)
def json(self):
return self.json_data
|
MockSeerResponse
|
python
|
getsentry__sentry
|
src/sentry/stacktraces/processing.py
|
{
"start": 3815,
"end": 25200
}
|
class ____:
def __init__(self, data, stacktrace_infos, project=None):
self.data = data
self.stacktrace_infos = stacktrace_infos
if project is None:
project = Project.objects.get_from_cache(id=data["project"])
self.project = project
def close(self):
pass
def get_release(self, create=False):
"""Convenient helper to return the release for the current data
and optionally creates the release if it's missing. In case there
is no release info it will return `None`.
"""
release = self.data.get("release")
if not release:
return None
if not create:
return Release.get(project=self.project, version=self.data["release"])
timestamp = self.data.get("timestamp")
if timestamp is not None:
date = datetime.fromtimestamp(timestamp).replace(tzinfo=timezone.utc)
else:
date = None
return Release.get_or_create(
project=self.project, version=self.data["release"], date_added=date
)
def handles_frame(self, frame, stacktrace_info):
"""Returns true if this processor can handle this frame. This is the
earliest check and operates on a raw frame and stacktrace info. If
this returns `True` a processable frame is created.
"""
return False
def preprocess_frame(self, processable_frame):
"""After a processable frame has been created this method is invoked
to give the processor a chance to store additional data to the frame
if wanted. In particular a cache key can be set here.
"""
def process_exception(self, exception):
"""Processes an exception."""
return False
def process_frame(self, processable_frame, processing_task):
"""Processes the processable frame and returns a tuple of three
lists: ``(frames, raw_frames, errors)`` where frames is the list of
processed frames, raw_frames is the list of raw unprocessed frames
(which however can also be modified if needed) as well as a list of
optional errors. Each one of the items can be `None` in which case
the original input frame is assumed.
"""
def preprocess_step(self, processing_task):
"""After frames are preprocessed but before frame processing kicks in
the preprocessing step is run. This already has access to the cache
values on the frames.
"""
return False
def find_stacktraces_in_data(
data: Mapping[str, Any], include_raw: bool = False, include_empty_exceptions: bool = False
) -> list[StacktraceInfo]:
"""
Finds all stacktraces in a given data blob and returns them together with some meta information.
If `include_raw` is True, then also raw stacktraces are included.
If `include_empty_exceptions` is set to `True` then null/empty stacktraces and stacktraces with
no or only null/empty frames are included (where they otherwise would not be), with the
    `is_exception` flag set on their `StacktraceInfo` object.
"""
rv = []
def _append_stacktrace(
stacktrace: Any,
# The entry in `exception.values` or `threads.values` containing the `stacktrace` attribute,
# or None for top-level stacktraces
container: Any = None,
# Whether or not the container is from `exception.values`
is_exception: bool = False,
# Prevent skipping empty/null stacktraces from `exception.values` (other empty/null
# stacktraces are always skipped)
include_empty_exceptions: bool = False,
) -> None:
frames = _safe_get_frames(stacktrace)
if is_exception and include_empty_exceptions:
# win-fast bypass of null/empty check
pass
elif not stacktrace or not frames:
return
platforms = _get_frames_metadata(frames, data.get("platform", "unknown"))
rv.append(
StacktraceInfo(
stacktrace=stacktrace,
container=container,
platforms=platforms,
is_exception=is_exception,
)
)
# Look for stacktraces under the key `exception`
for exc in get_path(data, "exception", "values", filter=True, default=()):
_append_stacktrace(
exc.get("stacktrace"),
container=exc,
is_exception=True,
include_empty_exceptions=include_empty_exceptions,
)
# Look for stacktraces under the key `stacktrace`
_append_stacktrace(data.get("stacktrace"))
# The native family includes stacktraces under threads
for thread in get_path(data, "threads", "values", filter=True, default=()):
_append_stacktrace(thread.get("stacktrace"), container=thread)
if include_raw:
# Iterate over a copy of rv, otherwise, it will infinitely append to itself
for info in rv[:]:
if info.container is not None:
# We don't set `is_exception` to `True` here, even if `info.is_exception` is set,
# because otherwise we'd end up processing each exception container twice in
# `process_stacktraces`
_append_stacktrace(info.container.get("raw_stacktrace"), container=info.container)
return rv
def _get_frames_metadata(frames: Sequence[dict[str, Any]], fallback_platform: str) -> set[str]:
"""Create a set of platforms involved"""
return {frame.get("platform", fallback_platform) for frame in frames}
def _normalize_in_app(stacktrace: Sequence[dict[str, str]]) -> str:
"""
Ensures consistent values of in_app across a stacktrace. Returns a classification of the
stacktrace as either "in-app-only", "system-only", or "mixed", for use in metrics.
"""
has_in_app_frames = False
has_system_frames = False
# Default to false in all cases where processors or grouping enhancers
# have not yet set in_app.
for frame in stacktrace:
if frame.get("in_app") is None:
set_in_app(frame, False)
if frame.get("in_app"):
has_in_app_frames = True
else:
has_system_frames = True
if has_in_app_frames and has_system_frames:
return "mixed"
elif has_in_app_frames:
return "in-app-only"
else:
return "system-only"
def normalize_stacktraces_for_grouping(
data: MutableMapping[str, Any], grouping_config: StrategyConfiguration | None = None
) -> None:
"""
Applies grouping enhancement rules and ensure in_app is set on all frames.
This also trims functions and pulls query strings off of filenames if necessary.
"""
stacktrace_frames = []
stacktrace_containers = []
for stacktrace_info in find_stacktraces_in_data(data, include_raw=True):
frames = stacktrace_info.get_frames()
if frames:
stacktrace_frames.append(frames)
stacktrace_containers.append(
stacktrace_info.container if stacktrace_info.is_exception else {}
)
if not stacktrace_frames:
return
platform = data.get("platform", "")
sentry_sdk.set_tag("platform", platform)
# Put the trimmed function names into the frames. We only do this if
# the trimming produces a different function than the function we have
# otherwise stored in `function` to not make the payload larger
# unnecessarily.
with sentry_sdk.start_span(op=op, name="iterate_frames"):
stripped_querystring = False
for frames in stacktrace_frames:
for frame in frames:
_trim_function_name(frame, platform)
# Restore the original in_app value before applying in-app stacktrace rules. This
# lets us run grouping enhancers on the stacktrace multiple times, as would happen
# during a grouping config transition, for example.
orig_in_app = get_path(frame, "data", "orig_in_app")
if orig_in_app is not None:
frame["in_app"] = None if orig_in_app == -1 else bool(orig_in_app)
# Track the incoming `in_app` value, before we make any changes. This is different
# from the `orig_in_app` value which may be set by
# `apply_category_and_updated_in_app_to_frames`, because it's not tied to the value
# changing as a result of stacktrace rules.
client_in_app = frame.get("in_app")
if client_in_app is not None:
set_path(frame, "data", "client_in_app", value=client_in_app)
if platform == "javascript":
try:
parsed_filename = urlparse(frame.get("filename", ""))
if parsed_filename.query:
stripped_querystring = True
frame["filename"] = frame["filename"].replace(
f"?{parsed_filename.query}", ""
)
# ignore unparsable filenames
except Exception:
pass
if stripped_querystring:
# Fires once per event, regardless of how many frames' filenames were stripped
metrics.incr("sentry.grouping.stripped_filename_querystrings")
# If a grouping config is available, run grouping enhancers
if grouping_config is not None:
with sentry_sdk.start_span(op=op, name="apply_modifications_to_frame"):
for frames, stacktrace_container in zip(stacktrace_frames, stacktrace_containers):
# This call has a caching mechanism when the same stacktrace and rules are used
grouping_config.enhancements.apply_category_and_updated_in_app_to_frames(
frames, platform, stacktrace_container
)
# normalize `in_app` values, noting and storing the event's mix of in-app and system frames, so
# we can track the mix with a metric in cases where this event creates a new group
frame_mixes = {"mixed": 0, "in-app-only": 0, "system-only": 0}
for frames in stacktrace_frames:
stacktrace_frame_mix = _normalize_in_app(frames)
frame_mixes[stacktrace_frame_mix] += 1
event_metadata = data.get("metadata") or {}
event_metadata["in_app_frame_mix"] = (
"in-app-only"
if frame_mixes["in-app-only"] == len(stacktrace_frames)
else "system-only" if frame_mixes["system-only"] == len(stacktrace_frames) else "mixed"
)
data["metadata"] = event_metadata
def _trim_function_name(frame: dict[str, Any], platform: str | None) -> None:
function = frame.get("function")
raw_function = frame.get("raw_function")
# Nothing to trim or trimming has already been done
if not function or raw_function is not None:
return
trimmed_function = trim_function_name(function, frame.get("platform", platform))
if trimmed_function != function:
frame["raw_function"] = function
frame["function"] = trimmed_function
def should_process_for_stacktraces(data):
from sentry.plugins.base import plugins
infos = find_stacktraces_in_data(data, include_empty_exceptions=True)
platforms: set[str] = set()
for info in infos:
platforms.update(info.platforms or ())
for plugin in plugins.all(version=2):
processors = safe_execute(
plugin.get_stacktrace_processors, data=data, stacktrace_infos=infos, platforms=platforms
)
if processors:
return True
return False
def get_processors_for_stacktraces(data, infos):
from sentry.plugins.base import plugins
platforms: set[str] = set()
for info in infos:
platforms.update(info.platforms or ())
processors: list[Callable] = []
for plugin in plugins.all(version=2):
processors.extend(
safe_execute(
plugin.get_stacktrace_processors,
data=data,
stacktrace_infos=infos,
platforms=platforms,
)
or ()
)
if processors:
project = Project.objects.get_from_cache(id=data["project"])
processors = [x(data, infos, project) for x in processors]
return processors
def get_processable_frames(stacktrace_info, processors) -> list[ProcessableFrame]:
"""Returns thin wrappers around the frames in a stacktrace associated
with the processor for it.
"""
frames = stacktrace_info.get_frames()
frame_count = len(frames)
rv: list[ProcessableFrame] = []
for idx, frame in enumerate(frames):
processor = next((p for p in processors if p.handles_frame(frame, stacktrace_info)), None)
if processor is not None:
rv.append(
ProcessableFrame(frame, frame_count - idx - 1, processor, stacktrace_info, rv)
)
return rv
def process_single_stacktrace(processing_task, stacktrace_info, processable_frames):
# TODO: associate errors with the frames and processing issues
changed_raw = False
changed_processed = False
raw_frames = []
processed_frames = []
all_errors: list[Any] = []
bare_frames = stacktrace_info.get_frames()
frame_count = len(bare_frames)
processable_frames = {frame.idx: frame for frame in processable_frames}
for i, bare_frame in enumerate(bare_frames):
idx = frame_count - i - 1
rv = None
if idx in processable_frames:
processable_frame = processable_frames[idx]
assert processable_frame.frame is bare_frame
try:
rv = processable_frame.processor.process_frame(processable_frame, processing_task)
except Exception:
logger.exception("Failed to process frame")
expand_processed, expand_raw, errors = rv or (None, None, None)
if expand_processed is not None:
processed_frames.extend(expand_processed)
changed_processed = True
elif expand_raw: # is not empty
processed_frames.extend(expand_raw)
changed_processed = True
else:
processed_frames.append(bare_frame)
if expand_raw is not None:
raw_frames.extend(expand_raw)
changed_raw = True
else:
raw_frames.append(bare_frame)
all_errors.extend(errors or ())
return (
processed_frames if changed_processed else None,
raw_frames if changed_raw else None,
all_errors,
)
def get_crash_frame_from_event_data(data, frame_filter=None):
"""
Return the highest (closest to the crash) in-app frame in the top stacktrace
which doesn't fail the given filter test.
If no such frame is available, return the highest non-in-app frame which
otherwise meets the same criteria.
Return None if any of the following are true:
- there are no frames
- all frames fail the given filter test
- we're unable to find any frames nested in either event.exception or
event.stacktrace, and there's anything other than exactly one thread
in the data
"""
frames = get_path(data, "exception", "values", -1, "stacktrace", "frames") or get_path(
data, "stacktrace", "frames"
)
if not frames:
threads = get_path(data, "threads", "values")
if threads and len(threads) == 1:
frames = get_path(threads, 0, "stacktrace", "frames")
default = None
for frame in reversed(frames or ()):
if frame is None:
continue
if frame_filter is not None:
if not frame_filter(frame):
continue
if frame.get("in_app"):
return frame
if default is None:
default = frame
if default:
return default
def lookup_frame_cache(keys):
rv = {}
for key in keys:
rv[key] = cache.get(key)
return rv
def get_stacktrace_processing_task(infos, processors):
"""Returns a list of all tasks for the processors. This can skip over
processors that seem to not handle any frames.
"""
by_processor: dict[str, list[Any]] = {}
to_lookup: dict[str, ProcessableFrame] = {}
# by_stacktrace_info requires stable sorting as it is used in
# StacktraceProcessingTask.iter_processable_stacktraces. This is important
# to guarantee reproducible symbolicator requests.
by_stacktrace_info: dict[str, Any] = {}
for info in infos:
processable_frames = get_processable_frames(info, processors)
for processable_frame in processable_frames:
processable_frame.processor.preprocess_frame(processable_frame)
by_processor.setdefault(processable_frame.processor, []).append(processable_frame)
by_stacktrace_info.setdefault(processable_frame.stacktrace_info, []).append(
processable_frame
)
if processable_frame.cache_key is not None:
to_lookup[processable_frame.cache_key] = processable_frame
frame_cache = lookup_frame_cache(to_lookup)
for cache_key, processable_frame in to_lookup.items():
processable_frame.cache_value = frame_cache.get(cache_key)
return StacktraceProcessingTask(
processable_stacktraces=by_stacktrace_info, processors=by_processor
)
def dedup_errors(errors):
# This operation scales bad but we do not expect that many items to
# end up in rv, so that should be okay enough to do.
rv = []
for error in errors:
if error not in rv:
rv.append(error)
return rv
@sentry_sdk.tracing.trace
def process_stacktraces(
data: MutableMapping[str, Any], make_processors=None, set_raw_stacktrace: bool = True
) -> MutableMapping[str, Any] | None:
infos = find_stacktraces_in_data(data, include_empty_exceptions=True)
if make_processors is None:
processors = get_processors_for_stacktraces(data, infos)
else:
processors = make_processors(data, infos)
# Early out if we have no processors. We don't want to record a timer
# in that case.
if not processors:
return None
changed = False
# Build a new processing task
processing_task = get_stacktrace_processing_task(infos, processors)
try:
# Preprocess step
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.preprocess_step"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.preprocess_step(processing_task):
changed = True
span.set_data("data_changed", True)
# Process all stacktraces
for stacktrace_info, processable_frames in processing_task.iter_processable_stacktraces():
# Let the stacktrace processors touch the exception
if stacktrace_info.is_exception and stacktrace_info.container:
for processor in processing_task.iter_processors():
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_exception"
) as span:
span.set_data("processor", processor.__class__.__name__)
if processor.process_exception(stacktrace_info.container):
changed = True
span.set_data("data_changed", True)
# If the stacktrace is empty we skip it for processing
if not stacktrace_info.stacktrace:
continue
with sentry_sdk.start_span(
op="stacktraces.processing.process_stacktraces.process_single_stacktrace"
) as span:
new_frames, new_raw_frames, errors = process_single_stacktrace(
processing_task, stacktrace_info, processable_frames
)
if new_frames is not None:
stacktrace_info.stacktrace["frames"] = new_frames
changed = True
span.set_data("data_changed", True)
if (
set_raw_stacktrace
and new_raw_frames is not None
and stacktrace_info.container is not None
):
stacktrace_info.container["raw_stacktrace"] = dict(
stacktrace_info.stacktrace, frames=new_raw_frames
)
changed = True
if errors:
data.setdefault("errors", []).extend(dedup_errors(errors))
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
except Exception:
logger.exception("stacktraces.processing.crash")
data.setdefault("_metrics", {})["flag.processing.fatal"] = True
data.setdefault("_metrics", {})["flag.processing.error"] = True
changed = True
finally:
for processor in processors:
processor.close()
processing_task.close()
if changed:
return data
else:
return None
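# Illustrative payload shape for find_stacktraces_in_data above; the keys mirror
# the paths the function reads (exception.values[].stacktrace.frames) and every
# value is made up.
event = {
    "platform": "python",
    "exception": {
        "values": [
            {
                "type": "ValueError",
                "stacktrace": {"frames": [{"function": "main", "in_app": True}]},
            }
        ]
    },
}
infos = find_stacktraces_in_data(event)  # one StacktraceInfo with is_exception=True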
|
StacktraceProcessor
|
python
|
jd__tenacity
|
tenacity/asyncio/__init__.py
|
{
"start": 2200,
"end": 7772
}
|
class ____(BaseRetrying):
def __init__(
self,
sleep: t.Callable[
[t.Union[int, float]], t.Union[None, t.Awaitable[None]]
] = _portable_async_sleep,
stop: "StopBaseT" = tenacity.stop.stop_never,
wait: "WaitBaseT" = tenacity.wait.wait_none(),
retry: "t.Union[SyncRetryBaseT, RetryBaseT]" = tenacity.retry_if_exception_type(),
before: t.Callable[
["RetryCallState"], t.Union[None, t.Awaitable[None]]
] = before_nothing,
after: t.Callable[
["RetryCallState"], t.Union[None, t.Awaitable[None]]
] = after_nothing,
before_sleep: t.Optional[
t.Callable[["RetryCallState"], t.Union[None, t.Awaitable[None]]]
] = None,
reraise: bool = False,
retry_error_cls: t.Type["RetryError"] = RetryError,
retry_error_callback: t.Optional[
t.Callable[["RetryCallState"], t.Union[t.Any, t.Awaitable[t.Any]]]
] = None,
) -> None:
super().__init__(
sleep=sleep, # type: ignore[arg-type]
stop=stop,
wait=wait,
retry=retry, # type: ignore[arg-type]
before=before, # type: ignore[arg-type]
after=after, # type: ignore[arg-type]
before_sleep=before_sleep, # type: ignore[arg-type]
reraise=reraise,
retry_error_cls=retry_error_cls,
retry_error_callback=retry_error_callback,
)
async def __call__( # type: ignore[override]
self, fn: WrappedFn, *args: t.Any, **kwargs: t.Any
) -> WrappedFnReturnT:
self.begin()
retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
while True:
do = await self.iter(retry_state=retry_state)
if isinstance(do, DoAttempt):
try:
result = await fn(*args, **kwargs)
except BaseException: # noqa: B902
retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
else:
retry_state.set_result(result)
elif isinstance(do, DoSleep):
retry_state.prepare_for_next_attempt()
await self.sleep(do) # type: ignore[misc]
else:
return do # type: ignore[no-any-return]
def _add_action_func(self, fn: t.Callable[..., t.Any]) -> None:
self.iter_state.actions.append(_utils.wrap_to_async_func(fn))
async def _run_retry(self, retry_state: "RetryCallState") -> None: # type: ignore[override]
self.iter_state.retry_run_result = await _utils.wrap_to_async_func(self.retry)(
retry_state
)
async def _run_wait(self, retry_state: "RetryCallState") -> None: # type: ignore[override]
if self.wait:
sleep = await _utils.wrap_to_async_func(self.wait)(retry_state)
else:
sleep = 0.0
retry_state.upcoming_sleep = sleep
async def _run_stop(self, retry_state: "RetryCallState") -> None: # type: ignore[override]
self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
self.iter_state.stop_run_result = await _utils.wrap_to_async_func(self.stop)(
retry_state
)
async def iter(
self, retry_state: "RetryCallState"
) -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa: A003
self._begin_iter(retry_state)
result = None
for action in self.iter_state.actions:
result = await action(retry_state)
return result
def __iter__(self) -> t.Generator[AttemptManager, None, None]:
raise TypeError("AsyncRetrying object is not iterable")
def __aiter__(self) -> "AsyncRetrying":
self.begin()
self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
return self
async def __anext__(self) -> AttemptManager:
while True:
do = await self.iter(retry_state=self._retry_state)
if do is None:
raise StopAsyncIteration
elif isinstance(do, DoAttempt):
return AttemptManager(retry_state=self._retry_state)
elif isinstance(do, DoSleep):
self._retry_state.prepare_for_next_attempt()
await self.sleep(do) # type: ignore[misc]
else:
raise StopAsyncIteration
def wraps(self, fn: WrappedFn) -> WrappedFn:
wrapped = super().wraps(fn)
# Ensure wrapper is recognized as a coroutine function.
@functools.wraps(
fn, functools.WRAPPER_ASSIGNMENTS + ("__defaults__", "__kwdefaults__")
)
async def async_wrapped(*args: t.Any, **kwargs: t.Any) -> t.Any:
# Always create a copy to prevent overwriting the local contexts when
# calling the same wrapped functions multiple times in the same stack
copy = self.copy()
async_wrapped.statistics = copy.statistics # type: ignore[attr-defined]
return await copy(fn, *args, **kwargs)
# Preserve attributes
async_wrapped.retry = self # type: ignore[attr-defined]
async_wrapped.retry_with = wrapped.retry_with # type: ignore[attr-defined]
async_wrapped.statistics = {} # type: ignore[attr-defined]
return async_wrapped # type: ignore[return-value]
__all__ = [
"retry_all",
"retry_any",
"retry_if_exception",
"retry_if_result",
"WrappedFn",
"AsyncRetrying",
]
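# Usage sketch with tenacity's public API (stop_after_attempt and wait_fixed are
# real tenacity exports); the flaky() coroutine is a stand-in that always fails.
import asyncio
from tenacity import AsyncRetrying, RetryError, stop_after_attempt, wait_fixed

async def flaky() -> str:
    raise RuntimeError("still failing")

async def main() -> None:
    try:
        async for attempt in AsyncRetrying(stop=stop_after_attempt(3), wait=wait_fixed(0.01)):
            with attempt:
                await flaky()
    except RetryError:
        print("gave up after 3 attempts")

asyncio.run(main())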
|
AsyncRetrying
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/generator12.py
|
{
"start": 114,
"end": 498
}
|
class ____:
def __iter__(self) -> Generator[int, None, bool]:
yield 1
return True
def collect1() -> Generator[str, None, bool]:
y = Yielder()
# This should generate an error because int doesn't match str.
z = yield from y
return z
def collect2():
y = Yielder()
z = yield from y
reveal_type(z, expected_text="bool")
return z
|
Yielder
|
python
|
python__mypy
|
test-data/unit/plugins/attrhook.py
|
{
"start": 154,
"end": 611
}
|
class ____(Plugin):
def get_attribute_hook(self, fullname: str) -> Callable[[AttributeContext], Type] | None:
if fullname == "m.Signal.__call__":
return signal_call_callback
return None
def signal_call_callback(ctx: AttributeContext) -> Type:
if isinstance(ctx.type, Instance):
return ctx.type.args[0]
return ctx.default_attr_type
def plugin(version: str) -> type[AttrPlugin]:
return AttrPlugin
|
AttrPlugin
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py
|
{
"start": 5084,
"end": 9107
}
|
class ____(BaseTargetConfigs):
"""
Target configs contain credentials and
settings, specific to the warehouse you're connecting to.
To find valid keys, head to the [Available adapters](
https://docs.getdbt.com/docs/available-adapters) page and
click the desired adapter's "Profile Setup" hyperlink.
Attributes:
type: The name of the database warehouse.
schema: The schema that dbt will build objects into;
in BigQuery, a schema is actually a dataset.
threads: The number of threads representing the max number
of paths through the graph dbt may work on at once.
Examples:
Load stored TargetConfigs:
```python
from prefect_dbt.cli.configs import TargetConfigs
dbt_cli_target_configs = TargetConfigs.load("BLOCK_NAME")
```
"""
_block_type_name = "dbt CLI Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
@classmethod
def from_profiles_yml(
cls: Type[Self],
profile_name: Optional[str] = None,
target_name: Optional[str] = None,
profiles_dir: Optional[str] = None,
allow_field_overrides: bool = False,
) -> "TargetConfigs":
"""
Create a TargetConfigs instance from a dbt profiles.yml file.
Args:
profile_name: Name of the profile to use from profiles.yml.
If None, uses the first profile.
target_name: Name of the target to use from the profile.
If None, uses the default target in the selected profile.
profiles_dir: Path to the directory containing profiles.yml.
If None, uses the default profiles directory.
allow_field_overrides: If enabled, fields from dbt target configs
will override fields provided in extras and credentials.
Returns:
A TargetConfigs instance populated from the profiles.yml target.
Raises:
ValueError: If profiles.yml is not found or if profile/target is invalid
"""
if profiles_dir:
profiles = PrefectDbtSettings(
profiles_dir=Path(profiles_dir)
).load_profiles_yml()
else:
profiles = PrefectDbtSettings().load_profiles_yml()
# If no profile specified, use first non-config one
if profile_name is None:
for name in profiles:
if name != "config":
profile_name = name
break
elif profile_name not in profiles:
raise ValueError(f"Profile {profile_name} not found in profiles.yml")
profile = profiles[profile_name]
if "outputs" not in profile:
raise ValueError(f"No outputs found in profile {profile_name}")
outputs = profile["outputs"]
# If no target specified, use default target
if target_name is None:
target_name = profile["target"]
elif target_name not in outputs:
raise ValueError(
f"Target {target_name} not found in profile {profile_name}"
)
target_config = outputs[target_name]
type = target_config.pop("type")
schema = None
possible_keys = ["schema", "path", "dataset", "database"]
for key in possible_keys:
if key in target_config:
schema = target_config.pop(key)
break
if schema is None:
raise ValueError(f"No schema found. Expected one of: {possible_keys}")
threads = target_config.pop("threads", 4)
return cls(
type=type,
schema=schema,
threads=threads,
extras=target_config or None,
allow_field_overrides=allow_field_overrides,
)
|
TargetConfigs
|
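For context, the profile and target selection performed by from_profiles_yml can be mimicked on an in-memory dict; the data below is hypothetical and stands in for a parsed profiles.yml:

profiles = {
    "config": {"send_anonymous_usage_stats": False},
    "jaffle_shop": {
        "target": "dev",
        "outputs": {
            "dev": {"type": "duckdb", "schema": "main", "threads": 4, "path": "jaffle.duckdb"},
        },
    },
}

# Pick the first non-"config" profile, mirroring the fallback above.
profile_name = next(name for name in profiles if name != "config")
profile = profiles[profile_name]

# Default to the profile's declared target, then split out type/schema/threads.
target = dict(profile["outputs"][profile["target"]])
dbt_type = target.pop("type")
schema = None
for key in ("schema", "path", "dataset", "database"):
    if key in target:
        schema = target.pop(key)
        break
threads = target.pop("threads", 4)
print(dbt_type, schema, threads, target)  # duckdb main 4 {'path': 'jaffle.duckdb'}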
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/io/base.py
|
{
"start": 4604,
"end": 6036
}
|
class ____(IODirectTestBase, ToFromTestMixinBase):
"""Directly test ``to/from_<format>``.
These functions are not public API and are discouraged from public use, in
favor of ``Cosmology.to/from_format(..., format="<format>")``. They are
tested because they are used internally and because some tests for the
methods on |Cosmology| don't need to be run in the |Cosmology| class's
large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
Subclasses should have an attribute ``functions`` which is a dictionary
containing two items: ``"to"=<function for to_format>`` and
``"from"=<function for from_format>``.
"""
@pytest.fixture(scope="class")
def from_format(self):
"""Convert to Cosmology using function ``from``."""
def use_from_format(*args, **kwargs):
kwargs.pop("format", None) # specific to Cosmology.from_format
return self.functions["from"](*args, **kwargs)
return use_from_format
@pytest.fixture(scope="class")
def to_format(self, cosmo):
"""Convert Cosmology to format using function ``to``."""
def use_to_format(*args, **kwargs):
return self.functions["to"](cosmo, *args, **kwargs)
return use_to_format
|
ToFromDirectTestBase
|
python
|
bokeh__bokeh
|
examples/advanced/extensions/tool.py
|
{
"start": 1800,
"end": 2193
}
|
class ____(Tool):
__implementation__ = TypeScript(CODE)
source = Instance(ColumnDataSource)
source = ColumnDataSource(data=dict(x=[], y=[]))
plot = figure(x_range=(0,10), y_range=(0,10), title="Click and drag to draw",
background_fill_color="#efefef", tools="")
plot.add_tools(DrawTool(source=source))
plot.line('x', 'y', line_width=3, source=source)
show(plot)
|
DrawTool
|
python
|
squidfunk__mkdocs-material
|
material/plugins/projects/structure/__init__.py
|
{
"start": 1660,
"end": 10260
}
|
class ____:
# Initialize project - note that the configuration of the projects plugin
# of the enclosing project is necessary to resolve nested projects
def __init__(self, file: str, plugin: ProjectsConfig, slug = "."):
self.config, self.plugin = self._resolve(file, plugin)
# The slug should not be changed after initialization, as it's used for
# correct resolution of projects and nested projects
self.slug = slug
# Find and yield nested projects of the current project - the project's
# slug is prepended to the computed slug for a simple resolution of nested
# projects, allowing authors to use the project:// protocol for linking to
# projects from the top-level project or nested and adjacent projects
def __iter__(self):
seen: list[str] = []
# Compute project root and base directory
root = os.path.dirname(self.config.config_file_path)
base = os.path.join(root, self.plugin.projects_dir)
# Find and yield all projects - note that we need to filter for nested
# projects at this point, as we're only interested in the projects on
# the next level, not in projects inside projects as they are resolved
# recursively to preserve topological ordering. This is also why we must
# sort the list of projects by path, ordering shorter paths first, which
# ensures that nested projects are resolved before their parents.
glob = os.path.join(base, self.plugin.projects_config_files)
glob = iglob(os.path.normpath(glob), recursive = True)
for file in sorted(glob, key = os.path.dirname):
path = os.path.join(os.path.dirname(file), "")
if any(path.startswith(_) for _ in seen):
continue
else:
seen.append(path)
# Extract the first level of the project's directory relative to
# the projects directory as the computed slug of the project. This
# allows authors to build projects whose mkdocs.yml files are not
# located at the project root, e.g., when using git submodules.
slug = os.path.relpath(file, base)
slug, *_ = slug.split(os.path.sep)
# Normalize slug to an internal dot notation which we convert to
# file system or URL paths when necessary. Each slug starts with
# a dot to denote that it is resolved from the top-level project,
# which also allows for resolving slugs in nested projects.
root = self.slug.rstrip(".")
slug = f"{root}.{slug}"
# Create and yield project
yield Project(file, self.plugin, slug)
# Compute project hash
def __hash__(self):
return hash(self.slug)
# Find and yield all nested projects (excluding this project) in reverse
# topological order, by performing a post-order traversal on the tree of
# projects. This function returns project jobs, which are projects with
# their immediate dependencies, to build them in the correct order.
def jobs(self):
stack = [*self]
while stack:
# Pop project from stack and get its dependencies
project = stack.pop()
dependencies = [*project]
# Add project dependencies to stack and yield job
stack.extend(dependencies)
yield ProjectJob(project, dependencies)
# Compute relative path between two projects
def path(self, that: Project):
# If both, the top-level and the current project have a site URL set,
# compute slug from the common path of both site URLs
if self.config.site_url and that.config.site_url:
source = self._path_from_config(that.config)
target = self._path_from_config(self.config)
# Edge case: the author has set a site URL that does not include a
# path, so the path of the project is equal to the top-level path.
# In this case, we need to fall back to the path computed from the
# slug - see https://t.ly/5vqMr
if target == source:
target = self._path_from_slug(self.slug)
# Otherwise, always compute the path from the slugs of both projects,
# as we want to support consolidation of unrelated projects
else:
source = self._path_from_slug(that.slug)
target = self._path_from_slug(self.slug)
# Compute path between projects, and add trailing slash
path = posixpath.relpath(target, source)
return posixpath.join(path, "")
# -------------------------------------------------------------------------
# Resolve project and plugin configuration
def _resolve(self, file: str, plugin: ProjectsConfig):
config = self._resolve_config(file)
plugin = self._resolve_plugin(config, plugin)
# Return project and plugin configuration
return config, plugin
# Resolve project configuration
def _resolve_config(self, file: str):
with open(file, encoding = "utf-8-sig") as f:
config: MkDocsConfig = MkDocsConfig(config_file_path = file)
config.load_file(f)
# Return project configuration
return config
# Resolve project plugin configuration
def _resolve_plugin(self, config: MkDocsConfig, plugin: ProjectsConfig):
# Make sure that every project has a plugin configuration set - we need
# to deep copy the configuration object, as it's mutated during parsing.
# We're using an internal method of the Plugins class to ensure that we
# always stick to the syntaxes allowed by MkDocs (list and dictionary).
plugins = Plugins._parse_configs(deepcopy(config.plugins))
for index, (key, settings) in enumerate(plugins):
if not re.match(r"^(material/)?projects$", key):
continue
# Forward these settings of the plugin configuration to the project,
# as we need to build nested projects consistently
for name in ["cache", "projects", "projects_root_dir", "hoisting"]:
settings[name] = plugin[name]
# Forward these settings only if they have not been set in the
# project configuration, as they might be overwritten by the author
for name in ["log", "log_level"]:
if not name in settings:
settings[name] = plugin[name]
# Initialize and expand the plugin configuration, and mutate the
# plugin collection to persist the patched configuration
plugin: ProjectsConfig = ProjectsConfig()
plugin.load_dict(settings)
if isinstance(config.plugins, list):
config.plugins[index] = { key: dict(plugin.items()) }
else:
config.plugins[key] = dict(plugin.items())
# Return project plugin configuration
return plugin
# If no plugin configuration was found, add the default configuration
# and call this function recursively to ensure that it's present
config.plugins.append("material/projects")
return self._resolve_plugin(config, plugin)
# -------------------------------------------------------------------------
# Compute path from given slug - split slug at dots, ignoring the first one,
# and join the segments to a path, prefixed with a dot. This is necessary
# to compute the common path correctly, so we can use the same logic for
# when the path is computed from the site URL (see below).
def _path_from_slug(self, slug: str):
_, *segments = slug.split(".")
return posixpath.join(".", *segments)
# Compute path from given project configuration - parse site URL and return
# canonicalized path. Paths always start with a dot and trailing slashes are
# always removed. This is necessary so that we can compute the common path
# correctly, since the site URL might or might not contain a trailing slash.
def _path_from_config(self, config: MkDocsConfig):
url = urlparse(config.site_url)
# Remove leading slash, if any
path = url.path
if path.startswith("/"):
path = path[1:]
# Return normalized path
path = posixpath.normpath(path) if path else path
return posixpath.join(".", path)
# -----------------------------------------------------------------------------
# Project job
|
Project
|
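A standalone sketch of the slug-to-path translation described in the comments above, using posixpath only; the slugs and helper name are illustrative, not the plugin's API:

import posixpath

def path_from_slug(slug: str) -> str:
    # Drop the leading dot segment and join the rest beneath ".".
    _, *segments = slug.split(".")
    return posixpath.join(".", *segments)

source = path_from_slug(".docs.api")     # './docs/api'
target = path_from_slug(".docs.guides")  # './docs/guides'
print(posixpath.join(posixpath.relpath(target, source), ""))  # '../guides/'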
python
|
jina-ai__jina
|
jina/serve/runtimes/servers/http.py
|
{
"start": 9100,
"end": 9945
}
|
class ____(FastAPIBaseServer):
"""
:class:`HTTPServer` is a FastAPIBaseServer that uses the default FastAPI app for a given request handler
"""
@property
def app(self):
"""Get the default base API app for Server
:return: Return a FastAPI app for the default HTTPGateway
"""
return self._request_handler._http_fastapi_default_app(
title=self.title,
description=self.description,
no_crud_endpoints=self.no_crud_endpoints,
no_debug_endpoints=self.no_debug_endpoints,
expose_endpoints=self.expose_endpoints,
expose_graphql_endpoint=self.expose_graphql_endpoint,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
cors=self.cors,
logger=self.logger,
)
|
HTTPServer
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/views.py
|
{
"start": 24425,
"end": 24880
}
|
class ____(
APIv3Settings,
NestedViewSetMixin,
OrganizationQuerySetMixin,
FlexFieldsMixin,
ListModelMixin,
GenericViewSet,
):
model = Team
serializer_class = TeamSerializer
permission_classes = [IsAuthenticated & IsOrganizationAdmin]
permit_list_expands = ["members"]
def get_queryset(self):
organization = self._get_parent_organization()
return organization.teams.all()
|
OrganizationsTeamsViewSet
|
python
|
ray-project__ray
|
python/ray/actor.py
|
{
"start": 4795,
"end": 5463
}
|
class ____(Generic[_Ret, _T0, _T1, _T2, _T3, _T4, _T5, _T6]):
def remote(
self,
__arg0: "Union[_T0, ObjectRef[_T0]]",
__arg1: "Union[_T1, ObjectRef[_T1]]",
__arg2: "Union[_T2, ObjectRef[_T2]]",
__arg3: "Union[_T3, ObjectRef[_T3]]",
__arg4: "Union[_T4, ObjectRef[_T4]]",
__arg5: "Union[_T5, ObjectRef[_T5]]",
__arg6: "Union[_T6, ObjectRef[_T6]]",
) -> "ObjectRef[_Ret]":
...
def bind(
self,
__arg0: _T0,
__arg1: _T1,
__arg2: _T2,
__arg3: _T3,
__arg4: _T4,
__arg5: _T5,
__arg6: _T6,
) -> Any:
...
|
_RemoteMethod6
|
python
|
keras-team__keras
|
guides/making_new_layers_and_models_via_subclassing.py
|
{
"start": 17460,
"end": 17822
}
|
class ____(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = ops.shape(z_mean)[0]
dim = ops.shape(z_mean)[1]
epsilon = keras.random.normal(shape=(batch, dim))
return z_mean + ops.exp(0.5 * z_log_var) * epsilon
|
Sampling
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/triton.py
|
{
"start": 59877,
"end": 67456
}
|
class ____(TritonOverrides):
"""Map element-wise ops to Triton within a TritonKernel
Unlike TritonOverrides, these assume the code is going to be inserted into
the body of the main triton kernel and so it may use indexing and mask
variables which are assumed to already be defined in the current scope.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# happens in __init__ unlike _initialize_pointwise_overrides
# because the libdevice registrations are populated during lowerings
self._setup_libdevice_routing()
@classmethod
@functools.cache
def _setup_libdevice_routing(cls):
"""Set up routing to libdevice implementations for fp64 inputs."""
from torch._inductor.codegen.common import OpDecompositions
for fn_name in torch._inductor.utils.op_requires_libdevice_fp64:
assert hasattr(cls, fn_name)
original_impl = getattr(cls, fn_name)
def decomposition_router(x, _original_impl, _fn_name):
if x.dtype != torch.float64:
return _original_impl(x)
else:
return getattr(OpDecompositions, _fn_name)(x).value
if fn_name == "sigmoid":
assert hasattr(OpDecompositions, "sigmoid")
fn = functools.partial(
decomposition_router, _original_impl=original_impl, _fn_name=fn_name
)
fn.__name__ = fn_name # type: ignore[attr-defined]
setattr(cls, fn_name, staticmethod(fn))
continue
def dtype_router(x, _original_impl, _fn_name):
if x.dtype == torch.float64:
return f"libdevice.{_fn_name}({x})"
else:
return _original_impl(x)
fn = functools.partial(
dtype_router, _original_impl=original_impl, _fn_name=fn_name
)
fn.__name__ = fn_name # type: ignore[attr-defined]
setattr(cls, fn_name, staticmethod(fn))
@classmethod
def constant(cls, value, dtype):
# NOTE: Cannot use shape=[] as it's not supported by triton-rocm
# We could use shape=[1] instead but starting with the correct
# ndim avoids extra `tt.expand_dim` ops appearing in the triton IR.
ndim = V.kernel.triton_tensor_ndim()
shape = [1] * ndim
return cls._shaped_constant(value, dtype, shape=shape)
@classmethod
def index_expr(cls, expr, dtype):
indexing = V.kernel.indexing(
expr, block_ptr=False, tma_compatibility_checker=None
)
assert isinstance(indexing, IndexingOptions)
shape: BlockShapeType
if indexing.expand_shape:
shape = indexing.expand_shape
else:
shape = TritonSymbols.get_block_shape(indexing.index)
# Our sympy expr printing casts to the current kernel index dtype.
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype
index_dtype = V.kernel.get_index_dtype_as_torch_dtype()
dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype
# after we emit this var we cast it to the correct dtype
orig = config.test_configs.runtime_triton_dtype_assert
try:
config.test_configs.runtime_triton_dtype_assert = False
var = V.kernel.cse.generate(
V.kernel.compute,
indexing.index_str,
bounds=get_bounds_index_expr(expr),
dtype=dtype,
shape=shape,
)
finally:
config.test_configs.runtime_triton_dtype_assert = orig
if dtype not in (torch.int32, torch.int64):
var = V.kernel.cse.generate(
V.kernel.compute,
cls.to_dtype(var, dtype),
dtype=upcast_compute_type(dtype),
shape=var.shape,
)
else:
# TODO: we are not always consistent in enforcing that the output of the index expr printing
# results in the indexing dtype. So if we detect that we have an input which might type promote
# to a dtype other than indexing dtype, add a cast.
# Trying to avoid
dtype = index_dtype
for index_var in expr.free_symbols:
if symbol_is_type(index_var, SymT.TMP):
dtype = torch.promote_types(
dtype, V.kernel.cse.varname_map[index_var.name].dtype
)
if dtype != index_dtype:
var = V.kernel.cse.generate(
V.kernel.compute,
cls.to_dtype(var, index_dtype),
dtype=index_dtype,
shape=var.shape,
)
var.mask_vars = indexing.mask_vars
return var
@staticmethod
def masked(mask, body, other):
if mask is not None and torch.version.hip is not None:
mask = V.kernel.cse.generate(
V.kernel.compute,
f"{mask}.to(tl.int1)",
dtype=torch.bool,
shape=mask.shape,
)
nodes = body.graph.find_nodes(op="output")
assert nodes, "graph for body does not contain an output"
need_where = False
# If we have a tl.load with a masking operator and no other value
# we can add the mask here and the other value to the tl.load
# operator to save the branching cost.
for node in nodes:
for arg in node.args:
if arg.target != "load" or should_unwrap_unspec_arg(arg.args[1]):
need_where = True
break
value = None if need_where else other
with V.kernel.mask_loads(mask, value=value) as new_mask:
result = body()
if need_where:
# Remove once CSEVariables track the dtype
if result.bounds.is_bool:
other = bool(other)
# Take dtype from result to prevent accidental promotion
other = V.kernel.cse.generate(
V.kernel.compute,
f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)",
bounds=ValueRanges.wrap(other),
dtype=result.dtype,
shape=result.shape,
)
ret = ops.where(new_mask, result, other)
else:
ret = result
ret.mask_vars.discard(new_mask)
return ret
@staticmethod
def load_seed(name, offset):
var = V.kernel.args.input(name)
return (
f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})"
)
@staticmethod
def frexp(x):
cache_key = f"frexp({x})"
if cse_val := V.kernel.cse.try_get(cache_key):
return cse_val
mantissa = V.kernel.cse.newvar(dtype=x.dtype, shape=x.shape)
exponent = V.kernel.cse.newvar(dtype=torch.int32, shape=x.shape)
V.kernel.compute.writeline(
f"{mantissa}, {exponent} = triton_helpers.frexp({x})"
)
V.kernel.cse.put(cache_key, (mantissa, exponent))
return (mantissa, exponent)
@staticmethod
def partial_accumulate(
name: str,
reduction_type: str,
value: CSEVariable,
extra_meta: dict[str, Any],
) -> None:
raise NotImplementedError
|
TritonKernelOverrides
|
python
|
google__pytype
|
pytype/state.py
|
{
"start": 14576,
"end": 17586
}
|
class ____:
"""Represents a condition due to if-splitting.
Properties:
node: A CFGNode.
binding: A Binding for the condition's constraints.
"""
def __init__(self, node, dnf):
# The condition is represented by a dummy variable with a single binding
# to None. The origins for this binding are the dnf clauses.
self._var = node.program.NewVariable()
self._binding = self._var.AddBinding(None)
for clause in dnf:
sources = set(clause)
self._binding.AddOrigin(node, sources)
@property
def binding(self):
return self._binding
_restrict_counter = metrics.MapCounter("state_restrict")
def _match_condition(value, condition):
if isinstance(condition, bool):
return compare.compatible_with(value, condition)
elif condition is None:
return compare.compatible_with_none(value)
else:
assert condition is NOT_NONE
return value.full_name != "builtins.NoneType"
def restrict_condition(node, var, condition):
"""Return a restricted condition based on filtered bindings.
Args:
node: The CFGNode.
var: A variable.
condition: A value that we will check each binding for compatibility with.
Returns:
A Condition or None. Each binding of the variable is checked for
compatibility with the condition. If either no bindings match, or all
bindings match, then None is returned. Otherwise a new Condition is built
from the specified, compatible, bindings.
"""
dnf = []
restricted = False
for b in var.bindings:
match_result = _match_condition(b.data, condition)
if match_result:
dnf.append([b]) # the binding may match the condition
else:
restricted = True # the binding cannot match the condition
if not dnf:
_restrict_counter.inc("unsatisfiable")
return UNSATISFIABLE
elif restricted:
_restrict_counter.inc("restricted")
return Condition(node, dnf)
else:
_restrict_counter.inc("unrestricted")
return None
def _is_or_is_not_cmp(left, right, is_not=False):
"""Implementation of 'left is right' amd 'left is not right'."""
if isinstance(left, abstract.PythonConstant) and isinstance(
right, abstract.PythonConstant
):
if left.cls != right.cls:
return is_not
return is_not ^ (left.pyval == right.pyval)
elif isinstance(left, abstract.Instance) and isinstance(
right, abstract.Instance
):
if left.cls != right.cls:
# Different classes, so treat the values as distinct. (If the classes were the
# same, the values could be identical, but we can't tell that from the types.)
return is_not
return None
elif isinstance(left, abstract.Class) and isinstance(right, abstract.Class):
# types are singletons. We use the name so that, e.g., two different
# TupleClass instances compare as identical.
return is_not ^ (left.full_name == right.full_name)
else:
return None
def is_cmp(left, right):
return _is_or_is_not_cmp(left, right, is_not=False)
def is_not_cmp(left, right):
return _is_or_is_not_cmp(left, right, is_not=True)
|
Condition
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 223384,
"end": 223699
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("CheckSuite", graphql_name="node")
|
CheckSuiteEdge
|
python
|
apache__airflow
|
devel-common/src/tests_common/test_utils/perf/perf_kit/sqlalchemy.py
|
{
"start": 1210,
"end": 5042
}
|
class ____:
"""
Track SQL queries in a code block.
:param display_num: If True, displays the query number.
:param display_time: If True, displays the query execution time.
:param display_trace: If True, displays the simplified (one-line) stack trace
:param display_sql: If True, displays the SQL statements
:param display_parameters: If True, display SQL statement parameters
:param print_fn: The function used to display the text. By default, ``builtins.print``
"""
def __init__(
self,
*,
display_num: bool = True,
display_time: bool = True,
display_trace: bool = True,
display_sql: bool = False,
display_parameters: bool = True,
print_fn: Callable[[str], None] = print,
):
self.display_num = display_num
self.display_time = display_time
self.display_trace = display_trace
self.display_sql = display_sql
self.display_parameters = display_parameters
self.print_fn = print_fn
self.query_count = 0
def before_cursor_execute(
self,
conn,
cursor,
statement,
parameters,
context,
executemany,
):
"""
Execute before cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
:return:
"""
conn.info.setdefault("query_start_time", []).append(time.monotonic())
self.query_count += 1
def after_cursor_execute(
self,
conn,
cursor,
statement,
parameters,
context,
executemany,
):
"""
Execute after cursor.
:param conn: connection
:param cursor: cursor
:param statement: statement
:param parameters: parameters
:param context: context
:param executemany: whether many statements executed
:return:
"""
total = time.monotonic() - conn.info["query_start_time"].pop()
file_names = [
f"{f.filename}:{f.name}:{f.lineno}"
for f in traceback.extract_stack()
if "sqlalchemy" not in f.filename
]
file_name = file_names[-1] if file_names else ""
stack = [f for f in traceback.extract_stack() if "sqlalchemy" not in f.filename]
stack_info = " > ".join([f"{f.filename.rpartition('/')[-1]}:{f.name}:{f.lineno}" for f in stack][-7:])
conn.info.setdefault("query_start_time", []).append(time.monotonic())
output_parts = []
if self.display_num:
output_parts.append(f"{self.query_count:>3}")
if self.display_time:
output_parts.append(f"{total:.5f}")
if self.display_trace:
output_parts.extend([f"{file_name}", f"{stack_info}"])
if self.display_sql:
sql_oneline = statement.replace("\n", " ")
output_parts.append(f"{_pretty_format_sql(sql_oneline)}")
if self.display_parameters:
output_parts.append(f"{parameters}")
self.print_fn(" | ".join(output_parts))
def __enter__(self):
import airflow.settings
event.listen(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
event.listen(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
def __exit__(self, type_, value, traceback):
import airflow.settings
event.remove(airflow.settings.engine, "before_cursor_execute", self.before_cursor_execute)
event.remove(airflow.settings.engine, "after_cursor_execute", self.after_cursor_execute)
trace_queries = TraceQueries
|
TraceQueries
|
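The same before/after cursor-execute hook pattern works on a plain SQLAlchemy engine; the sketch below assumes only that SQLAlchemy is installed and is not tied to Airflow's settings:

import time

from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")  # in-memory database

@event.listens_for(engine, "before_cursor_execute")
def before(conn, cursor, statement, parameters, context, executemany):
    conn.info.setdefault("query_start_time", []).append(time.monotonic())

@event.listens_for(engine, "after_cursor_execute")
def after(conn, cursor, statement, parameters, context, executemany):
    total = time.monotonic() - conn.info["query_start_time"].pop()
    print(f"{total:.5f} | {statement}")

with engine.connect() as conn:
    conn.execute(text("SELECT 1"))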
python
|
walkccc__LeetCode
|
solutions/2458. Height of Binary Tree After Subtree Removal Queries/2458.py
|
{
"start": 0,
"end": 810
}
|
class ____:
def treeQueries(self, root: TreeNode | None, queries: list[int]) -> list[int]:
@lru_cache(None)
def height(root: TreeNode | None) -> int:
if not root:
return 0
return 1 + max(height(root.left), height(root.right))
# valToMaxHeight[val] := the maximum height without the node with `val`
valToMaxHeight = {}
# maxHeight := the maximum height without the current node `root`
def dfs(root: TreeNode | None, depth: int, maxHeight: int) -> None:
if not root:
return
valToMaxHeight[root.val] = maxHeight
dfs(root.left, depth + 1, max(maxHeight, depth + height(root.right)))
dfs(root.right, depth + 1, max(maxHeight, depth + height(root.left)))
dfs(root, 0, 0)
return [valToMaxHeight[query] for query in queries]
|
Solution
|
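A brute-force cross-check for the same problem on a tiny tree; the TreeNode class and helper below are illustrative, with heights counted in edges as in the problem statement:

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def height_without(node, removed):
    # Height (in edges) after deleting the subtree rooted at `removed`;
    # an empty or removed subtree contributes -1 so a leaf ends up at 0.
    if node is None or node.val == removed:
        return -1
    return 1 + max(height_without(node.left, removed),
                   height_without(node.right, removed))

root = TreeNode(1, TreeNode(3, TreeNode(2)), TreeNode(4, None, TreeNode(7)))
print([height_without(root, q) for q in (3, 4)])  # [2, 2]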
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass1.py
|
{
"start": 2982,
"end": 3318
}
|
class ____(Generic[*Ts5, P4, P5]): ... # OK
reveal_type(
ClassD[int, str, complex],
expected_text="type[ClassD[int, str, complex, (float, bool), (bool)]]",
)
reveal_type(
ClassD[int, str, [str, complex]],
expected_text="type[ClassD[int, str, (str, complex), (bool)]]",
)
P6 = ParamSpec("P6", default=[str, int])
|
ClassD
|
python
|
tensorflow__tensorflow
|
tensorflow/dtensor/python/tests/mnist_test.py
|
{
"start": 3255,
"end": 6532
}
|
class ____(test_util.DTensorBaseTest):
def setUp(self):
super(DTensorMNISTTest, self).setUp()
global_ids = test_util.create_device_ids_array((2,))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = {
device: layout_lib.Mesh(
[_BATCH_DIM],
global_ids,
local_ids,
test_util.create_device_list((2,), device),
)
for device in ['TPU', 'GPU', 'CPU']
}
self.mesh = self.configTestMesh(mesh_dict)
def init_var(self, mesh):
# Initialize TF random normal variables (without using DTensor).
w_initializer = stateless_random_ops.stateless_random_normal(
shape=[28 * 28, 10], seed=[0, 1]
)
b_initializer = stateless_random_ops.stateless_random_normal(
shape=[10], seed=[1, 2]
)
# A filter with 3x3 shape, 1 input channel and 1 output channel.
k_initializer = stateless_random_ops.stateless_random_normal(
[3, 3, 1, 1], seed=[2, 3]
)
n_w = variables.Variable(w_initializer)
n_b = variables.Variable(b_initializer)
n_k = variables.Variable(k_initializer)
# Initialize DTensor variables.
w_initializer_on_mesh = api.copy_to_mesh(
w_initializer, Layout.replicated(mesh, 2)
)
b_initializer_on_mesh = api.copy_to_mesh(
b_initializer, Layout.replicated(mesh, rank=1)
)
k_initializer_on_mesh = api.copy_to_mesh(
k_initializer, Layout.replicated(mesh, rank=4)
)
w = d_variable.DVariable(w_initializer_on_mesh)
b = d_variable.DVariable(b_initializer_on_mesh)
k = d_variable.DVariable(k_initializer_on_mesh)
return (n_w, n_b, n_k), (w, b, k)
@parameterized.named_parameters(('Eager', False), ('Function', True))
def testMnist(self, on_function):
mnist_dataset = mnist_fake_dataset()
(n_w, n_b, n_k), (w, b, k) = self.init_var(self.mesh)
n_dataset = mnist_dataset.batch(_BATCH_SIZE, drop_remainder=True)
n_iter = iter(n_dataset)
input_layout = Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=4)
label_layout = Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=2)
dtensor_dataset = input_util.DTensorDataset(
dataset=mnist_dataset,
global_batch_size=_BATCH_SIZE,
mesh=self.mesh,
layouts=(input_layout, label_layout),
batch_dim=_BATCH_DIM,
)
dtensor_iter = iter(dtensor_dataset)
step_fn = (
polymorphic_function.function(_run_step) if on_function else _run_step
)
# Training loop.
for _ in range(_STEPS):
# Normal run without DTensor.
n_input, _ = next(n_iter)
g_nw, g_nb, n_loss = step_fn(n_input, n_w, n_b, n_k)
# DTensor Run
dtensor_input, _ = next(dtensor_iter)
with ops.device_v2(api.device_name()):
gw, gb, loss = step_fn(dtensor_input, w, b, k)
loss_unpack = api.unpack(loss)
self.assertAllEqual(loss_unpack[0], loss_unpack[1])
self.assertAllClose(n_loss, loss, atol=_ATOL, rtol=_RTOL)
self.assertAllClose(g_nw, gw, atol=_ATOL, rtol=_RTOL)
self.assertAllClose(g_nb, gb, atol=_ATOL, rtol=_RTOL)
self.assertAllClose(n_w, w, atol=_ATOL, rtol=_RTOL)
self.assertAllClose(n_b, b, atol=_ATOL, rtol=_RTOL)
if __name__ == '__main__':
test.main()
|
DTensorMNISTTest
|
python
|
fluentpython__example-code-2e
|
24-class-metaprog/checked/metaclass/checkedlib.py
|
{
"start": 3732,
"end": 4880
}
|
class ____(metaclass=CheckedMeta):
__slots__ = () # skip CheckedMeta.__new__ processing
@classmethod
def _fields(cls) -> dict[str, type]:
return get_type_hints(cls)
def __init__(self, **kwargs: Any) -> None:
for name in self._fields():
value = kwargs.pop(name, ...)
setattr(self, name, value)
if kwargs:
self.__flag_unknown_attrs(*kwargs)
def __flag_unknown_attrs(self, *names: str) -> NoReturn:
plural = 's' if len(names) > 1 else ''
extra = ', '.join(f'{name!r}' for name in names)
cls_name = repr(self.__class__.__name__)
raise AttributeError(f'{cls_name} object has no attribute{plural} {extra}')
def _asdict(self) -> dict[str, Any]:
return {
name: getattr(self, name)
for name, attr in self.__class__.__dict__.items()
if isinstance(attr, Field)
}
def __repr__(self) -> str:
kwargs = ', '.join(
f'{key}={value!r}' for key, value in self._asdict().items()
)
return f'{self.__class__.__name__}({kwargs})'
# end::CHECKED_CLASS[]
|
Checked
|
python
|
readthedocs__readthedocs.org
|
readthedocs/core/tests/test_filesystem_utils.py
|
{
"start": 399,
"end": 7078
}
|
class ____(TestCase):
def assert_files_equal(self, directory, files):
self.assertEqual(
{str(p.relative_to(directory)) for p in directory.iterdir()}, files
)
def test_copytree(self):
from_directory = Path(mkdtemp())
docroot_path = from_directory.parent
to_directory = Path(mkdtemp()) / "target"
(from_directory / "test.txt").touch()
self.assertFalse(to_directory.exists())
with override_settings(DOCROOT=docroot_path):
safe_copytree(from_directory, to_directory)
self.assert_files_equal(to_directory, {"test.txt"})
def test_copytree_outside_docroot(self):
from_directory = Path(mkdtemp())
(from_directory / "test.txt").touch()
to_directory = Path(mkdtemp()) / "target"
docroot_path = Path(mkdtemp())
with pytest.raises(SuspiciousFileOperation):
with override_settings(DOCROOT=docroot_path):
safe_copytree(from_directory, to_directory)
def test_copytree_with_symlinks(self):
from_directory = Path(mkdtemp())
docroot_path = from_directory.parent
to_directory = Path(mkdtemp()) / "target"
file_a = from_directory / "test.txt"
file_a.touch()
symlink_a = from_directory / "symlink.txt"
symlink_a.symlink_to(file_a)
symlink_b = from_directory / "symlink-dir"
symlink_b.symlink_to(to_directory.parent)
self.assertFalse(to_directory.exists())
with override_settings(DOCROOT=docroot_path):
safe_copytree(from_directory, to_directory)
# Symlinks are copied as symlinks, not as files.
self.assert_files_equal(
to_directory, {"test.txt", "symlink.txt", "symlink-dir"}
)
self.assertTrue((to_directory / "symlink.txt").is_symlink())
self.assertTrue((to_directory / "symlink-dir").is_symlink())
def test_copytree_from_dir_as_symlink(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
from_directory = root_directory / "a"
from_directory.mkdir()
(from_directory / "test.txt").touch()
to_directory = root_directory / "b"
from_directory_symlink = root_directory / "symlink-a"
from_directory_symlink.symlink_to(from_directory)
self.assertFalse(to_directory.exists())
with override_settings(DOCROOT=docroot_path):
self.assertFalse(safe_copytree(from_directory_symlink, to_directory))
self.assertFalse(to_directory.exists())
def test_open(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
file_a = root_directory / "test.txt"
file_a.touch()
with override_settings(DOCROOT=docroot_path):
context_manager = safe_open(file_a, allow_symlinks=False)
self.assertIsNotNone(context_manager)
with override_settings(DOCROOT=docroot_path):
context_manager = safe_open(
file_a, allow_symlinks=True, base_path=root_directory
)
self.assertIsNotNone(context_manager)
def test_open_large_file(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
file_a = root_directory / "test.txt"
file_a.write_bytes(b"0" * (1024 * 2))
with override_settings(DOCROOT=docroot_path):
with pytest.raises(BuildUserError):
safe_open(file_a, max_size_bytes=1024)
def test_write_file(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
file_a = root_directory / "test.txt"
with override_settings(DOCROOT=docroot_path):
with safe_open(file_a, mode="w") as f:
f.write("Hello World")
self.assertEqual(file_a.read_text(), "Hello World")
def test_open_outside_docroot(self):
root_directory = Path(mkdtemp())
docroot_path = Path(mkdtemp())
file_a = root_directory / "test.txt"
file_a.touch()
with pytest.raises(SuspiciousFileOperation):
with override_settings(DOCROOT=docroot_path):
safe_open(file_a)
def test_open_with_symlinks(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
file_a = root_directory / "test.txt"
file_a.touch()
symlink_a = root_directory / "symlink.txt"
symlink_a.symlink_to(file_a)
# Symlinks aren't allowed.
with pytest.raises(UnsupportedSymlinkFileError):
with override_settings(DOCROOT=docroot_path):
safe_open(symlink_a, allow_symlinks=False)
# Symlinks are allowed if they are under the root_directory.
with override_settings(DOCROOT=docroot_path):
context_manager = safe_open(
symlink_a, allow_symlinks=True, base_path=root_directory
)
self.assertIsNotNone(context_manager)
# Symlinks aren't allowed if they aren't under the root_directory.
with pytest.raises(SymlinkOutsideBasePath):
with override_settings(DOCROOT=docroot_path):
new_root_directory = root_directory / "dir"
new_root_directory.mkdir()
safe_open(symlink_a, allow_symlinks=True, base_path=new_root_directory)
def test_rmtree(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
(root_directory / "test.txt").touch()
self.assertTrue(root_directory.exists())
with override_settings(DOCROOT=docroot_path):
safe_rmtree(root_directory)
self.assertFalse(root_directory.exists())
def test_rmtree_outside_docroot(self):
root_directory = Path(mkdtemp())
docroot_path = Path(mkdtemp())
(root_directory / "test.txt").touch()
self.assertTrue(root_directory.exists())
with pytest.raises(SuspiciousFileOperation):
with override_settings(DOCROOT=docroot_path):
safe_rmtree(root_directory)
def test_rmtree_with_symlinks(self):
root_directory = Path(mkdtemp())
docroot_path = root_directory
dir_a = root_directory / "test"
dir_a.mkdir()
(dir_a / "test.txt").touch()
symlink_a = root_directory / "symlink"
symlink_a.symlink_to(dir_a)
# Directories reached through a symlink aren't deleted.
self.assertTrue(symlink_a.exists())
with override_settings(DOCROOT=docroot_path):
safe_rmtree(symlink_a)
self.assertTrue(symlink_a.exists())
|
TestFileSystemUtils
|
python
|
chroma-core__chroma
|
chromadb/utils/embedding_functions/schemas/bm25_tokenizer.py
|
{
"start": 3277,
"end": 4519
}
|
class ____:
"""Tokenizer with stopword filtering and stemming used by BM25 embeddings."""
def __init__(
self,
stemmer: SnowballStemmer,
stopwords: Iterable[str],
token_max_length: int,
) -> None:
self._stemmer = stemmer
self._stopwords = {word.lower() for word in stopwords}
self._token_max_length = token_max_length
self._non_alphanumeric_pattern = re.compile(r"[^\w\s]+", flags=re.UNICODE)
def _remove_non_alphanumeric(self, text: str) -> str:
return self._non_alphanumeric_pattern.sub(" ", text)
@staticmethod
def _simple_tokenize(text: str) -> List[str]:
return [token for token in text.lower().split() if token]
def tokenize(self, text: str) -> List[str]:
cleaned = self._remove_non_alphanumeric(text)
raw_tokens = self._simple_tokenize(cleaned)
tokens: List[str] = []
for token in raw_tokens:
if token in self._stopwords:
continue
if len(token) > self._token_max_length:
continue
stemmed = self._stemmer.stem(token).strip()
if stemmed:
tokens.append(stemmed)
return tokens
|
Bm25Tokenizer
|
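The clean/split/filter/stem pipeline above can be exercised without the NLTK Snowball stemmer by swapping in a trivial stand-in; the stub and sample text below are illustrative only:

import re

class IdentityStemmer:
    # Stand-in for an NLTK-style stemmer: returns tokens unchanged.
    def stem(self, token: str) -> str:
        return token

stopwords = {"the", "a", "of"}
non_alnum = re.compile(r"[^\w\s]+", flags=re.UNICODE)
stemmer = IdentityStemmer()

def tokenize(text: str, token_max_length: int = 40) -> list:
    cleaned = non_alnum.sub(" ", text)
    return [
        stemmer.stem(tok)
        for tok in cleaned.lower().split()
        if tok not in stopwords and len(tok) <= token_max_length
    ]

print(tokenize("The rank, of a BM25 match!"))  # ['rank', 'bm25', 'match']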
python
|
pytorch__pytorch
|
torch/_export/pass_infra/proxy_value.py
|
{
"start": 141,
"end": 1269
}
|
class ____(Generic[_T]):
# pyre-ignore
def __init__(self, data: Iterable[_T], proxy: Union[torch.fx.Proxy, torch.fx.Node]):
# pyre-ignore
self.data = data
self.proxy_or_node = proxy
@property
def node(self) -> torch.fx.Node:
if isinstance(self.proxy_or_node, torch.fx.Node):
return self.proxy_or_node
assert isinstance(self.proxy_or_node, torch.fx.Proxy)
return self.proxy_or_node.node
@property
def proxy(self) -> torch.fx.Proxy:
if not isinstance(self.proxy_or_node, torch.fx.Proxy):
raise RuntimeError(
f"ProxyValue doesn't have attached Proxy object. Node: {self.proxy_or_node.format_node()}"
)
return self.proxy_or_node
def to_tensor(self) -> torch.Tensor:
assert isinstance(self.data, torch.Tensor)
return self.data
def is_tensor(self) -> bool:
return isinstance(self.data, torch.Tensor)
# pyre-ignore
def __iter__(self) -> Iterator[_T]:
yield from self.data
def __bool__(self) -> bool:
return bool(self.data)
|
ProxyValue
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/padding.py
|
{
"start": 680,
"end": 1520
}
|
class ____(AsymmetricPadding):
MAX_LENGTH = _MaxLength()
AUTO = _Auto()
DIGEST_LENGTH = _DigestLength()
name = "EMSA-PSS"
_salt_length: int | _MaxLength | _Auto | _DigestLength
def __init__(
self,
mgf: MGF,
salt_length: int | _MaxLength | _Auto | _DigestLength,
) -> None:
self._mgf = mgf
if not isinstance(
salt_length, (int, _MaxLength, _Auto, _DigestLength)
):
raise TypeError(
"salt_length must be an integer, MAX_LENGTH, "
"DIGEST_LENGTH, or AUTO"
)
if isinstance(salt_length, int) and salt_length < 0:
raise ValueError("salt_length must be zero or greater.")
self._salt_length = salt_length
@property
def mgf(self) -> MGF:
return self._mgf
|
PSS
|
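For reference, this padding class is normally passed to an RSA key's sign/verify calls; the snippet follows the standard cryptography usage and assumes the package is installed:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
message = b"message to sign"

pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)
signature = private_key.sign(message, pss, hashes.SHA256())

# verify() returns None on success and raises InvalidSignature on mismatch.
private_key.public_key().verify(signature, message, pss, hashes.SHA256())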
python
|
scipy__scipy
|
scipy/stats/tests/test_generation/reference_distributions.py
|
{
"start": 12343,
"end": 12815
}
|
class ____(ReferenceDistribution):
def __init__(self, *, a, b):
super().__init__(a=a, b=b)
def _support(self, **kwargs):
return mp.zero, mp.inf
def _logpdf(self, x, a, b):
return (a - mp.one)*mp.log(x) - (a + b)*mp.log1p(x) - mp.log(mp.beta(a, b))
def _pdf(self, x, a, b):
return mp.exp(self._logpdf(x=x, a=a, b=b))
def _sf(self, x, a, b):
return 1.0 - mp.betainc(a, b, 0, x/(1+x), regularized=True)
|
BetaPrime
|
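The log-density above can be sanity-checked numerically against scipy.stats.betaprime; the sketch assumes SciPy is installed and uses double precision rather than mpmath:

import math

from scipy import stats

a, b, x = 2.0, 3.0, 1.5

# Same formula as _logpdf, written with the standard library.
log_beta = math.lgamma(a) + math.lgamma(b) - math.lgamma(a + b)
logpdf = (a - 1) * math.log(x) - (a + b) * math.log1p(x) - log_beta
print(math.exp(logpdf))
print(stats.betaprime(a, b).pdf(x))  # should agree to double precision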
python
|
pytorch__pytorch
|
test/quantization/eager/test_numeric_suite_eager.py
|
{
"start": 1805,
"end": 2607
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mycat = nnq.FloatFunctional()
self.myadd = nnq.FloatFunctional()
self.mymul = nnq.FloatFunctional()
self.myadd_relu = nnq.FloatFunctional()
self.my_scalar_add = nnq.FloatFunctional()
self.my_scalar_mul = nnq.FloatFunctional()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.mycat.cat([x, x, x])
x = self.myadd.add(x, x)
x = self.mymul.mul(x, x)
x = self.myadd_relu.add_relu(x, x)
w = self.my_scalar_add.add_scalar(x, -0.5)
w = self.my_scalar_mul.mul_scalar(w, 0.5)
w = self.dequant(w)
return w
|
ModelWithFunctionals
|
python
|
astropy__astropy
|
astropy/convolution/kernels.py
|
{
"start": 7741,
"end": 10019
}
|
class ____(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
and can produce artifacts when applied repeatedly to the same data.
The generated kernel is normalized so that it integrates to 1.
By default, the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : {'linear_interp', 'center', 'oversample', 'integrate'}, optional
One of the following discretization modes:
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, RickerWavelet2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1.0 / width**2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs["mode"] = "linear_interp"
super().__init__(**kwargs)
self.normalize()
|
Box2DKernel
|
python
|
mlflow__mlflow
|
dev/clint/src/clint/rules/unknown_mlflow_function.py
|
{
"start": 36,
"end": 356
}
|
class ____(Rule):
def __init__(self, function_name: str) -> None:
self.function_name = function_name
def _message(self) -> str:
return (
f"Unknown MLflow function: `{self.function_name}`. "
"This function may not exist or could be misspelled."
)
|
UnknownMlflowFunction
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/scene/camera/_projection.py
|
{
"start": 235,
"end": 2556
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene.camera"
_path_str = "layout.scene.camera.projection"
_valid_props = {"type"}
@property
def type(self):
"""
Sets the projection type. The projection type could be either
"perspective" or "orthographic". The default is "perspective".
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perspective', 'orthographic']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def _prop_descriptions(self):
return """\
type
Sets the projection type. The projection type could be
either "perspective" or "orthographic". The default is
"perspective".
"""
def __init__(self, arg=None, type=None, **kwargs):
"""
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.c
amera.Projection`
type
Sets the projection type. The projection type could be
either "perspective" or "orthographic". The default is
"perspective".
Returns
-------
Projection
"""
super().__init__("projection")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.camera.Projection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.camera.Projection`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("type", arg, type)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Projection
|
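In practice the projection type is usually set through a figure's layout; a minimal example following standard plotly.graph_objects usage (assumes plotly is installed):

import plotly.graph_objects as go

fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1], mode="markers"))
fig.update_layout(scene=dict(camera=dict(projection=dict(type="orthographic"))))
print(fig.layout.scene.camera.projection.type)  # orthographic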
python
|
readthedocs__readthedocs.org
|
readthedocs/core/permissions.py
|
{
"start": 6554,
"end": 6642
}
|
class ____(SettingsOverrideObject):
_default_class = AdminPermissionBase
|
AdminPermission
|
python
|
pytorch__pytorch
|
torch/fx/experimental/migrate_gradual_types/constraint.py
|
{
"start": 13407,
"end": 14300
}
|
class ____(Constraint):
def __init__(self, res1, res2, input1, input2):
"""
:param res1: resulting tensor 1
:param res2: resulting tensor 2
:param input1: tensor variable 1
:param input2: tensor variable 2
"""
self.res1 = res1
self.res2 = res2
self.input1 = input1
self.input2 = input2
def __eq__(self, other):
if isinstance(other, ApplyBroadcasting):
return (
self.res1 == other.res1
and self.res2 == other.res2
and self.input1 == other.input1
and self.input2 == other.input2
)
else:
return False
def __repr__(self):
return (
f"{self.res1}, {self.res2} ="
f" apply-broadcasting({self.input1},"
f" {self.input2})"
)
|
ApplyBroadcasting
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-gcp/dagster_gcp/gcs/gcs_fake_resource.py
|
{
"start": 2927,
"end": 3036
}
|
class ____:
@cached_method
def get_client(self):
return FakeGCSClient()
|
FakeConfigurableGCSClient
|
python
|
numpy__numpy
|
numpy/_core/tests/test_getlimits.py
|
{
"start": 3285,
"end": 5219
}
|
class ____:
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
def test_finfo_repr(self):
expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\
" max=3.4028235e+38, dtype=float32)"
assert_equal(repr(np.finfo(np.float32)), expected)
def test_instances():
# Test the finfo and iinfo results on numeric instances agree with
# the results on the corresponding types
for c in [int, np.int16, np.int32, np.int64]:
class_iinfo = iinfo(c)
instance_iinfo = iinfo(c(12))
assert_iinfo_equal(class_iinfo, instance_iinfo)
for c in [float, np.float16, np.float32, np.float64]:
class_finfo = finfo(c)
instance_finfo = finfo(c(1.2))
assert_finfo_equal(class_finfo, instance_finfo)
with pytest.raises(ValueError):
iinfo(10.)
with pytest.raises(ValueError):
iinfo('hi')
with pytest.raises(ValueError):
finfo(np.int64(1))
def test_subnormal_warning():
"""Test that the subnormal is zero warning is not being raised."""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Test for common float types
for dtype in [np.float16, np.float32, np.float64]:
f = finfo(dtype)
_ = f.smallest_subnormal
# Also test longdouble
with np.errstate(all='ignore'):
fld = finfo(np.longdouble)
_ = fld.smallest_subnormal
# Check no warnings were raised
assert len(w) == 0
def test_plausible_finfo():
# Assert that finfo returns reasonable results for all types
for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']:
info = np.finfo(ftype)
assert_(info.nmant > 1)
assert_(info.minexp < -1)
assert_(info.maxexp > 1)
|
TestRepr
|
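A quick interactive check of the machine-limit objects the tests above exercise (assumes NumPy is installed):

import numpy as np

print(repr(np.iinfo(np.int16)))  # iinfo(min=-32768, max=32767, dtype=int16)
print(np.finfo(np.float32).eps)  # 1.1920929e-07
print(np.finfo(np.float64).max)  # 1.7976931348623157e+308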