| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
| python | astropy__astropy | astropy/extern/configobj/configobj.py | {"start": 4845, "end": 5171} |
class ____(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
| ConfigObjError |
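A brief usage sketch for the sample above; this assumes the published `configobj` package, where the unmasked class is exported as `ConfigObjError`, and the malformed input is illustrative:

```python
from configobj import ConfigObj, ConfigObjError

bad_lines = ["[section", "key = value"]  # unclosed section header
try:
    ConfigObj(bad_lines)
except ConfigObjError as exc:
    # The attributes set in __init__ above identify the offending input.
    print(f"parse failed at line {exc.line_number}: {exc.line!r}")
```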
| python | pytorch__pytorch | torch/_inductor/cache.py | {"start": 898, "end": 1015} |
class ____(ValueError):
"""
Exception raised for errors encountered during cache operations.
"""
| CacheError |
| python | redis__redis-py | tests/conftest.py | {"start": 1320, "end": 22749} |
class ____(argparse.Action):
def __init__(
self,
option_strings,
dest,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None,
):
_option_strings = []
for option_string in option_strings:
_option_strings.append(option_string)
if option_string.startswith("--"):
option_string = "--no-" + option_string[2:]
_option_strings.append(option_string)
if help is not None and default is not None:
help += f" (default: {default})"
super().__init__(
option_strings=_option_strings,
dest=dest,
nargs=0,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar,
)
def __call__(self, parser, namespace, values, option_string=None):
if option_string in self.option_strings:
setattr(namespace, self.dest, not option_string.startswith("--no-"))
def format_usage(self):
return " | ".join(self.option_strings)
@pytest.fixture(scope="session", autouse=True)
def enable_tracemalloc():
"""
Enable tracemalloc while tests are being executed.
"""
try:
import tracemalloc
tracemalloc.start()
yield
tracemalloc.stop()
except ImportError:
yield
def pytest_addoption(parser):
parser.addoption(
"--redis-url",
default=default_redis_url,
action="store",
help="Redis connection string, defaults to `%(default)s`",
)
parser.addoption(
"--redis-mod-url",
default=default_redismod_url,
action="store",
help="Redis with modules connection string, defaults to `%(default)s`",
)
parser.addoption(
"--protocol",
default=default_protocol,
action="store",
help="Protocol version, defaults to `%(default)s`",
)
parser.addoption(
"--redis-ssl-url",
default=default_redis_ssl_url,
action="store",
help="Redis SSL connection string, defaults to `%(default)s`",
)
parser.addoption(
"--redis-cluster-nodes",
default=default_cluster_nodes,
action="store",
help="The number of cluster nodes that need to be "
"available before the test can start,"
" defaults to `%(default)s`",
)
parser.addoption(
"--uvloop", action=BooleanOptionalAction, help="Run tests with uvloop"
)
parser.addoption(
"--sentinels",
action="store",
default="localhost:26379,localhost:26380,localhost:26381",
help="Comma-separated list of sentinel IPs and ports",
)
parser.addoption(
"--master-service",
action="store",
default="redis-py-test",
help="Name of the Redis master service that the sentinels are monitoring",
)
parser.addoption(
"--endpoint-name",
action="store",
default=None,
help="Name of the Redis endpoint the tests should be executed on",
)
def _get_info(redis_url):
client = redis.Redis.from_url(redis_url)
info = client.info()
try:
client.execute_command("DPING")
info["enterprise"] = True
except redis.ResponseError:
info["enterprise"] = False
client.connection_pool.disconnect()
return info
def pytest_sessionstart(session):
# during test discovery, e.g. with VS Code, we may not
# have a server running.
protocol = session.config.getoption("--protocol")
REDIS_INFO["resp_version"] = int(protocol) if protocol else None
redis_url = session.config.getoption("--redis-url")
try:
info = _get_info(redis_url)
version = info["redis_version"]
arch_bits = info["arch_bits"]
cluster_enabled = info["cluster_enabled"]
enterprise = info["enterprise"]
except redis.ConnectionError:
# provide optimistic defaults
info = {}
version = "10.0.0"
arch_bits = 64
cluster_enabled = False
enterprise = False
REDIS_INFO["version"] = version
REDIS_INFO["arch_bits"] = arch_bits
REDIS_INFO["cluster_enabled"] = cluster_enabled
REDIS_INFO["tls_cert_subdir"] = "cluster" if cluster_enabled else "standalone"
REDIS_INFO["enterprise"] = enterprise
# store REDIS_INFO in config so that it is available from "condition strings"
session.config.REDIS_INFO = REDIS_INFO
# module info
stack_url = session.config.getoption("--redis-mod-url")
try:
stack_info = _get_info(stack_url)
REDIS_INFO["modules"] = stack_info["modules"]
except (KeyError, redis.exceptions.ConnectionError):
pass
if cluster_enabled:
cluster_nodes = session.config.getoption("--redis-cluster-nodes")
wait_for_cluster_creation(redis_url, cluster_nodes)
use_uvloop = session.config.getoption("--uvloop")
if use_uvloop:
try:
import uvloop
uvloop.install()
except ImportError as e:
raise RuntimeError(
"Can not import uvloop, make sure it is installed"
) from e
def wait_for_cluster_creation(redis_url, cluster_nodes, timeout=60):
"""
Waits for the cluster creation to complete.
As soon as all :cluster_nodes: nodes become available, the cluster will be
considered ready.
:param redis_url: the cluster's url, e.g. redis://localhost:16379/0
:param cluster_nodes: The number of nodes in the cluster
:param timeout: the amount of time to wait (in seconds)
"""
now = time.monotonic()
end_time = now + timeout
client = None
print(f"Waiting for {cluster_nodes} cluster nodes to become available")
while now < end_time:
try:
client = redis.RedisCluster.from_url(redis_url)
if len(client.get_nodes()) == int(cluster_nodes):
print("All nodes are available!")
break
except RedisClusterException:
pass
time.sleep(1)
now = time.monotonic()
if now >= end_time:
available_nodes = 0 if client is None else len(client.get_nodes())
raise RedisClusterException(
f"The cluster did not become available after {timeout} seconds. "
f"Only {available_nodes} nodes out of {cluster_nodes} are available"
)
def skip_if_server_version_lt(min_version: str) -> _TestDecorator:
redis_version = REDIS_INFO.get("version", "0")
check = Version(redis_version) < Version(min_version)
return pytest.mark.skipif(check, reason=f"Redis version required >= {min_version}")
def skip_if_server_version_gte(min_version: str) -> _TestDecorator:
redis_version = REDIS_INFO.get("version", "0")
check = Version(redis_version) >= Version(min_version)
return pytest.mark.skipif(check, reason=f"Redis version required < {min_version}")
def skip_unless_arch_bits(arch_bits: int) -> _TestDecorator:
return pytest.mark.skipif(
REDIS_INFO.get("arch_bits", "") != arch_bits,
reason=f"server is not {arch_bits}-bit",
)
def skip_ifmodversion_lt(min_version: str, module_name: str):
try:
modules = REDIS_INFO["modules"]
except KeyError:
return pytest.mark.skipif(True, reason="Redis server does not have modules")
if modules == []:
return pytest.mark.skipif(True, reason="No redis modules found")
for j in modules:
if module_name == j.get("name"):
version = j.get("ver")
mv = int(
"".join(["%02d" % int(segment) for segment in min_version.split(".")])
)
check = version < mv
return pytest.mark.skipif(check, reason="Redis module version")
raise AttributeError(f"No redis module named {module_name}")
def skip_if_redis_enterprise() -> _TestDecorator:
check = REDIS_INFO.get("enterprise", False) is True
return pytest.mark.skipif(check, reason="Redis enterprise")
def skip_ifnot_redis_enterprise() -> _TestDecorator:
check = REDIS_INFO.get("enterprise", False) is False
return pytest.mark.skipif(check, reason="Not running in redis enterprise")
def skip_if_nocryptography() -> _TestDecorator:
# try:
# import cryptography # noqa
#
# return pytest.mark.skipif(False, reason="Cryptography dependency found")
# except ImportError:
# TODO: Because JWT library depends on cryptography,
# now it's always true and tests should be fixed
return pytest.mark.skipif(True, reason="No cryptography dependency")
def skip_if_cryptography() -> _TestDecorator:
try:
import cryptography # noqa
return pytest.mark.skipif(True, reason="Cryptography dependency found")
except ImportError:
return pytest.mark.skipif(False, reason="No cryptography dependency")
def skip_if_resp_version(resp_version) -> _TestDecorator:
check = REDIS_INFO.get("resp_version", None) == resp_version
return pytest.mark.skipif(check, reason=f"RESP version required != {resp_version}")
def skip_if_hiredis_parser() -> _TestDecorator:
try:
import hiredis # noqa
return pytest.mark.skipif(True, reason="hiredis dependency found")
except ImportError:
return pytest.mark.skipif(False, reason="No hiredis dependency")
def _get_client(
cls, request, single_connection_client=True, flushdb=True, from_url=None, **kwargs
):
"""
Helper for fixtures or tests that need a Redis client
Uses the "--redis-url" command line argument for connection info. Unlike
ConnectionPool.from_url, keyword arguments to this function override
values specified in the URL.
"""
if from_url is None:
redis_url = request.config.getoption("--redis-url")
else:
redis_url = from_url
redis_tls_url = request.config.getoption("--redis-ssl-url")
if "protocol" not in redis_url and kwargs.get("protocol") is None:
kwargs["protocol"] = request.config.getoption("--protocol")
cluster_mode = REDIS_INFO["cluster_enabled"]
ssl = kwargs.pop("ssl", False)
if not cluster_mode:
url_options = parse_url(redis_url)
connection_class = Connection
if ssl:
connection_class = SSLConnection
kwargs["ssl_certfile"], kwargs["ssl_keyfile"], kwargs["ssl_ca_certs"] = (
get_tls_certificates()
)
kwargs["ssl_cert_reqs"] = "required"
kwargs["port"] = urlparse(redis_tls_url).port
kwargs["connection_class"] = connection_class
url_options.update(kwargs)
pool = redis.ConnectionPool(**url_options)
client = cls(connection_pool=pool)
else:
client = redis.RedisCluster.from_url(redis_url, **kwargs)
single_connection_client = False
if single_connection_client:
client = client.client()
if request:
def teardown():
if not cluster_mode:
if flushdb:
try:
client.flushdb()
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
client.flushdb()
client.close()
client.connection_pool.disconnect()
else:
cluster_teardown(client, flushdb)
request.addfinalizer(teardown)
return client
def cluster_teardown(client, flushdb):
if flushdb:
try:
client.flushdb(target_nodes="primaries")
except redis.ConnectionError:
# handle cases where a test disconnected a client
# just manually retry the flushdb
client.flushdb(target_nodes="primaries")
client.close()
client.disconnect_connection_pools()
@pytest.fixture()
def r(request):
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def stack_url(request):
return request.config.getoption("--redis-mod-url", default=default_redismod_url)
@pytest.fixture()
def stack_r(request, stack_url):
with _get_client(redis.Redis, request, from_url=stack_url) as client:
yield client
@pytest.fixture()
def decoded_r(request):
with _get_client(redis.Redis, request, decode_responses=True) as client:
yield client
@pytest.fixture()
def r_timeout(request):
with _get_client(redis.Redis, request, socket_timeout=1) as client:
yield client
@pytest.fixture()
def r2(request):
"A second client for tests that need multiple"
with _get_client(redis.Redis, request) as client:
yield client
@pytest.fixture()
def sslclient(request):
with _get_client(redis.Redis, request, ssl=True) as client:
yield client
@pytest.fixture()
def sentinel_setup(request):
sentinel_ips = request.config.getoption("--sentinels")
sentinel_endpoints = [
(ip.strip(), int(port.strip()))
for ip, port in (endpoint.split(":") for endpoint in sentinel_ips.split(","))
]
kwargs = request.param.get("kwargs", {}) if hasattr(request, "param") else {}
cache = request.param.get("cache", None)
cache_config = request.param.get("cache_config", None)
force_master_ip = request.param.get("force_master_ip", None)
decode_responses = request.param.get("decode_responses", False)
sentinel = Sentinel(
sentinel_endpoints,
force_master_ip=force_master_ip,
socket_timeout=0.1,
cache=cache,
cache_config=cache_config,
protocol=3,
decode_responses=decode_responses,
**kwargs,
)
yield sentinel
for s in sentinel.sentinels:
s.close()
@pytest.fixture()
def master(request, sentinel_setup):
master_service = request.config.getoption("--master-service")
master = sentinel_setup.master_for(master_service)
yield master
master.close()
def _gen_cluster_mock_resp(r, response):
connection = Mock(spec=Connection)
connection.retry = Retry(NoBackoff(), 0)
connection.read_response.return_value = response
with mock.patch.object(r, "connection", connection):
yield r
@pytest.fixture()
def mock_cluster_resp_ok(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
yield from _gen_cluster_mock_resp(r, "OK")
@pytest.fixture()
def mock_cluster_resp_int(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
yield from _gen_cluster_mock_resp(r, 2)
@pytest.fixture()
def mock_cluster_resp_info(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"cluster_state:ok\r\ncluster_slots_assigned:16384\r\n"
"cluster_slots_ok:16384\r\ncluster_slots_pfail:0\r\n"
"cluster_slots_fail:0\r\ncluster_known_nodes:7\r\n"
"cluster_size:3\r\ncluster_current_epoch:7\r\n"
"cluster_my_epoch:2\r\ncluster_stats_messages_sent:170262\r\n"
"cluster_stats_messages_received:105653\r\n"
)
yield from _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_nodes(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"c8253bae761cb1ecb2b61857d85dfe455a0fec8b 172.17.0.7:7006 "
"slave aa90da731f673a99617dfe930306549a09f83a6b 0 "
"1447836263059 5 connected\n"
"9bd595fe4821a0e8d6b99d70faa660638a7612b3 172.17.0.7:7008 "
"master - 0 1447836264065 0 connected\n"
"aa90da731f673a99617dfe930306549a09f83a6b 172.17.0.7:7003 "
"myself,master - 0 0 2 connected 5461-10922\n"
"1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836262556 3 connected\n"
"4ad9a12e63e8f0207025eeba2354bcf4c85e5b22 172.17.0.7:7005 "
"master - 0 1447836262555 7 connected 0-5460\n"
"19efe5a631f3296fdf21a5441680f893e8cc96ec 172.17.0.7:7004 "
"master - 0 1447836263562 3 connected 10923-16383\n"
"fbb23ed8cfa23f17eaf27ff7d0c410492a1093d6 172.17.0.7:7002 "
"master,fail - 1447829446956 1447829444948 1 disconnected\n"
)
yield from _gen_cluster_mock_resp(r, response)
@pytest.fixture()
def mock_cluster_resp_slaves(request, **kwargs):
r = _get_client(redis.Redis, request, **kwargs)
response = (
"['1df047e5a594f945d82fc140be97a1452bcbf93e 172.17.0.7:7007 "
"slave 19efe5a631f3296fdf21a5441680f893e8cc96ec 0 "
"1447836789290 3 connected']"
)
yield from _gen_cluster_mock_resp(r, response)
@pytest.fixture(scope="session")
def master_host(request):
url = request.config.getoption("--redis-url")
parts = urlparse(url)
return parts.hostname, (parts.port or 6379)
@pytest.fixture()
def cache_conf() -> CacheConfig:
return CacheConfig(max_size=100, eviction_policy=EvictionPolicy.LRU)
@pytest.fixture()
def mock_cache_factory() -> CacheFactoryInterface:
mock_factory = Mock(spec=CacheFactoryInterface)
return mock_factory
@pytest.fixture()
def mock_cache() -> CacheInterface:
mock_cache = Mock(spec=CacheInterface)
return mock_cache
@pytest.fixture()
def mock_connection() -> ConnectionInterface:
mock_connection = Mock(spec=ConnectionInterface)
return mock_connection
@pytest.fixture()
def mock_ed() -> EventDispatcherInterface:
mock_ed = Mock(spec=EventDispatcherInterface)
return mock_ed
@pytest.fixture()
def cache_key(request) -> CacheKey:
command = request.param.get("command")
keys = request.param.get("redis_keys")
return CacheKey(command, keys)
def mock_identity_provider() -> IdentityProviderInterface:
jwt = pytest.importorskip("jwt")
mock_provider = Mock(spec=IdentityProviderInterface)
token = {"exp": datetime.now(timezone.utc).timestamp() + 3600, "oid": "username"}
encoded = jwt.encode(token, "secret", algorithm="HS256")
jwt_token = JWToken(encoded)
mock_provider.request_token.return_value = jwt_token
return mock_provider
def get_credential_provider(request) -> CredentialProvider:
cred_provider_class = request.param.get("cred_provider_class")
cred_provider_kwargs = request.param.get("cred_provider_kwargs", {})
# Since we can't import EntraIdCredentialsProvider in this module,
# we'll just check the class name.
if cred_provider_class.__name__ != "EntraIdCredentialsProvider":
return cred_provider_class(**cred_provider_kwargs)
from tests.entraid_utils import get_entra_id_credentials_provider
return get_entra_id_credentials_provider(request, cred_provider_kwargs)
@pytest.fixture()
def credential_provider(request) -> CredentialProvider:
return get_credential_provider(request)
def get_endpoint(endpoint_name: str):
endpoints_config = os.getenv("REDIS_ENDPOINTS_CONFIG_PATH", None)
if not (endpoints_config and os.path.exists(endpoints_config)):
raise FileNotFoundError(f"Endpoints config file not found: {endpoints_config}")
try:
with open(endpoints_config, "r") as f:
data = json.load(f)
db = data[endpoint_name]
return db["endpoints"][0]
except Exception as e:
raise ValueError(
f"Failed to load endpoints config file: {endpoints_config}"
) from e
def wait_for_command(client, monitor, command, key=None):
# issue a command with a key name that's local to this process.
# if we find a command with our key before the command we're waiting
# for, something went wrong
if key is None:
# generate key
redis_version = REDIS_INFO["version"]
if Version(redis_version) >= Version("5.0.0"):
id_str = str(client.client_id())
else:
id_str = f"{random.randrange(2**32):08x}"
key = f"__REDIS-PY-{id_str}__"
client.get(key)
while True:
monitor_response = monitor.next_command()
if command in monitor_response["command"]:
return monitor_response
if key in monitor_response["command"]:
return None
def is_resp2_connection(r):
if isinstance(r, redis.Redis) or isinstance(r, redis.asyncio.Redis):
protocol = r.connection_pool.connection_kwargs.get("protocol")
elif isinstance(r, redis.cluster.AbstractRedisCluster):
protocol = r.nodes_manager.connection_kwargs.get("protocol")
return protocol in ["2", 2, None]
def get_protocol_version(r):
if isinstance(r, redis.Redis) or isinstance(r, redis.asyncio.Redis):
return r.connection_pool.connection_kwargs.get("protocol")
elif isinstance(r, redis.cluster.AbstractRedisCluster):
return r.nodes_manager.connection_kwargs.get("protocol")
def assert_resp_response(r, response, resp2_expected, resp3_expected):
protocol = get_protocol_version(r)
if protocol in [2, "2", None]:
assert response == resp2_expected
else:
assert response == resp3_expected
def assert_resp_response_in(r, response, resp2_expected, resp3_expected):
protocol = get_protocol_version(r)
if protocol in [2, "2", None]:
assert response in resp2_expected
else:
assert response in resp3_expected
| BooleanOptionalAction |
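The masked class above reimplements the `--flag/--no-flag` pattern that `argparse.BooleanOptionalAction` has provided in the standard library since Python 3.9; a minimal standalone sketch of that behavior (option name borrowed from the `--uvloop` option above):

```python
import argparse

parser = argparse.ArgumentParser()
# A single declaration registers both --uvloop and --no-uvloop.
parser.add_argument(
    "--uvloop",
    action=argparse.BooleanOptionalAction,
    default=False,
    help="Run tests with uvloop",
)

print(parser.parse_args(["--uvloop"]).uvloop)     # True
print(parser.parse_args(["--no-uvloop"]).uvloop)  # False
print(parser.parse_args([]).uvloop)               # False (the default)
```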
| python | aimacode__aima-python | logic.py | {"start": 66401, "end": 74057} |
class ____(KB):
"""A knowledge base consisting of first-order definite clauses.
>>> kb0 = FolKB([expr('Farmer(Mac)'), expr('Rabbit(Pete)'),
... expr('(Rabbit(r) & Farmer(f)) ==> Hates(f, r)')])
>>> kb0.tell(expr('Rabbit(Flopsie)'))
>>> kb0.retract(expr('Rabbit(Pete)'))
>>> kb0.ask(expr('Hates(Mac, x)'))[x]
Flopsie
>>> kb0.ask(expr('Wife(Pete, x)'))
False
"""
def __init__(self, clauses=None):
super().__init__()
self.clauses = [] # inefficient: no indexing
if clauses:
for clause in clauses:
self.tell(clause)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
else:
raise Exception('Not a definite clause: {}'.format(sentence))
def ask_generator(self, query):
return fol_bc_ask(self, query)
def retract(self, sentence):
self.clauses.remove(sentence)
def fetch_rules_for_goal(self, goal):
return self.clauses
def fol_fc_ask(kb, alpha):
"""
[Figure 9.3]
A simple forward-chaining algorithm.
"""
# TODO: improve efficiency
kb_consts = list({c for clause in kb.clauses for c in constant_symbols(clause)})
def enum_subst(p):
query_vars = list({v for clause in p for v in variables(clause)})
for assignment_list in itertools.product(kb_consts, repeat=len(query_vars)):
theta = {x: y for x, y in zip(query_vars, assignment_list)}
yield theta
# check if we can answer without new inferences
for q in kb.clauses:
phi = unify_mm(q, alpha)
if phi is not None:
yield phi
while True:
new = []
for rule in kb.clauses:
p, q = parse_definite_clause(rule)
for theta in enum_subst(p):
if set(subst(theta, p)).issubset(set(kb.clauses)):
q_ = subst(theta, q)
if all([unify_mm(x, q_) is None for x in kb.clauses + new]):
new.append(q_)
phi = unify_mm(q_, alpha)
if phi is not None:
yield phi
if not new:
break
for clause in new:
kb.tell(clause)
return None
def fol_bc_ask(kb, query):
"""
[Figure 9.6]
A simple backward-chaining algorithm for first-order logic.
KB should be an instance of FolKB, and query an atomic sentence.
"""
return fol_bc_or(kb, query, {})
def fol_bc_or(kb, goal, theta):
for rule in kb.fetch_rules_for_goal(goal):
lhs, rhs = parse_definite_clause(standardize_variables(rule))
for theta1 in fol_bc_and(kb, lhs, unify_mm(rhs, goal, theta)):
yield theta1
def fol_bc_and(kb, goals, theta):
if theta is None:
pass
elif not goals:
yield theta
else:
first, rest = goals[0], goals[1:]
for theta1 in fol_bc_or(kb, subst(theta, first), theta):
for theta2 in fol_bc_and(kb, rest, theta1):
yield theta2
# A simple KB that defines the relevant conditions of the Wumpus World as in Figure 7.4.
# See Sec. 7.4.3
wumpus_kb = PropKB()
P11, P12, P21, P22, P31, B11, B21 = expr('P11, P12, P21, P22, P31, B11, B21')
wumpus_kb.tell(~P11)
wumpus_kb.tell(B11 | '<=>' | (P12 | P21))
wumpus_kb.tell(B21 | '<=>' | (P11 | P22 | P31))
wumpus_kb.tell(~B11)
wumpus_kb.tell(B21)
test_kb = FolKB(map(expr, ['Farmer(Mac)',
'Rabbit(Pete)',
'Mother(MrsMac, Mac)',
'Mother(MrsRabbit, Pete)',
'(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
'(Mother(m, c)) ==> Loves(m, c)',
'(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)',
'(Farmer(f)) ==> Human(f)',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'(Mother(m, h) & Human(h)) ==> Human(m)']))
crime_kb = FolKB(map(expr, ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)',
'Owns(Nono, M1)',
'Missile(M1)',
'(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)',
'Missile(x) ==> Weapon(x)',
'Enemy(x, America) ==> Hostile(x)',
'American(West)',
'Enemy(Nono, America)']))
# ______________________________________________________________________________
# Example application (not in the book).
# You can use the Expr class to do symbolic differentiation. This used to be
# a part of AI; now it is considered a separate field, Symbolic Algebra.
def diff(y, x):
"""Return the symbolic derivative, dy/dx, as an Expr.
However, you probably want to simplify the results with simp.
>>> diff(x * x, x)
((x * 1) + (x * 1))
"""
if y == x:
return 1
elif not y.args:
return 0
else:
u, op, v = y.args[0], y.op, y.args[-1]
if op == '+':
return diff(u, x) + diff(v, x)
elif op == '-' and len(y.args) == 1:
return -diff(u, x)
elif op == '-':
return diff(u, x) - diff(v, x)
elif op == '*':
return u * diff(v, x) + v * diff(u, x)
elif op == '/':
return (v * diff(u, x) - u * diff(v, x)) / (v * v)
elif op == '**' and isnumber(x.op):
return v * u ** (v - 1) * diff(u, x)
elif op == '**':
return (v * u ** (v - 1) * diff(u, x) +
u ** v * Expr('log')(u) * diff(v, x))
elif op == 'log':
return diff(u, x) / u
else:
raise ValueError('Unknown op: {} in diff({}, {})'.format(op, y, x))
def simp(x):
"""Simplify the expression x."""
if isnumber(x) or not x.args:
return x
args = list(map(simp, x.args))
u, op, v = args[0], x.op, args[-1]
if op == '+':
if v == 0:
return u
if u == 0:
return v
if u == v:
return 2 * u
if u == -v or v == -u:
return 0
elif op == '-' and len(args) == 1:
if u.op == '-' and len(u.args) == 1:
return u.args[0] # --y ==> y
elif op == '-':
if v == 0:
return u
if u == 0:
return -v
if u == v:
return 0
if u == -v or v == -u:
return 0
elif op == '*':
if u == 0 or v == 0:
return 0
if u == 1:
return v
if v == 1:
return u
if u == v:
return u ** 2
elif op == '/':
if u == 0:
return 0
if v == 0:
return Expr('Undefined')
if u == v:
return 1
if u == -v or v == -u:
return 0
elif op == '**':
if u == 0:
return 0
if v == 0:
return 1
if u == 1:
return 1
if v == 1:
return u
elif op == 'log':
if u == 1:
return 0
else:
raise ValueError('Unknown op: ' + op)
# If we fall through to here, we can not simplify further
return Expr(op, *args)
def d(y, x):
"""Differentiate and then simplify.
>>> d(x * x - x, x)
((2 * x) - 1)
"""
return simp(diff(y, x))
| FolKB |
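A short query sketch for a knowledge base like the one above, assuming aima-python's `logic.py` (and the `expr` helper it re-exports) is importable and the masked class is `FolKB`, the row's target:

```python
from logic import FolKB, expr, fol_fc_ask

kb = FolKB(map(expr, [
    'Farmer(Mac)',
    'Rabbit(Pete)',
    '(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
]))

# Backward chaining (what kb.ask_generator uses) enumerates satisfying substitutions.
print(list(kb.ask_generator(expr('Hates(Mac, x)'))))

# Forward chaining saturates the KB and yields the same binding for x (Pete).
print(next(fol_fc_ask(kb, expr('Hates(Mac, x)'))))
```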
| python | conda__conda | conda/core/path_actions.py | {"start": 47595, "end": 51773} |
class ____(PathAction):
def __init__(
self,
source_full_path,
target_pkgs_dir,
target_extracted_dirname,
record_or_spec,
sha256,
size,
md5,
):
self.source_full_path = source_full_path
self.target_pkgs_dir = target_pkgs_dir
self.target_extracted_dirname = target_extracted_dirname
self.hold_path = self.target_full_path + CONDA_TEMP_EXTENSION
self.record_or_spec = record_or_spec
self.sha256 = sha256
self.size = size
self.md5 = md5
def verify(self):
self._verified = True
def execute(self, progress_update_callback=None):
# I hate inline imports, but I guess it's ok since we're importing from conda.core.
# The alternative is passing the classes to ExtractPackageAction.__init__.
from .package_cache_data import PackageCacheData
log.log(
TRACE, "extracting %s => %s", self.source_full_path, self.target_full_path
)
if lexists(self.target_full_path):
rm_rf(self.target_full_path)
extract_tarball(
self.source_full_path,
self.target_full_path,
progress_update_callback=progress_update_callback,
)
try:
raw_index_json = read_index_json(self.target_full_path)
except (OSError, json.JSONDecodeError, FileNotFoundError):
# At this point, we can assume the package tarball is bad.
# Remove everything and move on.
print(
f"ERROR: Encountered corrupt package tarball at {self.source_full_path}. Conda has "
"left it in place. Please report this to the maintainers "
"of the package."
)
sys.exit(1)
if isinstance(self.record_or_spec, MatchSpec):
url = self.record_or_spec.get_raw_value("url")
if not url:
raise ValueError("URL cannot be empty.")
channel = (
Channel(url)
if has_platform(url, context.known_subdirs)
else Channel(None)
)
fn = basename(url)
sha256 = self.sha256 or compute_sum(self.source_full_path, "sha256")
size = getsize(self.source_full_path)
if self.size is not None and size != self.size:
raise RuntimeError(
f"Computed size ({size}) does not match expected value {self.size}"
)
md5 = self.md5 or compute_sum(self.source_full_path, "md5")
repodata_record = PackageRecord.from_objects(
raw_index_json,
url=url,
channel=channel,
fn=fn,
sha256=sha256,
size=size,
md5=md5,
)
else:
repodata_record = PackageRecord.from_objects(
self.record_or_spec, raw_index_json
)
repodata_record_path = join(
self.target_full_path, "info", "repodata_record.json"
)
write_as_json_to_file(repodata_record_path, repodata_record)
target_package_cache = PackageCacheData(self.target_pkgs_dir)
package_cache_record = PackageCacheRecord.from_objects(
repodata_record,
package_tarball_full_path=self.source_full_path,
extracted_package_dir=self.target_full_path,
)
target_package_cache.insert(package_cache_record)
def reverse(self):
rm_rf(self.target_full_path)
if lexists(self.hold_path):
log.log(TRACE, "moving %s => %s", self.hold_path, self.target_full_path)
rm_rf(self.target_full_path)
backoff_rename(self.hold_path, self.target_full_path)
def cleanup(self):
rm_rf(self.hold_path)
@property
def target_full_path(self):
return join(self.target_pkgs_dir, self.target_extracted_dirname)
def __str__(self):
return f"ExtractPackageAction<source_full_path={self.source_full_path!r}, target_full_path={self.target_full_path!r}>"
| ExtractPackageAction |
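The size and checksum cross-checks in `execute` follow a common integrity-verification pattern; a standalone sketch of the same idea (the function name and arguments are illustrative, not conda API):

```python
import hashlib
import os


def verify_artifact(path, expected_sha256, expected_size=None):
    """Return True if the file matches the expected size and SHA-256 digest."""
    if expected_size is not None and os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        # Read in 1 MiB chunks so large tarballs are not loaded into memory at once.
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256
```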
| python | streamlit__streamlit | lib/tests/streamlit/elements/element_policies_test.py | {"start": 1222, "end": 1279} |
class ____(unittest.TestCase):
pass
| ElementPoliciesTest |
| python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {"start": 82419, "end": 85009} |
class ____(fixtures.TestBase):
def test_str(self):
eq_(
re.compile(r"[\n\s]+", re.M).sub(
" ",
str(
CacheKey(
key=((1, (2, 7, 4), 5),), bindparams=[], params={}
)
),
),
"CacheKey(key=( ( 1, ( 2, 7, 4, ), 5, ), ),)",
)
def test_nested_tuple_difference(self):
"""Test difference detection in nested tuples"""
k1 = CacheKey(key=((1, (2, 3, 4), 5),), bindparams=[], params={})
k2 = CacheKey(key=((1, (2, 7, 4), 5),), bindparams=[], params={})
eq_(list(k1._whats_different(k2)), ["key[0][1][1]: 3 != 7"])
def test_deeply_nested_tuple_difference(self):
"""Test difference detection in deeply nested tuples"""
k1 = CacheKey(
key=((1, (2, (3, 4, 5), 6), 7),), bindparams=[], params={}
)
k2 = CacheKey(
key=((1, (2, (3, 9, 5), 6), 7),), bindparams=[], params={}
)
eq_(list(k1._whats_different(k2)), ["key[0][1][1][1]: 4 != 9"])
def test_multiple_differences_nested(self):
"""Test detection of multiple differences in nested structure"""
k1 = CacheKey(key=((1, (2, 3), 4),), bindparams=[], params={})
k2 = CacheKey(key=((1, (5, 7), 4),), bindparams=[], params={})
eq_(
list(k1._whats_different(k2)),
["key[0][1][0]: 2 != 5", "key[0][1][1]: 3 != 7"],
)
def test_diff_method(self):
"""Test the _diff() method that returns a comma-separated string"""
k1 = CacheKey(key=((1, (2, 3)),), bindparams=[], params={})
k2 = CacheKey(key=((1, (5, 7)),), bindparams=[], params={})
eq_(k1._diff(k2), "key[0][1][0]: 2 != 5, key[0][1][1]: 3 != 7")
def test_with_string_differences(self):
"""Test detection of string differences"""
k1 = CacheKey(
key=(("name", ("x", "value")),), bindparams=[], params={}
)
k2 = CacheKey(
key=(("name", ("y", "value")),), bindparams=[], params={}
)
eq_(list(k1._whats_different(k2)), ["key[0][1][0]: x != y"])
def test_with_mixed_types(self):
"""Test detection of differences with mixed types"""
k1 = CacheKey(
key=(("id", 1, ("nested", 100)),), bindparams=[], params={}
)
k2 = CacheKey(
key=(("id", 1, ("nested", 200)),), bindparams=[], params={}
)
eq_(list(k1._whats_different(k2)), ["key[0][2][1]: 100 != 200"])
| TestCacheKeyUtil |
| python | pytorch__pytorch | torch/distributed/elastic/timer/file_based_local_timer.py | {"start": 3084, "end": 5907} |
class ____(TimerClient):
"""
Client side of ``FileTimerServer``. This client is meant to be used
on the same host that the ``FileTimerServer`` is running on and uses
pid to uniquely identify a worker.
This client uses a named_pipe to send timer requests to the
``FileTimerServer``. This client is a producer while the
``FileTimerServer`` is a consumer. Multiple clients can work with
the same ``FileTimerServer``.
Args:
file_path: str, the path of a FIFO special file. ``FileTimerServer``
must have created it by calling os.mkfifo().
signal: signal, the signal to use to kill the process. Using a
negative or zero signal will not kill the process.
"""
def __init__(
self,
file_path: str,
signal=(signal.SIGKILL if sys.platform != "win32" else signal.CTRL_C_EVENT), # type: ignore[attr-defined]
) -> None:
super().__init__()
self._file_path = file_path
self.signal = signal
@_retry(max_retries=10, sleep_time=0.1)
def _open_non_blocking(self) -> io.TextIOWrapper | None:
# The server may have crashed or may not have started yet.
# In that case, calling open() in blocking mode would block the client.
# To avoid this, open the file in non-blocking mode; an OSError will
# be raised if the server is not there.
fd = os.open(self._file_path, os.O_WRONLY | os.O_NONBLOCK)
return os.fdopen(fd, "wt")
def _send_request(self, request: FileTimerRequest) -> None:
try:
file = self._open_non_blocking()
except Exception as e:
raise BrokenPipeError(
"Could not send the FileTimerRequest because FileTimerServer is not available."
) from e
with file:
json_request = request.to_json()
# Writes of no more than select.PIPE_BUF bytes are guaranteed to be atomic.
if len(json_request) > select.PIPE_BUF:
raise RuntimeError(
f"FileTimerRequest larger than {select.PIPE_BUF} bytes "
f"is not supported: {json_request}"
)
file.write(json_request + "\n")
def acquire(self, scope_id: str, expiration_time: float) -> None:
self._send_request(
request=FileTimerRequest(
worker_pid=os.getpid(),
scope_id=scope_id,
expiration_time=expiration_time,
signal=self.signal,
),
)
def release(self, scope_id: str) -> None:
self._send_request(
request=FileTimerRequest(
worker_pid=os.getpid(), scope_id=scope_id, expiration_time=-1, signal=0
),
)
| FileTimerClient |
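A self-contained sketch of the non-blocking FIFO write that `_open_non_blocking` relies on: with `O_NONBLOCK`, opening the pipe for writing raises `OSError` (ENXIO) when no reader is attached instead of blocking (POSIX-only; the path and payload below are illustrative):

```python
import errno
import os

fifo_path = "/tmp/timer_pipe"  # the server side would normally create this
if not os.path.exists(fifo_path):
    os.mkfifo(fifo_path)

try:
    # Fail fast instead of waiting for a reader to open the other end.
    fd = os.open(fifo_path, os.O_WRONLY | os.O_NONBLOCK)
except OSError as e:
    if e.errno == errno.ENXIO:
        print("no reader attached to the pipe yet; retry later")
    else:
        raise
else:
    with os.fdopen(fd, "wt") as pipe:
        pipe.write('{"worker_pid": 1234, "scope_id": "step", "expiration_time": 60}\n')
```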
| python | pytorch__pytorch | torch/_library/effects.py | {"start": 66, "end": 170} |
class ____(Enum):
ORDERED = "Ordered"
from torch._library.utils import RegistrationHandle
| EffectType |
| python | huggingface__transformers | src/transformers/models/whisper/modeling_whisper.py | {"start": 9628, "end": 15464} |
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
layer_idx: Optional[int] = None,
config: Optional[WhisperConfig] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
if layer_idx is None and is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
# Scaling is susceptible to floating point arithmetic's imprecisions,
# which can lead to different results (this is model dependent;
# Whisper is one such case). We therefore keep the
# original order of scaling to follow the original implementation
# and enforce no scaling (1.0) in the attention call below.
query_states = self.q_proj(hidden_states) * self.scaling
query_states = query_states.view(*q_input_shape)
query_states = query_states.transpose(1, 2).contiguous()
# Check if an encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
if past_key_values is not None and isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
past_key_values.is_updated[self.layer_idx] = True
past_key_values = past_key_values.cross_attention_cache
else:
past_key_values = past_key_values.self_attention_cache
# use key_value_states if cross attention
current_states = key_value_states if key_value_states is not None else hidden_states
if is_cross_attention and past_key_values and is_updated:
# reuse k,v, cross_attentions
key_states = past_key_values.layers[self.layer_idx].keys
value_states = past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states).view(bsz, -1, self.num_heads, self.head_dim)
value_states = self.v_proj(current_states).view(bsz, -1, self.num_heads, self.head_dim)
key_states = key_states.transpose(1, 2).contiguous()
value_states = value_states.transpose(1, 2).contiguous()
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=1.0,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Whisper, MBART->WHISPER
| WhisperAttention |
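A minimal sketch of the projection and reshape bookkeeping the module performs, written against plain `torch` with illustrative dimensions; as in the class above, the scale is folded into the query and the attention call itself is unscaled:

```python
import torch

bsz, tgt_len, embed_dim, num_heads = 2, 7, 384, 6
head_dim = embed_dim // num_heads  # 64
x = torch.randn(bsz, tgt_len, embed_dim)

q_proj = torch.nn.Linear(embed_dim, embed_dim)
k_proj = torch.nn.Linear(embed_dim, embed_dim, bias=False)
v_proj = torch.nn.Linear(embed_dim, embed_dim)


def split_heads(t):
    # (bsz, seq, embed_dim) -> (bsz, num_heads, seq, head_dim)
    return t.view(bsz, -1, num_heads, head_dim).transpose(1, 2)


q = split_heads(q_proj(x) * head_dim**-0.5)  # scaling applied to the query up front
k, v = split_heads(k_proj(x)), split_heads(v_proj(x))

weights = torch.softmax(q @ k.transpose(-1, -2), dim=-1)  # (bsz, heads, tgt, tgt)
out = (weights @ v).transpose(1, 2).reshape(bsz, tgt_len, embed_dim)
print(out.shape)  # torch.Size([2, 7, 384])
```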
| python | fluentpython__example-code | 10-seq-hacking/vector_v4.py | {"start": 3135, "end": 5016} |
class ____:
typecode = 'd'
def __init__(self, components):
self._components = array(self.typecode, components)
def __iter__(self):
return iter(self._components)
def __repr__(self):
components = reprlib.repr(self._components)
components = components[components.find('['):-1]
return 'Vector({})'.format(components)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return (bytes([ord(self.typecode)]) +
bytes(self._components))
def __eq__(self, other):
return (len(self) == len(other) and
all(a == b for a, b in zip(self, other)))
def __hash__(self):
hashes = (hash(x) for x in self)
return functools.reduce(operator.xor, hashes, 0)
def __abs__(self):
return math.sqrt(sum(x * x for x in self))
def __bool__(self):
return bool(abs(self))
def __len__(self):
return len(self._components)
def __getitem__(self, index):
cls = type(self)
if isinstance(index, slice):
return cls(self._components[index])
elif isinstance(index, numbers.Integral):
return self._components[index]
else:
msg = '{cls.__name__} indices must be integers'
raise TypeError(msg.format(cls=cls))
shortcut_names = 'xyzt'
def __getattr__(self, name):
cls = type(self)
if len(name) == 1:
pos = cls.shortcut_names.find(name)
if 0 <= pos < len(self._components):
return self._components[pos]
msg = '{.__name__!r} object has no attribute {!r}'
raise AttributeError(msg.format(cls, name))
@classmethod
def frombytes(cls, octets):
typecode = chr(octets[0])
memv = memoryview(octets[1:]).cast(typecode)
return cls(memv)
| Vector |
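A usage sketch for the sample above, with `____` filled in as `Vector` (the row's target); note the excerpt omits the module-level imports it relies on (`array.array`, `reprlib`, `math`, `functools`, `operator`, `numbers`):

```python
v = Vector(range(5))
print(repr(v))                 # Vector([0.0, 1.0, 2.0, 3.0, 4.0])
print(abs(v))                  # Euclidean norm, sqrt(30) ~= 5.477
print(v[1:3])                  # slicing returns a new Vector: (1.0, 2.0)
print(v.x, v.y, v.z, v.t)      # 0.0 1.0 2.0 3.0 via the 'xyzt' shortcuts
print(Vector.frombytes(bytes(v)) == v)  # True: round-trip through __bytes__
```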
| python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {"start": 1184, "end": 2309} |
class ____(BaseMetricsTestCase):
def _make_sessions(
self,
num: int,
environment_name: str | None = None,
project: Project | None = None,
received: float | None = None,
):
if received is None:
received = time.time()
def make_session(i):
return dict(
distinct_id=uuid4().hex,
session_id=uuid4().hex,
org_id=project.organization_id if project else self.project.organization_id,
project_id=project.id if project else self.project.id,
status="ok",
seq=0,
release="foo@1.0.0",
environment=environment_name if environment_name else "prod",
retention_days=90,
duration=None,
errors=0,
# The line below is crucial to spread sessions throughout the time period.
started=received - i - 1,
received=received,
)
self.bulk_store_sessions([make_session(i) for i in range(num)])
| BaseEventFrequencyPercentTest |
| python | tensorflow__tensorflow | tensorflow/python/kernel_tests/math_ops/basic_gpu_test.py | {"start": 1365, "end": 3823} |
class ____(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
| GPUBinaryOpsTest |
| python | tensorflow__tensorflow | tensorflow/python/framework/function.py | {"start": 9885, "end": 23530} |
class ____(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
cached_definition: Same as definition. Needed to match AtomicFunction API.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
allowlisted_stateful_ops=None,
capture_resource_var_by_value=True,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
input_types: The function's argument types. Can be a tuple or list of
tf data types.
func_name: The function name. Defaults to None, in which case the name
is derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
allowlisted_stateful_ops: A set of ops that if stateful we ignore and
copy into the function body, when `capture_by_value` is True.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._allowlisted_stateful_ops = allowlisted_stateful_ops
if self._allowlisted_stateful_ops is None:
self._allowlisted_stateful_ops = set()
self._capture_resource_var_by_value = capture_resource_var_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._function_deleter = None
self._sub_functions = {} # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
# Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def cached_definition(self):
return self.definition
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
with self._c_func.get() as func:
c_api.TF_FunctionToFunctionDef(func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_c_function(func)
self._function_deleter = _DefinedFunctionDeleter(
fdef.signature.name)
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Returns the name of the gradient function."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
# Copy variable collections (by reference) from the parent graph such that
# name based variable sharing (e.g. via tf.make_template) works between the
# func graph and parent graph.
variable_keys = []
variable_keys.extend(ops.GraphKeys._VARIABLE_COLLECTIONS) # pylint: disable=protected-access
variable_keys.append(vs._VARSTORE_KEY) # pylint: disable=protected-access
parent_graph = ops.get_default_graph()
collections_ref = {
key: parent_graph.get_collection_ref(key) for key in variable_keys}
temp_graph = func_graph_from_py_func(
self._func,
self._arg_names,
self._arg_types,
self._func_name,
self._capture_by_value,
self._caller_device,
collections_ref=collections_ref,
allowlisted_stateful_ops=self._allowlisted_stateful_ops,
capture_resource_var_by_value=self._capture_resource_var_by_value)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
# FIXME(feyu): C API is always enabled now. The if-true branch never runs.
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
with temp_graph._c_graph.get() as c_graph:
c_func = c_api.TF_GraphToFunction_wrapper(
c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
[], # control_outputs
[], # control_output_names
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func, base_func_name)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op._is_stateful] # pylint: disable=protected-access
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
with self._c_func.get() as func:
c_api.TF_FunctionSetAttrValueProto(func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
g._add_function_recursive(f) # pylint: disable=protected-access
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError(f"shape_func {self._shape_func} produced "
f"{len(shapes):d} shapes, which does not match "
f"{len(op.outputs)} outputs.")
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
| _DefinedFunction |
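`_create_hash_str` boils down to hashing a length-prefixed, canonical serialization of the signature and node defs, then keeping an 8-character prefix so unnamed functions get stable, deterministic names. A standalone sketch of that idea (the field values are illustrative):

```python
import hashlib


def short_hash(*parts):
    """Deterministic 8-character identifier derived from the given byte strings."""
    hasher = hashlib.sha1()
    for part in parts:
        # Length-prefix each field so ("ab", "c") and ("a", "bc") hash differently.
        hasher.update(b"%x" % len(part))
        hasher.update(part)
    return hasher.hexdigest()[:8]


name = "my_func_" + short_hash(b"float32,float32", b"float32", b"AddV2")
print(name)  # the same inputs always yield the same 8-character suffix
```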
| python | optuna__optuna | optuna/terminator/terminator.py | {"start": 875, "end": 5257} |
class ____(BaseTerminator):
"""Automatic stopping mechanism for Optuna studies.
This class implements an automatic stopping mechanism for Optuna studies, aiming to prevent
unnecessary computation. The study is terminated when the statistical error, e.g.
cross-validation error, exceeds the room left for optimization.
For further information about the algorithm, please refer to the following paper:
- `A. Makarova et al. Automatic termination for hyperparameter optimization.
<https://proceedings.mlr.press/v188/makarova22a.html>`__
Args:
improvement_evaluator:
An evaluator object for assessing the room left for optimization. Defaults to a
:class:`~optuna.terminator.improvement.evaluator.RegretBoundEvaluator` object.
error_evaluator:
An evaluator for calculating the statistical error, e.g. cross-validation error.
Defaults to a :class:`~optuna.terminator.CrossValidationErrorEvaluator`
object.
min_n_trials:
The minimum number of trials before termination is considered. Defaults to ``20``.
Raises:
ValueError: If ``min_n_trials`` is not a positive integer.
Example:
.. testcode::
import logging
import sys
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import optuna
from optuna.terminator import Terminator
from optuna.terminator import report_cross_validation_scores
study = optuna.create_study(direction="maximize")
terminator = Terminator()
min_n_trials = 20
while True:
trial = study.ask()
X, y = load_wine(return_X_y=True)
clf = RandomForestClassifier(
max_depth=trial.suggest_int("max_depth", 2, 32),
min_samples_split=trial.suggest_float("min_samples_split", 0, 1),
criterion=trial.suggest_categorical("criterion", ("gini", "entropy")),
)
scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))
report_cross_validation_scores(trial, scores)
value = scores.mean()
logging.info(f"Trial #{trial.number} finished with value {value}.")
study.tell(trial, value)
if trial.number > min_n_trials and terminator.should_terminate(study):
logging.info("Terminated by Optuna Terminator!")
break
.. seealso::
Please refer to :class:`~optuna.terminator.TerminatorCallback` for how to use
the terminator mechanism with the :func:`~optuna.study.Study.optimize` method.
"""
def __init__(
self,
improvement_evaluator: BaseImprovementEvaluator | None = None,
error_evaluator: BaseErrorEvaluator | None = None,
min_n_trials: int = DEFAULT_MIN_N_TRIALS,
) -> None:
if min_n_trials <= 0:
raise ValueError("`min_n_trials` is expected to be a positive integer.")
self._improvement_evaluator = improvement_evaluator or RegretBoundEvaluator()
self._error_evaluator = error_evaluator or self._initialize_error_evaluator()
self._min_n_trials = min_n_trials
def _initialize_error_evaluator(self) -> BaseErrorEvaluator:
if isinstance(self._improvement_evaluator, BestValueStagnationEvaluator):
return StaticErrorEvaluator(constant=0)
return CrossValidationErrorEvaluator()
def should_terminate(self, study: Study) -> bool:
"""Judge whether the study should be terminated based on the reported values."""
trials = study.get_trials(states=[TrialState.COMPLETE])
if len(trials) < self._min_n_trials:
return False
improvement = self._improvement_evaluator.evaluate(
trials=study.trials,
study_direction=study.direction,
)
error = self._error_evaluator.evaluate(
trials=study.trials, study_direction=study.direction
)
should_terminate = improvement < error
return should_terminate
|
Terminator
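The ask/tell loop in the docstring drives the terminator by hand; the ``seealso`` points at ``TerminatorCallback``, which runs the same ``should_terminate`` check from inside ``study.optimize``. A minimal sketch of that callback-based usage, assuming the same cross-validation setup (the objective body is illustrative and not taken from the source):

import optuna
from optuna.terminator import TerminatorCallback, report_cross_validation_scores
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score


def objective(trial):
    X, y = load_wine(return_X_y=True)
    clf = RandomForestClassifier(max_depth=trial.suggest_int("max_depth", 2, 32))
    scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))
    # The terminator estimates the statistical error from these reported CV scores.
    report_cross_validation_scores(trial, scores)
    return scores.mean()


study = optuna.create_study(direction="maximize")
# TerminatorCallback evaluates Terminator.should_terminate after each trial and stops
# the optimization once the remaining room for improvement falls below the error.
study.optimize(objective, n_trials=100, callbacks=[TerminatorCallback()])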
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 35906,
"end": 37233
}
|
class ____(Trace):
def new_arg(self, aval: ShapedArray) -> JaxprTracer:
aval = raise_to_shaped(aval)
tracer = self.builder.new_tracer(self, aval)
self.builder.tracer_to_var[id(tracer)] = Var(aval)
return tracer
def get_or_make_const_tracer(self, val: Any) -> JaxprTracer:
tracer = self.builder.const_tracers.get(id(val))
if tracer is None:
tracer = self.builder.new_tracer(self, raise_to_shaped(get_aval(val)))
self.builder.add_const(tracer, val)
return tracer
pure = lift = get_or_make_const_tracer
def process_primitive(self, primitive, tracers, params):
avals_in = [t.aval for t in tracers]
avals_out = abstract_eval_rules[primitive](*avals_in, **params)
out_tracers = [self.builder.new_tracer(self, a) for a in avals_out]
inputs = [self.builder.getvar(t) for t in tracers]
outvars = [self.builder.add_var(t) for t in out_tracers]
self.builder.add_eqn(JaxprEqn(primitive, inputs, params, outvars))
return out_tracers
@property
def builder(self):
return self.main.global_data
# NB: in JAX, we instead attach abstract eval rules to Primitive instances
abstract_eval_rules = {}
# -
# Notice that we keep as interpreter-global data a builder object, which keeps
# track of variables, constants, and eqns as we build up the jaxpr.
|
JaxprTrace
|
python
|
python__mypy
|
mypyc/ir/ops.py
|
{
"start": 34682,
"end": 35574
}
|
class ____(RegisterOp):
"""box(type, src)
This converts from a potentially unboxed representation to a straight Python object.
Only supported for types with an unboxed representation.
"""
error_kind = ERR_NEVER
def __init__(self, src: Value, line: int = -1) -> None:
super().__init__(line)
self.src = src
self.type = object_rprimitive
# When we box None and bool values, we produce a borrowed result
if is_none_rprimitive(self.src.type) or is_bool_or_bit_rprimitive(self.src.type):
self.is_borrowed = True
def sources(self) -> list[Value]:
return [self.src]
def set_sources(self, new: list[Value]) -> None:
(self.src,) = new
def stolen(self) -> list[Value]:
return [self.src]
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_box(self)
@final
|
Box
|
python
|
google__jax
|
tests/memories_test.py
|
{
"start": 1972,
"end": 6154
}
|
class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
self._default_memory_kind = "device"
@parameterized.named_parameters(
("named_sharding", "named_sharding"),
("single_device_sharding", "single_device_sharding"),
("gspmd_sharding", "gspmd_sharding"),
)
def test_canonicalize_memory_kind(self, name):
if name == "named_sharding":
mesh = jtu.create_mesh((1,), "x")
ns = NamedSharding(mesh, P("x"))
self.assertEqual(ns.memory_kind, self._default_memory_kind)
elif name == "single_device_sharding":
ss = SingleDeviceSharding(jax.devices()[0])
self.assertEqual(ss.memory_kind, self._default_memory_kind)
else:
assert name == "gspmd_sharding"
gs = GSPMDSharding.get_replicated(jax.devices())
self.assertEqual(gs.memory_kind, self._default_memory_kind)
@parameterized.named_parameters(
("named_sharding", "named_sharding"),
("single_device_sharding", "single_device_sharding"),
("gspmd_sharding", "gspmd_sharding"),
)
def test_wrong_memory_kind(self, name):
if name == "named_sharding":
with self.assertRaisesRegex(
ValueError, "Could not find memory addressable by device.*"
):
mesh = jtu.create_mesh((1,), ("x",))
NamedSharding(mesh, P("x"), memory_kind="hbm")
elif name == "single_device_sharding":
with self.assertRaisesRegex(
ValueError,
"Could not find memory addressable by device.*Device.*"
" can address the following memory kinds.*",
):
SingleDeviceSharding(jax.devices()[0], memory_kind="host")
else:
assert name == "gspmd_sharding"
with self.assertRaisesRegex(
ValueError, "Could not find memory addressable by device.*"
):
GSPMDSharding.get_replicated(jax.devices(), memory_kind="my_host")
@parameterized.named_parameters(
("named_sharding", "named_sharding"),
("single_device_sharding", "single_device_sharding"),
("gspmd_sharding", "gspmd_sharding"),
)
def test_correct_tpu_memory_kind(self, name):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("TPU memory kind test.")
if name == "named_sharding":
mesh = jtu.create_mesh((1,), ("x",))
NamedSharding(mesh, P("x"), memory_kind=self._default_memory_kind)
elif name == "single_device_sharding":
SingleDeviceSharding(jax.devices()[0], memory_kind="unpinned_host")
else:
assert name == "gspmd_sharding"
GSPMDSharding.get_replicated(jax.devices(), memory_kind="unpinned_host")
@parameterized.named_parameters(
("named_sharding", "named_sharding"),
("single_device_sharding", "single_device_sharding"),
("gspmd_sharding", "gspmd_sharding"),
)
def test_sharding_eq(self, name):
if name == "named_sharding":
mesh = jtu.create_mesh((1,), ("x",))
s1 = NamedSharding(mesh, P("x"))
s2 = NamedSharding(mesh, P("x"), memory_kind=self._default_memory_kind)
self.assertEqual(s1, s2)
elif name == "single_device_sharding":
s1 = SingleDeviceSharding(jax.devices()[0])
s2 = SingleDeviceSharding(jax.devices()[0], memory_kind=self._default_memory_kind)
self.assertEqual(s1, s2)
elif name == "gspmd_sharding":
s1 = GSPMDSharding.get_replicated(jax.devices())
s2 = GSPMDSharding.get_replicated(jax.devices(), memory_kind=self._default_memory_kind)
self.assertEqual(s1, s2)
def test_sharding_equivalent(self):
mesh = jtu.create_mesh((1,), ("x",))
ndim = 2
ns1 = NamedSharding(mesh, P("x"))
gs1 = GSPMDSharding(
tuple(mesh.devices.flat),
ns1._to_xla_hlo_sharding(ndim),
memory_kind=self._default_memory_kind,
)
self.assertTrue(ns1.is_equivalent_to(gs1, ndim))
ns2 = NamedSharding(mesh, P("x"), memory_kind=self._default_memory_kind)
gs2 = GSPMDSharding(
tuple(mesh.devices.flat), ns2._to_xla_hlo_sharding(ndim)
)
self.assertTrue(ns2.is_equivalent_to(gs2, ndim))
def test_default_memory_kind(self):
dev = jax.devices()[0]
self.assertEqual(dev.default_memory().kind, self._default_memory_kind)
|
ShardingMemoriesTest
|
python
|
dask__distributed
|
distributed/dashboard/components/shared.py
|
{
"start": 11068,
"end": 15563
}
|
class ____(DashboardComponent):
"""Time plots of the current resource usage on the cluster
This is two plots, one for CPU and Memory and another for Network I/O
"""
def __init__(self, server, doc=None, **kwargs):
if doc is not None:
self.doc = weakref.ref(doc)
self.server = server
self.log = self.server.io_loop.profile
self.start = None
self.stop = None
self.ts = {"count": [], "time": []}
self.state = profile.get_profile(self.log)
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
self.profile_plot, self.source = profile.plot_figure(data, **kwargs)
changing = [False] # avoid repeated changes from within callback
@without_property_validation
@log_errors
def cb(attr, old, new):
if changing[0] or len(new) == 0:
return
data = profile.plot_data(self.states[new[0]], profile_interval)
del self.states[:]
self.states.extend(data.pop("states"))
changing[0] = True # don't recursively trigger callback
update(self.source, data)
self.source.selected.indices = old
changing[0] = False
self.source.selected.on_change("indices", cb)
self.ts_source = ColumnDataSource({"time": [], "count": []})
self.ts_plot = figure(
title="Activity over time",
height=150,
x_axis_type="datetime",
active_drag="xbox_select",
tools="xpan,xwheel_zoom,xbox_select,reset",
sizing_mode="stretch_width",
toolbar_location="above",
)
self.ts_plot.line("time", "count", source=self.ts_source)
self.ts_plot.circle(
"time",
"count",
source=self.ts_source,
color=None,
selection_color="orange",
radius=1,
)
self.ts_plot.yaxis.visible = False
self.ts_plot.grid.visible = False
@log_errors
def ts_change(attr, old, new):
selected = self.ts_source.selected.indices
if selected:
start = self.ts_source.data["time"][min(selected)] / 1000
stop = self.ts_source.data["time"][max(selected)] / 1000
self.start, self.stop = min(start, stop), max(start, stop)
else:
self.start = self.stop = None
self.trigger_update()
self.ts_source.selected.on_change("indices", ts_change)
self.reset_button = Button(label="Reset", button_type="success")
self.reset_button.on_click(lambda: self.update(self.state))
self.update_button = Button(label="Update", button_type="success")
self.update_button.on_click(self.trigger_update)
self.root = column(
row(self.reset_button, self.update_button, sizing_mode="scale_width"),
self.profile_plot,
self.ts_plot,
**kwargs,
)
self.subtitle = Title(text=" ", text_font_style="italic")
self.profile_plot.add_layout(self.subtitle, "above")
if not dask.config.get("distributed.worker.profile.enabled"):
self.subtitle.text = "Profiling is disabled."
self.reset_button.disabled = True
self.update_button.disabled = True
elif sys.version_info.minor == 11:
self.subtitle.text = "Profiling is disabled due to a known deadlock in CPython 3.11 that can be triggered by the profiler. See https://github.com/dask/distributed/issues/8616 for more information."
self.reset_button.disabled = True
self.update_button.disabled = True
@without_property_validation
@log_errors
def update(self, state):
self.state = state
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
@without_property_validation
def trigger_update(self):
self.state = profile.get_profile(self.log, start=self.start, stop=self.stop)
data = profile.plot_data(self.state, profile_interval)
self.states = data.pop("states")
update(self.source, data)
times = [t * 1000 for t, _ in self.log]
counts = list(toolz.pluck("count", toolz.pluck(1, self.log)))
self.ts_source.data.update({"time": times, "count": counts})
|
ProfileServer
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/types.py
|
{
"start": 13907,
"end": 15305
}
|
class ____(sqltypes.TIME):
"""MySQL TIME type."""
__visit_name__ = "TIME"
def __init__(self, timezone: bool = False, fsp: Optional[int] = None):
"""Construct a MySQL TIME type.
:param timezone: not used by the MySQL dialect.
:param fsp: fractional seconds precision value.
MySQL 5.6 supports storage of fractional seconds;
this parameter will be used when emitting DDL
for the TIME type.
.. note::
DBAPI driver support for fractional seconds may
be limited; current support includes
MySQL Connector/Python.
"""
super().__init__(timezone=timezone)
self.fsp = fsp
def result_processor(
self, dialect: Dialect, coltype: object
) -> _ResultProcessorType[datetime.time]:
time = datetime.time
def process(value: Any) -> Optional[datetime.time]:
# convert from a timedelta value
if value is not None:
microseconds = value.microseconds
seconds = value.seconds
minutes = seconds // 60
return time(
minutes // 60,
minutes % 60,
seconds - minutes * 60,
microsecond=microseconds,
)
else:
return None
return process
|
TIME
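A short sketch of how the ``fsp`` argument surfaces in emitted DDL, using SQLAlchemy's documented compile-with-dialect pattern (table and column names are illustrative):

from sqlalchemy import Column, MetaData, Table
from sqlalchemy.dialects import mysql
from sqlalchemy.schema import CreateTable

metadata = MetaData()
events = Table(
    "events",
    metadata,
    Column("elapsed", mysql.TIME(fsp=6)),  # fractional seconds precision of 6
)

# Renders roughly: CREATE TABLE events (elapsed TIME(6))
print(CreateTable(events).compile(dialect=mysql.dialect()))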
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/models.py
|
{
"start": 2065,
"end": 4285
}
|
class ____(NonStrictDataModel):
"""
:param key: The key uniquely identifying the metadata item inside the given
entity
:type key: str
:param type: The type of the metadata item
:type type: str
:param value: The value stored in the metadata item
:type value: str
"""
_schema = {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self, key: Optional[str] = None, type: Optional[str] = None, value: Optional[str] = None, **kwargs: Any
) -> None:
super(MetadataItem, self).__init__(**kwargs)
self.key = key
self.type = type
self.value = value
@schema_property("key")
def key(self) -> Optional[str]:
return self._property_key
@key.setter
def key(self, value: Optional[str]) -> None:
if value is None:
self._property_key = None
return
self.assert_isinstance(value, "key", six.string_types)
self._property_key = value
@schema_property("type")
def type(self) -> Optional[str]:
return self._property_type
@type.setter
def type(self, value: Optional[str]) -> None:
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", six.string_types)
self._property_type = value
@schema_property("value")
def value(self) -> Optional[str]:
return self._property_value
@value.setter
def value(self, value: Optional[str]) -> None:
if value is None:
self._property_value = None
return
self.assert_isinstance(value, "value", six.string_types)
self._property_value = value
|
MetadataItem
|
python
|
getsentry__sentry
|
tests/sentry/users/api/endpoints/test_user_permissions_config.py
|
{
"start": 379,
"end": 1722
}
|
class ____(UserPermissionsConfigTest):
method = "GET"
def test_superuser_lookup_self(self) -> None:
self.superuser = self.create_user(is_superuser=True)
self.login_as(user=self.superuser, superuser=True)
self.add_user_permission(self.superuser, "users.admin")
response = self.get_success_response("me", status_code=200)
assert len(response.data) == 3
assert "broadcasts.admin" in response.data
assert "users.admin" in response.data
assert "options.admin" in response.data
@override_options({"staff.ga-rollout": True})
@patch.object(StaffPermission, "has_permission", wraps=StaffPermission().has_permission)
def test_staff_lookup_self(self, mock_has_permission: MagicMock) -> None:
self.staff_user = self.create_user(is_staff=True)
self.login_as(user=self.staff_user, staff=True)
self.add_user_permission(self.staff_user, "users.admin")
response = self.get_success_response("me", status_code=200)
assert len(response.data) == 3
assert "broadcasts.admin" in response.data
assert "users.admin" in response.data
assert "options.admin" in response.data
# ensure we fail the scope check and call is_active_staff
assert mock_has_permission.call_count == 1
|
UserPermissionsConfigGetTest
|
python
|
ray-project__ray
|
rllib/core/models/torch/primitives.py
|
{
"start": 15305,
"end": 22684
}
|
class ____(nn.Module):
"""A model containing a CNNTranspose with N Conv2DTranspose layers.
All layers share the same activation function, bias setup (use bias or not),
and LayerNormalization setup (use layer normalization or not), except for the last
one, which is never activated and never layer norm'd.
Note that there is no reshaping/flattening nor an additional dense layer at the
beginning or end of the stack. The input as well as output of the network are 3D
tensors of dimensions [width x height x num output filters].
"""
def __init__(
self,
*,
input_dims: Union[List[int], Tuple[int, ...]],
cnn_transpose_filter_specifiers: List[List[Union[int, List]]],
cnn_transpose_use_bias: bool = True,
cnn_transpose_activation: str = "relu",
cnn_transpose_use_layernorm: bool = False,
cnn_transpose_kernel_initializer: Optional[Union[str, Callable]] = None,
cnn_transpose_kernel_initializer_config: Optional[Dict] = None,
cnn_transpose_bias_initializer: Optional[Union[str, Callable]] = None,
cnn_transpose_bias_initializer_config: Optional[Dict] = None,
):
"""Initializes a TorchCNNTranspose instance.
Args:
input_dims: The 3D input dimensions of the network (incoming image).
cnn_transpose_filter_specifiers: A list of lists, where each item represents
one Conv2DTranspose layer. Each such Conv2DTranspose layer is further
specified by the elements of the inner lists. The inner lists follow
the format: `[number of filters, kernel, stride]` to
specify a convolutional-transpose layer stacked in order of the
outer list.
`kernel` as well as `stride` might be provided as width x height tuples
OR as single ints representing both dimensions (width and height)
in case of square shapes.
cnn_transpose_use_bias: Whether to use bias on all Conv2DTranspose layers.
cnn_transpose_use_layernorm: Whether to insert a LayerNormalization
functionality in between each Conv2DTranspose layer's outputs and its
activation.
The last Conv2DTranspose layer will not be normed, regardless.
cnn_transpose_activation: The activation function to use after each layer
(except for the last Conv2DTranspose layer, which is always
non-activated).
cnn_transpose_kernel_initializer: The initializer function or class to use
for kernel initialization in the CNN layers. If `None` the default
initializer of the respective CNN layer is used. Note, only the
in-place initializers, i.e. ending with an underscore "_" are allowed.
cnn_transpose_kernel_initializer_config: Configuration to pass into the
initializer defined in `cnn_transpose_kernel_initializer`.
cnn_transpose_bias_initializer: The initializer function or class to use for
bias initialization in the CNN layers. If `None` the default initializer
of the respective CNN layer is used. Note, only the in-place
initializers, i.e. ending with an underscore "_" are allowed.
cnn_transpose_bias_initializer_config: Configuration to pass into the
initializer defined in `cnn_transpose_bias_initializer`.
"""
super().__init__()
assert len(input_dims) == 3
cnn_transpose_activation = get_activation_fn(
cnn_transpose_activation, framework="torch"
)
cnn_transpose_kernel_initializer = get_initializer_fn(
cnn_transpose_kernel_initializer, framework="torch"
)
cnn_transpose_bias_initializer = get_initializer_fn(
cnn_transpose_bias_initializer, framework="torch"
)
layers = []
# Add user-specified hidden convolutional layers first
width, height, in_depth = input_dims
in_size = [width, height]
for i, (out_depth, kernel, stride) in enumerate(
cnn_transpose_filter_specifiers
):
is_final_layer = i == len(cnn_transpose_filter_specifiers) - 1
# Resolve stride and kernel width/height values if only int given (squared).
s_w, s_h = (stride, stride) if isinstance(stride, int) else stride
k_w, k_h = (kernel, kernel) if isinstance(kernel, int) else kernel
# Stride the incoming image first.
stride_layer = Stride2D(in_size[0], in_size[1], s_w, s_h)
layers.append(stride_layer)
# Then 0-pad (like in tensorflow's SAME mode).
# This will return the necessary padding such that for stride=1, the output
# image has the same size as the input image, for stride=2, the output image
# is 2x the input image, etc..
padding, out_size = same_padding_transpose_after_stride(
(stride_layer.out_width, stride_layer.out_height), kernel, stride
)
layers.append(nn.ZeroPad2d(padding)) # left, right, top, bottom
# Then do the Conv2DTranspose operation
# (now that we have padded and strided manually, w/o any more padding using
# stride=1).
layer = nn.ConvTranspose2d(
in_depth,
out_depth,
kernel,
# Force-set stride to 1 as we already took care of it.
1,
# Disable torch auto-padding (torch interprets the padding setting
# as: dilation (==1.0) * [`kernel` - 1] - [`padding`]).
padding=(k_w - 1, k_h - 1),
# Last layer always uses bias (b/c has no LayerNorm, regardless of
# config).
bias=cnn_transpose_use_bias or is_final_layer,
)
# Initialize CNN Transpose layer kernel if necessary.
if cnn_transpose_kernel_initializer:
cnn_transpose_kernel_initializer(
layer.weight, **cnn_transpose_kernel_initializer_config or {}
)
# Initialize CNN Transpose layer bias if necessary.
if cnn_transpose_bias_initializer:
cnn_transpose_bias_initializer(
layer.bias, **cnn_transpose_bias_initializer_config or {}
)
layers.append(layer)
# Layernorm (never for final layer).
if cnn_transpose_use_layernorm and not is_final_layer:
layers.append(LayerNorm1D(out_depth, eps=0.001))
# Last layer is never activated (regardless of config).
if cnn_transpose_activation is not None and not is_final_layer:
layers.append(cnn_transpose_activation())
in_size = (out_size[0], out_size[1])
in_depth = out_depth
# Create the final CNNTranspose network.
self.cnn_transpose = nn.Sequential(*layers)
def forward(self, inputs):
# Permute b/c data comes in as [B, dim, dim, channels]:
out = inputs.permute(0, 3, 1, 2)
out = self.cnn_transpose(out)
return out.permute(0, 2, 3, 1)
|
TorchCNNTranspose
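The inner-list format of ``cnn_transpose_filter_specifiers`` is easy to misread; this standalone sketch mirrors the int-vs-tuple normalization performed in ``__init__`` above (the specific filter counts, kernels, and strides are illustrative):

# Each entry is [num_filters, kernel, stride]; kernel and stride may be a single
# int (square) or an explicit (width, height) pair.
specs = [
    [1024, 4, 2],           # 4x4 kernel, stride 2 in both dimensions
    [128, (4, 4), (2, 2)],  # the same shape written with explicit pairs
    [3, 4, 2],              # final layer: never activated, never layer-normed
]

for out_depth, kernel, stride in specs:
    k_w, k_h = (kernel, kernel) if isinstance(kernel, int) else kernel
    s_w, s_h = (stride, stride) if isinstance(stride, int) else stride
    print(out_depth, (k_w, k_h), (s_w, s_h))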
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 88627,
"end": 89612
}
|
class ____:
xlDifferenceFrom = 2 # from enum XlPivotFieldCalculation
xlIndex = 9 # from enum XlPivotFieldCalculation
xlNoAdditionalCalculation = -4143 # from enum XlPivotFieldCalculation
xlPercentDifferenceFrom = 4 # from enum XlPivotFieldCalculation
xlPercentOf = 3 # from enum XlPivotFieldCalculation
xlPercentOfColumn = 7 # from enum XlPivotFieldCalculation
xlPercentOfParent = 12 # from enum XlPivotFieldCalculation
xlPercentOfParentColumn = 11 # from enum XlPivotFieldCalculation
xlPercentOfParentRow = 10 # from enum XlPivotFieldCalculation
xlPercentOfRow = 6 # from enum XlPivotFieldCalculation
xlPercentOfTotal = 8 # from enum XlPivotFieldCalculation
xlPercentRunningTotal = 13 # from enum XlPivotFieldCalculation
xlRankAscending = 14 # from enum XlPivotFieldCalculation
xlRankDecending = 15 # from enum XlPivotFieldCalculation
xlRunningTotal = 5 # from enum XlPivotFieldCalculation
|
PivotFieldCalculation
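These constants mirror Excel's ``XlPivotFieldCalculation`` enum and are intended to be passed through xlwings' raw ``.api`` COM handle on Windows. A heavily hedged sketch; the workbook, sheet, pivot table, and field names below are assumptions, not part of the source:

import xlwings as xw
from xlwings.constants import PivotFieldCalculation

wb = xw.Book("report.xlsx")  # assumed: an open workbook containing a pivot table
pivot = wb.sheets["Sheet1"].api.PivotTables("PivotTable1")  # assumed pivot table name
field = pivot.PivotFields("Sum of Sales")  # assumed data field name

# Show the data field as a percentage of the grand total via the COM property.
field.Calculation = PivotFieldCalculation.xlPercentOfTotal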
|
python
|
walkccc__LeetCode
|
solutions/2874. Maximum Value of an Ordered Triplet II/2874.py
|
{
"start": 0,
"end": 436
}
|
class ____:
# Same as 2873. Maximum Value of an Ordered Triplet I
def maximumTripletValue(self, nums: list[int]) -> int:
ans = 0
maxDiff = 0 # max(nums[i] - nums[j])
maxNum = 0 # max(nums[i])
for num in nums:
ans = max(ans, maxDiff * num) # num := nums[k]
maxDiff = max(maxDiff, maxNum - num) # num := nums[j]
maxNum = max(maxNum, num) # num := nums[i]
return ans
|
Solution
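A quick sanity check of the single-pass scan, with the masked class name filled in as ``Solution`` (the row's target). The first case is the usual example for this problem, where the best triplet is (12 - 1) * 7:

sol = Solution()
assert sol.maximumTripletValue([12, 6, 1, 2, 7]) == 77  # (12 - 1) * 7
assert sol.maximumTripletValue([1, 2, 3]) == 0          # every (nums[i] - nums[j]) * nums[k] is negative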
|
python
|
pypa__warehouse
|
tests/unit/email/test_init.py
|
{
"start": 70151,
"end": 72704
}
|
class ____:
@pytest.fixture
def _organization_project(self, pyramid_user):
self.user = pyramid_user
self.organization_name = "exampleorganization"
self.project_name = "exampleproject"
@pytest.mark.usefixtures("_organization_project")
@pytest.mark.parametrize(
("email_template_name", "send_organization_project_email"),
[
("organization-project-added", email.send_organization_project_added_email),
(
"organization-project-removed",
email.send_organization_project_removed_email,
),
],
)
def test_send_organization_project_email(
self,
db_request,
make_email_renderers,
send_email,
email_template_name,
send_organization_project_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
email_template_name
)
result = send_organization_project_email(
db_request,
self.user,
organization_name=self.organization_name,
project_name=self.project_name,
)
assert result == {
"organization_name": self.organization_name,
"project_name": self.project_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
|
TestOrganizationProjectEmails
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_match_powers_of_base.py
|
{
"start": 1737,
"end": 9019
}
|
class ____(ColumnMapExpectation):
"""Expect column values to match powers of Base (Base ** power == column value)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"mostly_powers_of_two_but_one": [
1,
2,
4,
8,
16.0,
11,
67108864,
32.00,
64,
128,
256,
512,
1024,
128,
],
"all_powers_of_3": [
59049,
1,
3,
729,
1594323,
81,
27,
243,
9,
177147,
531441,
2187,
6561,
19683,
],
"all_powers_of_2_increasing_order": [
1,
2,
4,
8,
16,
32,
64,
128,
256,
512,
1024,
2048,
4096,
8192,
],
"all_powers_of_7": [
1,
49,
678223072849,
7,
4747561509943,
343,
823543,
2401,
40353607,
96889010407,
16807,
4747561509943,
1628413597910449,
40353607,
],
},
"tests": [
{
"title": "positive_test_with_mostly_powers_of_two_but_one",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "mostly_powers_of_two_but_one",
"base_integer": 2,
"mostly": 0.8,
},
"out": {
"success": True,
"unexpected_index_list": [5],
"unexpected_list": [11],
},
},
{
"title": "positive_test_with_all_powers_of_3",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_powers_of_3", "base_integer": 3},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "positive_test_with_all_powers_of_7",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_powers_of_7",
"base_integer": 7,
"mostly": 0.3,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "positive_test_with_all_powers_of_2_increasing_order",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_powers_of_2_increasing_order",
"base_integer": 2,
"mostly": 1,
},
"out": {
"success": True,
"unexpected_index_list": [],
"unexpected_list": [],
},
},
{
"title": "negative_test_with_powers_of_two",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "mostly_powers_of_two_but_one",
"base_integer": 2,
"mostly": 1,
},
"out": {
"success": False,
"unexpected_index_list": [5],
"unexpected_list": [11],
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.match_powers_of_base"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"base_integer",
"mostly",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {"mostly": 1}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration] = None
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
base_integer = configuration.kwargs["base_integer"]
# # Check other things in configuration.kwargs and raise Exceptions if needed
try:
# Base cannot be less than zero,
# Base must be an Integer
assert base_integer is None or isinstance(base_integer, int)
assert base_integer >= 0
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "beta", # "experimental", "beta", or "production"
"tags": ["beta"], # Tags for this Expectation in the Gallery
"contributors": [ # GitHub handles for all contributors to this Expectation.
"@rifatKomodoDragon", # Don't forget to add your GitHub handle here!
],
}
if __name__ == "__main__":
ExpectColumnValuesToMatchPowersOfBase().print_diagnostic_checklist()
|
ExpectColumnValuesToMatchPowersOfBase
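The condition being validated is simply ``base ** power == value`` for some non-negative integer power. A self-contained sketch of that membership test, independent of the Great Expectations metric that actually backs ``column_values.match_powers_of_base`` (the base-0 and base-1 edge cases are my reading of the definition, not taken from the source):

def is_power_of_base(value, base):
    # Return True if value == base ** k for some integer k >= 0.
    if value == 1:      # base ** 0
        return True
    if base in (0, 1):  # beyond the exponent-0 case, 0 and 1 only ever produce themselves
        return value == base
    current = base
    while current <= value:
        if current == value:
            return True
        current *= base
    return False


assert is_power_of_base(729, 3)      # 3 ** 6
assert is_power_of_base(16.0, 2)     # float representations of powers still match
assert not is_power_of_base(11, 2)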
|
python
|
doocs__leetcode
|
solution/2000-2099/2007.Find Original Array From Doubled Array/Solution.py
|
{
"start": 0,
"end": 389
}
|
class ____:
def findOriginalArray(self, changed: List[int]) -> List[int]:
changed.sort()
cnt = Counter(changed)
ans = []
for x in changed:
if cnt[x] == 0:
continue
cnt[x] -= 1
if cnt[x << 1] <= 0:
return []
cnt[x << 1] -= 1
ans.append(x)
return ans
|
Solution
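A quick usage check of the greedy pairing, again filling in the masked class name as ``Solution`` (the row's target); the first input is the standard example for this problem, and this implementation returns the originals in sorted order:

sol = Solution()
assert sol.findOriginalArray([1, 3, 4, 2, 6, 8]) == [1, 3, 4]
assert sol.findOriginalArray([6, 3, 0, 1]) == []  # no complete pairing of x with 2*x exists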
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 230628,
"end": 231656
}
|
class ____(torch.nn.Module):
def forward(self, L_self_buffers_tensor_constant0_: "f32[3, 3, 3]"):
l_self_buffers_tensor_constant0_ = L_self_buffers_tensor_constant0_
alias_default: "f32[3, 3, 3]" = torch.ops.aten.alias.default(l_self_buffers_tensor_constant0_); l_self_buffers_tensor_constant0_ = None
sin_default: "f32[3, 3, 3]" = torch.ops.aten.sin.default(alias_default)
alias_default_1: "f32[3, 3, 3]" = torch.ops.aten.alias.default(alias_default)
cos_default: "f32[3, 3, 3]" = torch.ops.aten.cos.default(alias_default_1); alias_default_1 = None
alias_default_2: "f32[3, 3, 3]" = torch.ops.aten.alias.default(sin_default); alias_default_2 = None
return (alias_default, cos_default, sin_default)
""",
)
wrapped_gm = self._compile_check(wrapper_fn, (x,), fullgraph=False, graph_idx=1)
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_verified_permissions.py
|
{
"start": 915,
"end": 1063
}
|
class ____:
def test_conn_attribute(self):
hook = VerifiedPermissionsHook()
assert hasattr(hook, "conn")
|
TestVerifiedPermissionsHook
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/antlr_asset_selection/generated/AssetSelectionVisitor.py
|
{
"start": 258,
"end": 6217
}
|
class ____(ParseTreeVisitor):
# Visit a parse tree produced by AssetSelectionParser#start.
def visitStart(self, ctx: AssetSelectionParser.StartContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#UpTraversalExpression.
def visitUpTraversalExpression(self, ctx: AssetSelectionParser.UpTraversalExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#AndExpression.
def visitAndExpression(self, ctx: AssetSelectionParser.AndExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#AllExpression.
def visitAllExpression(self, ctx: AssetSelectionParser.AllExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#TraversalAllowedExpression.
def visitTraversalAllowedExpression(
self, ctx: AssetSelectionParser.TraversalAllowedExpressionContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#DownTraversalExpression.
def visitDownTraversalExpression(
self, ctx: AssetSelectionParser.DownTraversalExpressionContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#NotExpression.
def visitNotExpression(self, ctx: AssetSelectionParser.NotExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#OrExpression.
def visitOrExpression(self, ctx: AssetSelectionParser.OrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#UpAndDownTraversalExpression.
def visitUpAndDownTraversalExpression(
self, ctx: AssetSelectionParser.UpAndDownTraversalExpressionContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#AttributeExpression.
def visitAttributeExpression(self, ctx: AssetSelectionParser.AttributeExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#FunctionCallExpression.
def visitFunctionCallExpression(self, ctx: AssetSelectionParser.FunctionCallExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#ParenthesizedExpression.
def visitParenthesizedExpression(
self, ctx: AssetSelectionParser.ParenthesizedExpressionContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#upTraversal.
def visitUpTraversal(self, ctx: AssetSelectionParser.UpTraversalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#downTraversal.
def visitDownTraversal(self, ctx: AssetSelectionParser.DownTraversalContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#functionName.
def visitFunctionName(self, ctx: AssetSelectionParser.FunctionNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#KeyExpr.
def visitKeyExpr(self, ctx: AssetSelectionParser.KeyExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#TagAttributeExpr.
def visitTagAttributeExpr(self, ctx: AssetSelectionParser.TagAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#OwnerAttributeExpr.
def visitOwnerAttributeExpr(self, ctx: AssetSelectionParser.OwnerAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#GroupAttributeExpr.
def visitGroupAttributeExpr(self, ctx: AssetSelectionParser.GroupAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#KindAttributeExpr.
def visitKindAttributeExpr(self, ctx: AssetSelectionParser.KindAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#StatusAttributeExpr.
def visitStatusAttributeExpr(self, ctx: AssetSelectionParser.StatusAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#ColumnAttributeExpr.
def visitColumnAttributeExpr(self, ctx: AssetSelectionParser.ColumnAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#TableNameAttributeExpr.
def visitTableNameAttributeExpr(self, ctx: AssetSelectionParser.TableNameAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#ColumnTagAttributeExpr.
def visitColumnTagAttributeExpr(self, ctx: AssetSelectionParser.ColumnTagAttributeExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#CodeLocationAttributeExpr.
def visitCodeLocationAttributeExpr(
self, ctx: AssetSelectionParser.CodeLocationAttributeExprContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#ChangedInBranchAttributeExpr.
def visitChangedInBranchAttributeExpr(
self, ctx: AssetSelectionParser.ChangedInBranchAttributeExprContext
):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#value.
def visitValue(self, ctx: AssetSelectionParser.ValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by AssetSelectionParser#keyValue.
def visitKeyValue(self, ctx: AssetSelectionParser.KeyValueContext):
return self.visitChildren(ctx)
del AssetSelectionParser
|
AssetSelectionVisitor
|
python
|
python-attrs__attrs
|
tests/test_dunders.py
|
{
"start": 27064,
"end": 27181
}
|
class ____:
pass
# Store this class so that we recreate it.
OriginalC = C
@attr.s(unsafe_hash=True, order=True)
|
C
|
python
|
plotly__plotly.py
|
plotly/utils.py
|
{
"start": 3189,
"end": 6233
}
|
class ____(PrettyPrinter):
"""
PrettyPrinter subclass that elides long lists/arrays/strings
"""
def __init__(self, *args, **kwargs):
self.threshold = kwargs.pop("threshold", 200)
PrettyPrinter.__init__(self, *args, **kwargs)
def _format(self, val, stream, indent, allowance, context, level):
if ElidedWrapper.is_wrappable(val):
elided_val = ElidedWrapper(val, self.threshold, indent)
return self._format(elided_val, stream, indent, allowance, context, level)
else:
return PrettyPrinter._format(
self, val, stream, indent, allowance, context, level
)
def node_generator(node, path=()):
"""
General, node-yielding generator.
Yields (node, path) tuples when it finds values that are dict
instances.
A path is a sequence of hashable values that can be used as either keys to
a mapping (dict) or indices to a sequence (list). A path is always wrt to
some object. Given an object, a path explains how to get from the top level
of that object to a nested value in the object.
:param (dict) node: Part of a dict to be traversed.
:param (tuple[str]) path: Defines the path of the current node.
:return: (Generator)
Example:
>>> for node, path in node_generator({'a': {'b': 5}}):
... print(node, path)
{'a': {'b': 5}} ()
{'b': 5} ('a',)
"""
if not isinstance(node, dict):
return # in case it's called with a non-dict node at top level
yield node, path
for key, val in node.items():
if isinstance(val, dict):
for item in node_generator(val, path + (key,)):
yield item
def get_by_path(obj, path):
"""
Iteratively get on obj for each key in path.
:param (list|dict) obj: The top-level object.
:param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
:return: (*)
Example:
>>> figure = {'data': [{'x': [5]}]}
>>> path = ('data', 0, 'x')
>>> get_by_path(figure, path)
[5]
"""
for key in path:
obj = obj[key]
return obj
def decode_unicode(coll):
if isinstance(coll, list):
for no, entry in enumerate(coll):
if isinstance(entry, (dict, list)):
coll[no] = decode_unicode(entry)
else:
if isinstance(entry, str):
try:
coll[no] = str(entry)
except UnicodeEncodeError:
pass
elif isinstance(coll, dict):
keys, vals = list(coll.keys()), list(coll.values())
for key, val in zip(keys, vals):
if isinstance(val, (dict, list)):
coll[key] = decode_unicode(val)
elif isinstance(val, str):
try:
coll[key] = str(val)
except UnicodeEncodeError:
pass
coll[str(key)] = coll.pop(key)
return coll
|
ElidedPrettyPrinter
|
python
|
RaRe-Technologies__gensim
|
gensim/similarities/docsim.py
|
{
"start": 8988,
"end": 29340
}
|
class ____(interfaces.SimilarityABC):
"""Compute cosine similarity of a dynamic query against a corpus of documents ('the index').
The index supports adding new documents dynamically.
Notes
-----
Scalability is achieved by sharding the index into smaller pieces, each of which fits into core memory
The shards themselves are simply stored as files to disk and mmap'ed back as needed.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> query = next(iter(corpus))
>>> result = index[query] # search similar to `query` in index
>>>
>>> for sims in index[corpus]: # if you have more query documents, you can submit them all at once, in a batch
... pass
>>>
>>> # There is also a special syntax for when you need similarity of documents in the index
>>> # to the index itself (i.e. queries=indexed documents themselves). This special syntax
>>> # uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
... pass
See Also
--------
:class:`~gensim.similarities.docsim.MatrixSimilarity`
Index similarity (dense with cosine distance).
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity`
Index similarity (sparse with cosine distance).
:class:`~gensim.similarities.docsim.WmdSimilarity`
Index similarity (with word-mover distance).
"""
def __init__(self, output_prefix, corpus, num_features, num_best=None, chunksize=256, shardsize=32768, norm='l2'):
"""
Parameters
----------
output_prefix : str
Prefix for shard filename. If None, a random filename in temp will be used.
corpus : iterable of list of (int, number)
Corpus in streamed Gensim bag-of-words format.
num_features : int
Size of the dictionary (number of features).
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
shardsize : int, optional
Maximum shard size, in documents. Choose a value so that a `shardsize x chunksize` matrix of floats fits
comfortably into your RAM.
norm : {'l1', 'l2'}, optional
Normalization to use.
Notes
-----
Documents are split (internally, transparently) into shards of `shardsize` documents each, and each shard
converted to a matrix, for faster BLAS calls. Each shard is stored to disk under `output_prefix.shard_number`.
If you don't specify an output prefix, a random filename in temp will be used.
If your entire index fits in memory (~1 million documents per 1GB of RAM), you can also use the
:class:`~gensim.similarities.docsim.MatrixSimilarity` or
:class:`~gensim.similarities.docsim.SparseMatrixSimilarity` classes directly.
These are more simple but do not scale as well (they keep the entire index in RAM, no sharding).
They also do not support adding new documents dynamically.
"""
if output_prefix is None:
# undocumented feature: set output_prefix=None to create the server in temp
self.output_prefix = utils.randfname(prefix='simserver')
else:
self.output_prefix = output_prefix
logger.info("starting similarity index under %s", self.output_prefix)
self.num_features = num_features
self.num_best = num_best
self.norm = norm
self.chunksize = int(chunksize)
self.shardsize = shardsize
self.shards = []
self.fresh_docs, self.fresh_nnz = [], 0
if corpus is not None:
self.add_documents(corpus)
def __len__(self):
"""Get length of index."""
return len(self.fresh_docs) + sum(len(shard) for shard in self.shards)
def __str__(self):
return "%s<%i documents in %i shards stored under %s>" % (
self.__class__.__name__, len(self), len(self.shards), self.output_prefix
)
def add_documents(self, corpus):
"""Extend the index with new documents.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
Notes
-----
Internally, documents are buffered and then spilled to disk when there's `self.shardsize` of them
(or when a query is issued).
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.mm'))
>>> index_temp = get_tmpfile("index")
>>> index = Similarity(index_temp, corpus, num_features=400) # create index
>>>
>>> one_more_corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index.add_documents(one_more_corpus) # add more documents in corpus
"""
min_ratio = 1.0 # 0.5 to only reopen shards that are <50% complete
if self.shards and len(self.shards[-1]) < min_ratio * self.shardsize:
# The last shard was incomplete (< min_ratio * shardsize); load it back and add the documents there, don't start a new shard
self.reopen_shard()
for doc in corpus:
if isinstance(doc, numpy.ndarray):
doclen = len(doc)
elif scipy.sparse.issparse(doc):
doclen = doc.nnz
else:
doclen = len(doc)
if doclen < 0.3 * self.num_features:
doc = matutils.unitvec(matutils.corpus2csc([doc], self.num_features).T, self.norm)
else:
doc = matutils.unitvec(matutils.sparse2full(doc, self.num_features), self.norm)
self.fresh_docs.append(doc)
self.fresh_nnz += doclen
if len(self.fresh_docs) >= self.shardsize:
self.close_shard()
if len(self.fresh_docs) % 10000 == 0:
logger.info("PROGRESS: fresh_shard size=%i", len(self.fresh_docs))
def shardid2filename(self, shardid):
"""Get shard file by `shardid`.
Parameters
----------
shardid : int
Shard index.
Return
------
str
Path to shard file.
"""
if self.output_prefix.endswith('.'):
return "%s%s" % (self.output_prefix, shardid)
else:
return "%s.%s" % (self.output_prefix, shardid)
def close_shard(self):
"""Force the latest shard to close (be converted to a matrix and stored to disk).
Do nothing if no new documents added since last call.
Notes
-----
The shard is closed even if it is not full yet (its size is smaller than `self.shardsize`).
If documents are added later via :meth:`~gensim.similarities.docsim.MatrixSimilarity.add_documents`
this incomplete shard will be loaded again and completed.
"""
if not self.fresh_docs:
return
shardid = len(self.shards)
# consider the shard sparse if its density is < 30%
issparse = 0.3 > 1.0 * self.fresh_nnz / (len(self.fresh_docs) * self.num_features)
if issparse:
index = SparseMatrixSimilarity(
self.fresh_docs, num_terms=self.num_features, num_docs=len(self.fresh_docs), num_nnz=self.fresh_nnz
)
else:
index = MatrixSimilarity(self.fresh_docs, num_features=self.num_features)
logger.info("creating %s shard #%s", 'sparse' if issparse else 'dense', shardid)
shard = Shard(self.shardid2filename(shardid), index)
shard.num_best = self.num_best
shard.num_nnz = self.fresh_nnz
self.shards.append(shard)
self.fresh_docs, self.fresh_nnz = [], 0
def reopen_shard(self):
"""Reopen an incomplete shard."""
assert self.shards
if self.fresh_docs:
raise ValueError("cannot reopen a shard with fresh documents in index")
last_shard = self.shards[-1]
last_index = last_shard.get_index()
logger.info("reopening an incomplete shard of %i documents", len(last_shard))
self.fresh_docs = list(last_index.index)
self.fresh_nnz = last_shard.num_nnz
del self.shards[-1] # remove the shard from index, *but its file on disk is not deleted*
logger.debug("reopen complete")
def query_shards(self, query):
"""Apply shard[query] to each shard in `self.shards`. Used internally.
Parameters
----------
query : {iterable of list of (int, number) , list of (int, number))}
Document in BoW format or corpus of documents.
Returns
-------
(None, list of individual shard query results)
Query results.
"""
args = zip([query] * len(self.shards), self.shards)
if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
logger.debug("spawning %i query processes", PARALLEL_SHARDS)
pool = multiprocessing.Pool(PARALLEL_SHARDS)
result = pool.imap(query_shard, args, chunksize=1 + len(self.shards) // PARALLEL_SHARDS)  # imap requires an int chunksize
else:
# serial processing, one shard after another
pool = None
result = map(query_shard, args)
return pool, result
def __getitem__(self, query):
"""Get similarities of the document (or corpus) `query` to all documents in the corpus.
Parameters
----------
query : {iterable of list of (int, number) , list of (int, number))}
A single document in bag-of-words format, or a corpus (iterable) of such documents.
Return
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the query against this index.
Notes
-----
If `query` is a corpus (iterable of documents), return a matrix of similarities of
all query documents vs. all corpus document. This batch query is more efficient than computing the similarities
one document after another.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> result = index[corpus] # pairwise similarities of each document against each document
"""
self.close_shard() # no-op if no documents added to index since last query
# reset num_best and normalize parameters, in case they were changed dynamically
for shard in self.shards:
shard.num_best = self.num_best
shard.normalize = self.norm
# there are 4 distinct code paths, depending on whether input `query` is
# a corpus (or numpy/scipy matrix) or a single document, and whether the
# similarity result should be a full array or only num_best most similar
# documents.
pool, shard_results = self.query_shards(query)
if self.num_best is None:
# user asked for all documents => just stack the sub-results into a single matrix
# (works for both corpus / single doc query)
result = numpy.hstack(list(shard_results))
else:
# the following uses a lot of lazy evaluation and (optionally) parallel
# processing, to improve query latency and minimize memory footprint.
offsets = numpy.cumsum([0] + [len(shard) for shard in self.shards])
def convert(shard_no, doc):
return [(doc_index + offsets[shard_no], sim) for doc_index, sim in doc]
is_corpus, query = utils.is_corpus(query)
is_corpus = is_corpus or hasattr(query, 'ndim') and query.ndim > 1 and query.shape[0] > 1
if not is_corpus:
# user asked for num_best most similar and query is a single doc
results = (convert(shard_no, result) for shard_no, result in enumerate(shard_results))
result = _nlargest(self.num_best, results)
else:
# the trickiest combination: returning num_best results when query was a corpus
results = []
for shard_no, result in enumerate(shard_results):
shard_result = [convert(shard_no, doc) for doc in result]
results.append(shard_result)
result = []
for parts in zip(*results):
merged = _nlargest(self.num_best, parts)
result.append(merged)
if pool:
# gc doesn't seem to collect the Pools, eventually leading to
# "IOError 24: too many open files". so let's terminate it manually.
pool.terminate()
return result
def vector_by_id(self, docpos):
"""Get the indexed vector corresponding to the document at position `docpos`.
Parameters
----------
docpos : int
Document position
Return
------
:class:`scipy.sparse.csr_matrix`
Indexed vector.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> # Create index:
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> vector = index.vector_by_id(1)
"""
self.close_shard() # no-op if no documents added to index since last query
pos = 0
for shard in self.shards:
pos += len(shard)
if docpos < pos:
break
if not self.shards or docpos < 0 or docpos >= pos:
raise ValueError("invalid document position: %s (must be 0 <= x < %s)" % (docpos, len(self)))
result = shard.get_document_id(docpos - pos + len(shard))
return result
def similarity_by_id(self, docpos):
"""Get similarity of a document specified by its index position `docpos`.
Parameters
----------
docpos : int
Document position in the index.
Return
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of the given document against this index.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath
>>> from gensim.similarities import Similarity
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity('temp', corpus, num_features=400)
>>> similarities = index.similarity_by_id(1)
"""
query = self.vector_by_id(docpos)
norm, self.norm = self.norm, False
result = self[query]
self.norm = norm
return result
def __iter__(self):
"""For each index document in index, compute cosine similarity against all other documents in the index.
Uses :meth:`~gensim.similarities.docsim.Similarity.iter_chunks` internally.
Yields
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Similarities of each document in turn against the index.
"""
# turn off query normalization (vectors in the index are already normalized, save some CPU)
norm, self.norm = self.norm, False
for chunk in self.iter_chunks():
if chunk.shape[0] > 1:
for sim in self[chunk]:
yield sim
else:
yield self[chunk]
self.norm = norm # restore normalization
def iter_chunks(self, chunksize=None):
"""Iteratively yield the index as chunks of document vectors, each of size <= chunksize.
Parameters
----------
chunksize : int, optional
Size of chunk; if None, `self.chunksize` will be used.
Yields
------
:class:`numpy.ndarray` or :class:`scipy.sparse.csr_matrix`
Chunks of the index as 2D arrays. The arrays are either dense or sparse, depending on
whether the shard was storing dense or sparse vectors.
"""
self.close_shard()
if chunksize is None:
# if not explicitly specified, use the chunksize from the constructor
chunksize = self.chunksize
for shard in self.shards:
query = shard.get_index().index
for chunk_start in range(0, query.shape[0], chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(query.shape[0], chunk_start + chunksize)
chunk = query[chunk_start: chunk_end] # create a view
yield chunk
def check_moved(self):
"""Update shard locations, for case where the server prefix location changed on the filesystem."""
dirname = os.path.dirname(self.output_prefix)
for shard in self.shards:
shard.dirname = dirname
def save(self, fname=None, *args, **kwargs):
"""Save the index object via pickling under `fname`. See also :meth:`~gensim.docsim.Similarity.load()`.
Parameters
----------
fname : str, optional
Path for save index, if not provided - will be saved to `self.output_prefix`.
*args : object
Arguments, see :meth:`gensim.utils.SaveLoad.save`.
**kwargs : object
Keyword arguments, see :meth:`gensim.utils.SaveLoad.save`.
Notes
-----
Will call :meth:`~gensim.similarities.Similarity.close_shard` internally to spill
any unfinished shards to disk first.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora.textcorpus import TextCorpus
>>> from gensim.test.utils import datapath, get_tmpfile
>>> from gensim.similarities import Similarity
>>>
>>> temp_fname = get_tmpfile("index")
>>> output_fname = get_tmpfile("saved_index")
>>>
>>> corpus = TextCorpus(datapath('testcorpus.txt'))
>>> index = Similarity(output_fname, corpus, num_features=400)
>>>
>>> index.save(output_fname)
>>> loaded_index = index.load(output_fname)
"""
self.close_shard()
if fname is None:
fname = self.output_prefix
super(Similarity, self).save(fname, *args, **kwargs)
def destroy(self):
"""Delete all files under self.output_prefix Index is not usable anymore after calling this method."""
import glob
for fname in glob.glob(self.output_prefix + '*'):
logger.info("deleting %s", fname)
os.remove(fname)
|
Similarity
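One behaviour worth calling out from ``__getitem__`` above: when ``num_best`` is set, the result switches from a full similarity vector to a top-k list of ``(document_index, similarity)`` pairs merged across shards, and the attribute can be changed between queries because each query re-propagates it to the shards. A hedged sketch reusing the docstring's test corpus (exact scores depend on the corpus):

from gensim.corpora.textcorpus import TextCorpus
from gensim.test.utils import datapath, get_tmpfile
from gensim.similarities import Similarity

corpus = TextCorpus(datapath('testcorpus.txt'))
index = Similarity(get_tmpfile("index"), corpus, num_features=400)

index.num_best = 3                # picked up by every shard on the next query
top3 = index[next(iter(corpus))]  # e.g. [(0, 0.99...), (2, 0.41...), ...]
print(top3)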
|
python
|
ray-project__ray
|
python/ray/data/preprocessors/chain.py
|
{
"start": 266,
"end": 4159
}
|
class ____(Preprocessor):
"""Combine multiple preprocessors into a single :py:class:`Preprocessor`.
When you call ``fit``, each preprocessor is fit on the dataset produced by the
preceding preprocessor's ``fit_transform``.
Example:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import *
>>>
>>> df = pd.DataFrame({
... "X0": [0, 1, 2],
... "X1": [3, 4, 5],
... "Y": ["orange", "blue", "orange"],
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>>
>>> preprocessor = Chain(
... StandardScaler(columns=["X0", "X1"]),
... Concatenator(columns=["X0", "X1"], output_column_name="X"),
... LabelEncoder(label_column="Y")
... )
>>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP
Y X
0 1 [-1.224744871391589, -1.224744871391589]
1 0 [0.0, 0.0]
2 1 [1.224744871391589, 1.224744871391589]
Args:
preprocessors: The preprocessors to sequentially compose.
"""
def fit_status(self):
fittable_count = 0
fitted_count = 0
for p in self.preprocessors:
if p.fit_status() == Preprocessor.FitStatus.FITTED:
fittable_count += 1
fitted_count += 1
elif p.fit_status() in (
Preprocessor.FitStatus.NOT_FITTED,
Preprocessor.FitStatus.PARTIALLY_FITTED,
):
fittable_count += 1
else:
assert p.fit_status() == Preprocessor.FitStatus.NOT_FITTABLE
if fittable_count > 0:
if fitted_count == fittable_count:
return Preprocessor.FitStatus.FITTED
elif fitted_count > 0:
return Preprocessor.FitStatus.PARTIALLY_FITTED
else:
return Preprocessor.FitStatus.NOT_FITTED
else:
return Preprocessor.FitStatus.NOT_FITTABLE
def __init__(self, *preprocessors: Preprocessor):
super().__init__()
self.preprocessors = preprocessors
def _fit(self, ds: "Dataset") -> Preprocessor:
for preprocessor in self.preprocessors[:-1]:
ds = preprocessor.fit_transform(ds)
self.preprocessors[-1].fit(ds)
return self
def fit_transform(self, ds: "Dataset") -> "Dataset":
for preprocessor in self.preprocessors:
ds = preprocessor.fit_transform(ds)
return ds
def _transform(
self,
ds: "Dataset",
batch_size: Optional[int],
num_cpus: Optional[float] = None,
memory: Optional[float] = None,
concurrency: Optional[int] = None,
) -> "Dataset":
for preprocessor in self.preprocessors:
ds = preprocessor.transform(
ds,
batch_size=batch_size,
num_cpus=num_cpus,
memory=memory,
concurrency=concurrency,
)
return ds
def _transform_batch(self, df: "DataBatchType") -> "DataBatchType":
for preprocessor in self.preprocessors:
df = preprocessor.transform_batch(df)
return df
def __repr__(self):
arguments = ", ".join(repr(preprocessor) for preprocessor in self.preprocessors)
return f"{self.__class__.__name__}({arguments})"
def _determine_transform_to_use(self) -> BatchFormat:
# This is relevant for BatchPrediction.
# For Chain preprocessor, we picked the first one as entry point.
# TODO (jiaodong): We should revisit if our Chain preprocessor is
# still optimal with context of lazy execution.
return self.preprocessors[0]._determine_transform_to_use()
|
Chain
|
python
|
pytransitions__transitions
|
transitions/extensions/states.py
|
{
"start": 9723,
"end": 9826
}
|
class ____(object):
"""Empty Python object which can be used to assign attributes to."""
|
VolatileObject
|
python
|
explosion__spaCy
|
spacy/lang/he/__init__.py
|
{
"start": 298,
"end": 391
}
|
class ____(Language):
lang = "he"
Defaults = HebrewDefaults
__all__ = ["Hebrew"]
|
Hebrew
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pep8_naming/N802.py
|
{
"start": 635,
"end": 716
}
|
class ____(Visitor):
def visit_Constant(self, node):
pass
|
ExtendsVisitor
|
python
|
qdrant__qdrant-client
|
tools/async_client_generator/transformers/function_def_transformer.py
|
{
"start": 52,
"end": 1109
}
|
class ____(ast.NodeTransformer):
def __init__(self, keep_sync: Optional[list[str]] = None):
self.keep_sync = keep_sync if keep_sync is not None else []
def _keep_sync(self, name: str) -> bool:
return name in self.keep_sync
def visit_FunctionDef(self, sync_node: ast.FunctionDef) -> ast.AST:
if self._keep_sync(sync_node.name):
return self.generic_visit(sync_node)
params: list = [
sync_node.name,
sync_node.args,
sync_node.body,
sync_node.decorator_list,
sync_node.returns,
sync_node.type_comment,
]
        if sys.version_info >= (3, 12):
            params.append(sync_node.type_params)
async_node = ast.AsyncFunctionDef(*params)
async_node.lineno = sync_node.lineno
async_node.col_offset = sync_node.col_offset
async_node.end_lineno = sync_node.end_lineno
async_node.end_col_offset = sync_node.end_col_offset
return self.generic_visit(async_node)
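# A minimal usage sketch of the transformer above; the masked class is referred to
# here as FunctionDefTransformer, per the label that follows, and the sample source
# plus the keep_sync entry are illustrative assumptions, not taken from the qdrant
# generator itself.
import ast

example_source = "def ping():\n    return 'pong'\n"
example_tree = ast.parse(example_source)
example_tree = FunctionDefTransformer(keep_sync=["main"]).visit(example_tree)
ast.fix_missing_locations(example_tree)
print(ast.unparse(example_tree))  # -> async def ping(): return 'pong'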
|
FunctionDefTransformer
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/ctl/cli_config.py
|
{
"start": 4337,
"end": 6172
}
|
class ____:
"""Class to keep information about command line argument."""
def __init__(
self,
flags=_UNSET,
help=_UNSET,
action=_UNSET,
default=_UNSET,
nargs=_UNSET,
type=_UNSET,
choices=_UNSET,
required=_UNSET,
metavar=_UNSET,
dest=_UNSET,
):
self.flags = flags
self.kwargs = {}
for k, v in locals().items():
if k not in ("self", "flags") and v is not _UNSET:
self.kwargs[k] = v
def add_to_parser(self, parser: argparse.ArgumentParser):
"""Add this argument to an ArgumentParser."""
if "metavar" in self.kwargs and "type" not in self.kwargs:
if self.kwargs["metavar"] == "DIRPATH":
def type(x):
return self._is_valid_directory(parser, x)
self.kwargs["type"] = type
parser.add_argument(*self.flags, **self.kwargs)
def _is_valid_directory(self, parser, arg):
if not os.path.isdir(arg):
parser.error(f"The directory '{arg}' does not exist!")
return arg
def positive_int(*, allow_zero):
"""Define a positive int type for an argument."""
def _check(value):
try:
value = int(value)
if allow_zero and value == 0:
return value
if value > 0:
return value
except ValueError:
pass
raise argparse.ArgumentTypeError(f"invalid positive int value: '{value}'")
return _check
def string_list_type(val):
"""Parse comma-separated list and returns list of string (strips whitespace)."""
return [x.strip() for x in val.split(",")]
def string_lower_type(val):
"""Lower arg."""
if not val:
return
return val.strip().lower()
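# A minimal sketch showing how the helpers above plug into argparse; the flag names
# and values are illustrative, and the wrapper class is referred to as Arg per the
# label that follows.
import argparse

example_parser = argparse.ArgumentParser()
example_parser.add_argument("--limit", type=positive_int(allow_zero=True), default=0)
example_parser.add_argument("--tags", type=string_list_type, default=[])
example_ns = example_parser.parse_args(["--limit", "3", "--tags", "a, b ,c"])
assert example_ns.limit == 3 and example_ns.tags == ["a", "b", "c"]

# The Arg wrapper plays the same role declaratively:
Arg(flags=("--output",), help="Output location", metavar="PATH").add_to_parser(example_parser)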
|
Arg
|
python
|
huggingface__transformers
|
src/transformers/models/esm/modeling_esmfold.py
|
{
"start": 66912,
"end": 72237
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
c_s = config.sequence_state_dim
c_z = config.pairwise_state_dim
self.pairwise_positional_embedding = EsmFoldRelativePosition(config)
self.blocks = nn.ModuleList([EsmFoldTriangularSelfAttentionBlock(config) for _ in range(config.num_blocks)])
self.recycle_bins = 15
self.recycle_s_norm = nn.LayerNorm(c_s)
self.recycle_z_norm = nn.LayerNorm(c_z)
self.recycle_disto = nn.Embedding(self.recycle_bins, c_z)
self.recycle_disto.weight[0].detach().zero_()
self.structure_module = EsmFoldStructureModule(config.structure_module)
self.trunk2sm_s = nn.Linear(c_s, config.structure_module.sequence_dim)
self.trunk2sm_z = nn.Linear(c_z, config.structure_module.pairwise_dim)
self.chunk_size = config.chunk_size
def set_chunk_size(self, chunk_size):
# This parameter means the axial attention will be computed
# in a chunked manner. This should make the memory used more or less O(L) instead of O(L^2).
        # It's equivalent to running a for loop over chunks of the dimension we're iterating over,
# where the chunk_size is the size of the chunks, so 128 would mean to parse 128-length chunks.
self.chunk_size = chunk_size
def forward(self, seq_feats, pair_feats, true_aa, residx, mask, no_recycles):
"""
Inputs:
            seq_feats: B x L x C tensor of sequence features
            pair_feats: B x L x L x C tensor of pair features
            residx: B x L long tensor giving the position in the sequence
            mask: B x L boolean tensor indicating valid residues
Output:
predicted_structure: B x L x (num_atoms_per_residue * 3) tensor wrapped in a Coordinates object
"""
device = seq_feats.device
s_s_0 = seq_feats
s_z_0 = pair_feats
if no_recycles is None:
no_recycles = self.config.max_recycles
else:
if no_recycles < 0:
raise ValueError("Number of recycles must not be negative.")
no_recycles += 1 # First 'recycle' is just the standard forward pass through the model.
def trunk_iter(s, z, residx, mask):
z = z + self.pairwise_positional_embedding(residx, mask=mask)
for block in self.blocks:
s, z = block(s, z, mask=mask, residue_index=residx, chunk_size=self.chunk_size)
return s, z
s_s = s_s_0
s_z = s_z_0
recycle_s = torch.zeros_like(s_s)
recycle_z = torch.zeros_like(s_z)
recycle_bins = torch.zeros(*s_z.shape[:-1], device=device, dtype=torch.int64)
for recycle_idx in range(no_recycles):
with ContextManagers([] if recycle_idx == no_recycles - 1 else [torch.no_grad()]):
# === Recycling ===
recycle_s = self.recycle_s_norm(recycle_s.detach()).to(device)
recycle_z = self.recycle_z_norm(recycle_z.detach()).to(device)
recycle_z += self.recycle_disto(recycle_bins.detach()).to(device)
s_s, s_z = trunk_iter(s_s_0 + recycle_s, s_z_0 + recycle_z, residx, mask)
# === Structure module ===
structure = self.structure_module(
{"single": self.trunk2sm_s(s_s), "pair": self.trunk2sm_z(s_z)},
true_aa,
mask.float(),
)
recycle_s = s_s
recycle_z = s_z
# Distogram needs the N, CA, C coordinates, and bin constants same as alphafold.
recycle_bins = EsmFoldingTrunk.distogram(
structure["positions"][-1][:, :, :3],
3.375,
21.375,
self.recycle_bins,
)
structure["s_s"] = s_s
structure["s_z"] = s_z
return structure
@staticmethod
def distogram(coords, min_bin, max_bin, num_bins):
# Coords are [... L x 3 x 3], where it's [N, CA, C] x 3 coordinates.
boundaries = torch.linspace(
min_bin,
max_bin,
num_bins - 1,
device=coords.device,
)
boundaries = boundaries**2
N, CA, C = [x.squeeze(-2) for x in coords.chunk(3, dim=-2)]
# Infer CB coordinates.
b = CA - N
c = C - CA
a = b.cross(c, dim=-1)
CB = -0.58273431 * a + 0.56802827 * b - 0.54067466 * c + CA
dists = (CB[..., None, :, :] - CB[..., :, None, :]).pow(2).sum(dim=-1, keepdims=True)
bins = torch.sum(dists > boundaries, dim=-1) # [..., L, L]
return bins
# TODO Add information to the docstring about any methods that convert to PDB format, or otherwise prepare
# the outputs for downstream use.
@auto_docstring(
custom_intro="""
ESMForProteinFolding is the HuggingFace port of the original ESMFold model. It consists of an ESM-2 "stem" followed
by a protein folding "head", although unlike most other output heads, this "head" is similar in size and runtime to
the rest of the model combined! It outputs a dictionary containing predicted structural information about the input
protein(s).
"""
)
|
EsmFoldingTrunk
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-gitbook/llama_index/readers/gitbook/gitbook_client.py
|
{
"start": 104,
"end": 3346
}
|
class ____:
"""
    Gitbook REST API client.
    Helper class to invoke the Gitbook REST API and parse the results.
Args:
api_token (str): Gitbook API Token.
api_url (str): Gitbook API Endpoint.
"""
def __init__(self, api_token: str, api_url: str = DEFAULT_GITBOOK_API_URL):
self.api_token = api_token
self.base_url = api_url or DEFAULT_GITBOOK_API_URL
self.headers = {
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
}
def _make_request(self, url: str) -> Dict:
"""Helper method to handle common HTTP GET requests."""
try:
response = requests.get(url, headers=self.headers)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
return self._handle_error(e)
def get_space(self, space_id) -> Dict:
"""Gets information for a specific space."""
url = f"{self.base_url}/spaces/{space_id}"
return self._make_request(url)
def list_pages(self, space_id) -> List[Dict]:
"""Gets all pages in a specific space."""
space_info = self.get_space(space_id)
url = f"{self.base_url}/spaces/{space_id}/content"
space = self._make_request(url)
pages_info = []
for page in space.get("pages"):
GitbookClient._extract_page_info(
pages_info, page, space_info.get("title", "ROOT")
)
return pages_info
def get_page(self, space_id, page_id) -> Dict:
"""Gets the details of a specific page."""
url = (
f"{self.base_url}/spaces/{space_id}/content/page/{page_id}?format=markdown"
)
return self._make_request(url)
def get_page_markdown(self, space_id, page_id) -> str:
"""Gets the content of a specific page in Markdown format."""
page_content = self.get_page(space_id, page_id)
return page_content.get("markdown")
def _handle_error(self, response):
"""Handles HTTP errors."""
if isinstance(response, requests.exceptions.HTTPError):
error_message = f"Error: {response.response.status_code} Client Error: {response.response.reason}"
else:
error_message = f"Error: {response}"
raise Exception(error_message)
@staticmethod
def _extract_page_info(
pages: list, page: dict, prev_title: str = "", parent: str = ""
):
pageType = page.get("type", "")
title = prev_title + " > " + page.get("title")
id = page.get("id")
if pageType == "document":
pages.append(
{
"id": id,
"title": title,
"path": page.get("path"),
"description": page.get("description", ""),
"parent": parent,
}
)
for _page in page.get("pages"):
GitbookClient._extract_page_info(pages, _page, title, id)
elif pageType == "group":
for _page in page.get("pages"):
GitbookClient._extract_page_info(pages, _page, title, id)
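# A minimal usage sketch of the client above (labeled GitbookClient below); the API
# token and space id are placeholders, not real values, and these calls perform live
# HTTP requests against the Gitbook API.
example_client = GitbookClient(api_token="<GITBOOK_API_TOKEN>")
for example_page in example_client.list_pages("<space_id>"):
    example_markdown = example_client.get_page_markdown("<space_id>", example_page["id"])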
|
GitbookClient
|
python
|
openai__openai-python
|
src/openai/types/responses/response_input_item_param.py
|
{
"start": 6954,
"end": 7383
}
|
class ____(TypedDict, total=False):
commands: Required[SequenceNotStr[str]]
"""Ordered shell commands for the execution environment to run."""
max_output_length: Optional[int]
"""
Maximum number of UTF-8 characters to capture from combined stdout and stderr
output.
"""
timeout_ms: Optional[int]
"""Maximum wall-clock time in milliseconds to allow the shell commands to run."""
|
ShellCallAction
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 24493,
"end": 24826
}
|
class ____(admin.ModelAdmin):
def get_urls(self):
# Disable change_view, but leave other urls untouched
urlpatterns = super().get_urls()
return [p for p in urlpatterns if p.name and not p.name.endswith("_change")]
@admin.display
def callable_on_unknown(obj):
return obj.unknown
|
UnchangeableObjectAdmin
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/operators/control_flow_test.py
|
{
"start": 2234,
"end": 15990
}
|
class ____(testing.AutoGraphTestCase):
def test_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
constant_op.constant([1, 2, 3, 4]),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, (1234,))
def test_range_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'iterate_names': 'i'})
self.assertEqual(s, (1234,))
self.assertOpCreated('StatelessWhile')
def test_range_tensor_explicit_limit_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(-17, -3, 5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'iterate_names': 'i'})
self.assertEqual(s, (-171207,))
self.assertOpCreated('StatelessWhile')
def test_range_tensor_explicit_limit_negative_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(17, 3, -5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'iterate_names': 'i'})
self.assertEqual(s, (171207,))
self.assertOpCreated('StatelessWhile')
def test_range_tensor_random_delta(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
random_one = random_ops.random_uniform((), 1, 2, dtype=dtypes.int32)
control_flow.for_stmt(
math_ops.range(0, 5, random_one),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'iterate_names': 'i'})
self.assertEqual(s, (1234,))
self.assertOpCreated('StatelessWhile')
def test_range_tensor_random_negative_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
random_neg_five = random_ops.random_uniform((), -5, -4, dtype=dtypes.int32)
control_flow.for_stmt(
math_ops.range(17, 3, random_neg_five),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'iterate_names': 'i'})
self.assertEqual(s, (171207,))
self.assertOpCreated('StatelessWhile')
def test_tensor_with_extra_test_object_vars(self):
class MutableObject:
field_1 = constant_op.constant(0, dtype=dtypes.int32)
field_2 = constant_op.constant(1, dtype=dtypes.int32)
state = MutableObject()
def body(i):
state.field_1 += i
state.field_2 *= i
def get_state():
return state.field_1, state.field_2
def set_state(loop_vars):
state.field_1, state.field_2 = loop_vars
control_flow.for_stmt(
iter_=constant_op.constant([1, 2, 3, 4]),
body=body,
extra_test=lambda: state.field_1 < 6,
get_state=get_state,
set_state=set_state,
symbol_names=('state.field_1', 'state.field_2'),
opts={})
self.assertEqual((state.field_1, state.field_2), (6, 6))
self.assertOpCreated('StatelessWhile')
def test_python(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
range(5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, 1234)
self.assertNoOpsCreated()
def test_python_generator_with_extra_test(self):
def new_generator():
for i in range(1, 5):
yield i
gen = new_generator()
def run_loop():
s = 0
c = 0
def body(i):
nonlocal s, c
s = s * 10 + i
c += 1
control_flow.for_stmt(
gen,
extra_test=lambda: c == 0, # Break after first iteration
body=body,
get_state=None,
set_state=None,
symbol_names=('s', 'c'),
opts={})
return s, c
self.assertEqual(run_loop(), (1, 1))
self.assertEqual(run_loop(), (2, 1))
self.assertEqual(run_loop(), (3, 1))
self.assertEqual(next(gen), 4)
self.assertNoOpsCreated()
def test_python_generator_with_extra_test_no_iterations(self):
def new_generator():
for i in range(5):
yield i
gen = new_generator()
def run_loop():
s = 0
def body(i):
nonlocal s
s = s * 10 + i
control_flow.for_stmt(
gen,
extra_test=lambda: False, # Break before loop
body=body,
get_state=None,
set_state=None,
symbol_names=('s',),
opts={})
return s
self.assertEqual(run_loop(), 0)
self.assertEqual(run_loop(), 0)
self.assertEqual(next(gen), 0)
self.assertNoOpsCreated()
def test_tf_dataset(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, (1234,))
self.assertOpCreated('ScanDataset')
def test_dataset_with_extra_test(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: s < 3,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, (12,))
self.assertOpCreated('ScanDataset')
def test_dataset_with_extra_test_collection_vars(self):
def body(i):
nonlocal s
l[0] += i
s += i
def set_state(loop_vars):
nonlocal s
l[0], s = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
l = [constant_op.constant(0, dtype=dtypes.int64)]
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: s < 3,
body=body,
get_state=lambda: (l[0], s),
set_state=set_state,
symbol_names=('l[0]', 's'),
opts={})
self.assertEqual((l[0], s), (3, 3))
self.assertOpCreated('ScanDataset')
def test_dataset_with_extra_test_iteration_limiting(self):
def body(it):
nonlocal i
with ops.control_dependencies((control_flow_assert.Assert(i < 3, (i,)),)):
i = it
def set_state(loop_vars):
nonlocal i
i, = loop_vars
i = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: i < 3,
body=body,
get_state=lambda: (i,),
set_state=set_state,
symbol_names=('i',),
opts={})
self.assertEqual(i, (3,))
self.assertOpCreated('ScanDataset')
def test_tf_dataset_no_loop_vars(self):
def body(i):
v.assign(v.read_value() * 10 + i)
v = self.variable('v', 0, dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
self.assertEqual(v.read_value(), 1234)
self.assertOpCreated('ScanDataset')
def test_tf_iterator(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, 1234)
self.assertOpCreated('IteratorGetNextAsOptional')
def test_tf_iterator_shape_invariants(self):
def body(i):
nonlocal s
s = array_ops.concat([s, [i]], 0)
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant([], dtype=dtypes.int64)
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'shape_invariants': [(s, tensor_shape.TensorShape([None]))]})
self.assertAllEqual(s, [0, 1, 2, 3, 4])
self.assertOpCreated('IteratorGetNextAsOptional')
def test_tf_iterator_shape_invariants_with_nested_structures(self):
def body(i):
nonlocal s
nonlocal t
s = array_ops.concat([s, [i]], 0)
t = Test(var=t.var + 1)
def set_state(loop_vars):
nonlocal s
nonlocal t
s, t = loop_vars
s = constant_op.constant([], dtype=dtypes.int64)
Test = collections.namedtuple('Test', ['var'])
t = Test(var=constant_op.constant([0], dtype=dtypes.int64))
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (s, t),
set_state=set_state,
symbol_names=('s', 't'),
opts={'shape_invariants': [(s, tensor_shape.TensorShape([None]))]})
self.assertAllEqual(s, [0, 1, 2, 3, 4])
self.assertEqual(t.var, [5])
self.assertOpCreated('IteratorGetNextAsOptional')
def test_tf_iterator_no_loop_vars(self):
def body(i):
v.assign(v.read_value() * 10 + i)
v = self.variable('v', 0, dtypes.int64)
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
self.assertEqual(v.read_value(), 1234)
self.assertOpCreated('IteratorGetNextAsOptional')
def test_tf_ragged_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i[0]
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
ragged_factory_ops.constant([[1], [2, 4], [3]]),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, (123,))
self.assertOpCreated('StatelessWhile')
def test_tf_ragged_tensor_higher_dimensional(self):
def body(i):
nonlocal s
s = s * 10 + i[0][0]
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
ragged_3d = [
[[1], [1, 1], [1]],
[[2], [2]],
]
control_flow.for_stmt(
ragged_factory_ops.constant(ragged_3d),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, (12,))
self.assertOpCreated('StatelessWhile')
def test_tf_ragged_tensor_no_loop_vars(self):
v = self.variable('v', 0, dtypes.int32)
def body(i):
v.assign(v.read_value() * 10 + i[0])
control_flow.for_stmt(
ragged_factory_ops.constant([[1], [2, 4], [3]]),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
# Note: 123 = ((0*10 + 1)*10+2)*10+3 (first element of each row).
self.assertEqual(v.read_value(), 123)
self.assertOpCreated('While')
def _basic_loop(self, init_value, body_fn):
def body(i):
nonlocal s
s = body_fn(i, s)
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = init_value
control_flow.for_stmt(
constant_op.constant([1, 2, 3, 4]),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
return s
def test_tensor_illegal_input(self):
with self.assertRaisesRegex(ValueError, '\'s\' is not allowed to be None'):
self._basic_loop(None, lambda i, s: s)
with self.assertRaisesRegex(ValueError, '\'s\' must be defined'):
self._basic_loop(variable_operators.Undefined(''), lambda i, s: s)
def test_tensor_none_output(self):
with self.assertRaisesRegex(ValueError, '\'s\' is None at the end'):
self._basic_loop(0, lambda i, s: None)
def test_tensor_dtype_change(self):
with self.assertRaisesRegex(TypeError, '\'s\'.* dtype float32 after'):
self._basic_loop(0, lambda i, s: 1.0)
def test_tensor_shape_change(self):
with self.assertRaisesRegex(ValueError, r'\'s\'.* shape \(1,\) after'):
self._basic_loop(0, lambda i, s: np.array([1], dtype=np.int32))
|
ForLoopTest
|
python
|
getsentry__sentry
|
tests/apidocs/endpoints/releases/test_project_release_file_details.py
|
{
"start": 136,
"end": 1514
}
|
class ____(APIDocsTestCase):
def setUp(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = self.create_release(project=project, version="1")
file1 = self.create_file(
name="blah.js",
size=42,
type="release.file",
headers={"Content-Type": "application/json"},
checksum="dc1e3f3e411979d336c3057cce64294f3420f93a",
)
releasefile = self.create_release_file(
file=file1, release_id=release.id, name="http://example.com/blah.js"
)
self.url = reverse(
"sentry-api-0-project-release-file-details",
kwargs={
"project_id_or_slug": project.slug,
"organization_id_or_slug": project.organization.slug,
"version": release.version,
"file_id": releasefile.id,
},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_put(self) -> None:
data = {"name": "newfilename.js"}
response = self.client.put(self.url, data)
request = RequestFactory().put(self.url, data)
self.validate_schema(request, response)
|
ProjectReleaseFileDetailsDocsTest
|
python
|
huggingface__transformers
|
tests/models/qwen3/test_modeling_qwen3.py
|
{
"start": 1420,
"end": 1979
}
|
class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Qwen3ModelTester
# TODO (ydshieh): Check this. See https://app.circleci.com/pipelines/github/huggingface/transformers/79245/workflows/9490ef58-79c2-410d-8f51-e3495156cf9c/jobs/1012146
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
return True
@require_torch
|
Qwen3ModelTest
|
python
|
mlflow__mlflow
|
dev/clint/tests/rules/test_markdown_link.py
|
{
"start": 480,
"end": 1077
}
|
class ____:
"""
Class with [another markdown link](https://test.com).
"""
# Good
def function_with_rest_link():
"""
This function has a `reST link <https://example.com>`_.
"""
'''
config = Config(select={MarkdownLink.name})
violations = lint_file(Path("test.py"), code, config, index_path)
assert len(violations) == 3
assert all(isinstance(v.rule, MarkdownLink) for v in violations)
assert violations[0].range == Range(Position(3, 4))
assert violations[1].range == Range(Position(8, 4))
assert violations[2].range == Range(Position(13, 4))
|
MyClass
|
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 65741,
"end": 66142
}
|
class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_status(204)
self.finish()
def test_204_headers(self):
response = self.fetch("/")
self.assertEqual(response.code, 204)
self.assertNotIn("Content-Length", response.headers)
self.assertNotIn("Transfer-Encoding", response.headers)
|
Header204Test
|
python
|
vyperlang__vyper
|
vyper/builtins/functions.py
|
{
"start": 88584,
"end": 88702
}
|
class ____(_MinMaxValue):
_id = "min_value"
def _eval(self, type_):
return type_.ast_bounds[0]
|
MinValue
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_slugs.py
|
{
"start": 2280,
"end": 2808
}
|
class ____(util.MdCase):
"""Test GitHub Flavored Markdown style slugs."""
extension = ['markdown.extensions.toc']
extension_configs = {
'markdown.extensions.toc': {
"slugify": slugs.slugify(case="lower-ascii")
}
}
def test_slug(self):
"""Test the slug output."""
self.check_markdown(
r'# Testing GFM unicode-slugs_headers ±♠Ωℑ',
r'<h1 id="testing-gfm-unicode-slugs_headers-Ωℑ">Testing GFM unicode-slugs_headers ±♠Ωℑ</h1>'
)
|
TestGFM
|
python
|
gevent__gevent
|
src/greentest/3.10/test_wsgiref.py
|
{
"start": 794,
"end": 1181
}
|
class ____(WSGIServer):
"""Non-socket HTTP server"""
def __init__(self, server_address, RequestHandlerClass):
BaseServer.__init__(self, server_address, RequestHandlerClass)
self.server_bind()
def server_bind(self):
host, port = self.server_address
self.server_name = host
self.server_port = port
self.setup_environ()
|
MockServer
|
python
|
celery__celery
|
t/unit/events/test_state.py
|
{
"start": 1180,
"end": 1389
}
|
class ____(replay):
def setup(self):
self.events = [
Event('worker-online', hostname='utest1'),
Event('worker-offline', hostname='utest1'),
]
|
ev_worker_online_offline
|
python
|
xlwings__xlwings
|
xlwings/_xlmac.py
|
{
"start": 43780,
"end": 44288
}
|
class ____(base_classes.PageSetup):
def __init__(self, parent, xl):
self.parent = parent
self.xl = xl
@property
def api(self):
return self.xl
@property
def print_area(self):
value = self.xl.print_area.get()
if value == kw.missing_value:
return None
else:
return self.xl.print_area.get()
@print_area.setter
def print_area(self, value):
self.xl.print_area.set("" if value is None else value)
|
PageSetup
|
python
|
pytorch__pytorch
|
test/dynamo/test_modules.py
|
{
"start": 1224,
"end": 1487
}
|
class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear1 = torch.nn.Linear(10, 10)
self.scale = torch.randn(1, 10)
def forward(self, x):
return F.relu(self.linear1(x)) * self.scale
|
BasicModule
|
python
|
readthedocs__readthedocs.org
|
readthedocs/oauth/migrations/0016_deprecate_old_vcs.py
|
{
"start": 180,
"end": 1030
}
|
class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("oauth", "0015_increase_avatar_url_length"),
]
operations = [
migrations.AlterField(
model_name="remoterepository",
name="clone_url",
field=models.URLField(
blank=True,
max_length=512,
validators=[
django.core.validators.URLValidator(schemes=["http", "https", "ssh", "git"])
],
verbose_name="Repository clone URL",
),
),
migrations.AlterField(
model_name="remoterepository",
name="vcs",
field=models.CharField(
blank=True, choices=[("git", "Git")], max_length=200, verbose_name="vcs"
),
),
]
|
Migration
|
python
|
pytorch__pytorch
|
torch/cuda/_sanitizer.py
|
{
"start": 7408,
"end": 12096
}
|
class ____:
def __init__(self) -> None:
self.current_sync_states: dict[StreamId, dict[StreamId, SeqNum]] = {}
self.recorded_sync_states: dict[EventId, dict[StreamId, SeqNum]] = {}
self.host_sync_state: dict[StreamId, SeqNum] = {}
self.create_stream(DEFAULT_STREAM_ID)
def _ensure_stream_exists(self, stream: StreamId) -> None:
if stream not in self.current_sync_states:
logger.info(
"Found Stream with id: %s, but no matching stream "
"creation in the trace. Backfilling the trace now. "
"Perhaps the sanitizer was enabled after some torch operations?",
stream,
)
self.create_stream(stream)
def _ensure_event_exists(self, event: EventId) -> None:
if event not in self.recorded_sync_states:
logger.info(
"Found Event with id: %s, but no matching event "
"creation in the trace. Backfilling the trace now. "
"Perhaps the sanitizer was enabled after some torch operations?",
event,
)
self.create_event(event)
def _ensure_event_does_not_exist(self, event: EventId) -> None:
if event in self.recorded_sync_states:
logger.info(
"Found duplicate event creation in the trace for event with "
"id: %s. Assuming the trace for event deletion wasn't caught "
"and backfilling it now. "
"Perhaps the sanitizer was enabled after some torch operations?",
event,
)
self.delete_event(event)
def create_stream(self, stream: StreamId) -> None:
if stream in self.current_sync_states:
logger.info(
"Found duplicate Stream creation in the trace for Stream with "
"id: %s. PyTorch Streams are only created once, so this "
"trace entry is ignored.",
stream,
)
else:
self.host_sync_state[stream] = 0
self.current_sync_states[stream] = self.host_sync_state.copy()
def create_event(self, event: EventId) -> None:
self._ensure_event_does_not_exist(event)
self.recorded_sync_states[event] = {}
def delete_event(self, event: EventId) -> None:
self._ensure_event_exists(event)
del self.recorded_sync_states[event]
def update_seq_num(self, stream: StreamId, seq_num: SeqNum) -> None:
self._ensure_stream_exists(stream)
self.current_sync_states[stream][stream] = seq_num
def record_state(self, event: EventId, stream: StreamId) -> None:
self._ensure_event_exists(event)
self._ensure_stream_exists(stream)
self.recorded_sync_states[event] = self.current_sync_states[stream].copy()
def _state_wait_for_other(
self, state: dict[StreamId, SeqNum], other: dict[StreamId, SeqNum]
) -> None:
for stream, seq_num in other.items():
state[stream] = max(state.get(stream, -1), seq_num)
def stream_wait_for_event(self, stream: StreamId, event: EventId) -> None:
self._ensure_stream_exists(stream)
self._ensure_event_exists(event)
self._state_wait_for_other(
self.current_sync_states[stream], self.recorded_sync_states[event]
)
def all_streams_wait_for_event(self, event: EventId) -> None:
self._ensure_event_exists(event)
for stream in self.current_sync_states:
self.stream_wait_for_event(stream, event)
self._state_wait_for_other(
self.host_sync_state, self.recorded_sync_states[event]
)
def all_streams_wait_for_stream(self, stream: StreamId) -> None:
self._ensure_stream_exists(stream)
for state in self.current_sync_states.values():
self._state_wait_for_other(state, self.current_sync_states[stream])
self._state_wait_for_other(
self.host_sync_state, self.current_sync_states[stream]
)
def sync_all_streams(self) -> None:
for stream, state in self.current_sync_states.items():
self.host_sync_state[stream] = state[stream]
for state in self.current_sync_states.values():
self._state_wait_for_other(state, self.host_sync_state)
def is_ordered_after(
self, current_stream: StreamId, seq_num: SeqNum, other_stream: StreamId
) -> bool:
self._ensure_stream_exists(current_stream)
self._ensure_stream_exists(other_stream)
return seq_num <= self.current_sync_states[current_stream].get(other_stream, -1)
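# A minimal sketch of the happens-before bookkeeping above (the class is labeled
# StreamSynchronizations below and lives in torch/cuda/_sanitizer.py, so this assumes
# a PyTorch installation that ships that module); the integer stream, event, and
# sequence ids are arbitrary illustrative values.
from torch.cuda._sanitizer import StreamSynchronizations

example_sync = StreamSynchronizations()
example_sync.create_stream(1)
example_sync.update_seq_num(1, 5)            # stream 1 has executed kernels up to seq 5
example_sync.create_event(100)
example_sync.record_state(100, 1)            # event 100 snapshots stream 1's current state
example_sync.create_stream(2)
example_sync.stream_wait_for_event(2, 100)   # stream 2 now happens-after seq 5 on stream 1
assert example_sync.is_ordered_after(2, 5, 1)      # seq 5 on stream 1 is visible to stream 2
assert not example_sync.is_ordered_after(2, 6, 1)  # seq 6 was not captured by the event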
|
StreamSynchronizations
|
python
|
ray-project__ray
|
python/ray/data/_internal/planner/plan_expression/expression_visitors.py
|
{
"start": 3348,
"end": 7413
}
|
class ____(_ExprVisitor[Expr]):
"""Visitor rebinding column references in ``Expression``s.
This visitor traverses given ``Expression`` trees and substitutes column references
according to a provided substitution map.
"""
def __init__(self, column_ref_substitutions: Dict[str, Expr]):
"""Initialize with a column substitution map.
Args:
column_ref_substitutions: Mapping from column names to replacement expressions.
"""
self._col_ref_substitutions = column_ref_substitutions
def visit_column(self, expr: ColumnExpr) -> Expr:
"""Visit a column expression and substitute it.
Args:
expr: The column expression.
Returns:
The substituted expression or the original if no substitution exists.
"""
substitution = self._col_ref_substitutions.get(expr.name)
return substitution if substitution is not None else expr
def visit_literal(self, expr: LiteralExpr) -> Expr:
"""Visit a literal expression (no rewriting needed).
Args:
expr: The literal expression.
Returns:
The original literal expression.
"""
return expr
def visit_binary(self, expr: BinaryExpr) -> Expr:
"""Visit a binary expression and rewrite its operands.
Args:
expr: The binary expression.
Returns:
A new binary expression with rewritten operands.
"""
return BinaryExpr(
expr.op,
self.visit(expr.left),
self.visit(expr.right),
)
def visit_unary(self, expr: UnaryExpr) -> Expr:
"""Visit a unary expression and rewrite its operand.
Args:
expr: The unary expression.
Returns:
A new unary expression with rewritten operand.
"""
return UnaryExpr(expr.op, self.visit(expr.operand))
def visit_udf(self, expr: UDFExpr) -> Expr:
"""Visit a UDF expression and rewrite its arguments.
Args:
expr: The UDF expression.
Returns:
A new UDF expression with rewritten arguments.
"""
new_args = [self.visit(arg) for arg in expr.args]
new_kwargs = {key: self.visit(value) for key, value in expr.kwargs.items()}
return UDFExpr(
fn=expr.fn, data_type=expr.data_type, args=new_args, kwargs=new_kwargs
)
def visit_alias(self, expr: AliasExpr) -> Expr:
"""Visit an alias expression and rewrite its inner expression.
Args:
expr: The alias expression.
Returns:
A new alias expression with rewritten inner expression and preserved name.
"""
# We unalias returned expression to avoid nested aliasing
visited = self.visit(expr.expr)._unalias()
# NOTE: We're carrying over all of the other aspects of the alias
        # only replacing the inner expression
return replace(
expr,
expr=visited,
# Alias expression will remain a renaming one (ie replacing source column)
# so long as it's referencing another column (and not otherwise)
#
# TODO replace w/ standalone rename expr
_is_rename=expr._is_rename and _is_col_expr(visited),
)
def visit_download(self, expr: "Expr") -> Expr:
"""Visit a download expression (no rewriting needed).
Args:
expr: The download expression.
Returns:
The original download expression.
"""
return expr
def visit_star(self, expr: StarExpr) -> Expr:
"""Visit a star expression (no rewriting needed).
Args:
expr: The star expression.
Returns:
The original star expression.
"""
return expr
def _is_col_expr(expr: Expr) -> bool:
return isinstance(expr, ColumnExpr) or (
isinstance(expr, AliasExpr) and isinstance(expr.expr, ColumnExpr)
)
|
_ColumnSubstitutionVisitor
|
python
|
great-expectations__great_expectations
|
contrib/experimental/great_expectations_experimental/expectations/expect_queried_slowly_changing_table_to_have_no_gaps.py
|
{
"start": 526,
"end": 8293
}
|
class ____(QueryExpectation):
"""Expect Slowly changing table type II to have no gaps between the 'end date' of each row, and the next 'start date' in the next row.
Args:
template_dict: dict with the following keys: \
primary_key (primary key column name or multiple columns, comma separated), \
open_date_column (name of the column representing open date), \
            close_date_column (name of the column representing close date)
        threshold: an optional parameter - default is zero. \
            If the ratio of "gaps" to total table rows is higher than the threshold, the expectation fails.
"""
metric_dependencies = ("query.template_values",)
query = """
SELECT SUM(CASE WHEN {close_date_column} != COALESCE(next_start_date, {close_date_column}) THEN 1 ELSE 0 END),
COUNT(1)
FROM(SELECT {primary_key}, {close_date_column}, LEAD({open_date_column}) OVER(PARTITION BY {primary_key} ORDER BY
{open_date_column}) AS next_start_date
FROM {batch})
"""
success_keys = (
"template_dict",
"threshold",
"query",
)
domain_keys = (
"template_dict",
"query",
"batch_id",
"row_condition",
"condition_parser",
)
default_kwarg_values = {
"result_format": "BASIC",
"catch_exceptions": False,
"meta": None,
"threshold": 0,
"query": query,
}
library_metadata = {"tags": ["query-based"], "contributors": ["@itaise"]}
def _validate(
self,
metrics: dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
) -> Union[ExpectationValidationResult, dict]:
configuration = self.configuration
threshold = configuration["kwargs"].get("threshold")
if not threshold:
threshold = self._get_default_value("threshold")
metrics = convert_to_json_serializable(data=metrics)
holes_count: int
total_count: int
holes_count, total_count = list(metrics.get("query.template_values")[0].values())
error_rate = float(holes_count) / total_count
return {
"success": error_rate <= threshold,
"result": {
"threshold": threshold,
"holes_count": holes_count,
"total_count": total_count,
},
}
today = datetime(year=2022, month=8, day=10)
one_day_ago = today - timedelta(days=1)
two_day_ago = today - timedelta(days=2)
three_day_ago = today - timedelta(days=3)
four_day_ago = today - timedelta(days=4)
five_day_ago = today - timedelta(days=5)
six_day_ago = today - timedelta(days=6)
seven_day_ago = today - timedelta(days=7)
eight_day_ago = today - timedelta(days=8)
nine_day_ago = today - timedelta(days=9)
ten_day_ago = today - timedelta(days=10)
examples = [
{
"data": [
{
"data": {
"msid": [
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
],
"uuid": [
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
"aaa",
],
"col1": [1, 2, 2, 3, 4, 5, 6, 7, 8],
"col2": ["a", "a", "b", "b", "a", "a", "a", "a", "a"],
"start_date": [
ten_day_ago,
nine_day_ago,
eight_day_ago,
seven_day_ago,
six_day_ago,
five_day_ago,
four_day_ago,
three_day_ago,
two_day_ago,
],
"end_date": [
nine_day_ago,
eight_day_ago,
seven_day_ago,
six_day_ago,
five_day_ago,
four_day_ago,
three_day_ago,
two_day_ago,
one_day_ago,
],
"start_date_2": [
ten_day_ago,
seven_day_ago,
six_day_ago,
five_day_ago,
four_day_ago,
three_day_ago,
two_day_ago,
two_day_ago,
two_day_ago,
],
"end_date_2": [
nine_day_ago,
six_day_ago,
six_day_ago,
five_day_ago,
four_day_ago,
three_day_ago,
two_day_ago,
two_day_ago,
two_day_ago,
],
},
},
],
"suppress_test_for": [
"mysql",
"mssql",
"postgresql",
"bigquery",
"snowflake",
],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"template_dict": {
"primary_key": "msid,uuid",
"open_date_column": "start_date",
"close_date_column": "end_date",
}
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"template_dict": {
"primary_key": "msid,uuid",
"open_date_column": "start_date_2",
"close_date_column": "end_date_2",
},
"threshold": 0.1,
},
"out": {"success": False},
},
],
},
]
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> None:
super().validate_configuration(configuration)
threshold = configuration["kwargs"].get("threshold")
if not threshold:
threshold = self._get_default_value("threshold")
try:
assert isinstance(threshold, (int, float))
assert threshold >= 0
assert threshold <= 1
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
if __name__ == "__main__":
ExpectQueriedSlowlyChangingTableToHaveNoGaps().print_diagnostic_checklist()
|
ExpectQueriedSlowlyChangingTableToHaveNoGaps
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/hooks/test_glue.py
|
{
"start": 1334,
"end": 23485
}
|
class ____:
def setup_method(self):
self.some_aws_region = "us-west-2"
@mock_aws
@pytest.mark.parametrize("role_path", ["/", "/custom-path/"])
def test_get_iam_execution_role(self, role_path):
expected_role = "my_test_role"
boto3.client("iam").create_role(
Path=role_path,
RoleName=expected_role,
AssumeRolePolicyDocument=json.dumps(
{
"Version": "2012-10-17",
"Statement": {
"Effect": "Allow",
"Principal": {"Service": "glue.amazonaws.com"},
"Action": "sts:AssumeRole",
},
}
),
)
hook = GlueJobHook(
aws_conn_id=None,
job_name="aws_test_glue_job",
s3_bucket="some_bucket",
iam_role_name=expected_role,
)
iam_role = hook.get_iam_execution_role()
assert iam_role is not None
assert "Role" in iam_role
assert "Arn" in iam_role["Role"]
assert iam_role["Role"]["Arn"] == f"arn:aws:iam::123456789012:role{role_path}{expected_role}"
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(GlueJobHook, "conn")
def test_init_iam_role_value_error(self, mock_conn, mock_get_iam_execution_role):
mock_get_iam_execution_role.return_value = mock.MagicMock(
Role={"RoleName": "my_test_role_name", "RoleArn": "my_test_role"}
)
with pytest.raises(ValueError, match="Cannot set iam_role_arn and iam_role_name simultaneously"):
GlueJobHook(
job_name="aws_test_glue_job",
desc="This is test case job from Airflow",
s3_bucket="some-bucket",
iam_role_name="my_test_role_name",
iam_role_arn="my_test_role",
)
@mock.patch.object(AwsBaseHook, "conn")
def test_has_job_exists(self, mock_conn):
job_name = "aws_test_glue_job"
mock_conn.get_job.return_value = {"Job": {"Name": job_name}}
hook = GlueJobHook(aws_conn_id=None, job_name=job_name, s3_bucket="some_bucket")
result = hook.has_job(job_name)
assert result is True
mock_conn.get_job.assert_called_once_with(JobName=hook.job_name)
@mock.patch.object(AwsBaseHook, "conn")
def test_has_job_job_doesnt_exists(self, mock_conn):
class JobNotFoundException(Exception):
pass
mock_conn.exceptions.EntityNotFoundException = JobNotFoundException
mock_conn.get_job.side_effect = JobNotFoundException()
job_name = "aws_test_glue_job"
hook = GlueJobHook(aws_conn_id=None, job_name=job_name, s3_bucket="some_bucket")
result = hook.has_job(job_name)
assert result is False
mock_conn.get_job.assert_called_once_with(JobName=job_name)
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(AwsBaseHook, "conn")
def test_role_arn_has_job_exists(self, mock_conn, mock_get_iam_execution_role):
"""
Calls 'create_or_update_glue_job' with no existing job.
Should create a new job.
"""
class JobNotFoundException(Exception):
pass
expected_job_name = "aws_test_glue_job"
job_description = "This is test case job from Airflow"
role_name = "my_test_role"
role_name_arn = "test_role"
some_s3_bucket = "bucket"
mock_conn.exceptions.EntityNotFoundException = JobNotFoundException
mock_conn.get_job.side_effect = JobNotFoundException()
mock_get_iam_execution_role.return_value = {"Role": {"RoleName": role_name, "Arn": role_name_arn}}
hook = GlueJobHook(
s3_bucket=some_s3_bucket,
job_name=expected_job_name,
desc=job_description,
concurrent_run_limit=2,
retry_limit=3,
num_of_dpus=5,
iam_role_arn=role_name_arn,
create_job_kwargs={"Command": {}},
region_name=self.some_aws_region,
update_config=True,
)
result = hook.create_or_update_glue_job()
mock_conn.get_job.assert_called_once_with(JobName=expected_job_name)
mock_conn.create_job.assert_called_once_with(
Command={},
Description=job_description,
ExecutionProperty={"MaxConcurrentRuns": 2},
LogUri=f"s3://{some_s3_bucket}/logs/glue-logs/{expected_job_name}",
MaxCapacity=5,
MaxRetries=3,
Name=expected_job_name,
Role=role_name_arn,
)
mock_conn.update_job.assert_not_called()
assert result == expected_job_name
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(GlueJobHook, "conn")
def test_create_or_update_glue_job_create_new_job(self, mock_conn, mock_get_iam_execution_role):
"""
Calls 'create_or_update_glue_job' with no existing job.
Should create a new job.
"""
class JobNotFoundException(Exception):
pass
expected_job_name = "aws_test_glue_job"
job_description = "This is test case job from Airflow"
role_name = "my_test_role"
role_name_arn = "test_role"
some_s3_bucket = "bucket"
mock_conn.exceptions.EntityNotFoundException = JobNotFoundException
mock_conn.get_job.side_effect = JobNotFoundException()
mock_get_iam_execution_role.return_value = {"Role": {"RoleName": role_name, "Arn": role_name_arn}}
hook = GlueJobHook(
s3_bucket=some_s3_bucket,
job_name=expected_job_name,
desc=job_description,
concurrent_run_limit=2,
retry_limit=3,
num_of_dpus=5,
iam_role_name=role_name,
create_job_kwargs={"Command": {}},
region_name=self.some_aws_region,
update_config=True,
)
result = hook.create_or_update_glue_job()
mock_conn.get_job.assert_called_once_with(JobName=expected_job_name)
mock_conn.create_job.assert_called_once_with(
Command={},
Description=job_description,
ExecutionProperty={"MaxConcurrentRuns": 2},
LogUri=f"s3://{some_s3_bucket}/logs/glue-logs/{expected_job_name}",
MaxCapacity=5,
MaxRetries=3,
Name=expected_job_name,
Role=role_name_arn,
)
mock_conn.update_job.assert_not_called()
assert result == expected_job_name
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(GlueJobHook, "conn")
def test_create_or_update_glue_job_create_new_job_without_s3_bucket(
self, mock_conn, mock_get_iam_execution_role
):
"""
Calls 'create_or_update_glue_job' with no existing job.
Should create a new job.
"""
class JobNotFoundException(Exception):
pass
expected_job_name = "aws_test_glue_job"
job_description = "This is test case job from Airflow"
role_name = "my_test_role"
role_name_arn = "test_role"
mock_conn.exceptions.EntityNotFoundException = JobNotFoundException
mock_conn.get_job.side_effect = JobNotFoundException()
mock_get_iam_execution_role.return_value = {"Role": {"RoleName": role_name, "Arn": role_name_arn}}
hook = GlueJobHook(
job_name=expected_job_name,
desc=job_description,
concurrent_run_limit=2,
retry_limit=3,
num_of_dpus=5,
iam_role_name=role_name,
create_job_kwargs={"Command": {}},
region_name=self.some_aws_region,
update_config=True,
)
result = hook.create_or_update_glue_job()
mock_conn.get_job.assert_called_once_with(JobName=expected_job_name)
mock_conn.create_job.assert_called_once_with(
Command={},
Description=job_description,
ExecutionProperty={"MaxConcurrentRuns": 2},
MaxCapacity=5,
MaxRetries=3,
Name=expected_job_name,
Role=role_name_arn,
)
mock_conn.update_job.assert_not_called()
assert result == expected_job_name
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(GlueJobHook, "conn")
def test_create_or_update_glue_job_update_existing_job(self, mock_conn, mock_get_iam_execution_role):
"""
        Calls 'create_or_update_glue_job' with an existing job.
Should update existing job configurations.
"""
job_name = "aws_test_glue_job"
job_description = "This is test case job from Airflow"
role_name = "my_test_role"
role_name_arn = "test_role"
some_script = "s3://glue-examples/glue-scripts/sample_aws_glue_job.py"
some_s3_bucket = "my-includes"
mock_conn.get_job.return_value = {
"Job": {
"Name": job_name,
"Description": "Old description of job",
"Role": role_name_arn,
}
}
mock_get_iam_execution_role.return_value = {"Role": {"RoleName": role_name, "Arn": "test_role"}}
hook = GlueJobHook(
job_name=job_name,
desc=job_description,
script_location=some_script,
iam_role_name=role_name,
s3_bucket=some_s3_bucket,
region_name=self.some_aws_region,
update_config=True,
)
result = hook.create_or_update_glue_job()
assert mock_conn.get_job.call_count == 2
mock_conn.update_job.assert_called_once_with(
JobName=job_name,
JobUpdate={
"Description": job_description,
"LogUri": f"s3://{some_s3_bucket}/logs/glue-logs/{job_name}",
"Role": role_name_arn,
"ExecutionProperty": {"MaxConcurrentRuns": 1},
"Command": {"Name": "glueetl", "ScriptLocation": some_script},
"MaxRetries": 0,
"MaxCapacity": 10,
},
)
assert result == job_name
@mock_aws
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
def test_create_or_update_glue_job_worker_type(self, mock_get_iam_execution_role):
mock_get_iam_execution_role.return_value = {"Role": {"RoleName": "my_test_role", "Arn": "test_role"}}
some_script = "s3:/glue-examples/glue-scripts/sample_aws_glue_job.py"
some_s3_bucket = "my-includes"
expected_job_name = "aws_test_glue_job_worker_type"
glue_job = GlueJobHook(
job_name=expected_job_name,
desc="This is test case job from Airflow",
script_location=some_script,
iam_role_name="my_test_role",
s3_bucket=some_s3_bucket,
region_name=self.some_aws_region,
create_job_kwargs={"WorkerType": "G.2X", "NumberOfWorkers": 60},
update_config=True,
)
result = glue_job.create_or_update_glue_job()
assert result == expected_job_name
@mock.patch.object(GlueJobHook, "get_iam_execution_role")
@mock.patch.object(GlueJobHook, "conn")
def test_init_worker_type_value_error(self, mock_conn, mock_get_iam_execution_role):
mock_get_iam_execution_role.return_value = mock.MagicMock(Role={"RoleName": "my_test_role"})
some_script = "s3:/glue-examples/glue-scripts/sample_aws_glue_job.py"
some_s3_bucket = "my-includes"
with pytest.raises(ValueError, match="Cannot specify num_of_dpus with custom WorkerType"):
GlueJobHook(
job_name="aws_test_glue_job",
desc="This is test case job from Airflow",
script_location=some_script,
iam_role_name="my_test_role",
s3_bucket=some_s3_bucket,
region_name=self.some_aws_region,
num_of_dpus=20,
create_job_kwargs={"WorkerType": "G.2X", "NumberOfWorkers": 60},
update_config=True,
)
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "conn")
def test_initialize_job(self, mock_conn, mock_get_job_state):
some_data_path = "s3://glue-datasets/examples/medicare/SampleData.csv"
some_script_arguments = {"--s3_input_data_path": some_data_path}
some_run_kwargs = {"NumberOfWorkers": 5}
some_script = "s3:/glue-examples/glue-scripts/sample_aws_glue_job.py"
some_s3_bucket = "my-includes"
mock_conn.start_job_run()
mock_job_run_state = mock_get_job_state.return_value
glue_job_hook = GlueJobHook(
job_name="aws_test_glue_job",
desc="This is test case job from Airflow",
iam_role_name="my_test_role",
script_location=some_script,
s3_bucket=some_s3_bucket,
region_name=self.some_aws_region,
update_config=False,
)
glue_job_run = glue_job_hook.initialize_job(some_script_arguments, some_run_kwargs)
glue_job_run_state = glue_job_hook.get_job_state(glue_job_run["JobName"], glue_job_run["JobRunId"])
        assert glue_job_run_state == mock_job_run_state, "Mocks must be equal"
@mock.patch.object(AwsLogsHook, "get_conn")
@mock.patch.object(GlueJobHook, "conn")
def test_print_job_logs_returns_token(self, conn_mock: MagicMock, log_client_mock: MagicMock, caplog):
hook = GlueJobHook(job_name="test")
conn_mock().get_job_run.return_value = {"JobRun": {"LogGroupName": "my_log_group"}}
log_client_mock().get_paginator().paginate.return_value = [
# first response : 2 log lines
{
"events": [
{"logStreamName": "stream", "timestamp": 123, "message": "hello\n"},
{"logStreamName": "stream", "timestamp": 123, "message": "world\n"},
],
"searchedLogStreams": [],
"nextToken": "my_continuation_token",
"ResponseMetadata": {"HTTPStatusCode": 200},
},
# second response, reached end of stream
{"events": [], "searchedLogStreams": [], "ResponseMetadata": {"HTTPStatusCode": 200}},
]
tokens = GlueJobHook.LogContinuationTokens()
with caplog.at_level("INFO"):
hook.print_job_logs("name", "run", tokens)
assert "\thello\n\tworld\n" in caplog.text
assert tokens.output_stream_continuation == "my_continuation_token"
assert tokens.error_stream_continuation == "my_continuation_token"
@mock.patch.object(AwsLogsHook, "get_conn")
@mock.patch.object(GlueJobHook, "conn")
def test_print_job_logs_no_stream_yet(self, conn_mock: MagicMock, client_mock: MagicMock):
hook = GlueJobHook()
conn_mock().get_job_run.return_value = {"JobRun": {"LogGroupName": "my_log_group"}}
client_mock().get_paginator().paginate.side_effect = ClientError(
{"Error": {"Code": "ResourceNotFoundException"}}, "op"
)
tokens = GlueJobHook.LogContinuationTokens()
hook.print_job_logs("name", "run", tokens) # should not error
assert tokens.output_stream_continuation is None
assert tokens.error_stream_continuation is None
assert client_mock().get_paginator().paginate.call_count == 2
@mock.patch.object(GlueJobHook, "get_job_state")
def test_job_completion_success(self, get_state_mock: MagicMock):
hook = GlueJobHook(job_poll_interval=0)
get_state_mock.side_effect = [
"RUNNING",
"RUNNING",
"SUCCEEDED",
]
hook.job_completion("job_name", "run_id")
assert get_state_mock.call_count == 3
get_state_mock.assert_called_with("job_name", "run_id")
@mock.patch.object(GlueJobHook, "get_job_state")
def test_job_completion_failure(self, get_state_mock: MagicMock):
hook = GlueJobHook(job_poll_interval=0)
get_state_mock.side_effect = [
"RUNNING",
"RUNNING",
"FAILED",
]
with pytest.raises(AirflowException):
hook.job_completion("job_name", "run_id")
assert get_state_mock.call_count == 3
@pytest.mark.asyncio
@mock.patch.object(GlueJobHook, "async_get_job_state")
async def test_async_job_completion_success(self, get_state_mock: MagicMock):
hook = GlueJobHook(job_poll_interval=0)
get_state_mock.side_effect = [
"RUNNING",
"RUNNING",
"SUCCEEDED",
]
await hook.async_job_completion("job_name", "run_id")
assert get_state_mock.call_count == 3
get_state_mock.assert_called_with("job_name", "run_id")
@pytest.mark.asyncio
@mock.patch.object(GlueJobHook, "async_get_job_state")
async def test_async_job_completion_failure(self, get_state_mock: MagicMock):
hook = GlueJobHook(job_poll_interval=0)
get_state_mock.side_effect = [
"RUNNING",
"RUNNING",
"FAILED",
]
with pytest.raises(AirflowException):
await hook.async_job_completion("job_name", "run_id")
assert get_state_mock.call_count == 3
@mock.patch.object(GlueJobHook, "conn")
def test_get_job_state_success(self, mock_conn):
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
expected_state = "SUCCEEDED"
mock_conn.get_job_run.return_value = {"JobRun": {"JobRunState": expected_state}}
result = hook.get_job_state(job_name, run_id)
assert result == expected_state
mock_conn.get_job_run.assert_called_once_with(
JobName=job_name, RunId=run_id, PredecessorsIncluded=True
)
@mock.patch.object(GlueJobHook, "conn")
def test_get_job_state_retry_on_client_error(self, mock_conn):
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
expected_state = "SUCCEEDED"
mock_conn.get_job_run.side_effect = [
ClientError(
{"Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"}}, "get_job_run"
),
{"JobRun": {"JobRunState": expected_state}},
]
result = hook.get_job_state(job_name, run_id)
assert result == expected_state
assert mock_conn.get_job_run.call_count == 2
@mock.patch.object(GlueJobHook, "conn")
def test_get_job_state_fails_after_all_retries(self, mock_conn):
"""Test get_job_state raises exception when all retries are exhausted."""
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
mock_conn.get_job_run.side_effect = ClientError(
{"Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"}}, "get_job_run"
)
with pytest.raises(ClientError) as exc_info:
hook.get_job_state(job_name, run_id)
assert exc_info.value.response["Error"]["Code"] == "ThrottlingException"
assert mock_conn.get_job_run.call_count == 5
@pytest.mark.asyncio
@mock.patch.object(GlueJobHook, "get_async_conn")
async def test_async_get_job_state_success(self, mock_get_async_conn):
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
expected_state = "RUNNING"
mock_client = mock.AsyncMock()
mock_client.get_job_run.return_value = {"JobRun": {"JobRunState": expected_state}}
mock_context = mock.AsyncMock()
mock_context.__aenter__.return_value = mock_client
mock_context.__aexit__.return_value = None
mock_get_async_conn.return_value = mock_context
result = await hook.async_get_job_state(job_name, run_id)
assert result == expected_state
mock_client.get_job_run.assert_called_once_with(JobName=job_name, RunId=run_id)
@pytest.mark.asyncio
@mock.patch.object(GlueJobHook, "get_async_conn")
async def test_async_get_job_state_retry_on_client_error(self, mock_get_async_conn):
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
expected_state = "FAILED"
mock_client = mock.AsyncMock()
mock_client.get_job_run.side_effect = [
ClientError(
{"Error": {"Code": "ServiceUnavailable", "Message": "Service temporarily unavailable"}},
"get_job_run",
),
{"JobRun": {"JobRunState": expected_state}},
]
mock_context = mock.AsyncMock()
mock_context.__aenter__.return_value = mock_client
mock_context.__aexit__.return_value = None
mock_get_async_conn.return_value = mock_context
result = await hook.async_get_job_state(job_name, run_id)
assert result == expected_state
assert mock_client.get_job_run.call_count == 2
@pytest.mark.asyncio
@mock.patch.object(GlueJobHook, "get_async_conn")
async def test_async_get_job_state_fails_after_all_retries(self, mock_get_async_conn):
hook = GlueJobHook()
job_name = "test_job"
run_id = "test_run_id"
mock_client = mock.AsyncMock()
mock_client.get_job_run.side_effect = ClientError(
{"Error": {"Code": "ServiceUnavailable", "Message": "Service temporarily unavailable"}},
"get_job_run",
)
mock_context = mock.AsyncMock()
mock_context.__aenter__.return_value = mock_client
mock_context.__aexit__.return_value = None
mock_get_async_conn.return_value = mock_context
with pytest.raises(ClientError) as exc_info:
await hook.async_get_job_state(job_name, run_id)
assert exc_info.value.response["Error"]["Code"] == "ServiceUnavailable"
assert mock_client.get_job_run.call_count == 5
|
TestGlueJobHook
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink15.py
|
{
"start": 315,
"end": 1063
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink15.xlsx")
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with hyperlinks. This
example doesn't have any link formatting and tests the relationship
linkage code.
"""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url("B2", "external:subdir/blank.xlsx")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
pytorch__pytorch
|
torch/distributions/transforms.py
|
{
"start": 30490,
"end": 31522
}
|
class ____(Transform):
r"""
Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then
normalizing.
This is not bijective and cannot be used for HMC. However this acts mostly
coordinate-wise (except for the final normalization), and thus is
appropriate for coordinate-wise optimization algorithms.
"""
domain = constraints.real_vector
codomain = constraints.simplex
def __eq__(self, other):
return isinstance(other, SoftmaxTransform)
def _call(self, x):
logprobs = x
probs = (logprobs - logprobs.max(-1, True)[0]).exp()
return probs / probs.sum(-1, True)
def _inverse(self, y):
probs = y
return probs.log()
def forward_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape
def inverse_shape(self, shape):
if len(shape) < 1:
raise ValueError("Too few dimensions on input")
return shape
|
SoftmaxTransform
|
python
|
scikit-learn__scikit-learn
|
sklearn/linear_model/_coordinate_descent.py
|
{
"start": 24790,
"end": 41349
}
|
class ____(MultiOutputMixin, RegressorMixin, LinearModel):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function:
.. math::
\\frac{1}{2 n_{\\rm samples}} \\cdot \\|y - X w\\|_2^2
+ \\alpha \\cdot {\\rm l1\\_{ratio}} \\cdot \\|w\\|_1
+ 0.5 \\cdot \\alpha \\cdot (1 - {\\rm l1\\_{ratio}}) \\cdot \\|w\\|_2^2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to:
.. math::
a \\cdot \\|w\\|_1 + 0.5 \\cdot b \\cdot \\|w\\|_2^2
where:
.. math::
\\alpha = a + b, \\quad {\\rm l1\\_{ratio}} = \\frac{a}{a + b}
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter. ``alpha = 0`` is equivalent to an ordinary least square,
solved by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float, default=0.5
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
precompute : bool or array-like of shape (n_features, n_features),\
default=False
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``False`` to preserve sparsity.
Check :ref:`an example on how to use a precomputed Gram Matrix in ElasticNet
<sphx_glr_auto_examples_linear_model_plot_elastic_net_precomputed_gram_matrix_with_weighted_samples.py>`
for details.
max_iter : int, default=1000
The maximum number of iterations.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are smaller or equal to
``tol``, the optimization code checks the dual gap for optimality and continues
until it is smaller or equal to ``tol``, see Notes below.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
positive : bool, default=False
When set to ``True``, forces the coefficients to be positive.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the cost function formula).
sparse_coef_ : sparse matrix of shape (n_features,) or \
(n_targets, n_features)
Sparse representation of the `coef_`.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
n_iter_ : list of int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
dual_gap_ : float or ndarray of shape (n_targets,)
Given param alpha, the dual gaps at the end of the optimization,
same shape as each observation of y.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
ElasticNetCV : Elastic net model with best model selection by
cross-validation.
SGDRegressor : Implements elastic net regression with incremental training.
SGDClassifier : Implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log_loss", penalty="elasticnet")``).
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The precise stopping criteria based on `tol` are the following: First, check that
the maximum coordinate update, i.e. :math:`\\max_j |w_j^{new} - w_j^{old}|`
is smaller or equal to `tol` times the maximum absolute coefficient,
:math:`\\max_j |w_j|`. If so, then additionally check whether the dual gap is
smaller or equal to `tol` times :math:`||y||_2^2 / n_{\\text{samples}}`.
The underlying coordinate descent solver uses gap safe screening rules to speedup
fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.
Examples
--------
>>> from sklearn.linear_model import ElasticNet
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=2, random_state=0)
>>> regr = ElasticNet(random_state=0)
>>> regr.fit(X, y)
ElasticNet(random_state=0)
>>> print(regr.coef_)
[18.83816048 64.55968825]
>>> print(regr.intercept_)
1.451
>>> print(regr.predict([[0, 0]]))
[1.451]
- :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_and_elasticnet.py`
showcases ElasticNet alongside Lasso and ARD Regression for sparse
signal recovery in the presence of noise and feature correlation.
"""
# "check_input" is used for optimisation and isn't something to be passed
# around in a pipeline.
__metadata_request__fit = {"check_input": metadata_routing.UNUSED}
_parameter_constraints: dict = {
"alpha": [Interval(Real, 0, None, closed="left")],
"l1_ratio": [Interval(Real, 0, 1, closed="both")],
"fit_intercept": ["boolean"],
"precompute": ["boolean", "array-like"],
"max_iter": [Interval(Integral, 1, None, closed="left"), None],
"copy_X": ["boolean"],
"tol": [Interval(Real, 0, None, closed="left")],
"warm_start": ["boolean"],
"positive": ["boolean"],
"random_state": ["random_state"],
"selection": [StrOptions({"cyclic", "random"})],
}
path = staticmethod(enet_path)
def __init__(
self,
alpha=1.0,
*,
l1_ratio=0.5,
fit_intercept=True,
precompute=False,
max_iter=1000,
copy_X=True,
tol=1e-4,
warm_start=False,
positive=False,
random_state=None,
selection="cyclic",
):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None, check_input=True):
"""Fit model with coordinate descent.
Parameters
----------
X : {ndarray, sparse matrix, sparse array} of shape (n_samples, n_features)
Data.
Note that large sparse matrices and arrays requiring `int64`
indices are not accepted.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target. Will be cast to X's dtype if necessary.
sample_weight : float or array-like of shape (n_samples,), default=None
Sample weights. Internally, the `sample_weight` vector will be
rescaled to sum to `n_samples`.
.. versionadded:: 0.23
check_input : bool, default=True
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Fitted estimator.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn(
(
"With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator"
),
stacklevel=2,
)
# Remember if X is copied
X_copied = False
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X_copied = self.copy_X and self.fit_intercept
X, y = validate_data(
self,
X,
y,
accept_sparse="csc",
order="F",
dtype=[np.float64, np.float32],
force_writeable=True,
accept_large_sparse=False,
copy=X_copied,
multi_output=True,
y_numeric=True,
)
y = check_array(
y, order="F", copy=False, dtype=X.dtype.type, ensure_2d=False
)
n_samples, n_features = X.shape
alpha = self.alpha
if isinstance(sample_weight, numbers.Number):
sample_weight = None
if sample_weight is not None:
if check_input:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
# TLDR: Rescale sw to sum up to n_samples.
# Long: The objective function of Enet
#
# 1/2 * np.average(squared error, weights=sw)
# + alpha * penalty (1)
#
# is invariant under rescaling of sw.
# But enet_path coordinate descent minimizes
#
# 1/2 * sum(squared error) + alpha' * penalty (2)
#
# and therefore sets
#
# alpha' = n_samples * alpha (3)
#
# inside its function body, which results in objective (2) being
# equivalent to (1) in case of no sw.
# With sw, however, enet_path should set
#
# alpha' = sum(sw) * alpha (4)
#
# Therefore, we use the freedom of Eq. (1) to rescale sw before
# calling enet_path, i.e.
#
# sw *= n_samples / sum(sw)
#
# such that sum(sw) = n_samples. This way, (3) and (4) are the same.
sample_weight = sample_weight * (n_samples / np.sum(sample_weight))
# Note: Alternatively, we could also have rescaled alpha instead
# of sample_weight:
#
# alpha *= np.sum(sample_weight) / n_samples
# Ensure copying happens only once, don't do it again if done above.
# X and y will be rescaled if sample_weight is not None, order='F'
# ensures that the returned X and y are still F-contiguous.
should_copy = self.copy_X and not X_copied
X, y, X_offset, y_offset, X_scale, precompute, Xy = _pre_fit(
X,
y,
None,
self.precompute,
fit_intercept=self.fit_intercept,
copy=should_copy,
check_gram=check_input,
sample_weight=sample_weight,
)
# coordinate descent needs F-ordered arrays and _pre_fit might have
# called _rescale_data
if check_input or sample_weight is not None:
X, y = _set_order(X, y, order="F")
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_targets = y.shape[1]
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype, order="F")
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in range(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = self.path(
X,
y[:, k],
l1_ratio=self.l1_ratio,
eps=None,
n_alphas=None,
alphas=[alpha],
precompute=precompute,
Xy=this_Xy,
copy_X=True,
coef_init=coef_[k],
verbose=False,
return_n_iter=True,
positive=self.positive,
check_input=False,
# from here on **params
tol=self.tol,
X_offset=X_offset,
X_scale=X_scale,
max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
sample_weight=sample_weight,
)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_ = coef_[0]
self.dual_gap_ = dual_gaps_[0]
else:
self.coef_ = coef_
self.dual_gap_ = dual_gaps_
self._set_intercept(X_offset, y_offset, X_scale)
# check for finiteness of coefficients
if not all(np.isfinite(w).all() for w in [self.coef_, self.intercept_]):
raise ValueError(
"Coordinate descent iterations resulted in non-finite parameter"
" values. The input data may contain large values and need to"
" be preprocessed."
)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
"""Sparse representation of the fitted `coef_`."""
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model.
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : ndarray of shape (n_samples,)
The predicted decision function.
"""
check_is_fitted(self)
if sparse.issparse(X):
return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
else:
return super()._decision_function(X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
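# Hedged sketch (editor addition): per the docstring above, a penalty written
# as a * ||w||_1 + 0.5 * b * ||w||_2^2 corresponds to alpha = a + b and
# l1_ratio = a / (a + b). The numbers below are illustrative assumptions.
from sklearn.linear_model import ElasticNet

a, b = 0.7, 0.3                            # desired L1 and L2 weights
alpha, l1_ratio = a + b, a / (a + b)       # -> alpha = 1.0, l1_ratio = 0.7
equivalent_model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)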
###############################################################################
# Lasso model
|
ElasticNet
|
python
|
getsentry__sentry
|
src/sentry/ratelimits/base.py
|
{
"start": 179,
"end": 1064
}
|
class ____(Service):
__all__ = ("is_limited", "validate", "current_value", "is_limited_with_value")
window = 60
def is_limited(
self, key: str, limit: int, project: Project | None = None, window: int | None = None
) -> bool:
is_limited, _, _ = self.is_limited_with_value(key, limit, project=project, window=window)
return is_limited
def current_value(
self, key: str, project: Project | None = None, window: int | None = None
) -> int:
return 0
def is_limited_with_value(
self, key: str, limit: int, project: Project | None = None, window: int | None = None
) -> tuple[bool, int, int]:
return False, 0, 0
def validate(self) -> None:
raise NotImplementedError
def reset(self, key: str, project: Project | None = None, window: int | None = None) -> None:
return
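# Hedged sketch (editor addition): a minimal, purely illustrative subclass of
# the base service above. The base class name RateLimiter is taken from the
# target field of this row; the in-memory counter and subclass name are
# assumptions, not from the source.
from sentry.ratelimits.base import RateLimiter  # assumes a sentry checkout

class InMemoryRateLimiter(RateLimiter):
    def __init__(self) -> None:
        self._counts: dict[str, int] = {}

    def is_limited_with_value(self, key, limit, project=None, window=None):
        current = self._counts.get(key, 0) + 1
        self._counts[key] = current
        # Third tuple element mirrors the base-class default of 0.
        return current > limit, current, 0

    def validate(self) -> None:
        pass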
|
RateLimiter
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1468582,
"end": 1468858
}
|
class ____(sgqlc.types.Type, Node, AuditEntry, EnterpriseAuditEntryData, OrganizationAuditEntryData):
"""Audit log entry for a repository_visibility_change.disable event."""
__schema__ = github_schema
__field_names__ = ()
|
RepositoryVisibilityChangeDisableAuditEntry
|
python
|
mlflow__mlflow
|
mlflow/models/evaluation/base.py
|
{
"start": 32002,
"end": 81247
}
|
class ____:
name: str
evaluator: ModelEvaluator
config: dict[str, Any]
def _resolve_default_evaluator(model_type, default_config) -> list[EvaluatorBundle]:
"""
Determine which built-in evaluators should be used for the given model type by default.
Previously, the MLflow evaluate API only had a single "default" evaluator used for all models like
classifier, regressor, etc. We split it into multiple built-in evaluators for different model
types for maintainability, but in order to maintain backward compatibility, we need to map
the "default" provided by users to the correct built-in evaluators.
Args:
model_type: A string describing the model type (e.g., "regressor", "classifier", …).
default_config: A dictionary of configurations for the "default" evaluator. If any
non-default built-in evaluator is applicable, this config will be applied to them.
"""
from mlflow.models.evaluation.evaluator_registry import _model_evaluation_registry
builtin_evaluators = []
for name in _model_evaluation_registry._registry:
evaluator = _model_evaluation_registry.get_evaluator(name)
if (
name != "default"
and _model_evaluation_registry.is_builtin(name)
and evaluator.can_evaluate(model_type=model_type, evaluator_config=default_config)
):
builtin_evaluators.append(EvaluatorBundle(name, evaluator, default_config))
# We should use DefaultEvaluator only if there is no other built-in evaluator applicable.
if not builtin_evaluators:
default_evaluator = _model_evaluation_registry.get_evaluator("default")
builtin_evaluators = [EvaluatorBundle("default", default_evaluator, default_config)]
return builtin_evaluators
def resolve_evaluators_and_configs(
evaluators: str | list[str] | None,
evaluator_config: dict[str, Any] | None,
model_type: str | None = None,
) -> list[EvaluatorBundle]:
"""
The `evaluators` and `evaluator_config` arguments of the `evaluate` API can be specified
in multiple ways. This function normalizes the arguments into a single format for easier
downstream processing.
Args:
evaluators: A string or a list of strings specifying the evaluators to use for model
evaluation. If None, all available evaluators will be used.
evaluator_config: A dictionary containing configuration items for the evaluators.
model_type: A string describing the model type (e.g., "regressor", "classifier", …).
Returns:
A list of EvaluatorBundle that contains name, evaluator, config for each evaluator.
"""
from mlflow.models.evaluation.evaluator_registry import _model_evaluation_registry as rg
# NB: The `databricks-agents` package must be installed to use the 'databricks-agent' model
# type. Ideally this check should be done in the 'databricks-agent' evaluator implementation,
# but we need to do it here because the code won't reach the evaluator implementation if the
# package is not installed.
if model_type == _ModelType.DATABRICKS_AGENT:
try:
import databricks.agents # noqa: F401
except ImportError as e:
raise MlflowException(
message="Databricks Agents SDK must be installed to use the "
f"`{_ModelType.DATABRICKS_AGENT}` model type. Run `pip install databricks-agents` "
"to install the package and try again.",
error_code=INVALID_PARAMETER_VALUE,
) from e
def check_nesting_config_dict(_evaluator_name_list, _evaluator_name_to_conf_map):
return isinstance(_evaluator_name_to_conf_map, dict) and all(
k in _evaluator_name_list and isinstance(v, dict)
for k, v in _evaluator_name_to_conf_map.items()
)
if evaluators is None:
# If no evaluators are specified, use all available evaluators.
evaluators = list(rg._registry.keys())
evaluator_config = evaluator_config or {}
if evaluator_config is not None and not any(
name in evaluator_config for name in evaluators
):
# If an evaluator config is passed but none of the available evaluator names
# appears as a key in it, we assume the evaluator config to be a flat dict,
# which is globally applied to all evaluators.
evaluator_config = dict.fromkeys(evaluators, evaluator_config)
# Filter out evaluators that cannot evaluate the model type.
resolved = []
for name in evaluators:
evaluator = rg.get_evaluator(name)
config = evaluator_config.get(name, {})
if evaluator.can_evaluate(model_type=model_type, evaluator_config=config):
resolved.append(EvaluatorBundle(name=name, evaluator=evaluator, config=config))
# If any of built-in evaluator can apply, skip "default" evaluator.
default = next((ev for ev in resolved if ev.name == "default"), None)
non_default_builtins = [
ev for ev in resolved if ev.name != "default" and rg.is_builtin(ev.name)
]
if default and non_default_builtins:
resolved.remove(default)
# Apply default config (passed like `evaluator_config={"default": config}`) to
# non-default built-in evaluators (e.g., ClassifierEvaluator) if they don't have
# explicitly specified configs. This is for backward compatibility where we only
# had a single "default" evaluator used for all models.
# For example, if the user passes this for a classifier model:
# evaluator_config = {"default": my_config}
# it should be equivalent to
# evaluator_config = {"classifier": my_config, "shap": my_config}
for ev in non_default_builtins:
ev.config = ev.config or default.config
return resolved
elif isinstance(evaluators, str):
# Single evaluator name specified
if not (evaluator_config is None or isinstance(evaluator_config, dict)):
raise MlflowException(
message="If `evaluators` argument is the name of an evaluator, evaluator_config"
" must be None or a dict containing config items for the evaluator.",
error_code=INVALID_PARAMETER_VALUE,
)
evaluator_config = evaluator_config or {}
if evaluators == "default":
# Previously we only had a single "default" evaluator used for all models.
# We need to map "default" to the new dedicated builtin evaluators.
return _resolve_default_evaluator(model_type, evaluator_config)
elif rg.is_registered(evaluators):
return [EvaluatorBundle(evaluators, rg.get_evaluator(evaluators), evaluator_config)]
else:
return []
elif isinstance(evaluators, list):
if evaluator_config is not None and not check_nesting_config_dict(
evaluators, evaluator_config
):
raise MlflowException(
message="If `evaluators` argument is an evaluator name list, evaluator_config "
"must be a dict containing mapping from evaluator name to individual "
"evaluator config dict.",
error_code=INVALID_PARAMETER_VALUE,
)
evaluator_config = evaluator_config or {}
# Previously we only had a single "default" evaluator used for all models.
# We need to map "default" to the new dedicated builtin evaluators.
resolved = []
for name in evaluators:
config = evaluator_config.get(name, {})
if name == "default":
builtin_evaluators = _resolve_default_evaluator(model_type, config)
resolved.extend(builtin_evaluators)
else:
resolved.append(EvaluatorBundle(name, rg.get_evaluator(name), config))
return resolved
else:
raise MlflowException(
message="Invalid `evaluators` and `evaluator_config` arguments. "
"Please refer to the documentation for correct usage.",
error_code=INVALID_PARAMETER_VALUE,
)
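# Hedged sketch (editor addition): the three argument shapes normalized by the
# function above, as described in its docstring. The config keys and the import
# path are assumptions inferred from this file's location
# (mlflow/models/evaluation/base.py) and the surrounding documentation.
from mlflow.models.evaluation.base import resolve_evaluators_and_configs

# One flat config applied to every applicable evaluator.
flat = resolve_evaluators_and_configs(
    evaluators=None, evaluator_config={"pos_label": 1}, model_type="classifier"
)
# A single evaluator name with its own config.
single = resolve_evaluators_and_configs(
    evaluators="default", evaluator_config={"pos_label": 1}, model_type="classifier"
)
# A list of evaluator names with per-evaluator configs.
per_name = resolve_evaluators_and_configs(
    evaluators=["default"], evaluator_config={"default": {"pos_label": 1}}, model_type="classifier"
)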
def _model_validation_contains_model_comparison(validation_thresholds):
"""
Helper function for determining if validation_thresholds contains
thresholds for model comparison: either min_relative_change or min_absolute_change
"""
if not validation_thresholds:
return False
thresholds = validation_thresholds.values()
return any(
threshold.min_relative_change or threshold.min_absolute_change for threshold in thresholds
)
_last_failed_evaluator = None
def _get_last_failed_evaluator():
"""
Return the evaluator name of the last failed evaluator when calling `evaluate`.
This can be used to check which evaluator failed when the `evaluate` API fails.
"""
return _last_failed_evaluator
# DO NOT CHANGE THE ORDER OF THE ARGUMENTS
# The order of the arguments need to be preserved. You can add new arguments at the end
# of the argument list, but do not change the order of the existing arguments.
@record_usage_event(EvaluateEvent)
def _evaluate(
*,
model,
model_type,
model_id,
dataset,
run_id,
# The `evaluator_name_list` and `evaluator_name_to_conf_map` are not used by MLflow at all,
# but we need to keep these for backward compatibility.
evaluator_name_list,
evaluator_name_to_conf_map,
extra_metrics,
custom_artifacts,
predictions,
evaluators,
):
"""
The public API "evaluate" will verify argument first, and then pass normalized arguments
to the _evaluate method.
"""
global _last_failed_evaluator
_last_failed_evaluator = None
client = MlflowClient()
model_uuid = getattr(model, "metadata", None)
if model_uuid is not None:
model_uuid = model_uuid.model_uuid
dataset._log_dataset_tag(client, run_id, model_uuid)
eval_results = []
should_enable_tracing = model is not None # Do not enable tracing if static dataset is provided
for eval_ in evaluators:
_logger.debug(f"Evaluating the model with the {eval_.name} evaluator.")
_last_failed_evaluator = eval_.name
if eval_.evaluator.can_evaluate(model_type=model_type, evaluator_config=eval_.config):
with configure_autologging_for_evaluation(enable_tracing=should_enable_tracing):
eval_result = eval_.evaluator.evaluate(
model=model,
model_type=model_type,
model_id=model_id,
dataset=dataset,
run_id=run_id,
evaluator_config=eval_.config,
extra_metrics=extra_metrics,
custom_artifacts=custom_artifacts,
predictions=predictions,
)
if eval_result is not None:
eval_results.append(eval_result)
_last_failed_evaluator = None
if len(eval_results) == 0:
raise MlflowException(
message="The model could not be evaluated by any of the registered evaluators, please "
"verify that the model type and other configs are set correctly.",
error_code=INVALID_PARAMETER_VALUE,
)
merged_eval_result = EvaluationResult({}, {}, None)
for eval_result in eval_results:
merged_eval_result.metrics.update(eval_result.metrics)
merged_eval_result.artifacts.update(eval_result.artifacts)
return merged_eval_result
def _get_model_from_function(fn):
from mlflow.pyfunc.model import _PythonModelPyfuncWrapper
class ModelFromFunction(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input: pd.DataFrame):
return fn(model_input)
python_model = ModelFromFunction()
return _PythonModelPyfuncWrapper(python_model, None, None)
def _is_model_deployment_endpoint_uri(model: Any) -> bool:
if not isinstance(model, str):
return False
from mlflow.metrics.genai.model_utils import _parse_model_uri
try:
schema, path = _parse_model_uri(model)
return schema == "endpoints"
except MlflowException:
return False
def _get_model_from_deployment_endpoint_uri(
endpoint_uri: str, params: dict[str, Any] | None = None
):
from mlflow.metrics.genai.model_utils import _parse_model_uri
from mlflow.pyfunc.model import ModelFromDeploymentEndpoint, _PythonModelPyfuncWrapper
_, endpoint = _parse_model_uri(endpoint_uri)
params = params or {}
python_model = ModelFromDeploymentEndpoint(endpoint, params)
return _PythonModelPyfuncWrapper(python_model, None, None)
def evaluate(
model=None,
data=None,
*,
model_type=None,
targets=None,
predictions=None,
dataset_path=None,
feature_names=None,
evaluators=None,
evaluator_config=None,
extra_metrics=None,
custom_artifacts=None,
env_manager="local",
model_config=None,
inference_params=None,
model_id=None,
_called_from_genai_evaluate=False,
):
'''
Evaluate the model performance on given data and selected metrics.
This function evaluates a PyFunc model or custom callable on the specified dataset using
specified ``evaluators``, and logs resulting metrics & artifacts to MLflow tracking server.
Users can also skip setting ``model`` and put the model outputs in ``data`` directly for
evaluation. For detailed information, please read
`the Model Evaluation documentation <../../model-evaluation/index.html>`_.
Default Evaluator behavior:
- The default evaluator, which can be invoked with ``evaluators="default"`` or
``evaluators=None``, supports model types listed below. For each pre-defined model type, the
default evaluator evaluates your model on a selected set of metrics and generate artifacts
like plots. Please find more details below.
- For both the ``"regressor"`` and ``"classifier"`` model types, the default evaluator
generates model summary plots and feature importance plots using
`SHAP <https://shap.readthedocs.io/en/latest/index.html>`_.
- For regressor models, the default evaluator additionally logs:
- **metrics**: example_count, mean_absolute_error, mean_squared_error,
root_mean_squared_error, sum_on_target, mean_on_target, r2_score, max_error,
mean_absolute_percentage_error.
- For binary classifiers, the default evaluator additionally logs:
- **metrics**: true_negatives, false_positives, false_negatives, true_positives, recall,
precision, f1_score, accuracy_score, example_count, log_loss, roc_auc,
precision_recall_auc.
- **artifacts**: lift curve plot, precision-recall plot, ROC plot.
- For multiclass classifiers, the default evaluator additionally logs:
- **metrics**: accuracy_score, example_count, f1_score_micro, f1_score_macro, log_loss
- **artifacts**: A CSV file for "per_class_metrics" (per-class metrics includes
true_negatives/false_positives/false_negatives/true_positives/recall/precision/roc_auc,
precision_recall_auc), precision-recall merged curves plot, ROC merged curves plot.
- For question-answering models, the default evaluator logs:
- **metrics**: ``exact_match``, ``token_count``, `toxicity`_ (requires `evaluate`_,
`torch`_, `flesch_kincaid_grade_level`_ (requires `textstat`_) and `ari_grade_level`_.
- **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets``
argument is supplied), and per-row metrics of the model in tabular format.
.. _toxicity:
https://huggingface.co/spaces/evaluate-measurement/toxicity
.. _torch:
https://pytorch.org/get-started/locally/
.. _transformers:
https://huggingface.co/docs/transformers/installation
.. _ari_grade_level:
https://en.wikipedia.org/wiki/Automated_readability_index
.. _flesch_kincaid_grade_level:
https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch%E2%80%93Kincaid_grade_level
.. _evaluate:
https://pypi.org/project/evaluate
.. _textstat:
https://pypi.org/project/textstat
- For text-summarization models, the default evaluator logs:
- **metrics**: ``token_count``, `ROUGE`_ (requires `evaluate`_, `nltk`_, and
`rouge_score`_ to be installed), `toxicity`_ (requires `evaluate`_, `torch`_,
`transformers`_), `ari_grade_level`_ (requires `textstat`_),
`flesch_kincaid_grade_level`_ (requires `textstat`_).
- **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets``
argument is supplied), and per-row metrics of the model in the tabular format.
.. _ROUGE:
https://huggingface.co/spaces/evaluate-metric/rouge
.. _toxicity:
https://huggingface.co/spaces/evaluate-measurement/toxicity
.. _torch:
https://pytorch.org/get-started/locally/
.. _transformers:
https://huggingface.co/docs/transformers/installation
.. _ari_grade_level:
https://en.wikipedia.org/wiki/Automated_readability_index
.. _flesch_kincaid_grade_level:
https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch%E2%80%93Kincaid_grade_level
.. _evaluate:
https://pypi.org/project/evaluate
.. _nltk:
https://pypi.org/project/nltk
.. _rouge_score:
https://pypi.org/project/rouge-score
.. _textstat:
https://pypi.org/project/textstat
- For text models, the default evaluator logs:
- **metrics**: ``token_count``, `toxicity`_ (requires `evaluate`_, `torch`_,
`transformers`_), `ari_grade_level`_ (requires `textstat`_),
`flesch_kincaid_grade_level`_ (requires `textstat`_).
- **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets``
argument is supplied), and per-row metrics of the model in tabular format.
.. _evaluate:
https://pypi.org/project/evaluate
.. _toxicity:
https://huggingface.co/spaces/evaluate-measurement/toxicity
.. _torch:
https://pytorch.org/get-started/locally/
.. _transformers:
https://huggingface.co/docs/transformers/installation
.. _ari_grade_level:
https://en.wikipedia.org/wiki/Automated_readability_index
.. _flesch_kincaid_grade_level:
https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch%E2%80%93Kincaid_grade_level
.. _textstat:
https://pypi.org/project/textstat
- For retriever models, the default evaluator logs:
- **metrics**: :mod:`precision_at_k(k) <mlflow.metrics.precision_at_k>`,
:mod:`recall_at_k(k) <mlflow.metrics.recall_at_k>` and
:mod:`ndcg_at_k(k) <mlflow.metrics.ndcg_at_k>` - all have a default value of
``retriever_k`` = 3.
- **artifacts**: A JSON file containing the inputs, outputs, targets, and per-row metrics
of the model in tabular format.
- For sklearn models, the default evaluator additionally logs the model's evaluation criterion
(e.g. mean accuracy for a classifier) computed by `model.score` method.
- The metrics/artifacts listed above are logged to the active MLflow run.
If no active run exists, a new MLflow run is created for logging these metrics and
artifacts.
- Additionally, information about the specified dataset - hash, name (if specified), path
(if specified), and the UUID of the model that evaluated it - is logged to the
``mlflow.datasets`` tag.
- The available ``evaluator_config`` options for the default evaluator include:
- **log_model_explainability**: A boolean value specifying whether or not to log model
explainability insights, default value is True.
- **log_explainer**: If True, log the explainer used to compute model explainability
insights as a model. Default value is False.
- **explainability_algorithm**: A string to specify the SHAP Explainer algorithm for model
explainability. Supported algorithm includes: 'exact', 'permutation', 'partition',
'kernel'.
If not set, ``shap.Explainer`` is used with the "auto" algorithm, which chooses the best
Explainer based on the model.
- **explainability_nsamples**: The number of sample rows to use for computing model
explainability insights. Default value is 2000.
- **explainability_kernel_link**: The kernel link function used by shap kernel explainer.
Available values are "identity" and "logit". Default value is "identity".
- **max_classes_for_multiclass_roc_pr**:
For multiclass classification tasks, the maximum number of classes for which to log
the per-class ROC curve and Precision-Recall curve. If the number of classes is
larger than the configured maximum, these curves are not logged.
- **metric_prefix**: An optional prefix to prepend to the name of each metric and artifact
produced during evaluation.
- **log_metrics_with_dataset_info**: A boolean value specifying whether or not to include
information about the evaluation dataset in the name of each metric logged to MLflow
Tracking during evaluation, default value is True.
- **pos_label**: If specified, the positive label to use when computing classification
metrics such as precision, recall, f1, etc. for binary classification models. For
multiclass classification and regression models, this parameter will be ignored.
- **average**: The averaging method to use when computing classification metrics such as
precision, recall, f1, etc. for multiclass classification models
(default: ``'weighted'``). For binary classification and regression models, this
parameter will be ignored.
- **sample_weights**: Weights for each sample to apply when computing model performance
metrics.
- **col_mapping**: A dictionary mapping column names in the input dataset or output
predictions to column names used when invoking the evaluation functions.
- **retriever_k**: A parameter used when ``model_type="retriever"`` as the number of
top-ranked retrieved documents to use when computing the built-in metric
:mod:`precision_at_k(k) <mlflow.metrics.precision_at_k>`,
:mod:`recall_at_k(k) <mlflow.metrics.recall_at_k>` and
:mod:`ndcg_at_k(k) <mlflow.metrics.ndcg_at_k>`. Default value is 3. For all other
model types, this parameter will be ignored.
- Limitations of evaluation dataset:
- For classification tasks, dataset labels are used to infer the total number of classes.
- For binary classification tasks, the negative label value must be 0 or -1 or False, and
the positive label value must be 1 or True.
- Limitations of metrics/artifacts computation:
- For classification tasks, some metric and artifact computations require the model to
output class probabilities. Currently, for scikit-learn models, the default evaluator
calls the ``predict_proba`` method on the underlying model to obtain probabilities. For
other model types, the default evaluator does not compute metrics/artifacts that require
probability outputs.
- Limitations of default evaluator logging model explainability insights:
- The ``shap.Explainer`` ``auto`` algorithm uses the ``Linear`` explainer for linear models
and the ``Tree`` explainer for tree models. Because SHAP's ``Linear`` and ``Tree``
explainers do not support multi-class classification, the default evaluator falls back to
using the ``Exact`` or ``Permutation`` explainers for multi-class classification tasks.
- Logging model explainability insights is not currently supported for PySpark models.
- The evaluation dataset label values must be numeric or boolean, all feature values
must be numeric, and each feature column must only contain scalar values.
- Limitations when environment restoration is enabled:
- When environment restoration is enabled for the evaluated model (i.e. a non-local
``env_manager`` is specified), the model is loaded as a client that invokes a MLflow
Model Scoring Server process in an independent Python environment with the model's
training time dependencies installed. As such, methods like ``predict_proba`` (for
probability outputs) or ``score`` (computes the evaluation criterion for sklearn models)
of the model become inaccessible and the default evaluator does not compute metrics or
artifacts that require those methods.
- Because the model is an MLflow Model Server process, SHAP explanations are slower to
compute. As such, model explainability is disabled when a non-local ``env_manager`` is
specified, unless the ``evaluator_config`` option **log_model_explainability** is
explicitly set to ``True``.
Args:
model: Optional. If specified, it should be one of the following:
- A pyfunc model instance
- A URI referring to a pyfunc model
- A URI referring to an MLflow Deployments endpoint e.g. ``"endpoints:/my-chat"``
- A callable function: This function should be able to take in model input and
return predictions. It should follow the signature of the
:py:func:`predict <mlflow.pyfunc.PyFuncModel.predict>` method. Here's an example
of a valid function:
.. code-block:: python
model = mlflow.pyfunc.load_model(model_uri)
def fn(model_input):
return model.predict(model_input)
If omitted, it indicates a static dataset will be used for evaluation instead of a
model. In this case, the ``data`` argument must be a Pandas DataFrame or an mlflow
PandasDataset that contains model outputs, and the ``predictions`` argument must be the
name of the column in ``data`` that contains model outputs.
data: One of the
following:
- A numpy array or list of evaluation features, excluding labels.
- A Pandas DataFrame containing evaluation features, labels, and optionally model
outputs. Model outputs are required to be provided when model is unspecified.
If ``feature_names`` argument not specified, all columns except for the label
column and predictions column are regarded as feature columns. Otherwise,
only column names present in ``feature_names`` are regarded as feature columns.
- A Spark DataFrame containing evaluation features and labels. If
the ``feature_names`` argument is not specified, all columns except for the label
column are regarded as feature columns. Otherwise, only column names present in
``feature_names`` are regarded as feature columns. Only the first 10000 rows in
the Spark DataFrame will be used as evaluation data.
- A :py:class:`mlflow.data.dataset.Dataset` instance containing evaluation
features, labels, and optionally model outputs. Model outputs are only supported
with a PandasDataset. Model outputs are required when model is unspecified, and
should be specified via the ``predictions`` property of the PandasDataset.
model_type: (Optional) A string describing the model type. The default evaluator
supports the following model types:
- ``'classifier'``
- ``'regressor'``
- ``'question-answering'``
- ``'text-summarization'``
- ``'text'``
- ``'retriever'``
If no ``model_type`` is specified, then you must provide a list of
metrics to compute via the ``extra_metrics`` param.
.. note::
``'question-answering'``, ``'text-summarization'``, ``'text'``, and
``'retriever'`` are experimental and may be changed or removed in a
future release.
targets: If ``data`` is a numpy array or list, a numpy array or list of evaluation
labels. If ``data`` is a DataFrame, the string name of a column from ``data``
that contains evaluation labels. Required for classifier and regressor models,
but optional for question-answering, text-summarization, and text models. If
``data`` is a :py:class:`mlflow.data.dataset.Dataset` that defines targets,
then ``targets`` is optional.
predictions: Optional. The name of the column that contains model outputs.
- When ``model`` is specified and outputs multiple columns, ``predictions`` can be used
to specify the name of the column that will be used to store model outputs for
evaluation.
- When ``model`` is not specified and ``data`` is a pandas dataframe,
``predictions`` can be used to specify the name of the column in ``data`` that
contains model outputs.
.. code-block:: python
:caption: Example usage of predictions
# Evaluate a model that outputs multiple columns
data = pd.DataFrame({"question": ["foo"]})
def model(inputs):
return pd.DataFrame({"answer": ["bar"], "source": ["baz"]})
results = evaluate(
model=model,
data=data,
predictions="answer",
# other arguments if needed
)
# Evaluate a static dataset
data = pd.DataFrame({"question": ["foo"], "answer": ["bar"], "source": ["baz"]})
results = evaluate(
data=data,
predictions="answer",
# other arguments if needed
)
dataset_path: (Optional) The path where the data is stored. Must not contain double
quotes (``"``). If specified, the path is logged to the ``mlflow.datasets``
tag for lineage tracking purposes.
feature_names: (Optional) A list. If the ``data`` argument is a numpy array or list,
``feature_names`` is a list of the feature names for each feature. If
``feature_names=None``, then the ``feature_names`` are generated using the
format ``feature_{feature_index}``. If the ``data`` argument is a Pandas
DataFrame or a Spark DataFrame, ``feature_names`` is a list of the names
of the feature columns in the DataFrame. If ``feature_names=None``, then
all columns except the label column and the predictions column are
regarded as feature columns.
evaluators: The name of the evaluator to use for model evaluation, or a list of
evaluator names. If unspecified, all evaluators capable of evaluating the
specified model on the specified dataset are used. The default evaluator
can be referred to by the name ``"default"``. To see all available
evaluators, call :py:func:`mlflow.models.list_evaluators`.
evaluator_config: A dictionary of additional configurations to supply to the evaluator.
If multiple evaluators are specified, each configuration should be
supplied as a nested dictionary whose key is the evaluator name.
extra_metrics:
(Optional) A list of :py:class:`EvaluationMetric <mlflow.models.EvaluationMetric>`
objects. These metrics are computed in addition to the default metrics associated with
pre-defined `model_type`, and setting `model_type=None` will only compute the metrics
specified in `extra_metrics`. See the `mlflow.metrics` module for more information about
the builtin metrics and how to define extra metrics.
.. code-block:: python
:caption: Example usage of extra metrics
import mlflow
import numpy as np
def root_mean_squared_error(eval_df, _builtin_metrics):
return np.sqrt((np.abs(eval_df["prediction"] - eval_df["target"]) ** 2).mean())
rmse_metric = mlflow.models.make_metric(
eval_fn=root_mean_squared_error,
greater_is_better=False,
)
mlflow.evaluate(..., extra_metrics=[rmse_metric])
custom_artifacts:
(Optional) A list of custom artifact functions with the following signature:
.. code-block:: python
def custom_artifact(
eval_df: Union[pandas.Dataframe, pyspark.sql.DataFrame],
builtin_metrics: Dict[str, float],
artifacts_dir: str,
) -> Dict[str, Any]:
"""
Args:
eval_df:
A Pandas or Spark DataFrame containing ``prediction`` and ``target``
column. The ``prediction`` column contains the predictions made by the
model. The ``target`` column contains the corresponding labels to the
predictions made on that row.
builtin_metrics:
A dictionary containing the metrics calculated by the default evaluator.
The keys are the names of the metrics and the values are the scalar
values of the metrics. Refer to the DefaultEvaluator behavior section
for what metrics will be returned based on the type of model (i.e.
classifier or regressor).
artifacts_dir:
A temporary directory path that can be used by the custom artifacts
function to temporarily store produced artifacts. The directory will be
deleted after the artifacts are logged.
Returns:
A dictionary that maps artifact names to artifact objects
(e.g. a Matplotlib Figure) or to artifact paths within ``artifacts_dir``.
"""
...
Object types that artifacts can be represented as:
- A string uri representing the file path to the artifact. MLflow will infer the
type of the artifact based on the file extension.
- A string representation of a JSON object. This will be saved as a .json artifact.
- Pandas DataFrame. This will be resolved as a CSV artifact.
- Numpy array. This will be saved as a .npy artifact.
- Matplotlib Figure. This will be saved as an image artifact. Note that
``matplotlib.pyplot.savefig`` is called behind the scene with default
configurations.
To customize, either save the figure with the desired configurations and return
its file path or define customizations through environment variables in
``matplotlib.rcParams``.
- Other objects will be attempted to be pickled with the default protocol.
.. code-block:: python
:caption: Example usage of custom artifacts
import mlflow
import matplotlib.pyplot as plt
def scatter_plot(eval_df, builtin_metrics, artifacts_dir):
plt.scatter(eval_df["prediction"], eval_df["target"])
plt.xlabel("Targets")
plt.ylabel("Predictions")
plt.title("Targets vs. Predictions")
plt.savefig(os.path.join(artifacts_dir, "example.png"))
plt.close()
return {"pred_target_scatter": os.path.join(artifacts_dir, "example.png")}
def pred_sample(eval_df, _builtin_metrics, _artifacts_dir):
return {"pred_sample": pred_sample.head(10)}
mlflow.evaluate(..., custom_artifacts=[scatter_plot, pred_sample])
env_manager: Specify an environment manager to load the candidate ``model`` in
isolated Python environments and restore their
dependencies. Default value is ``local``, and the following values are
supported:
- ``virtualenv``: (Recommended) Use virtualenv to restore the python
environment that was used to train the model.
- ``conda``: Use Conda to restore the software environment that was used
to train the model.
- ``local``: Use the current Python environment for model inference, which
may differ from the environment used to train the model and may lead to
errors or invalid predictions.
model_config: the model configuration to use for loading the model with pyfunc. Inspect
the model's pyfunc flavor to know which keys are supported for your
specific model. If not indicated, the default model configuration
from the model is used (if any).
inference_params: (Optional) A dictionary of inference parameters to be passed to the model
when making predictions, such as ``{"max_tokens": 100}``. This is only used when
the ``model`` is an MLflow Deployments endpoint URI e.g. ``"endpoints:/my-chat"``
model_id: (Optional) The ID of the MLflow LoggedModel or Model Version to which the
evaluation results (e.g. metrics and traces) will be linked. If `model_id` is not
specified but `model` is specified, the ID from `model` will be used.
_called_from_genai_evaluate: (Optional) Only used internally.
Returns:
An :py:class:`mlflow.models.EvaluationResult` instance containing
metrics of evaluating the model with the given dataset.
'''
from mlflow.models.evaluation.evaluator_registry import _model_evaluation_registry
from mlflow.pyfunc import PyFuncModel, _load_model_or_server, _ServedPyFuncModel
from mlflow.utils import env_manager as _EnvManager
# Inference params are currently only supported for passing a deployment endpoint as the model.
# TODO: We should support inference_params for other model types
if inference_params is not None and not _is_model_deployment_endpoint_uri(model):
raise MlflowException(
message="The inference_params argument can only be specified when the model "
"is an MLflow Deployments endpoint URI like `endpoints:/my-chat`",
error_code=INVALID_PARAMETER_VALUE,
)
if evaluator_config is not None:
col_mapping = evaluator_config.get("col_mapping", {})
if isinstance(targets, str):
targets = col_mapping.get(targets, targets)
if isinstance(predictions, str):
predictions = col_mapping.get(predictions, predictions)
if data is None:
raise MlflowException(
message="The data argument cannot be None.",
error_code=INVALID_PARAMETER_VALUE,
)
_EnvManager.validate(env_manager)
# If Dataset is provided, the targets can only be specified by the Dataset,
# not the targets parameters of the mlflow.evaluate() API.
if isinstance(data, Dataset) and targets is not None:
raise MlflowException(
message="The top-level targets parameter should not be specified since a Dataset "
"is used. Please only specify the targets column name in the Dataset. For example: "
"`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`. "
"Meanwhile, please specify `mlflow.evaluate(..., targets=None, ...)`.",
error_code=INVALID_PARAMETER_VALUE,
)
# If Dataset is provided and model is None, then the predictions can only be specified by the
# Dataset, not the predictions parameters of the mlflow.evaluate() API.
if isinstance(data, Dataset) and model is None and predictions is not None:
raise MlflowException(
message="The top-level predictions parameter should not be specified since a Dataset "
"is used. Please only specify the predictions column name in the Dataset. For example:"
" `data = mlflow.data.from_pandas(df=X.assign(y=y), predictions='y')`"
"Meanwhile, please specify `mlflow.evaluate(..., predictions=None, ...)`.",
error_code=INVALID_PARAMETER_VALUE,
)
# If Dataset is provided and model is specified, then the data.predictions cannot be specified.
if (
isinstance(data, Dataset)
and model is not None
and getattr(data, "predictions", None) is not None
):
raise MlflowException(
message="The predictions parameter should not be specified in the Dataset since a "
"model is specified. Please remove the predictions column from the Dataset.",
error_code=INVALID_PARAMETER_VALUE,
)
if model_type in [_ModelType.REGRESSOR, _ModelType.CLASSIFIER]:
if isinstance(data, Dataset):
if getattr(data, "targets", None) is not None:
targets = data.targets
else:
raise MlflowException(
message="The targets column name must be specified in the provided Dataset "
f"for {model_type} models. For example: "
"`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`",
error_code=INVALID_PARAMETER_VALUE,
)
else:
if targets is None:
raise MlflowException(
f"The targets argument must be specified for {model_type} models.",
error_code=INVALID_PARAMETER_VALUE,
)
elif model_type is None:
if not extra_metrics:
raise MlflowException(
message="The extra_metrics argument must be specified model_type is None.",
error_code=INVALID_PARAMETER_VALUE,
)
specified_model_id = model_id
model_id = None
if isinstance(model, str):
model_id = _parse_model_id_if_present(model)
if _is_model_deployment_endpoint_uri(model):
model = _get_model_from_deployment_endpoint_uri(model, inference_params)
else:
model = _load_model_or_server(model, env_manager, model_config)
elif env_manager != _EnvManager.LOCAL:
raise MlflowException(
message="The model argument must be a string URI referring to an MLflow model when a "
"non-local env_manager is specified.",
error_code=INVALID_PARAMETER_VALUE,
)
elif isinstance(model, PyFuncModel):
model_id = model.model_id
if model_config:
raise MlflowException(
message="Indicating ``model_config`` when passing a `PyFuncModel`` object as "
"model argument is not allowed. If you need to change the model configuration "
"for the evaluation model, use "
"``mlflow.pyfunc.load_model(model_uri, model_config=<value>)`` and indicate "
"the desired configuration there.",
error_code=INVALID_PARAMETER_VALUE,
)
elif callable(model):
model = _get_model_from_function(model)
elif model is not None:
raise MlflowException(
message="The model argument must be a string URI referring to an MLflow model, "
"an MLflow Deployments endpoint URI, an instance of `mlflow.pyfunc.PyFuncModel`, "
"a function, or None.",
error_code=INVALID_PARAMETER_VALUE,
)
# If model_id is specified, verify it matches the derived model_id
if specified_model_id is not None and model_id is not None and specified_model_id != model_id:
raise MlflowException(
message=(
f"The specified value of the 'model_id' parameter '{specified_model_id}' "
f"contradicts the model_id '{model_id}' associated with the model. Please ensure "
f"they match or omit the 'model_id' parameter."
),
error_code=INVALID_PARAMETER_VALUE,
)
# Use specified model_id if provided, otherwise use derived model_id
model_id = specified_model_id if specified_model_id is not None else model_id
# If none of the model_id and model is specified, use the active model_id
model_id = model_id or mlflow.get_active_model_id()
evaluators: list[EvaluatorBundle] = resolve_evaluators_and_configs(
evaluators, evaluator_config, model_type
)
# NB: MLflow does not use either of these two variables. However, we need to pass these to
# _evaluate() function for backward compatibility.
evaluator_name_list = [evaluator.name for evaluator in evaluators]
evaluator_name_to_conf_map = {evaluator.name: evaluator.config for evaluator in evaluators}
with _start_run_or_reuse_active_run() as run_id:
if not isinstance(data, Dataset):
# Convert data to `mlflow.data.dataset.Dataset`.
if model is None:
data = convert_data_to_mlflow_dataset(
data=data, targets=targets, predictions=predictions
)
else:
data = convert_data_to_mlflow_dataset(data=data, targets=targets)
from mlflow.data.pyfunc_dataset_mixin import PyFuncConvertibleDatasetMixin
# model_id could be None
with _set_active_model(model_id=model_id) if model_id else nullcontext():
if isinstance(data, Dataset) and issubclass(
data.__class__, PyFuncConvertibleDatasetMixin
):
dataset = data.to_evaluation_dataset(dataset_path, feature_names)
# Use metric_prefix configured for builtin evaluators as a dataset tag
context = None
for e in evaluators:
if _model_evaluation_registry.is_builtin(e.name) and e.config.get(
"metric_prefix"
):
context = e.config.get("metric_prefix")
break
client = MlflowClient()
tags = [InputTag(key=MLFLOW_DATASET_CONTEXT, value=context)] if context else []
dataset_input = DatasetInput(dataset=data._to_mlflow_entity(), tags=tags)
client.log_inputs(
run_id,
[dataset_input],
models=[LoggedModelInput(model_id)] if model_id else None,
)
else:
dataset = EvaluationDataset(
data,
targets=targets,
path=dataset_path,
feature_names=feature_names,
predictions=predictions,
)
predictions_expected_in_model_output = predictions if model is not None else None
try:
evaluate_result = _evaluate(
model=model,
model_type=model_type,
model_id=model_id,
dataset=dataset,
run_id=run_id,
evaluator_name_list=evaluator_name_list,
evaluator_name_to_conf_map=evaluator_name_to_conf_map,
extra_metrics=extra_metrics,
custom_artifacts=custom_artifacts,
predictions=predictions_expected_in_model_output,
evaluators=evaluators,
)
finally:
if isinstance(model, _ServedPyFuncModel):
os.kill(model.pid, signal.SIGTERM)
# If model_id is specified, log metrics to the eval run and the logged model
if model_id is not None:
mlflow.log_metrics(metrics=evaluate_result.metrics, dataset=data, model_id=model_id)
return evaluate_result
|
EvaluatorBundle
|
python
|
graphql-python__graphene
|
graphene/relay/tests/test_node_custom.py
|
{
"start": 555,
"end": 649
}
|
class ____(Interface):
width = Int(description="The width of the photo in pixels")
|
BasePhoto
|
python
|
numpy__numpy
|
numpy/random/tests/test_direct.py
|
{
"start": 17634,
"end": 18798
}
|
class ____(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = SFC64
cls.bits = 64
cls.dtype = np.uint64
cls.data1 = cls._read_csv(
join(pwd, './data/sfc64-testset-1.csv'))
cls.data2 = cls._read_csv(
join(pwd, './data/sfc64-testset-2.csv'))
cls.seed_error_type = (ValueError, TypeError)
cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
cls.invalid_init_values = [(-1,)]
def test_legacy_pickle(self):
# Pickling format was changed in 2.0.x
import gzip
import pickle
expected_state = np.array(
[
9957867060933711493,
532597980065565856,
14769588338631205282,
13
],
dtype=np.uint64
)
base_path = os.path.split(os.path.abspath(__file__))[0]
pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz")
with gzip.open(pkl_file) as gz:
sfc = pickle.load(gz)
assert isinstance(sfc, SFC64)
assert_equal(sfc.state["state"]["state"], expected_state)
|
TestSFC64
|
python
|
ray-project__ray
|
python/ray/data/tests/unit/test_expressions.py
|
{
"start": 7785,
"end": 10257
}
|
class ____:
"""Test boolean expression functionality."""
@pytest.mark.parametrize(
"condition",
[
col("age") > lit(18),
col("status") == lit("active"),
col("name").is_not_null(),
(col("age") >= lit(21)) & (col("country") == lit("USA")),
],
ids=["simple_gt", "simple_eq", "is_not_null", "complex_and"],
)
def test_boolean_expressions_directly(self, condition):
"""Test that boolean expressions work directly."""
assert isinstance(condition, Expr)
# Verify the expression structure based on type
if condition.op in [Operation.GT, Operation.EQ]:
assert isinstance(condition, BinaryExpr)
elif condition.op == Operation.IS_NOT_NULL:
assert isinstance(condition, UnaryExpr)
elif condition.op == Operation.AND:
assert isinstance(condition, BinaryExpr)
def test_boolean_combination(self):
"""Test combining boolean expressions with logical operators."""
expr1 = col("age") > 18
expr2 = col("status") == "active"
# Test AND combination
combined_and = expr1 & expr2
assert isinstance(combined_and, BinaryExpr)
assert combined_and.op == Operation.AND
# Test OR combination
combined_or = expr1 | expr2
assert isinstance(combined_or, BinaryExpr)
assert combined_or.op == Operation.OR
# Test NOT operation
negated = ~expr1
assert isinstance(negated, UnaryExpr)
assert negated.op == Operation.NOT
def test_boolean_structural_equality(self):
"""Test structural equality for boolean expressions."""
expr1 = col("age") > 18
expr2 = col("age") > 18
expr3 = col("age") > 21
assert expr1.structurally_equals(expr2)
assert not expr1.structurally_equals(expr3)
def test_complex_boolean_expressions(self):
"""Test complex boolean expressions work correctly."""
# Complex boolean expression
complex_expr = (col("age") >= 21) & (col("country") == "USA")
assert isinstance(complex_expr, BinaryExpr)
assert complex_expr.op == Operation.AND
# Even more complex with OR and NOT
very_complex = ((col("age") > 21) | (col("status") == "VIP")) & ~col("banned")
assert isinstance(very_complex, BinaryExpr)
assert very_complex.op == Operation.AND
|
TestBooleanExpressions
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-plugin_ini/plugin_fail.py
|
{
"start": 3924,
"end": 4597
}
|
class ____(BaseModel):
undefined: Undefined # noqa F821
# MYPY: error: Name "Undefined" is not defined [name-defined]
UndefinedAnnotationModel()
# MYPY: error: Missing named argument "undefined" for "UndefinedAnnotationModel" [call-arg]
Model.model_construct(x=1)
# MYPY: error: Missing named argument "y" for "model_construct" of "Model" [call-arg]
Model.model_construct(_fields_set={'x'}, x=1, y='2')
Model.model_construct(x='1', y='2')
# MYPY: error: Argument "x" to "model_construct" of "Model" has incompatible type "str"; expected "int" [arg-type]
# Strict mode fails
inheriting = InheritingModel(x='1', y='1')
Model(x='1', y='2')
|
UndefinedAnnotationModel
|
python
|
pytorch__pytorch
|
test/dynamo/test_subclasses.py
|
{
"start": 68905,
"end": 79545
}
|
class ____(torch.nn.Module):
def forward(self):
ones: "f32[4, 3]" = torch.ones([4, 3])
return (ones,)
"""
test_recompilation(
f,
torch.randn([3, 4]),
[3, 3, 4, 5],
exp_graphs=[true_graph, true_graph, false_graph, false_graph],
exp_frame_count=[1, 1, 2, 2],
exp_shape_env_guards=[
[],
# s0 is specialized and guarded in outer shape_env when dynamo checks the guards
["Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)"],
[
"Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)",
"Ne(Piecewise((1, Eq(s0, 4)), (0, True)), 1)",
],
[
"Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)",
"Ne(Piecewise((1, Eq(s0, 4)), (0, True)), 1)",
"Ne(Piecewise((1, Eq(s0, 5)), (0, True)), 1)",
],
],
)
test_recompilation(
f,
torch.randn([3, 4]),
[4, 5, 3, 3],
exp_graphs=[false_graph, false_graph, true_graph, true_graph],
exp_frame_count=[1, 1, 2, 2],
exp_shape_env_guards=[
[],
# s0 is specialized and guarded in outer shape_env when dynamo checks the guards
["Ne(Piecewise((1, Eq(s0, 5)), (0, True)), 1)"],
[
"Ne(Piecewise((1, Eq(s0, 5)), (0, True)), 1)",
"Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)",
],
[
"Ne(Piecewise((1, Eq(s0, 5)), (0, True)), 1)",
"Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)",
"Eq(Piecewise((1, Eq(s0, 3)), (0, True)), 1)",
],
],
)
def test_wrapper_subclass_dynamo_attribute_access_on_intermediate(self):
def f(x_subclass):
tmp_subclass = torch.add(x, 1)
return torch.mul(tmp_subclass._scale, tmp_subclass._constant)
x = ScaledTensor(torch.randn(2, 4), torch.randn(3), constant=2)
out_ref = f(x)
out_test = torch.compile(f, backend="aot_eager", fullgraph=True)(x)
self.assertEqual(out_ref, out_test)
def test_support_bases(self):
import abc
import torch.fx._symbolic_trace
class Meta(abc.ABCMeta, torch.fx._symbolic_trace.ProxyableClassMeta):
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
x.attr = 100
return x
class Multistreamable(abc.ABC): # noqa: B024
pass
class Foo(Multistreamable, metaclass=Meta):
pass
@torch.compile(backend="eager", fullgraph=True)
def f(x):
typ = type(Foo())
typ.__bases__
return typ.__bases__
self.assertEqual(f(torch.randn(1)), (Multistreamable,))
@torch.compile(backend="eager", fullgraph=True)
def g(x):
typ = type(Foo())
typ.__base__
return typ.__base__
self.assertEqual(g(torch.randn(1)), Multistreamable)
@parametrize("dynamic", [False, True])
def test_subclass_views(self, dynamic):
def _get_views(t): # returns (view: Tensor, expects_raises_false)
# Note that any closed-over SymInts will be symbolicized during fake-ification.
yield t.narrow(dim=-1, start=3, length=8), False
yield t.split(5, -1)[2], False
yield t.split_with_sizes([9, 6], -1)[1], False
yield t.unsqueeze(-1).expand(4, 15, 10), False
yield t.select(-1, 6), False
# https://github.com/pytorch/pytorch/issues/128649
yield t[2:3, 5:9], dynamic
yield t.view(-1, 15), False
def f(x):
return x * 2
compiled_f = torch.compile(
f, backend="aot_eager", fullgraph=True, dynamic=dynamic
)
# Take a view of a subclass to pass as input.
t = TwoTensor(torch.randn(4, 15), torch.randn(4, 15))
for view, expects_raises in _get_views(t):
torch._dynamo.reset()
out_ref = f(view)
if expects_raises:
with self.assertRaises(AssertionError):
out_test = compiled_f(view)
else:
out_test = compiled_f(view)
self.assertEqual(out_ref, out_test)
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
@parametrize("dynamic", [True, False])
def test_mark_static_with_subclass_desugaring(self, dynamic):
from collections.abc import Callable
from typing import Any, Optional
from torch._dynamo.decorators import mark_static_address
from torch._inductor.compile_fx import compile_fx
from torch._inductor.cudagraph_utils import BoxedDeviceIndex
from torch._inductor.utils import BoxedBool
x_inner = torch.ones(4)
x = TwoTensor(x_inner, x_inner)
mark_static_address(x, guard=False)
def inner_compile(
gm: torch.fx.GraphModule,
example_inputs: list[torch.Tensor],
cudagraphs: Optional[BoxedBool] = None,
static_input_idxs: Optional[list[int]] = None,
is_backward: bool = False,
graph_id: Optional[int] = None,
cpp_wrapper: bool = False,
aot_mode: bool = False,
is_inference: bool = False,
boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
layout_opt: Optional[bool] = None,
extern_node_serializer: Optional[Callable[[list[Any]], Any]] = None,
):
if dynamic:
self.assertEqual(static_input_idxs, [2, 3, 4])
else:
self.assertEqual(static_input_idxs, [1, 2])
return gm
compiler = functools.partial(compile_fx, inner_compile=inner_compile)
@torch.compile(backend=compiler, dynamic=dynamic)
def fn(t0, t1, t2):
return t0 + t1 + t2 + 2
fn(torch.ones(4), x, torch.ones(4))
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_subclass_parameters_are_static_under_training(self):
from collections.abc import Callable
from typing import Any, Optional
from torch._inductor.compile_fx import compile_fx
from torch._inductor.cudagraph_utils import BoxedDeviceIndex
from torch._inductor.utils import BoxedBool
def inner_compile(
gm: torch.fx.GraphModule,
example_inputs: list[torch.Tensor],
cudagraphs: Optional[BoxedBool] = None,
static_input_idxs: Optional[list[int]] = None,
is_backward: bool = False,
graph_id: Optional[int] = None,
cpp_wrapper: bool = False,
aot_mode: bool = False,
is_inference: bool = False,
boxed_forward_device_index: Optional[BoxedDeviceIndex] = None,
layout_opt: Optional[bool] = None,
extern_node_serializer: Optional[Callable[[list[Any]], Any]] = None,
):
# Important bit: there are 3 params: linear.weight.a, linear.weight.b, linear.bias,
# which are the first 3 args of the graph.
self.assertEqual(static_input_idxs, [0, 1, 2])
return gm
compiler = functools.partial(compile_fx, inner_compile=inner_compile)
mod = torch.nn.Linear(4, 4)
w_a = torch.randn(4, 4)
w_b = torch.randn(4, 4)
w = torch.nn.Parameter(TwoTensor(w_a, w_b).requires_grad_())
mod.weight = w
mod = torch.compile(mod, backend=compiler)
mod(torch.randn(4))
# copied from common_utils.py::NestedTensorTestCase
def assertEqualIgnoringNestedInts(self, a, b):
# unbinding NJTs allows us to compare them as essentially equal without
# caring about exact nested int comparison
def _unbind_njts(x):
if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.jagged:
return x.unbind()
else:
return x
self.assertEqual(
pytree.tree_map(_unbind_njts, a), pytree.tree_map(_unbind_njts, b)
)
def _compile_check(
self,
fn,
inps,
*,
dynamic=True,
fullgraph=True,
call_backward=False,
):
def call_backward_fn(t):
if t.is_nested:
from torch.nested._internal.nested_tensor import buffer_from_jagged
t = buffer_from_jagged(t)
return t.sum().backward(retain_graph=True)
torch.manual_seed(0)
fw_compiler = EagerRecordGraphAndInputs()
bw_compiler = EagerRecordGraphAndInputs()
compiler_fn = aot_autograd(
fw_compiler=make_boxed_compiler(fw_compiler),
bw_compiler=make_boxed_compiler(bw_compiler),
partition_fn=min_cut_rematerialization_partition,
keep_inference_input_mutations=True,
)
c = torch.compile(backend=compiler_fn, dynamic=dynamic, fullgraph=fullgraph)(fn)
for inp in inps:
expected = fn(*inp)
# reset the seed for randn to generate the same tensor
torch.manual_seed(0)
got = c(*inp)
self.assertEqualIgnoringNestedInts(expected, got)
if call_backward:
re = pytree.tree_map_only(
lambda x: isinstance(x, torch.Tensor) and x.requires_grad,
call_backward_fn,
expected,
)
rg = pytree.tree_map_only(
lambda x: isinstance(x, torch.Tensor) and x.requires_grad,
call_backward_fn,
got,
)
self.assertEqualIgnoringNestedInts(re, rg)
if call_backward:
return fw_compiler.graphs, bw_compiler.graphs
return fw_compiler.graphs, None
def test_tensor_subclass_TwoTensor_simple(self):
def f(tt):
return tt * tt.size()[0]
a = torch.ones(3, 4, requires_grad=True)
b = a.detach().clone().requires_grad_(True)
tt = TwoTensor(a, b)
fw, bw = self._compile_check(f, [(tt,)], dynamic=True, call_backward=True)
self.assertExpectedInline(
normalize_gm(fw[0].print_readable(print_output=False, expanded_def=True)),
"""\
|
GraphModule
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/exceptions.py
|
{
"start": 999,
"end": 1130
}
|
class ____(AirflowException):
"""Raised when an error is encountered while trying to merge pod configs."""
|
PodReconciliationError
|
python
|
dask__distributed
|
distributed/shuffle/tests/test_shuffle.py
|
{
"start": 76002,
"end": 87098
}
|
class ____(_ShuffleRunManager):
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self.in_get_or_create_shuffle = asyncio.Event()
self.block_get_or_create_shuffle = asyncio.Event()
self.in_get_shuffle_run = asyncio.Event()
self.block_get_shuffle_run = asyncio.Event()
self.finished_get_shuffle_run = asyncio.Event()
self.allow_fail = False
async def get_or_create(self, *args: Any, **kwargs: Any) -> ShuffleRun:
self.in_get_or_create_shuffle.set()
await self.block_get_or_create_shuffle.wait()
return await super().get_or_create(*args, **kwargs)
async def get_with_run_id(self, *args: Any, **kwargs: Any) -> ShuffleRun:
self.in_get_shuffle_run.set()
await self.block_get_shuffle_run.wait()
result = await super().get_with_run_id(*args, **kwargs)
self.finished_get_shuffle_run.set()
return result
def fail(self, *args: Any, **kwargs: Any) -> None:
if self.allow_fail:
return super().fail(*args, **kwargs)
@mock.patch(
"distributed.shuffle._worker_plugin._ShuffleRunManager",
BlockedShuffleAccessAndFailShuffleRunManager,
)
@gen_cluster(client=True, nthreads=[("", 1)] * 2)
async def test_replace_stale_shuffle(c, s, a, b):
run_manager_A = cast(
BlockedShuffleAccessAndFailShuffleRunManager, get_shuffle_run_manager(a)
)
run_manager_B = cast(
BlockedShuffleAccessAndFailShuffleRunManager, get_shuffle_run_manager(b)
)
# Let A behave normally
run_manager_A.allow_fail = True
run_manager_A.block_get_shuffle_run.set()
run_manager_A.block_get_or_create_shuffle.set()
# B can accept shuffle transfers
run_manager_B.block_get_shuffle_run.set()
df = dask.datasets.timeseries(
start="2000-01-01",
end="2000-01-10",
dtypes={"x": float, "y": float},
freq="100 s",
)
# Initialize first shuffle execution
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.persist(out)
shuffle_id = await wait_until_new_shuffle_is_initialized(s)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, a)
await run_manager_B.finished_get_shuffle_run.wait()
assert shuffle_id in get_active_shuffle_runs(a)
assert shuffle_id in get_active_shuffle_runs(b)
stale_shuffle_run = get_active_shuffle_run(shuffle_id, b)
del out
while s.tasks:
await asyncio.sleep(0)
# A is cleaned
await assert_worker_cleanup(a)
# B is not cleaned
assert shuffle_id in get_active_shuffle_runs(b)
assert not stale_shuffle_run.closed
run_manager_B.finished_get_shuffle_run.clear()
run_manager_B.allow_fail = True
# Initialize second shuffle execution
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = df.shuffle("x", force=True)
out = c.persist(out)
await wait_for_tasks_in_state("shuffle-transfer", "memory", 1, a)
await run_manager_B.finished_get_shuffle_run.wait()
# Stale shuffle run has been replaced
shuffle_run = get_active_shuffle_run(shuffle_id, b)
assert shuffle_run != stale_shuffle_run
assert shuffle_run.run_id > stale_shuffle_run.run_id
# Stale shuffle gets cleaned up
await stale_shuffle_run._closed_event.wait()
# Finish shuffle run
run_manager_B.block_get_shuffle_run.set()
run_manager_B.block_get_or_create_shuffle.set()
run_manager_B.allow_fail = True
await out
del out
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_handle_null_partitions(c, s, a, b):
data = [
{"companies": [], "id": "a", "x": None},
{"companies": [{"id": 3}, {"id": 5}], "id": "b", "x": None},
{"companies": [{"id": 3}, {"id": 4}, {"id": 5}], "id": "c", "x": "b"},
{"companies": [{"id": 9}], "id": "a", "x": "a"},
]
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=2)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
ddf = ddf.shuffle(on="id", ignore_index=True, force=True)
result = await c.compute(ddf)
dd.assert_eq(result, df)
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_handle_null_partitions_2(c, s, a, b):
def make_partition(i):
"""Return null column for every other partition"""
if i % 2 == 1:
return pd.DataFrame({"a": np.random.random(10), "b": [None] * 10})
return pd.DataFrame({"a": np.random.random(10), "b": np.random.random(10)})
with dask.config.set({"dataframe.convert-string": False}):
ddf = dd.from_map(make_partition, range(5), meta={"a": float, "b": float})
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = ddf.shuffle(on="a", ignore_index=True, force=True)
result, expected = c.compute([ddf, out])
del out
result = await result
expected = await expected
dd.assert_eq(result, expected)
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_handle_object_columns(c, s, a, b):
with dask.config.set({"dataframe.convert-string": False}):
df = pd.DataFrame(
{
"a": [1, 2, 3],
"b": [
np.asarray([1, 2, 3]),
np.asarray([4, 5, 6]),
np.asarray([7, 8, 9]),
],
"c": ["foo", "bar", "baz"],
}
)
ddf = dd.from_pandas(
df,
npartitions=2,
)
shuffled = ddf.shuffle(on="a", force=True)
result = await c.compute(shuffled)
dd.assert_eq(result, df)
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_reconcile_partitions(c, s, a, b):
def make_partition(i):
"""Return mismatched column types for every other partition"""
if i % 2 == 1:
return pd.DataFrame(
{"a": np.random.random(10), "b": np.random.randint(1, 10, 10)}
)
return pd.DataFrame({"a": np.random.random(10), "b": np.random.random(10)})
ddf = dd.from_map(make_partition, range(50), meta={"a": float, "b": float})
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = ddf.shuffle(on="a", ignore_index=True, force=True)
result, expected = c.compute([ddf, out])
result = await result
expected = await expected
dd.assert_eq(result, expected)
del result
del out
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_raise_on_incompatible_partitions(c, s, a, b):
def make_partition(i):
"""Return incompatible column types for every other partition"""
if i % 2 == 1:
return pd.DataFrame({"a": np.random.random(10), "b": ["a"] * 10})
return pd.DataFrame({"a": np.random.random(10), "b": np.random.random(10)})
ddf = dd.from_map(make_partition, range(50), meta={"a": float, "b": float})
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
out = ddf.shuffle(on="a", ignore_index=True, force=True)
with raises_with_cause(
RuntimeError,
r"P2P \w* failed",
pa.ArrowTypeError,
"incompatible types",
):
await c.compute(out)
await c.close()
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_handle_categorical_data(c, s, a, b):
"""Regression test for https://github.com/dask/distributed/issues/8186"""
df = dd.from_dict(
{
"a": [1, 2, 3, 4, 5],
"b": [
"x",
"y",
"x",
"y",
"z",
],
},
npartitions=2,
)
df.b = df.b.astype("category")
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
shuffled = df.shuffle("a", force=True)
result, expected = await c.compute([shuffled, df], sync=True)
dd.assert_eq(result, expected, check_categorical=False)
await assert_worker_cleanup(a)
await assert_worker_cleanup(b)
await assert_scheduler_cleanup(s)
@gen_cluster(client=True)
async def test_handle_floats_in_int_meta(c, s, a, b):
"""Regression test for https://github.com/dask/distributed/issues/8183"""
df1 = pd.DataFrame(
{
"a": [1, 2],
},
)
df2 = pd.DataFrame(
{
"b": [1],
},
)
expected = df1.join(df2, how="left")
ddf1 = dd.from_pandas(df1, npartitions=1)
ddf2 = dd.from_pandas(df2, npartitions=1)
result = ddf1.join(ddf2, how="left").shuffle(on="a", force=True)
result = await c.compute(result)
dd.assert_eq(result, expected)
@gen_cluster(client=True)
async def test_set_index(c, s, *workers):
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8], "b": 1})
ddf = dd.from_pandas(df, npartitions=3)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
ddf = ddf.set_index("a", divisions=(1, 3, 8))
assert ddf.npartitions == 2
result = await c.compute(ddf)
dd.assert_eq(result, df.set_index("a"))
await c.close()
await asyncio.gather(*[assert_worker_cleanup(w) for w in workers])
await assert_scheduler_cleanup(s)
def test_shuffle_with_existing_index(client):
df = pd.DataFrame({"a": np.random.randint(0, 3, 20)}, index=np.random.random(20))
ddf = dd.from_pandas(
df,
npartitions=4,
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
ddf = ddf.shuffle("a", force=True)
result = client.compute(ddf, sync=True)
dd.assert_eq(result, df)
def test_set_index_with_existing_index(client):
df = pd.DataFrame({"a": np.random.randint(0, 3, 20)}, index=np.random.random(20))
ddf = dd.from_pandas(
df,
npartitions=4,
)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
ddf = ddf.set_index("a")
result = client.compute(ddf, sync=True)
dd.assert_eq(result, df.set_index("a"))
def test_sort_values_with_existing_divisions(client):
"""Regression test for #8165"""
df = pd.DataFrame(
{"a": np.random.randint(0, 3, 20), "b": np.random.randint(0, 3, 20)}
)
ddf = dd.from_pandas(df, npartitions=4)
with dask.config.set({"dataframe.shuffle.method": "p2p"}):
ddf = ddf.set_index("a").sort_values("b")
result = ddf.compute()
dd.assert_eq(
result,
df.set_index("a").sort_values("b"),
check_index=False,
sort_results=False,
)
|
BlockedShuffleAccessAndFailShuffleRunManager
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance6.py
|
{
"start": 1492,
"end": 1654
}
|
class ____(ParentC[_T3]):
pass
def func6(var: ParentC[int]):
if isinstance(var, ChildC1):
reveal_type(var, expected_text="ChildC1[float]")
|
ChildC1
|
python
|
charliermarsh__ruff
|
crates/ruff_python_formatter/resources/test/fixtures/ruff/docstring.py
|
{
"start": 1942,
"end": 2136
}
|
class ____:
"""Browse module classes and functions in IDLE."""
# This class is also the base class for pathbrowser.PathBrowser.
def __init__(self):
pass
|
CommentAfterDocstring1
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/utils/errors.py
|
{
"start": 1956,
"end": 2129
}
|
class ____(SentryAppBaseError):
error_type = SentryAppErrorType.INTEGRATOR
status_code = 400
# Represents an error that's our (sentry's) fault
|
SentryAppIntegratorError
|
python
|
django__django
|
tests/m2m_through_regress/tests.py
|
{
"start": 2379,
"end": 4460
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.bob = Person.objects.create(name="Bob")
cls.roll = Group.objects.create(name="Roll")
cls.bob_roll = Membership.objects.create(person=cls.bob, group=cls.roll)
def test_serialization(self):
"m2m-through models aren't serialized as m2m fields. Refs #8134"
pks = {
"p_pk": self.bob.pk,
"g_pk": self.roll.pk,
"m_pk": self.bob_roll.pk,
"app_label": "m2m_through_regress",
}
out = StringIO()
management.call_command(
"dumpdata", "m2m_through_regress", format="json", stdout=out
)
self.assertJSONEqual(
out.getvalue().strip(),
'[{"pk": %(m_pk)s, "model": "m2m_through_regress.membership", '
'"fields": {"person": %(p_pk)s, "price": 100, "group": %(g_pk)s}}, '
'{"pk": %(p_pk)s, "model": "m2m_through_regress.person", '
'"fields": {"name": "Bob"}}, '
'{"pk": %(g_pk)s, "model": "m2m_through_regress.group", '
'"fields": {"name": "Roll"}}]' % pks,
)
out = StringIO()
management.call_command(
"dumpdata", "m2m_through_regress", format="xml", indent=2, stdout=out
)
self.assertXMLEqual(
out.getvalue().strip(),
"""
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="%(m_pk)s" model="%(app_label)s.membership">
<field to="%(app_label)s.person" name="person" rel="ManyToOneRel">%(p_pk)s</field>
<field to="%(app_label)s.group" name="group" rel="ManyToOneRel">%(g_pk)s</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="%(p_pk)s" model="%(app_label)s.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="%(g_pk)s" model="%(app_label)s.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
""".strip()
% pks,
)
|
M2MThroughSerializationTestCase
|
python
|
encode__httpx
|
httpx/_decoders.py
|
{
"start": 9806,
"end": 12041
}
|
class ____:
"""
Handles incrementally reading lines from text.
Has the same behaviour as the stdlib splitlines,
but handling the input iteratively.
"""
def __init__(self) -> None:
self.buffer: list[str] = []
self.trailing_cr: bool = False
def decode(self, text: str) -> list[str]:
# See https://docs.python.org/3/library/stdtypes.html#str.splitlines
NEWLINE_CHARS = "\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029"
# We always push a trailing `\r` into the next decode iteration.
if self.trailing_cr:
text = "\r" + text
self.trailing_cr = False
if text.endswith("\r"):
self.trailing_cr = True
text = text[:-1]
if not text:
# NOTE: the edge case input of empty text doesn't occur in practice,
# because other httpx internals filter out this value
return [] # pragma: no cover
trailing_newline = text[-1] in NEWLINE_CHARS
lines = text.splitlines()
if len(lines) == 1 and not trailing_newline:
# No new lines, buffer the input and continue.
self.buffer.append(lines[0])
return []
if self.buffer:
# Include any existing buffer in the first portion of the
# splitlines result.
lines = ["".join(self.buffer) + lines[0]] + lines[1:]
self.buffer = []
if not trailing_newline:
# If the last segment of splitlines is not newline terminated,
# then drop it from our output and start a new buffer.
self.buffer = [lines.pop()]
return lines
def flush(self) -> list[str]:
if not self.buffer and not self.trailing_cr:
return []
lines = ["".join(self.buffer)]
self.buffer = []
self.trailing_cr = False
return lines
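# A minimal usage sketch (illustrative chunks, not from the original source),
# showing how a "\r\n" split across chunks is held back until it is complete:
#
#     decoder = LineDecoder()
#     decoder.decode("first\r")    # -> []         (trailing "\r" is buffered)
#     decoder.decode("\nsecond")   # -> ["first"]  (CRLF completed, "second" buffered)
#     decoder.flush()              # -> ["second"]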
SUPPORTED_DECODERS = {
"identity": IdentityDecoder,
"gzip": GZipDecoder,
"deflate": DeflateDecoder,
"br": BrotliDecoder,
"zstd": ZStandardDecoder,
}
if brotli is None:
SUPPORTED_DECODERS.pop("br") # pragma: no cover
if zstandard is None:
SUPPORTED_DECODERS.pop("zstd") # pragma: no cover
|
LineDecoder
|
python
|
ray-project__ray
|
python/ray/serve/tests/unit/test_router.py
|
{
"start": 1433,
"end": 2575
}
|
class ____(ReplicaResult):
def __init__(
self,
replica_id,
is_generator_object: bool,
queue_len_info: Optional[ReplicaQueueLengthInfo] = None,
):
self._replica_id = replica_id
self._is_generator_object = is_generator_object
self._queue_len_info = queue_len_info
self.cancelled = False
async def get_rejection_response(self):
return self._queue_len_info
def get(self, timeout_s: Optional[float]):
raise NotImplementedError
async def get_async(self):
raise NotImplementedError
def __next__(self):
raise NotImplementedError
async def __anext__(self):
raise NotImplementedError
def add_done_callback(self, callback: Callable):
pass
def cancel(self):
self.cancelled = True
def to_object_ref(self, timeout_s: Optional[float]) -> ray.ObjectRef:
raise NotImplementedError
async def to_object_ref_async(self) -> ray.ObjectRef:
raise NotImplementedError
def to_object_ref_gen(self) -> ray.ObjectRefGenerator:
raise NotImplementedError
|
FakeReplicaResult
|
python
|
pytorch__pytorch
|
test/distributed/elastic/utils/util_test.py
|
{
"start": 572,
"end": 1458
}
|
class ____:
_TEST_TIMEOUT = 1234
def __init__(self) -> None:
self.ops = []
def set_timeout(self, timeout: float) -> None:
self.ops.append(("set_timeout", timeout))
@property
def timeout(self) -> datetime.timedelta:
self.ops.append(("timeout",))
return datetime.timedelta(seconds=self._TEST_TIMEOUT)
def set(self, key: str, value: str) -> None:
self.ops.append(("set", key, value))
def get(self, key: str) -> str:
self.ops.append(("get", key))
return "value"
def multi_get(self, keys: list[str]) -> list[str]:
self.ops.append(("multi_get", keys))
return ["value"] * len(keys)
def add(self, key: str, val: int) -> int:
self.ops.append(("add", key, val))
return 3
def wait(self, keys: list[str]) -> None:
self.ops.append(("wait", keys))
|
MockStore
|
python
|
Pylons__pyramid
|
src/pyramid/config/rendering.py
|
{
"start": 267,
"end": 1897
}
|
class ____:
def add_default_renderers(self):
for name, renderer in DEFAULT_RENDERERS:
self.add_renderer(name, renderer)
@action_method
def add_renderer(self, name, factory):
"""
Add a :app:`Pyramid` :term:`renderer` factory to the
current configuration state.
The ``name`` argument is the renderer name. Use ``None`` to
represent the default renderer (a renderer which will be used for all
views unless they name another renderer specifically).
The ``factory`` argument is Python reference to an
implementation of a :term:`renderer` factory or a
:term:`dotted Python name` to same.
"""
factory = self.maybe_dotted(factory)
# if name is None or the empty string, we're trying to register
# a default renderer, but registerUtility is too dumb to accept None
# as a name
if not name:
name = ''
def register():
self.registry.registerUtility(factory, IRendererFactory, name=name)
intr = self.introspectable(
'renderer factories',
name,
self.object_description(factory),
'renderer factory',
)
intr['factory'] = factory
intr['name'] = name
# we need to register renderers early (in phase 1) because they are
# used during view configuration (which happens in phase 3)
self.action(
(IRendererFactory, name),
register,
order=PHASE1_CONFIG,
introspectables=(intr,),
)
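# A minimal usage sketch (hypothetical factory, not part of the original source):
# a Configurator that includes this mixin can register a custom renderer factory
# keyed by a file extension, which views can then request via `renderer='.upper'`.
#
#     def upper_factory(info):
#         def render(value, system):
#             return str(value).upper()
#         return render
#
#     config.add_renderer('.upper', upper_factory)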
|
RenderingConfiguratorMixin
|
python
|
keon__algorithms
|
algorithms/heap/merge_sorted_k_lists.py
|
{
"start": 228,
"end": 2094
}
|
class ____(object):
""" ListNode Class"""
def __init__(self, val):
self.val = val
self.next = None
def merge_k_lists(lists):
""" Merge Lists """
dummy = node = ListNode(0)
list_h = [(n.val, n) for n in lists if n]
heapify(list_h)
while list_h:
_, n_val = list_h[0]
if n_val.next is None:
heappop(list_h) # only change heap size when necessary
else:
heapreplace(list_h, (n_val.next.val, n_val.next))
node.next = n_val
node = node.next
return dummy.next
def merge_k_lists(lists):
""" Merge List """
dummy = ListNode(None)
curr = dummy
q = PriorityQueue()
for node in lists:
if node:
q.put((node.val, node))
while not q.empty():
curr.next = q.get()[1] # These two lines seem to
curr = curr.next # be equivalent to :- curr = q.get()[1]
if curr.next:
q.put((curr.next.val, curr.next))
return dummy.next
"""
I think my code's complexity is also O(n log k), without using a heap or priority queue,
where n is the total number of elements and k is the number of lists.
The mergeTwoLists function in my code comes from the problem Merge Two Sorted Lists,
whose complexity is O(n), with n the combined length of l1 and l2.
To put it simply, assume k is 2^x, so the merging proceeds like a full binary tree,
from bottom to top. On every level of the tree the merging cost is n,
because every level covers all n elements exactly once.
The tree has x levels, i.e. log k, so the total complexity is O(n log k).
For example, with 8 lists whose lengths are x1, x2,
x3, x4, x5, x6, x7, x8, totalling n:
on level 3: x1+x2, x3+x4, x5+x6, x7+x8 sum: n
on level 2: x1+x2+x3+x4, x5+x6+x7+x8 sum: n
on level 1: x1+x2+x3+x4+x5+x6+x7+x8 sum: n
"""
|
ListNode
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/state_dict.py
|
{
"start": 4586,
"end": 8310
}
|
class ____(StateDictOptions):
fqn_param_mapping: dict[
Union[str, torch.Tensor],
Union[FQNS_T, torch.Tensor],
] = field(default_factory=dict)
shared_params_mapping: dict[
Union[str, torch.Tensor],
Union[FQNS_T, torch.Tensor],
] = field(default_factory=dict)
submodule_prefixes: set[str] = field(default_factory=set)
handle_model: bool = True
handle_optim: bool = True
fsdp_context: Callable = contextlib.nullcontext
fsdp_modules: list[nn.Module] = field(default_factory=list)
def _get_fqns(
model: nn.Module,
name: str,
dsd_fqn_modifiers: str = "_fqn_modifiers",
skip_ddp_prefix: bool = True,
skip_compiler_prefix: bool = True,
) -> FQNS_T:
"""
This API is used to convert the name of a parameter to the FQNs. For FSDP
without `use_orig_params`, the name of FlatParameter can be mapped to
multiple original parameters. As a result, the return type of this function
is `set[str]`.
Args:
module (nn.Module): the root model.
name (str): the name
skip_ddp_prefix (bool): whether to skip DDP's `module` prefix
Returns:
The canonical FQNs based on the model traversal.
"""
# Remove the checkpoint prefix, if it exists.
name = name.replace(_CHECKPOINT_PREFIX, "")
if "." not in name:
return {name}
obj_names = name.split(".")
fqn_obj_names = []
curr_obj = model
for i, curr_obj_name in enumerate(obj_names):
if isinstance(curr_obj, DDP):
if curr_obj_name != "module":
raise AssertionError(f"Expected 'module', got '{curr_obj_name}'")
curr_obj = curr_obj.module
if not skip_ddp_prefix:
fqn_obj_names.append(curr_obj_name)
elif isinstance(curr_obj, FSDP):
if i < len(obj_names) - 1 and obj_names[i + 1] == _FLAT_PARAM:
prefix = ".".join(fqn_obj_names)
flat_param = getattr(curr_obj, _FLAT_PARAM)
if prefix:
prefix = f"{prefix}."
return {f"{prefix}{fqn}" for fqn in flat_param._fqns}
curr_obj = getattr(curr_obj, FSDP_WRAPPED_MODULE)
if curr_obj_name != FSDP_WRAPPED_MODULE:
# pyrefly: ignore [bad-argument-type]
fqn_obj_names.append(curr_obj_name)
curr_obj = getattr(curr_obj, curr_obj_name)
elif isinstance(curr_obj, torch._dynamo.eval_frame.OptimizedModule):
if curr_obj_name != "_orig_mod":
raise AssertionError(f"Expected '_orig_mod', got '{curr_obj_name}'")
curr_obj = curr_obj._orig_mod
if not skip_compiler_prefix:
fqn_obj_names.append(curr_obj_name)
else:
# In some modules, the attributes listed by _fqn_modifiers are not shown in the
# state_dict keys; skip them in the FQN so that loading the state dict succeeds.
if hasattr(curr_obj, dsd_fqn_modifiers):
if removed_fqn := getattr(curr_obj, dsd_fqn_modifiers)().get(
curr_obj_name
):
if hasattr(curr_obj, removed_fqn):
curr_obj = getattr(curr_obj, removed_fqn)
# pyrefly: ignore [bad-argument-type]
fqn_obj_names.append(curr_obj_name)
if curr_obj_name == nn.modules.module._EXTRA_STATE_KEY_SUFFIX:
if i != len(obj_names) - 1:
raise RuntimeError("Expect `_extra_state` to be the last obj name")
else:
curr_obj = getattr(curr_obj, curr_obj_name)
return {".".join(fqn_obj_names).replace(_CHECKPOINT_PREFIX, "")}
|
_StateDictInfo
|
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-llama-dataset-metadata/llama_index/packs/llama_dataset_metadata/base.py
|
{
"start": 2090,
"end": 2242
}
|
class ____(BaseMetadata):
"""Baseline data class."""
name: str
config: BaselineConfig
metrics: BaselineMetrics
code_url: str
|
Baseline
|
python
|
wandb__wandb
|
wandb/sdk/wandb_require_helpers.py
|
{
"start": 840,
"end": 1331
}
|
class ____:
requirement = ""
def __init__(self) -> None:
self._check_if_requirements_met()
def __post_init__(self) -> None:
self._check_if_requirements_met()
def _check_if_requirements_met(self) -> None:
env_var = requirement_env_var_mapping[self.requirement]
if not os.getenv(env_var):
raise Exception(
f'You must explicitly enable this feature with `wandb.require("{self.requirement}")`'
)
|
RequiresMixin
|
python
|
huggingface__transformers
|
tests/models/efficientloftr/test_modeling_efficientloftr.py
|
{
"start": 5183,
"end": 18312
}
|
class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (EfficientLoFTRForKeypointMatching, EfficientLoFTRModel) if is_torch_available() else ()
test_resize_embeddings = False
has_attentions = True
def setUp(self):
self.model_tester = EfficientLoFTRModelTester(self)
self.config_tester = ConfigTester(self, config_class=EfficientLoFTRConfig, has_text_modality=False)
def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="EfficientLoFTRForKeypointMatching does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching does not use feedforward chunking")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching is not trainable")
def test_training(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching is not trainable")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching is not trainable")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="EfficientLoFTRForKeypointMatching is not trainable")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(reason="EfficientLoFTR does not output any loss term in the forward pass")
def test_retain_grad_hidden_states_attentions(self):
pass
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_hidden_states = len(self.model_tester.stage_num_blocks) + 1
self.assertEqual(len(hidden_states), expected_num_hidden_states)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.image_height, self.model_tester.image_width],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
def check_attention_output(inputs_dict, config, model_class):
config._attn_implementation = "eager"
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
total_stride = reduce(lambda a, b: a * b, config.stage_stride)
hidden_size = (
self.model_tester.image_height // total_stride * self.model_tester.image_width // total_stride
)
expected_attention_shape = [
self.model_tester.num_attention_heads,
hidden_size,
hidden_size,
]
for i, attention in enumerate(attentions):
self.assertListEqual(
list(attention.shape[-3:]),
expected_attention_shape,
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
check_attention_output(inputs_dict, config, model_class)
# check that output_attentions also works using config
del inputs_dict["output_attentions"]
config.output_attentions = True
check_attention_output(inputs_dict, config, model_class)
@slow
def test_model_from_pretrained(self):
from_pretrained_ids = ["zju-community/efficientloftr"]
for model_name in from_pretrained_ids:
model = EfficientLoFTRForKeypointMatching.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_forward_labels_should_be_none(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
model_inputs = self._prepare_for_class(inputs_dict, model_class)
# Provide an arbitrary sized Tensor as labels to model inputs
model_inputs["labels"] = torch.rand((128, 128))
with self.assertRaises(ValueError) as cm:
model(**model_inputs)
self.assertEqual(ValueError, cm.exception.__class__)
def test_batching_equivalence(self, atol=1e-5, rtol=1e-5):
"""
This test is overwritten because the model outputs do not contain only regressive values but also keypoint
locations.
Similarly to the problem discussed about SuperGlue implementation
[here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787), the consequence of
having different scores for matching is that the maximum indices differ. These indices are used to compute
the keypoint coordinates. The keypoint coordinates in the model outputs are floating point tensors, so the
original implementation of this test covers this case. But the resulting tensors may have differences exceeding
the relative and absolute tolerance.
Therefore, similarly to the SuperGlue integration test, for the key "keypoints" in the model outputs, we check that the
number of differences in keypoint coordinates is below a given threshold (TODO).
"""
def recursive_check(batched_object, single_row_object, model_name, key):
if isinstance(batched_object, (list, tuple)):
for batched_object_value, single_row_object_value in zip(batched_object, single_row_object):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
elif isinstance(batched_object, dict):
for batched_object_value, single_row_object_value in zip(
batched_object.values(), single_row_object.values()
):
recursive_check(batched_object_value, single_row_object_value, model_name, key)
# do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects
elif batched_object is None or not isinstance(batched_object, torch.Tensor):
return
elif batched_object.dim() == 0:
return
# do not compare int or bool outputs as they are mostly computed with max/argmax/topk methods which are
# very sensitive to the inputs (e.g. tiny differences may give totally different results)
elif not torch.is_floating_point(batched_object):
return
else:
# indexing the first element does not always work
# e.g. models that output similarity scores of size (N, M) would need to index [0, 0]
slice_ids = [slice(0, index) for index in single_row_object.shape]
batched_row = batched_object[slice_ids]
if key == "keypoints":
batched_row = torch.sum(batched_row, dim=-1)
single_row_object = torch.sum(single_row_object, dim=-1)
tolerance = 0.02 * single_row_object.shape[-1]
self.assertTrue(
torch.sum(~torch.isclose(batched_row, single_row_object, rtol=rtol, atol=atol)) < tolerance
)
else:
self.assertFalse(
torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}"
)
self.assertFalse(
torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}"
)
self.assertFalse(
torch.isnan(single_row_object).any(),
f"Single row output has `nan` in {model_name} for key={key}",
)
self.assertFalse(
torch.isinf(single_row_object).any(),
f"Single row output has `inf` in {model_name} for key={key}",
)
try:
torch.testing.assert_close(batched_row, single_row_object, atol=atol, rtol=rtol)
except AssertionError as e:
msg = f"Batched and Single row outputs are not equal in {model_name} for key={key}.\n\n"
msg += str(e)
raise AssertionError(msg)
config, batched_input = self.model_tester.prepare_config_and_inputs_for_common()
set_config_for_less_flaky_test(config)
for model_class in self.all_model_classes:
config.output_hidden_states = True
model_name = model_class.__name__
if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"):
config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class)
batched_input_prepared = self._prepare_for_class(batched_input, model_class)
model = model_class(config).to(torch_device).eval()
set_model_for_less_flaky_test(model)
batch_size = self.model_tester.batch_size
single_row_input = {}
for key, value in batched_input_prepared.items():
if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0:
# e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size
single_batch_shape = value.shape[0] // batch_size
single_row_input[key] = value[:single_batch_shape]
else:
single_row_input[key] = value
with torch.no_grad():
model_batched_output = model(**batched_input_prepared)
model_row_output = model(**single_row_input)
if isinstance(model_batched_output, torch.Tensor):
model_batched_output = {"model_output": model_batched_output}
model_row_output = {"model_output": model_row_output}
for key in model_batched_output:
# DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan`
if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key:
model_batched_output[key] = model_batched_output[key][1:]
model_row_output[key] = model_row_output[key][1:]
recursive_check(model_batched_output[key], model_row_output[key], model_name, key)
def prepare_imgs():
dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
image1 = dataset[0]["image"]
image2 = dataset[1]["image"]
image3 = dataset[2]["image"]
return [[image1, image2], [image3, image2]]
@require_torch
@require_vision
|
EfficientLoFTRModelTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_managers.py
|
{
"start": 357,
"end": 5988
}
|
class ____(TestCase):
def setUp(self):
self.user = get(User)
self.another_user = get(User)
self.project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.user],
main_language_project=None,
versions=[],
)
self.version_public = get(
Version,
privacy_level=PUBLIC,
project=self.project,
active=True,
slug="version_public",
)
self.build_public = get(
Build,
version=self.version_public,
project=self.project,
)
self.version_public_external = get(
Version,
privacy_level=PUBLIC,
project=self.project,
active=True,
type=EXTERNAL,
slug="version_public_external",
)
self.build_public_external = get(
Build,
version=self.version_public_external,
project=self.project,
)
self.version_private = get(
Version,
privacy_level=PRIVATE,
project=self.project,
active=True,
slug="version_private",
)
self.build_private = get(
Build,
version=self.version_private,
project=self.project,
)
self.version_private_external = get(
Version,
privacy_level=PRIVATE,
project=self.project,
active=True,
type=EXTERNAL,
slug="version_private_external",
)
self.build_private_external = get(
Build,
version=self.version_private_external,
project=self.project,
)
self.another_project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.another_user],
main_language_project=None,
versions=[],
)
self.another_version_public = get(
Version,
privacy_level=PUBLIC,
project=self.another_project,
active=True,
slug="another_version_public",
)
self.another_build_public = get(
Build,
version=self.another_version_public,
project=self.another_project,
)
self.another_version_public_external = get(
Version,
privacy_level=PUBLIC,
project=self.another_project,
active=True,
type=EXTERNAL,
slug="another_version_public_external",
)
self.another_build_public_external = get(
Build,
version=self.another_version_public_external,
project=self.another_project,
)
self.another_version_private = get(
Version,
privacy_level=PRIVATE,
project=self.another_project,
active=True,
slug="another_version_private",
)
self.another_build_private = get(
Build,
version=self.another_version_private,
project=self.another_project,
)
self.another_version_private_external = get(
Version,
privacy_level=PRIVATE,
project=self.another_project,
active=True,
type=EXTERNAL,
slug="another_version_private_external",
)
self.another_build_private_external = get(
Build,
version=self.another_version_private_external,
project=self.another_project,
)
self.shared_project = get(
Project,
privacy_level=PUBLIC,
external_builds_privacy_level=PUBLIC,
users=[self.user, self.another_user],
main_language_project=None,
versions=[],
)
self.shared_version_public = get(
Version,
privacy_level=PUBLIC,
project=self.shared_project,
active=True,
slug="shared_version_public",
)
self.shared_build_public = get(
Build,
version=self.shared_version_public,
project=self.shared_project,
)
self.shared_version_public_external = get(
Version,
privacy_level=PUBLIC,
project=self.shared_project,
active=True,
type=EXTERNAL,
slug="shared_version_public_external",
)
self.shared_build_public_external = get(
Build,
version=self.shared_version_public_external,
project=self.shared_project,
)
self.shared_version_private = get(
Version,
privacy_level=PRIVATE,
project=self.shared_project,
active=True,
slug="shared_version_private",
)
self.shared_build_private = get(
Build,
version=self.shared_version_private,
project=self.shared_project,
)
self.shared_version_private_external = get(
Version,
privacy_level=PRIVATE,
project=self.shared_project,
active=True,
type=EXTERNAL,
slug="shared_version_private_external",
)
self.shared_build_private_external = get(
Build,
version=self.shared_version_private_external,
project=self.shared_project,
)
|
TestBuildManagerBase
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_autofilter01.py
|
{
"start": 315,
"end": 1845
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("autofilter01.xlsx")
self.set_text_file("autofilter_data.txt")
def test_create_file(self):
"""
Test the creation of a simple XlsxWriter file with an autofilter.
This test corresponds to the following examples/autofilter.py example:
Example 1. Autofilter without conditions.
"""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Set the autofilter.
worksheet.autofilter("A1:D51")
# Open a text file with autofilter example data.
textfile = open(self.txt_filename, encoding="utf-8")
# Start writing data from the first worksheet row.
row = 0
# Read the text file and write it to the worksheet.
for line in textfile:
# Split the input data based on whitespace.
data = line.strip("\n").split()
# Convert the number data from the text file.
for i, item in enumerate(data):
try:
data[i] = float(item)
except ValueError:
pass
# Write out the row data.
worksheet.write_row(row, 0, data)
# Move on to the next worksheet row.
row += 1
textfile.close()
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
tensorflow__tensorflow
|
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
|
{
"start": 90890,
"end": 93617
}
|
class ____(test.TestCase):
def _npXent(self, features, labels, dim=-1):
if dim == -1:
dim = len(features.shape) - 1
print("dim ", dim)
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(
features - np.reshape(np.amax(features, axis=dim), one_only_on_dim)
)
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
bp = probs - labels
tmp = labels * np.log(probs + 1.0e-20)
print("before reduction ", tmp)
l = -np.sum(tmp, axis=dim)
return l, bp
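# Note on the reference gradient above (standard identity, not from the source):
# for p = softmax(x) and labels y that sum to 1, the loss L = -sum_i y_i * log(p_i)
# has gradient dL/dx = p - y, which is exactly the `bp = probs - labels` returned here.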
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testXent(
self, np_features, np_labels, use_gpu=True, with_placeholders=False
):
_, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=use_gpu) as sess:
if with_placeholders:
features_placeholder = array_ops.placeholder(np_features.dtype)
labels_placeholder = array_ops.placeholder(np_labels.dtype)
loss, backprop_ = gen_nn_ops.softmax_cross_entropy_with_logits(
labels=labels_placeholder, features=features_placeholder
)
_, tf_backprop = sess.run(
[loss, backprop_],
feed_dict={
labels_placeholder: np_labels,
features_placeholder: np_features,
},
)
else:
loss, backprop_ = gen_nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels
)
_, tf_backprop = self.evaluate([loss, backprop_])
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.cached_session(use_gpu=use_gpu) as _:
loss = gen_nn_ops.softmax_cross_entropy_with_logits(
labels=np_labels, logits=np_features, dim=dim
)
tf_loss = self.evaluate(loss)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
def _testAll(self, features, labels, with_placeholders=False):
self._testXent(
features, labels, use_gpu=True, with_placeholders=with_placeholders
)
def testFloat(self):
self._testAll(
np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 4.0]]).astype(
np.float32
),
np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.5, 0.5, 0.0]]).astype(
np.float32
),
)
def testHalf(self):
self._testAll(
np.array([[1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 3.0, 4.0]]).astype(
np.float16
),
np.array([[0.0, 0.0, 0.0, 1.0], [0.0, 0.5, 0.5, 0.0]]).astype(
np.float16
),
)
|
XentTest
|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/filters.py
|
{
"start": 61617,
"end": 62164
}
|
class ____(PrefectFilterBaseModel):
"""Filter by `BlockDocument.block_type_id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of block type ids to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.BlockDocument.block_type_id.in_(self.any_))
return filters
|
BlockDocumentFilterBlockTypeId
|
python
|
pytorch__pytorch
|
test/distributed/test_c10d_gloo.py
|
{
"start": 91888,
"end": 96444
}
|
class ____(ProcessGroupGlooTest):
def setUp(self):
os.environ["TORCH_FR_BUFFER_SIZE"] = "10"
super().setUp()
def tearDown(self) -> None:
del os.environ["TORCH_FR_BUFFER_SIZE"]
return super().tearDown()
def _verify_trace(self, t, is_json):
ver = t["version"]
self.assertEqual(ver, "2.10")
pg_config = t["pg_config"]
self.assertEqual(len(pg_config), 1)
default_pg_info = pg_config["0"]
self.assertIn("name", default_pg_info)
self.assertIn("desc", default_pg_info)
self.assertIn("ranks", default_pg_info)
pg_status = t["pg_status"]
self.assertEqual(len(pg_status), 1)
self.assertEqual(str(pg_status["0"]["last_enqueued_collective"]), "3")
self.assertEqual(str(pg_status["0"]["last_completed_collective"]), "3")
self.assertEqual(
str(pg_status["0"]["last_started_collective"]),
"-1",
)
global_ranks = pg_config["0"]["ranks"]
self.assertEqual(len(json.loads(global_ranks)), self.world_size)
self.assertEqual(len(t["entries"]), 3)
t = t["entries"]
last = t[-1]
self.assertEqual(last["process_group"], ("0", ""))
# No event recorded for Gloo.
self.assertEqual(last["state"], "scheduled")
# we don't collect stack traces in JSON at the moment
if not is_json:
self.assertIn("test_c10d_gloo.py", str(last["frames"]))
self.assertEqual(last["input_sizes"], ((3, 4),))
self.assertEqual(last["input_dtypes"], ["Float"])
self.assertEqual(last["output_sizes"], ((3, 4),))
self.assertEqual(last["output_dtypes"], ["Float"])
self.assertEqual(last["collective_seq_id"], 3)
# TODO: Needs verification
self.assertEqual(last["timeout_ms"], 50000)
self.assertTrue("duration_ms" not in last)
@requires_gloo()
def test_short_json(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(group_name="0")
)
a = torch.full((3, 4), float(self.rank))
for _ in range(2):
f = pg.allreduce(a)
f.wait()
time.sleep(1)
t = json.loads(
torch._C._distributed_c10d._dump_fr_trace_json(includeCollectives=True)
)
self._verify_trace(t, True)
@requires_gloo()
def test_short_pickle(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(group_name="0")
)
a = torch.full((3, 4), float(self.rank))
for _ in range(2):
f = pg.allreduce(a)
f.wait()
time.sleep(1)
t = pickle.loads(
torch._C._distributed_c10d._dump_fr_trace(includeCollectives=True)
)
self._verify_trace(
t,
is_json=False,
)
@requires_gloo()
def test_long(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(group_name="0")
)
a = torch.full((3, 4), float(self.rank))
for _ in range(2):
# test some other primitives to make sure
# their strings are valid
xs = [torch.ones(3, 4)]
pg.broadcast(xs).wait()
pg.allreduce(xs).wait()
pg.reduce(xs).wait()
ys = [[torch.empty(3, 4) for _ in range(self.world_size)]]
pg.allgather(ys, xs).wait()
pg.reduce_scatter(xs, ys).wait()
f = pg.allreduce(a)
f.wait()
t = pickle.loads(torch._C._distributed_c10d._dump_fr_trace())
t = t["entries"]
self.assertEqual(len(t), 10)
first = t[0]
last = t[-1]
self.assertEqual(last["profiling_name"], "gloo:all_reduce")
self.assertEqual(last["state"], "scheduled")
self.assertIn("test_c10d_gloo.py", str(last["frames"]))
self.assertEqual(last["input_sizes"], ((3, 4),))
self.assertEqual(last["input_dtypes"], ["Float"])
self.assertEqual(last["output_sizes"], ((3, 4),))
self.assertEqual(last["output_dtypes"], ["Float"])
self.assertEqual(last["timeout_ms"], 50000)
self.assertEqual(last["collective_seq_id"] - first["collective_seq_id"], 9)
|
ProcessGroupGlooFRTest
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/postgresql/test_types.py
|
{
"start": 53853,
"end": 54701
}
|
class ____(DDLEventWCreateHarness, fixtures.TestBase):
__sparse_driver_backend__ = True
__only_on__ = "postgresql > 8.3"
creates_implicitly_with_table = False
drops_implicitly_with_table = False
requires_table_to_exist = False
@testing.fixture
def produce_subject(self):
return Enum(
"x",
"y",
"z",
name="status",
)
@testing.fixture
def produce_event_target(self, produce_subject, connection):
return produce_subject.dialect_impl(connection.dialect)
@testing.fixture
def produce_table_integrated_subject(self, metadata, produce_subject):
return Table(
"table",
metadata,
Column("id", Integer, primary_key=True),
Column("status", produce_subject),
)
|
EnumDDLEventTest
|
python
|
doocs__leetcode
|
solution/2300-2399/2365.Task Scheduler II/Solution.py
|
{
"start": 0,
"end": 276
}
|
class ____:
def taskSchedulerII(self, tasks: List[int], space: int) -> int:
day = defaultdict(int)
ans = 0
for task in tasks:
ans += 1
ans = max(ans, day[task])
day[task] = ans + space + 1
return ans
|
Solution
|
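The greedy scheduler above tracks, per task type, the earliest day on which it may run again. A runnable restatement with a worked check (the input is LeetCode's first example, where tasks [1, 2, 1, 2, 3, 1] with space 3 take 9 days):

from collections import defaultdict

def task_scheduler_ii(tasks, space):
    day = defaultdict(int)  # earliest day each task type may run again
    ans = 0
    for task in tasks:
        ans += 1                     # tentatively run it on the next day
        ans = max(ans, day[task])    # or wait until its cooldown has elapsed
        day[task] = ans + space + 1  # next day this task type is allowed
    return ans

print(task_scheduler_ii([1, 2, 1, 2, 3, 1], 3))  # 9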
python
|
apache__airflow
|
providers/hashicorp/src/airflow/providers/hashicorp/hooks/vault.py
|
{
"start": 1275,
"end": 18089
}
|
class ____(BaseHook):
"""
Hook to Interact with HashiCorp Vault KeyValue Secret engine.
HashiCorp hvac documentation:
* https://hvac.readthedocs.io/en/stable/
You connect to the host specified as host in the connection. The login/password from the connection
are used as credentials usually and you can specify different authentication parameters
via init params or via corresponding extras in the connection.
The mount point should be placed as a path in the URL - similarly to Vault's URL schema:
    This indicates the "path" the secret engine is mounted on. Defaults to "secret" if not specified.
    Note that this ``mount_point`` is not used for authentication if authentication is done via
    different engines. Each engine uses its own engine-specific authentication mount_point.
The extras in the connection are named the same as the parameters ('kv_engine_version', 'auth_type', ...).
You can also use gcp_keyfile_dict extra to pass json-formatted dict in case of 'gcp' authentication.
The URL schemas supported are "vault", "http" (using http to connect to the vault) or
"vaults" and "https" (using https to connect to the vault).
Example URL:
.. code-block::
vault://user:password@host:port/mount_point?kv_engine_version=1&auth_type=github
Login/Password are used as credentials:
* approle: login -> connection.login
* github: password -> token
* token: password -> token
* aws_iam: login -> key_id, password -> secret_id
* azure: login -> client_id, password -> client_secret
* ldap: login -> username, password -> password
* userpass: login -> username, password -> password
* radius: password -> radius_secret
:param vault_conn_id: The id of the connection to use
:param auth_type: Authentication Type for the Vault. Default is ``token``. Available values are:
('approle', 'github', 'gcp', 'kubernetes', 'ldap', 'token', 'userpass')
:param auth_mount_point: It can be used to define mount_point for authentication chosen
Default depends on the authentication method used.
:param kv_engine_version: Select the version of the engine to run (``1`` or ``2``). Defaults to
version defined in connection or ``2`` if not defined in connection.
:param role_id: Role ID for ``aws_iam`` Authentication.
:param region: AWS region for STS API calls (for ``aws_iam`` auth_type).
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type)
:param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``)
:param token_path: path to file containing authentication token to include in requests sent to Vault
(for ``token`` and ``github`` auth_type).
:param gcp_key_path: Path to Google Cloud Service Account key file (JSON) (for ``gcp`` auth_type)
Mutually exclusive with gcp_keyfile_dict
:param gcp_scopes: Comma-separated string containing OAuth2 scopes (for ``gcp`` auth_type)
:param azure_tenant_id: The tenant id for the Azure Active Directory (for ``azure`` auth_type)
:param azure_resource: The configured URL for the application registered in Azure Active Directory
(for ``azure`` auth_type)
:param radius_host: Host for radius (for ``radius`` auth_type)
:param radius_port: Port for radius (for ``radius`` auth_type)
"""
conn_name_attr = "vault_conn_id"
default_conn_name = "vault_default"
conn_type = "vault"
hook_name = "Hashicorp Vault"
def __init__(
self,
vault_conn_id: str = default_conn_name,
auth_type: str | None = None,
auth_mount_point: str | None = None,
kv_engine_version: int | None = None,
role_id: str | None = None,
region: str | None = None,
kubernetes_role: str | None = None,
kubernetes_jwt_path: str | None = None,
token_path: str | None = None,
gcp_key_path: str | None = None,
gcp_scopes: str | None = None,
azure_tenant_id: str | None = None,
azure_resource: str | None = None,
radius_host: str | None = None,
radius_port: int | None = None,
**kwargs,
):
super().__init__()
self.connection = self.get_connection(vault_conn_id)
if not auth_type:
auth_type = self.connection.extra_dejson.get("auth_type") or "token"
if not auth_mount_point:
auth_mount_point = self.connection.extra_dejson.get("auth_mount_point")
if not kv_engine_version:
conn_version = self.connection.extra_dejson.get("kv_engine_version")
try:
kv_engine_version = int(conn_version) if conn_version else DEFAULT_KV_ENGINE_VERSION
except ValueError:
raise VaultError(f"The version is not an int: {conn_version}. ")
client_kwargs = self.connection.extra_dejson.get("client_kwargs", {})
if kwargs:
client_kwargs = merge_dicts(client_kwargs, kwargs)
if auth_type == "approle" and self.connection.login:
role_id = self.connection.login
if auth_type == "aws_iam":
if not role_id:
role_id = self.connection.extra_dejson.get("role_id")
if not region:
region = self.connection.extra_dejson.get("region")
azure_resource, azure_tenant_id = (
self._get_azure_parameters_from_connection(azure_resource, azure_tenant_id)
if auth_type == "azure"
else (None, None)
)
gcp_key_path, gcp_keyfile_dict, gcp_scopes = (
self._get_gcp_parameters_from_connection(gcp_key_path, gcp_scopes)
if auth_type == "gcp"
else (None, None, None)
)
kubernetes_jwt_path, kubernetes_role = (
self._get_kubernetes_parameters_from_connection(kubernetes_jwt_path, kubernetes_role)
if auth_type == "kubernetes"
else (None, None)
)
radius_host, radius_port = (
self._get_radius_parameters_from_connection(radius_host, radius_port)
if auth_type == "radius"
else (None, None)
)
if self.connection.extra_dejson.get("use_tls") is not None:
if bool(self.connection.extra_dejson.get("use_tls")):
conn_protocol = "https"
else:
conn_protocol = "http"
else:
if self.connection.conn_type == "vault":
conn_protocol = "http"
elif self.connection.conn_type == "vaults":
conn_protocol = "https"
elif self.connection.conn_type == "http":
conn_protocol = "http"
elif self.connection.conn_type == "https":
conn_protocol = "https"
else:
raise VaultError("The url schema must be one of ['http', 'https', 'vault', 'vaults' ]")
url = f"{conn_protocol}://{self.connection.host}"
if self.connection.port:
url += f":{self.connection.port}"
        # Schema is really the path in the Connection definition. This is pretty confusing because of the URL schema
mount_point = self.connection.schema if self.connection.schema else "secret"
client_kwargs.update(
url=url,
auth_type=auth_type,
auth_mount_point=auth_mount_point,
mount_point=mount_point,
kv_engine_version=kv_engine_version,
token=self.connection.password,
token_path=token_path,
username=self.connection.login,
password=self.connection.password,
key_id=self.connection.login,
secret_id=self.connection.password,
role_id=role_id,
region=region,
kubernetes_role=kubernetes_role,
kubernetes_jwt_path=kubernetes_jwt_path,
gcp_key_path=gcp_key_path,
gcp_keyfile_dict=gcp_keyfile_dict,
gcp_scopes=gcp_scopes,
azure_tenant_id=azure_tenant_id,
azure_resource=azure_resource,
radius_host=radius_host,
radius_secret=self.connection.password,
radius_port=radius_port,
)
self.vault_client = _VaultClient(**client_kwargs)
def _get_kubernetes_parameters_from_connection(
self, kubernetes_jwt_path: str | None, kubernetes_role: str | None
) -> tuple[str | None, str | None]:
if not kubernetes_jwt_path:
kubernetes_jwt_path = self.connection.extra_dejson.get("kubernetes_jwt_path")
if not kubernetes_jwt_path:
kubernetes_jwt_path = DEFAULT_KUBERNETES_JWT_PATH
if not kubernetes_role:
kubernetes_role = self.connection.extra_dejson.get("kubernetes_role")
return kubernetes_jwt_path, kubernetes_role
def _get_gcp_parameters_from_connection(
self,
gcp_key_path: str | None,
gcp_scopes: str | None,
) -> tuple[str | None, dict | None, str | None]:
if not gcp_scopes:
gcp_scopes = self.connection.extra_dejson.get("gcp_scopes")
if not gcp_key_path:
gcp_key_path = self.connection.extra_dejson.get("gcp_key_path")
string_keyfile_dict = self.connection.extra_dejson.get("gcp_keyfile_dict")
gcp_keyfile_dict = json.loads(string_keyfile_dict) if string_keyfile_dict else None
return gcp_key_path, gcp_keyfile_dict, gcp_scopes
def _get_azure_parameters_from_connection(
self, azure_resource: str | None, azure_tenant_id: str | None
) -> tuple[str | None, str | None]:
if not azure_tenant_id:
azure_tenant_id = self.connection.extra_dejson.get("azure_tenant_id")
if not azure_resource:
azure_resource = self.connection.extra_dejson.get("azure_resource")
return azure_resource, azure_tenant_id
def _get_radius_parameters_from_connection(
self, radius_host: str | None, radius_port: int | None
) -> tuple[str | None, int | None]:
if not radius_port:
radius_port_str = self.connection.extra_dejson.get("radius_port")
if radius_port_str:
try:
radius_port = int(radius_port_str)
except ValueError:
raise VaultError(f"Radius port was wrong: {radius_port_str}")
if not radius_host:
radius_host = self.connection.extra_dejson.get("radius_host")
return radius_host, radius_port
def get_conn(self) -> hvac.Client:
"""
Retrieve connection to Vault.
:return: connection used.
"""
return self.vault_client.client
def get_secret(self, secret_path: str, secret_version: int | None = None) -> dict | None:
"""
Get secret value from the engine.
:param secret_path: Path of the secret
:param secret_version: Optional version of key to read - can only be used in case of version 2 of KV
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
:return: secret stored in the vault as a dictionary
"""
return self.vault_client.get_secret(secret_path=secret_path, secret_version=secret_version)
def get_secret_metadata(self, secret_path: str) -> dict | None:
"""
Read secret metadata (including versions) from the engine. It is only valid for KV version 2.
:param secret_path: Path to read from
:return: secret metadata. This is a Dict containing metadata for the secret.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
"""
return self.vault_client.get_secret_metadata(secret_path=secret_path)
def get_secret_including_metadata(
self, secret_path: str, secret_version: int | None = None
) -> dict | None:
"""
Read secret including metadata. It is only valid for KV version 2.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
:param secret_path: Path of the secret
:param secret_version: Optional version of key to read - can only be used in case of version 2 of KV
:return: key info. This is a Dict with "data" mapping keeping secret
and "metadata" mapping keeping metadata of the secret.
"""
return self.vault_client.get_secret_including_metadata(
secret_path=secret_path, secret_version=secret_version
)
def create_or_update_secret(
self, secret_path: str, secret: dict, method: str | None = None, cas: int | None = None
) -> Response:
"""
Create or updates secret.
:param secret_path: Path to read from
:param secret: Secret to create or update for the path specified
:param method: Optional parameter to explicitly request a POST (create) or PUT (update) request to
the selected kv secret engine. If no argument is provided for this parameter, hvac attempts to
intelligently determine which method is appropriate. Only valid for KV engine version 1
:param cas: Set the "cas" value to use a Check-And-Set operation. If not set the write will be
allowed. If set to 0 a write will only be allowed if the key doesn't exist.
If the index is non-zero the write will only be allowed if the key's current version
matches the version specified in the cas parameter. Only valid for KV engine version 2.
:return: The response of the create_or_update_secret request.
See https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v1.html
and https://hvac.readthedocs.io/en/stable/usage/secrets_engines/kv_v2.html for details.
"""
return self.vault_client.create_or_update_secret(
secret_path=secret_path, secret=secret, method=method, cas=cas
)
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
"""Return connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, IntegerField, StringField
from wtforms.validators import NumberRange, Optional, any_of
return {
"auth_type": StringField(lazy_gettext("Auth type"), widget=BS3TextFieldWidget()),
"auth_mount_point": StringField(lazy_gettext("Auth mount point"), widget=BS3TextFieldWidget()),
"kv_engine_version": IntegerField(
lazy_gettext("KV engine version"),
validators=[any_of([1, 2])],
widget=BS3TextFieldWidget(),
description="Must be 1 or 2.",
default=DEFAULT_KV_ENGINE_VERSION,
),
"kubernetes_role": StringField(lazy_gettext("Kubernetes role"), widget=BS3TextFieldWidget()),
"kubernetes_jwt_path": StringField(
lazy_gettext("Kubernetes jwt path"), widget=BS3TextFieldWidget()
),
"token_path": StringField(lazy_gettext("Token path"), widget=BS3TextFieldWidget()),
"gcp_key_path": StringField(lazy_gettext("GCP key path"), widget=BS3TextFieldWidget()),
"gcp_scopes": StringField(lazy_gettext("GCP scopes"), widget=BS3TextFieldWidget()),
"azure_tenant_id": StringField(lazy_gettext("Azure tenant ID"), widget=BS3TextFieldWidget()),
"azure_resource": StringField(lazy_gettext("Azure resource"), widget=BS3TextFieldWidget()),
"radius_host": StringField(lazy_gettext("Radius host"), widget=BS3TextFieldWidget()),
"radius_port": IntegerField(
lazy_gettext("Radius port"),
widget=BS3TextFieldWidget(),
validators=[Optional(), NumberRange(min=0)],
),
"use_tls": BooleanField(lazy_gettext("Use TLS"), default=True),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["extra"],
"relabeling": {},
}
def test_connection(self) -> tuple[bool, str]:
"""Test Vault connectivity from UI."""
try:
self.get_conn()
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
|
VaultHook
|
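A minimal usage sketch for the hook above, assuming the hashicorp provider is installed and an Airflow connection named `vault_default` points at a reachable Vault instance; the connection id and secret path are placeholders, not values from the record:

from airflow.providers.hashicorp.hooks.vault import VaultHook

hook = VaultHook(vault_conn_id="vault_default", kv_engine_version=2)

# Create or update a secret under the mount point configured on the connection.
hook.create_or_update_secret(secret_path="myapp/db", secret={"user": "app", "password": "s3cr3t"})

# Read it back; returns the secret as a dict, or None if it does not exist.
print(hook.get_secret(secret_path="myapp/db"))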
python
|
plotly__plotly.py
|
plotly/graph_objs/choropleth/_legendgrouptitle.py
|
{
"start": 233,
"end": 2960
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth"
_path_str = "choropleth.legendgrouptitle"
_valid_props = {"font", "text"}
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.choropleth.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super().__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.Legendgrouptitle`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Legendgrouptitle
|
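The property class above backs the `legendgrouptitle` attribute of choropleth traces. A small sketch of setting it through the public `graph_objects` API (the locations and values are made up):

import plotly.graph_objects as go

fig = go.Figure(
    go.Choropleth(
        locations=["CAN", "USA"],  # illustrative ISO-3 country codes
        z=[1, 2],
        legendgroup="north-america",
        legendgrouptitle=dict(text="North America", font=dict(size=14)),
    )
)
print(fig.data[0].legendgrouptitle.text)  # North America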
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/ctl/cli_config.py
|
{
"start": 10354,
"end": 32801
}
|
class ____:
"""Factory class that creates 1-1 mapping with airflowctl/api/operations."""
datamodels_extended_map: dict[str, list[str]]
operations: list[dict]
args_map: dict[tuple, list[Arg]]
func_map: dict[tuple, Callable]
commands_map: dict[str, list[ActionCommand]]
group_commands_list: list[CLICommand]
output_command_list: list[str]
exclude_operation_names: list[str]
exclude_method_names: list[str]
def __init__(self, file_path: str | Path | None = None):
self.datamodels_extended_map = {}
self.func_map = {}
self.operations = []
self.args_map = {}
self.commands_map = {}
self.group_commands_list = []
self.file_path = inspect.getfile(BaseOperations) if file_path is None else file_path
# Excluded Lists are in Class Level for further usage and avoid searching them
# Exclude parameters that are not needed for CLI from datamodels
self.excluded_parameters = ["schema_"]
# This list is used to determine if the command/operation needs to output data
self.output_command_list = ["list", "get", "create", "delete", "update", "trigger"]
self.exclude_operation_names = ["LoginOperations", "VersionOperations", "BaseOperations"]
self.exclude_method_names = [
"error",
"__init__",
"__init_subclass__",
"_check_flag_and_exit_if_server_response_error",
# Excluding bulk operation. Out of scope for CLI. Should use implemented commands.
"bulk",
]
self.excluded_output_keys = [
"total_entries",
]
def _inspect_operations(self) -> None:
"""Parse file and return matching Operation Method with details."""
def get_function_details(node: ast.FunctionDef, parent_node: ast.ClassDef) -> dict:
"""Extract function name, arguments, and return annotation."""
func_name = node.name
args = []
return_annotation: str = ""
for arg in node.args.args:
arg_name = arg.arg
arg_type = ast.unparse(arg.annotation) if arg.annotation else "Any"
if arg_name != "self":
args.append({arg_name: arg_type})
if node.returns:
return_annotation = [
t.strip()
# TODO change this while removing Python 3.9 support
for t in ast.unparse(node.returns).split("|")
if t.strip() != ServerResponseError.__name__
].pop()
return {
"name": func_name,
"parameters": args,
"return_type": return_annotation,
"parent": parent_node,
}
with open(self.file_path, encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=self.file_path)
for node in ast.walk(tree):
if (
isinstance(node, ast.ClassDef)
and "Operations" in node.name
and node.name not in self.exclude_operation_names
and node.body
):
for child in node.body:
if isinstance(child, ast.FunctionDef) and child.name not in self.exclude_method_names:
self.operations.append(get_function_details(node=child, parent_node=node))
@staticmethod
def _sanitize_arg_parameter_key(parameter_key: str) -> str:
return parameter_key.replace("_", "-")
@staticmethod
def _sanitize_method_param_key(parameter_key: str) -> str:
return parameter_key.replace("-", "_")
@staticmethod
def _is_primitive_type(type_name: str) -> bool:
primitive_types = {
"int",
"float",
"bool",
"str",
"bytes",
"list",
"dict",
"tuple",
"set",
"datetime.datetime",
}
return type_name in primitive_types
@staticmethod
def _python_type_from_string(type_name: str | type) -> type | Callable:
"""
Return the corresponding Python *type* for a primitive type name string.
This helper is used when generating ``argparse`` CLI arguments from the
OpenAPI-derived operation signatures. Without this mapping the CLI would
incorrectly assume every primitive parameter is a *string*, potentially
leading to type errors or unexpected behaviour when invoking the REST
API.
"""
if "|" in str(type_name):
type_name = [t.strip() for t in str(type_name).split("|") if t.strip() != "None"].pop()
mapping: dict[str, type | Callable] = {
"int": int,
"float": float,
"bool": bool,
"str": str,
"bytes": bytes,
"list": list,
"dict": dict,
"tuple": tuple,
"set": set,
"datetime.datetime": datetime.datetime,
"dict[str, typing.Any]": dict,
}
# Default to ``str`` to preserve previous behaviour for any unrecognised
# type names while still allowing the CLI to function.
if isinstance(type_name, type):
type_name = type_name.__name__
return mapping.get(str(type_name), str)
@staticmethod
def _create_arg(
arg_flags: tuple,
arg_type: type | Callable,
arg_help: str,
arg_action: argparse.BooleanOptionalAction | None,
arg_dest: str | None = None,
arg_default: Any | None = None,
) -> Arg:
return Arg(
flags=arg_flags,
type=arg_type,
dest=arg_dest,
help=arg_help,
default=arg_default,
action=arg_action,
)
def _create_arg_for_non_primitive_type(
self,
parameter_type: str,
parameter_key: str,
) -> list[Arg]:
"""Create Arg for non-primitive type Pydantic."""
parameter_type_map = getattr(generated_datamodels, parameter_type)
commands = []
if parameter_type_map not in self.datamodels_extended_map.keys():
self.datamodels_extended_map[parameter_type] = []
for field, field_type in parameter_type_map.model_fields.items():
if field in self.excluded_parameters:
continue
self.datamodels_extended_map[parameter_type].append(field)
if type(field_type.annotation) is type:
commands.append(
self._create_arg(
arg_flags=("--" + self._sanitize_arg_parameter_key(field),),
arg_type=self._python_type_from_string(field_type.annotation),
arg_action=argparse.BooleanOptionalAction if field_type.annotation is bool else None, # type: ignore
arg_help=f"{field} for {parameter_key} operation",
arg_default=False if field_type.annotation is bool else None,
)
)
else:
try:
annotation = field_type.annotation.__args__[0]
except AttributeError:
annotation = field_type.annotation
commands.append(
self._create_arg(
arg_flags=("--" + self._sanitize_arg_parameter_key(field),),
arg_type=self._python_type_from_string(annotation),
arg_action=argparse.BooleanOptionalAction if annotation is bool else None, # type: ignore
arg_help=f"{field} for {parameter_key} operation",
arg_default=False if annotation is bool else None,
)
)
return commands
def _create_args_map_from_operation(self):
"""Create Arg from Operation Method checking for parameters and return types."""
for operation in self.operations:
args = []
for parameter in operation.get("parameters"):
for parameter_key, parameter_type in parameter.items():
if self._is_primitive_type(type_name=parameter_type):
is_bool = parameter_type == "bool"
args.append(
self._create_arg(
arg_flags=("--" + self._sanitize_arg_parameter_key(parameter_key),),
arg_type=self._python_type_from_string(parameter_type),
arg_action=argparse.BooleanOptionalAction if is_bool else None,
arg_help=f"{parameter_key} for {operation.get('name')} operation in {operation.get('parent').name}",
arg_default=False if is_bool else None,
)
)
else:
args.extend(
self._create_arg_for_non_primitive_type(
parameter_type=parameter_type, parameter_key=parameter_key
)
)
if any(operation.get("name").startswith(cmd) for cmd in self.output_command_list):
args.extend([ARG_OUTPUT, ARG_AUTH_ENVIRONMENT])
self.args_map[(operation.get("name"), operation.get("parent").name)] = args
def _create_func_map_from_operation(self):
"""Create function map from Operation Method checking for parameters and return types."""
@provide_api_client(kind=ClientKind.CLI)
def _get_func(args: Namespace, api_operation: dict, api_client: Client = NEW_API_CLIENT, **kwargs):
import importlib
imported_operation = importlib.import_module("airflowctl.api.operations")
operation_class_object = getattr(imported_operation, api_operation["parent"].name)
operation_class = operation_class_object(client=api_client)
operation_method_object = getattr(operation_class, api_operation["name"])
# Walk through all args and create a dictionary such as args.abc -> {"abc": "value"}
method_params = {}
datamodel = None
datamodel_param_name = None
args_dict = vars(args)
for parameter in api_operation["parameters"]:
for parameter_key, parameter_type in parameter.items():
if self._is_primitive_type(type_name=parameter_type):
method_params[self._sanitize_method_param_key(parameter_key)] = args_dict[
parameter_key
]
else:
datamodel = getattr(generated_datamodels, parameter_type)
for expanded_parameter in self.datamodels_extended_map[parameter_type]:
if parameter_key not in method_params:
method_params[parameter_key] = {}
datamodel_param_name = parameter_key
if expanded_parameter in self.excluded_parameters:
continue
if expanded_parameter in args_dict.keys():
method_params[parameter_key][
self._sanitize_method_param_key(expanded_parameter)
] = args_dict[expanded_parameter]
if datamodel:
if datamodel_param_name:
method_params[datamodel_param_name] = datamodel.model_validate(
method_params[datamodel_param_name]
)
else:
method_params = datamodel.model_validate(method_params)
method_output = operation_method_object(**method_params)
else:
method_output = operation_method_object(**method_params)
def convert_to_dict(obj: Any, api_operation_name: str) -> dict | Any:
"""Recursively convert an object to a dictionary or list of dictionaries."""
if hasattr(obj, "model_dump"):
return obj.model_dump(mode="json")
# Handle delete operation which returns a string of the deleted entity name
if isinstance(obj, str):
return {"operation": api_operation_name, "entity": obj}
return obj
def check_operation_and_collect_list_of_dict(dict_obj: dict) -> list:
"""Check if the object is a nested dictionary and collect list of dictionaries."""
def is_dict_nested(obj: dict) -> bool:
"""Check if the object is a nested dictionary."""
return any(isinstance(i, dict) or isinstance(i, list) for i in obj.values())
if is_dict_nested(dict_obj):
iteration_dict = dict_obj.copy()
for key, value in iteration_dict.items():
if key in self.excluded_output_keys:
del dict_obj[key]
continue
if isinstance(value, Enum):
dict_obj[key] = value.value
if isinstance(value, list):
dict_obj[key] = value
if isinstance(value, dict):
dict_obj[key] = check_operation_and_collect_list_of_dict(value)
# If dict_obj only have single key return value instead of list
# This can happen since we are excluding some keys from user such as total_entries from list operations
if len(dict_obj) == 1:
return dict_obj[next(iter(dict_obj.keys()))]
# If not nested, return the object as a list which the result should be already a dict
return [dict_obj]
AirflowConsole().print_as(
data=check_operation_and_collect_list_of_dict(
convert_to_dict(method_output, api_operation["name"])
),
output=args.output,
)
for operation in self.operations:
self.func_map[(operation.get("name"), operation.get("parent").name)] = partial(
_get_func, api_operation=operation
)
def _create_group_commands_from_operation(self):
"""Create GroupCommand from Operation Methods."""
for operation in self.operations:
operation_name = operation["name"]
operation_group_name = operation["parent"].name
if operation_group_name not in self.commands_map:
self.commands_map[operation_group_name] = []
self.commands_map[operation_group_name].append(
ActionCommand(
name=operation["name"].replace("_", "-"),
help=f"Perform {operation_name} operation",
func=self.func_map[(operation_name, operation_group_name)],
args=self.args_map[(operation_name, operation_group_name)],
)
)
for group_name, action_commands in self.commands_map.items():
self.group_commands_list.append(
GroupCommand(
name=group_name.replace("Operations", "").lower(),
help=f"Perform {group_name.replace('Operations', '')} operations",
subcommands=action_commands,
)
)
@property
def group_commands(self) -> list[CLICommand]:
"""List of GroupCommands generated for airflowctl."""
self._inspect_operations()
self._create_args_map_from_operation()
self._create_func_map_from_operation()
self._create_group_commands_from_operation()
return self.group_commands_list
def merge_commands(
base_commands: list[CLICommand], commands_will_be_merged: list[CLICommand]
) -> list[CLICommand]:
"""
    Merge group commands with existing commands, extending base_commands with commands_will_be_merged.
Args:
base_commands: List of base commands to be extended.
commands_will_be_merged: List of group commands to be merged with base_commands.
Returns:
List of merged commands.
"""
merge_command_map = {}
new_commands: list[CLICommand] = []
for command in commands_will_be_merged:
if isinstance(command, ActionCommand):
new_commands.append(command)
if isinstance(command, GroupCommand):
merge_command_map[command.name] = command
merged_commands = []
# Common commands
for command in base_commands:
if command.name in merge_command_map.keys():
merged_command = merge_command_map[command.name]
if isinstance(command, GroupCommand):
# Merge common group command with existing group command
current_subcommands = list(command.subcommands)
current_subcommands.extend(list(merged_command.subcommands))
new_commands.append(
GroupCommand(
name=command.name,
help=command.help,
subcommands=current_subcommands,
api_operation=merged_command.api_operation,
description=merged_command.description,
epilog=command.epilog,
)
)
elif isinstance(command, ActionCommand):
new_commands.append(merged_command)
merged_commands.append(command.name)
else:
new_commands.append(command)
# Discrete commands
new_commands.extend(
[
merged_command
for merged_command in merge_command_map.values()
if merged_command.name not in merged_commands
]
)
return new_commands
command_factory = CommandFactory()
AUTH_COMMANDS = (
ActionCommand(
name="login",
help="Login to the metadata database for personal usage. JWT Token must be provided via parameter.",
description="Login to the metadata database",
func=lazy_load_command("airflowctl.ctl.commands.auth_command.login"),
args=(ARG_AUTH_URL, ARG_AUTH_TOKEN, ARG_AUTH_ENVIRONMENT, ARG_AUTH_USERNAME, ARG_AUTH_PASSWORD),
),
)
CONFIG_COMMANDS = (
ActionCommand(
name="lint",
help="Lint options for the configuration changes while migrating from Airflow 2 to Airflow 3",
description="Lint options for the configuration changes while migrating from Airflow 2 to Airflow 3",
func=lazy_load_command("airflowctl.ctl.commands.config_command.lint"),
args=(
ARG_CONFIG_SECTION,
ARG_CONFIG_OPTION,
ARG_CONFIG_IGNORE_SECTION,
ARG_CONFIG_IGNORE_OPTION,
ARG_CONFIG_VERBOSE,
),
),
)
CONNECTION_COMMANDS = (
ActionCommand(
name="import",
help="Import connections from a file. "
"This feature is compatible with airflow CLI `airflow connections export a.json` command. "
"Export it from `airflow CLI` and import it securely via this command.",
func=lazy_load_command("airflowctl.ctl.commands.connection_command.import_"),
args=(Arg(flags=("file",), metavar="FILEPATH", help="Connections JSON file"),),
),
)
DAG_COMMANDS = (
ActionCommand(
name="pause",
help="Pause a Dag",
func=lazy_load_command("airflowctl.ctl.commands.dag_command.pause"),
args=(
ARG_DAG_ID,
ARG_OUTPUT,
),
),
ActionCommand(
name="unpause",
help="Unpause a Dag",
func=lazy_load_command("airflowctl.ctl.commands.dag_command.unpause"),
args=(
ARG_DAG_ID,
ARG_OUTPUT,
),
),
)
POOL_COMMANDS = (
ActionCommand(
name="import",
help="Import pools",
func=lazy_load_command("airflowctl.ctl.commands.pool_command.import_"),
args=(ARG_FILE,),
),
ActionCommand(
name="export",
help="Export all pools",
func=lazy_load_command("airflowctl.ctl.commands.pool_command.export"),
args=(
ARG_FILE,
ARG_OUTPUT,
),
),
)
VARIABLE_COMMANDS = (
ActionCommand(
name="import",
help="Import variables",
func=lazy_load_command("airflowctl.ctl.commands.variable_command.import_"),
args=(ARG_FILE, ARG_VARIABLE_ACTION_ON_EXISTING_KEY),
),
ActionCommand(
name="export",
help="Export all variables",
func=lazy_load_command("airflowctl.ctl.commands.variable_command.export"),
args=(ARG_FILE,),
),
)
core_commands: list[CLICommand] = [
GroupCommand(
name="auth",
help="Manage authentication for CLI. "
"Either pass token from environment variable/parameter or pass username and password.",
subcommands=AUTH_COMMANDS,
),
GroupCommand(
name="config",
help="View, lint and update configurations.",
subcommands=CONFIG_COMMANDS,
),
GroupCommand(
name="connections",
help="Manage Airflow connections",
subcommands=CONNECTION_COMMANDS,
),
GroupCommand(
name="dags",
help="Manage Airflow Dags",
subcommands=DAG_COMMANDS,
),
GroupCommand(
name="pools",
help="Manage Airflow pools",
subcommands=POOL_COMMANDS,
),
ActionCommand(
name="version",
help="Show version information",
description="Show version information",
func=lazy_load_command("airflowctl.ctl.commands.version_command.version_info"),
args=(
ARG_AUTH_ENVIRONMENT,
ARG_REMOTE,
),
),
GroupCommand(
name="variables",
help="Manage Airflow variables",
subcommands=VARIABLE_COMMANDS,
),
]
# Add generated group commands
core_commands = merge_commands(
base_commands=command_factory.group_commands, commands_will_be_merged=core_commands
)
|
CommandFactory
|
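The `merge_commands` helper above concatenates subcommands for groups that share a name and passes everything else through. A toy version of that idea using plain dataclasses (airflowctl's `GroupCommand`/`ActionCommand` types are not reproduced here):

from dataclasses import dataclass, field

@dataclass
class Group:
    name: str
    subcommands: list = field(default_factory=list)

def merge(base, extra):
    by_name = {g.name: g for g in extra}
    merged = []
    for g in base:
        if g.name in by_name:
            # Same group name: keep one group with the combined subcommands.
            merged.append(Group(g.name, g.subcommands + by_name.pop(g.name).subcommands))
        else:
            merged.append(g)
    merged.extend(by_name.values())  # groups that only exist in `extra`
    return merged

print(merge([Group("dags", ["pause"])], [Group("dags", ["list"]), Group("pools", ["get"])]))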
python
|
openai__openai-python
|
src/openai/types/fine_tuning/job_create_params.py
|
{
"start": 3729,
"end": 4333
}
|
class ____(TypedDict, total=False):
batch_size: Union[Literal["auto"], int]
"""Number of examples in each batch.
A larger batch size means that model parameters are updated less frequently, but
with lower variance.
"""
learning_rate_multiplier: Union[Literal["auto"], float]
"""Scaling factor for the learning rate.
A smaller learning rate may be useful to avoid overfitting.
"""
n_epochs: Union[Literal["auto"], int]
"""The number of epochs to train the model for.
An epoch refers to one full cycle through the training dataset.
"""
|
Hyperparameters
|
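A `TypedDict` declared with `total=False`, like the one above, is an ordinary dict at runtime whose keys are all optional. A quick illustration, re-declaring the class locally so the snippet stands alone:

from typing import Literal, TypedDict, Union

class Hyperparameters(TypedDict, total=False):
    batch_size: Union[Literal["auto"], int]
    learning_rate_multiplier: Union[Literal["auto"], float]
    n_epochs: Union[Literal["auto"], int]

params: Hyperparameters = {"n_epochs": 3, "batch_size": "auto"}  # any subset of keys is valid
print(params)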
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/class_interval.py
|
{
"start": 1567,
"end": 1681
}
|
class ____(B3):
def m0(self, x):
self.m1(x)
def m2(self, x):
_test_sink(x) # Issue here
|
C3
|
python
|
gevent__gevent
|
src/gevent/tests/test__ares_host_result.py
|
{
"start": 310,
"end": 908
}
|
class ____(greentest.TestCase):
# Issue 104: ares.ares_host_result unpickleable
def _test(self, protocol):
r = ares_host_result('family', ('arg1', 'arg2', ))
dumped = pickle.dumps(r, protocol)
loaded = pickle.loads(dumped)
self.assertEqual(r, loaded)
# pylint:disable=no-member
self.assertEqual(r.family, loaded.family)
for i in range(0, pickle.HIGHEST_PROTOCOL):
def make_test(j):
return lambda self: self._test(j)
setattr(TestPickle, 'test' + str(i), make_test(i))
if __name__ == '__main__':
greentest.main()
|
TestPickle
|
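The gevent test above attaches one test method per pickle protocol at import time. A stripped-down, dependency-free version of that pattern, with a namedtuple standing in for `ares_host_result`:

import pickle
import unittest
from collections import namedtuple

HostResult = namedtuple("HostResult", ["family", "addresses"])  # stand-in type

class TestPickleRoundtrip(unittest.TestCase):
    def _roundtrip(self, protocol):
        r = HostResult("family", ("arg1", "arg2"))
        self.assertEqual(r, pickle.loads(pickle.dumps(r, protocol)))

# Generate one test method per supported pickle protocol, as in the record above.
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
    def _make_test(p):
        return lambda self: self._roundtrip(p)
    setattr(TestPickleRoundtrip, f"test_protocol_{proto}", _make_test(proto))

if __name__ == "__main__":
    unittest.main()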
python
|
huggingface__transformers
|
tests/models/bertweet/test_tokenization_bertweet.py
|
{
"start": 792,
"end": 2854
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "vinai/bertweet-base"
tokenizer_class = BertweetTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["I", "m", "V@@", "R@@", "r", "e@@"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a m</w>"]
cls.special_tokens_map = {"unk_token": "<unk>"}
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.writelines(f"{token} {vocab_tokens[token]}\n" for token in vocab_tokens)
with open(cls.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
kwargs.update(cls.special_tokens_map)
pretrained_name = pretrained_name or cls.tmpdirname
return BertweetTokenizer.from_pretrained(pretrained_name, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "I am VinAI Research"
output_text = "I <unk> m V<unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = BertweetTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "I am VinAI Research"
bpe_tokens = "I a@@ m V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 3, 5, 6, 3, 3, 3, 4, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
|
BertweetTokenizationTest
|
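The tokenizer test above drives BPE merges such as `a m</w>`. A tiny, dependency-free illustration of applying a single merge to a word; this is only the core idea, not the BertweetTokenizer implementation:

def apply_merge(symbols, pair):
    # Replace every adjacent occurrence of `pair` with the fused symbol.
    out, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == pair:
            out.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out

print(apply_merge(["a", "m</w>"], ("a", "m</w>")))  # ['am</w>']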