| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python
|
numpy__numpy
|
numpy/_core/tests/test_umath.py
|
{
"start": 86071,
"end": 87735
}
|
class ____:
def test_avx_based_ufunc(self):
strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
np.random.seed(42)
for func, prop in avx_ufuncs.items():
maxulperr = prop[0]
minval = prop[1]
maxval = prop[2]
# various array sizes to ensure masking in AVX is tested
for size in range(1, 32):
myfunc = getattr(np, func)
x_f32 = np.random.uniform(low=minval, high=maxval,
size=size).astype(np.float32)
x_f64 = x_f32.astype(np.float64)
x_f128 = x_f32.astype(np.longdouble)
y_true128 = myfunc(x_f128)
if maxulperr == 0:
assert_equal(myfunc(x_f32), y_true128.astype(np.float32))
assert_equal(myfunc(x_f64), y_true128.astype(np.float64))
else:
assert_array_max_ulp(myfunc(x_f32),
y_true128.astype(np.float32),
maxulp=maxulperr)
assert_array_max_ulp(myfunc(x_f64),
y_true128.astype(np.float64),
maxulp=maxulperr)
# various strides to test gather instruction
if size > 1:
y_true32 = myfunc(x_f32)
y_true64 = myfunc(x_f64)
for jj in strides:
assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
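# --- Illustrative sketch (not part of the original test): what a maximum-ULP
# comparison like assert_array_max_ulp measures. np.spacing(x) is the gap to
# the next representable float, so |a - b| / spacing estimates ULP distance.
import numpy as np
def ulp_distance(a, b):
    a, b = np.asarray(a), np.asarray(b)
    return np.abs(a - b) / np.spacing(np.maximum(np.abs(a), np.abs(b)))
one = np.float32(1.0)
nxt = np.nextafter(one, np.float32(2.0))   # exactly 1 ULP above 1.0
assert ulp_distance(one, nxt) == 1.0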
|
TestAVXUfuncs
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/entity_key.py
|
{
"start": 1091,
"end": 1293
}
|
class ____(graphene.ObjectType):
assetKey = graphene.NonNull(GrapheneAssetKey)
partitions = non_null_list(graphene.String)
class Meta:
name = "AssetLineageInfo"
|
GrapheneAssetLineageInfo
|
python
|
tornadoweb__tornado
|
tornado/netutil.py
|
{
"start": 16750,
"end": 18098
}
|
class ____(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
Use this instead of `ThreadedResolver` when you require additional
control over the executor being used.
The executor will be shut down when the resolver is closed unless
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.0
The default `Resolver` now uses `asyncio.loop.getaddrinfo`;
use that instead of this class.
"""
def initialize(
self,
executor: Optional[concurrent.futures.Executor] = None,
close_executor: bool = True,
) -> None:
if executor is not None:
self.executor = executor
self.close_executor = close_executor
else:
self.executor = dummy_executor
self.close_executor = False
def close(self) -> None:
if self.close_executor:
self.executor.shutdown()
self.executor = None # type: ignore
@run_on_executor
def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
return _resolve_addr(host, port, family)
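# --- Usage sketch (illustrative; assumes Tornado is installed and an event
# loop is running). Passing close_executor=False lets the same thread pool
# be reused for other blocking work after the resolver is closed.
import asyncio
import concurrent.futures
from tornado.netutil import ExecutorResolver
async def demo():
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=2)
    resolver = ExecutorResolver(executor=pool, close_executor=False)
    print(await resolver.resolve("localhost", 80))
    resolver.close()   # the pool survives and must be shut down by us
    pool.shutdown()
asyncio.run(demo())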
|
ExecutorResolver
|
python
|
ray-project__ray
|
python/ray/llm/_internal/common/utils/download_utils.py
|
{
"start": 3721,
"end": 11657
}
|
class ____(CloudModelAccessor):
"""Unified downloader for models stored in cloud storage (S3 or GCS).
Args:
model_id: The model id to download.
mirror_config: The mirror config for the model.
"""
def get_model(
self,
tokenizer_only: bool,
exclude_safetensors: bool = False,
) -> str:
"""Gets a model from cloud storage and stores it locally.
Args:
tokenizer_only: whether to download only the tokenizer files.
exclude_safetensors: whether to skip downloading safetensors files to disk.
Returns: file path of model if downloaded, else the model id.
"""
bucket_uri = self.mirror_config.bucket_uri
if bucket_uri is None:
return self.model_id
lock_path = self._get_lock_path()
path = self._get_model_path()
storage_type = self.mirror_config.storage_type
try:
# Timeout 0 means there will be only one attempt to acquire
# the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
# This ensures that subsequent processes don't duplicate work.
with FileLock(lock_path, timeout=0):
try:
if exclude_safetensors:
logger.info("Skipping download of safetensors files.")
CloudFileSystem.download_model(
destination_path=path,
bucket_uri=bucket_uri,
tokenizer_only=tokenizer_only,
exclude_safetensors=exclude_safetensors,
)
logger.info(
"Finished downloading %s for %s from %s storage",
"tokenizer" if tokenizer_only else "model and tokenizer",
self.model_id,
storage_type.upper() if storage_type else "cloud",
)
except RuntimeError:
logger.exception(
"Failed to download files for model %s from %s storage",
self.model_id,
storage_type.upper() if storage_type else "cloud",
)
except TimeoutError:
# If the directory is already locked, then wait but do not do anything.
with FileLock(lock_path, timeout=-1):
pass
return get_model_location_on_disk(self.model_id)
def get_extra_files(self) -> List[str]:
"""Gets user-specified extra files from cloud storage and stores them in
provided paths.
Returns: list of file paths of extra files if downloaded.
"""
paths = []
extra_files = self.mirror_config.extra_files or []
if not extra_files:
return paths
lock_path = self._get_lock_path(suffix="-extra_files")
storage_type = self.mirror_config.storage_type
logger.info(
f"Downloading extra files for {self.model_id} from {storage_type} storage"
)
try:
# Timeout 0 means there will be only one attempt to acquire
# the file lock. If it cannot be acquired, a TimeoutError
# will be thrown.
# This ensures that subsequent processes don't duplicate work.
with FileLock(lock_path, timeout=0):
for extra_file in extra_files:
path = Path(
os.path.expandvars(extra_file.destination_path)
).expanduser()
paths.append(path)
CloudFileSystem.download_files(
path=path,
bucket_uri=extra_file.bucket_uri,
)
except TimeoutError:
# If the directory is already locked, then wait but do not do anything.
with FileLock(lock_path, timeout=-1):
pass
return paths
def _log_download_info(
*, source: str, download_model: NodeModelDownloadable, download_extra_files: bool
):
if download_model == NodeModelDownloadable.NONE:
if download_extra_files:
logger.info("Downloading extra files from %s", source)
else:
logger.info("Not downloading anything from %s", source)
elif download_model == NodeModelDownloadable.TOKENIZER_ONLY:
if download_extra_files:
logger.info("Downloading tokenizer and extra files from %s", source)
else:
logger.info("Downloading tokenizer from %s", source)
elif download_model == NodeModelDownloadable.MODEL_AND_TOKENIZER:
if download_extra_files:
logger.info("Downloading model, tokenizer, and extra files from %s", source)
else:
logger.info("Downloading model and tokenizer from %s", source)
def download_model_files(
model_id: Optional[str] = None,
mirror_config: Optional[CloudMirrorConfig] = None,
download_model: NodeModelDownloadable = NodeModelDownloadable.MODEL_AND_TOKENIZER,
download_extra_files: bool = True,
callback: Optional[CallbackBase] = None,
) -> Optional[str]:
"""
Download the model files from the cloud storage. We support two ways to specify
the remote model path in the cloud storage:
Approach 1:
- model_id: The vanilla model id such as "meta-llama/Llama-3.1-8B-Instruct".
- mirror_config: Config for downloading model from cloud storage.
Approach 2:
- model_id: The remote path (s3:// or gs://) in the cloud storage.
- mirror_config: None.
In this approach, we will create a CloudMirrorConfig from the model_id and use that
to download the model.
Args:
model_id: The model id.
mirror_config: Config for downloading model from cloud storage.
download_model: What parts of the model to download.
download_extra_files: Whether to download extra files specified in the mirror config.
callback: Callback to run before downloading model files.
Returns:
The local path to the downloaded model, or the original model ID
if no cloud storage mirror is configured or if the model is not downloaded.
"""
# Create the torch cache kernels directory if it doesn't exist.
# This is a workaround for a torch issue, where the kernels directory
# cannot be created by torch if the parent directory doesn't exist.
torch_cache_home = torch.hub._get_torch_home()
os.makedirs(os.path.join(torch_cache_home, "kernels"), exist_ok=True)
model_path_or_id = model_id
if callback is not None:
callback.run_callback_sync("on_before_download_model_files_distributed")
if model_id is None:
return None
if mirror_config is None:
if is_remote_path(model_id):
logger.info(
"Creating a CloudMirrorConfig from remote model path %s", model_id
)
mirror_config = CloudMirrorConfig(bucket_uri=model_id)
else:
logger.info("No cloud storage mirror configured")
return model_id
storage_type = mirror_config.storage_type
source = (
f"{storage_type.upper()} mirror" if storage_type else "Cloud storage mirror"
)
_log_download_info(
source=source,
download_model=download_model,
download_extra_files=download_extra_files,
)
downloader = CloudModelDownloader(model_id, mirror_config)
if download_model != NodeModelDownloadable.NONE:
model_path_or_id = downloader.get_model(
tokenizer_only=download_model == NodeModelDownloadable.TOKENIZER_ONLY,
exclude_safetensors=download_model
== NodeModelDownloadable.EXCLUDE_SAFETENSORS,
)
if download_extra_files:
downloader.get_extra_files()
return model_path_or_id
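# --- Minimal sketch (not part of this module) of the non-blocking FileLock
# pattern used above: the first process acquires with timeout=0 and does the
# work; latecomers catch the timeout and merely wait for the work to finish.
from filelock import FileLock, Timeout  # same library the code above relies on
def do_once(lock_path: str, work) -> None:
    try:
        with FileLock(lock_path, timeout=0):  # single acquisition attempt
            work()
    except Timeout:  # filelock.Timeout subclasses TimeoutError
        with FileLock(lock_path, timeout=-1):  # block until the holder finishes
            pass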
|
CloudModelDownloader
|
python
|
scipy__scipy
|
scipy/linalg/tests/test_decomp.py
|
{
"start": 87209,
"end": 95585
}
|
class ____:
def test_qz_single(self):
rng = np.random.RandomState(12345)
n = 5
A = rng.random([n, n]).astype(float32)
B = rng.random([n, n]).astype(float32)
AA, BB, Q, Z = qz(A, B)
assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5)
assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5)
assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5)
assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5)
assert_(np.all(diag(BB) >= 0))
def test_qz_double(self):
rng = np.random.RandomState(12345)
n = 5
A = rng.random([n, n])
B = rng.random([n, n])
AA, BB, Q, Z = qz(A, B)
assert_array_almost_equal(Q @ AA @ Z.T, A)
assert_array_almost_equal(Q @ BB @ Z.T, B)
assert_array_almost_equal(Q @ Q.T, eye(n))
assert_array_almost_equal(Z @ Z.T, eye(n))
assert_(np.all(diag(BB) >= 0))
def test_qz_complex(self):
rng = np.random.RandomState(12345)
n = 5
A = rng.random([n, n]) + 1j*rng.random([n, n])
B = rng.random([n, n]) + 1j*rng.random([n, n])
AA, BB, Q, Z = qz(A, B)
assert_array_almost_equal(Q @ AA @ Z.conj().T, A)
assert_array_almost_equal(Q @ BB @ Z.conj().T, B)
assert_array_almost_equal(Q @ Q.conj().T, eye(n))
assert_array_almost_equal(Z @ Z.conj().T, eye(n))
assert_(np.all(diag(BB) >= 0))
assert_(np.all(diag(BB).imag == 0))
def test_qz_complex64(self):
rng = np.random.RandomState(12345)
n = 5
A = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64)
B = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64)
AA, BB, Q, Z = qz(A, B)
assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5)
assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5)
assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5)
assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5)
assert_(np.all(diag(BB) >= 0))
assert_(np.all(diag(BB).imag == 0))
def test_qz_double_complex(self):
rng = np.random.RandomState(12345)
n = 5
A = rng.random([n, n])
B = rng.random([n, n])
AA, BB, Q, Z = qz(A, B, output='complex')
aa = Q @ AA @ Z.conj().T
assert_array_almost_equal(aa.real, A)
assert_array_almost_equal(aa.imag, 0)
bb = Q @ BB @ Z.conj().T
assert_array_almost_equal(bb.real, B)
assert_array_almost_equal(bb.imag, 0)
assert_array_almost_equal(Q @ Q.conj().T, eye(n))
assert_array_almost_equal(Z @ Z.conj().T, eye(n))
assert_(np.all(diag(BB) >= 0))
def test_qz_double_sort(self):
# from https://www.nag.com/lapack-ex/node119.html
# NOTE: These matrices may be ill-conditioned and lead to a
# seg fault on certain python versions when compiled with
# sse2 or sse3 older ATLAS/LAPACK binaries for windows
# A = np.array([[3.9, 12.5, -34.5, -0.5],
# [ 4.3, 21.5, -47.5, 7.5],
# [ 4.3, 21.5, -43.5, 3.5],
# [ 4.4, 26.0, -46.0, 6.0 ]])
# B = np.array([[ 1.0, 2.0, -3.0, 1.0],
# [1.0, 3.0, -5.0, 4.0],
# [1.0, 3.0, -4.0, 3.0],
# [1.0, 3.0, -4.0, 4.0]])
A = np.array([[3.9, 12.5, -34.5, 2.5],
[4.3, 21.5, -47.5, 7.5],
[4.3, 1.5, -43.5, 3.5],
[4.4, 6.0, -46.0, 6.0]])
B = np.array([[1.0, 1.0, -3.0, 1.0],
[1.0, 3.0, -5.0, 4.4],
[1.0, 2.0, -4.0, 1.0],
[1.2, 3.0, -4.0, 4.0]])
assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0)
if False:
AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0)
# assert_(sdim == 2)
assert_(sdim == 4)
assert_array_almost_equal(Q @ AA @ Z.T, A)
assert_array_almost_equal(Q @ BB @ Z.T, B)
# test absolute values bc the sign is ambiguous and
# might be platform dependent
assert_array_almost_equal(np.abs(AA), np.abs(np.array(
[[35.7864, -80.9061, -12.0629, -9.498],
[0., 2.7638, -2.3505, 7.3256],
[0., 0., 0.6258, -0.0398],
[0., 0., 0., -12.8217]])), 4)
assert_array_almost_equal(np.abs(BB), np.abs(np.array(
[[4.5324, -8.7878, 3.2357, -3.5526],
[0., 1.4314, -2.1894, 0.9709],
[0., 0., 1.3126, -0.3468],
[0., 0., 0., 0.559]])), 4)
assert_array_almost_equal(np.abs(Q), np.abs(np.array(
[[-0.4193, -0.605, -0.1894, -0.6498],
[-0.5495, 0.6987, 0.2654, -0.3734],
[-0.4973, -0.3682, 0.6194, 0.4832],
[-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
assert_array_almost_equal(np.abs(Z), np.abs(np.array(
[[-0.9471, -0.2971, -0.1217, 0.0055],
[-0.0367, 0.1209, 0.0358, 0.9913],
[0.3171, -0.9041, -0.2547, 0.1312],
[0.0346, 0.2824, -0.9587, 0.0014]])), 4)
# test absolute values bc the sign is ambiguous and might be platform
# dependent
# assert_array_almost_equal(abs(AA), abs(np.array([
# [3.8009, -69.4505, 50.3135, -43.2884],
# [0.0000, 9.2033, -0.2001, 5.9881],
# [0.0000, 0.0000, 1.4279, 4.4453],
# [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
# assert_array_almost_equal(abs(BB), abs(np.array([
# [1.9005, -10.2285, 0.8658, -5.2134],
# [0.0000, 2.3008, 0.7915, 0.4262],
# [0.0000, 0.0000, 0.8101, 0.0000],
# [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
# assert_array_almost_equal(abs(Q), abs(np.array([
# [0.4642, 0.7886, 0.2915, -0.2786],
# [0.5002, -0.5986, 0.5638, -0.2713],
# [0.5002, 0.0154, -0.0107, 0.8657],
# [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
# assert_array_almost_equal(dot(Q,Q.T), eye(4))
# assert_array_almost_equal(abs(Z), abs(np.array([
# [0.9961, -0.0014, 0.0887, -0.0026],
# [0.0057, -0.0404, -0.0938, -0.9948],
# [0.0626, 0.7194, -0.6908, 0.0363],
# [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
# assert_array_almost_equal(dot(Z,Z.T), eye(4))
# def test_qz_complex_sort(self):
# cA = np.array([
# [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
# [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
# [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
# [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
# cB = np.array([
# [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
# [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
# [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
# [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
# AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
# eigenvalues = diag(AAS)/diag(BBS)
# assert_(np.all(np.real(eigenvalues[:sdim] < 0)))
# assert_(np.all(np.real(eigenvalues[sdim:] > 0)))
def test_check_finite(self):
rng = np.random.RandomState(12345)
n = 5
A = rng.random([n, n])
B = rng.random([n, n])
AA, BB, Q, Z = qz(A, B, check_finite=False)
assert_array_almost_equal(Q @ AA @ Z.T, A)
assert_array_almost_equal(Q @ BB @ Z.T, B)
assert_array_almost_equal(Q @ Q.T, eye(n))
assert_array_almost_equal(Z @ Z.T, eye(n))
assert_(np.all(diag(BB) >= 0))
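# --- Illustrative check (not part of the test class): a QZ decomposition
# exposes the generalized eigenvalues of (A, B) as diag(AA) / diag(BB).
import numpy as np
from scipy.linalg import eigvals, qz
rng = np.random.RandomState(0)
A, B = rng.random((4, 4)), rng.random((4, 4))
AA, BB, Q, Z = qz(A, B, output='complex')
w_qz = np.sort_complex(np.diag(AA) / np.diag(BB))
w_eig = np.sort_complex(eigvals(A, B))
assert np.allclose(w_qz, w_eig)  # same spectrum, up to ordering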
|
TestQZ
|
python
|
TheAlgorithms__Python
|
data_structures/binary_tree/distribute_coins.py
|
{
"start": 938,
"end": 3261
}
|
class ____(NamedTuple):
moves: int
excess: int
def distribute_coins(root: TreeNode | None) -> int:
"""
>>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
2
>>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0)))
3
>>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(3)))
3
>>> distribute_coins(None)
0
>>> distribute_coins(TreeNode(0, TreeNode(0), TreeNode(0)))
Traceback (most recent call last):
...
ValueError: The number of nodes should be the same as the number of coins
>>> distribute_coins(TreeNode(0, TreeNode(1), TreeNode(1)))
Traceback (most recent call last):
...
ValueError: The number of nodes should be the same as the number of coins
"""
if root is None:
return 0
# Validation
def count_nodes(node: TreeNode | None) -> int:
"""
>>> count_nodes(None)
0
"""
if node is None:
return 0
return count_nodes(node.left) + count_nodes(node.right) + 1
def count_coins(node: TreeNode | None) -> int:
"""
>>> count_coins(None)
0
"""
if node is None:
return 0
return count_coins(node.left) + count_coins(node.right) + node.data
if count_nodes(root) != count_coins(root):
raise ValueError("The nodes number should be same as the number of coins")
# Main calculation
def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
"""
>>> get_distrib(None)
namedtuple("CoinsDistribResult", "0 2")
"""
if node is None:
return CoinsDistribResult(0, 1)
left_distrib_moves, left_distrib_excess = get_distrib(node.left)
right_distrib_moves, right_distrib_excess = get_distrib(node.right)
coins_to_left = 1 - left_distrib_excess
coins_to_right = 1 - right_distrib_excess
result_moves = (
left_distrib_moves
+ right_distrib_moves
+ abs(coins_to_left)
+ abs(coins_to_right)
)
result_excess = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(result_moves, result_excess)
return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
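# --- Worked trace (comments only; illustrative, not part of the module).
# For distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))):
#   * a missing child contributes CoinsDistribResult(moves=0, excess=1),
#     so a leaf sends nothing downward: coins_to_* = 1 - 1 = 0;
#   * each 0-coin leaf therefore returns (moves=0, excess=0 - 0 - 0 = 0);
#   * at the root, coins_to_left = coins_to_right = 1 - 0 = 1, hence
#     moves = 0 + 0 + |1| + |1| = 2, matching the first doctest.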
|
CoinsDistribResult
|
python
|
huggingface__transformers
|
src/transformers/models/eomt/modeling_eomt.py
|
{
"start": 34211,
"end": 36638
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
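# --- Minimal sketch (illustrative, not part of the model) of what the eager
# attention path above computes: scaled dot-product attention with an
# optional additive mask and dropout, mirroring eager_attention_forward.
import torch
def eager_attention_sketch(q, k, v, mask=None, scaling=1.0, dropout=0.0):
    # q, k, v: (batch, heads, seq, head_dim)
    scores = torch.matmul(q, k.transpose(-1, -2)) * scaling
    if mask is not None:
        scores = scores + mask              # additive mask; -inf drops a key
    probs = torch.softmax(scores, dim=-1)
    probs = torch.nn.functional.dropout(probs, p=dropout)
    out = torch.matmul(probs, v)            # (batch, heads, seq, head_dim)
    return out.transpose(1, 2), probs       # (batch, seq, heads, head_dim)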
|
EomtAttention
|
python
|
neetcode-gh__leetcode
|
python/0377-combination-sum-iv.py
|
{
"start": 0,
"end": 656
}
|
class ____:
    def combinationSum4(self, nums: List[int], target: int) -> int:
        # Bottom-up DP: cache[t] = number of ordered sequences from nums summing to t.
        cache = {0: 1}
        for total in range(1, target + 1):
            cache[total] = 0
            for n in nums:
                cache[total] += cache.get(total - n, 0)
        return cache[target]
    def combinationSum4TopDown(self, nums: List[int], target: int) -> int:
        # Alternative: memoized recursion counting sequences from `total` up to target.
        cache = {}
        def dfs(total):
            if total == target:
                return 1
            if total > target:
                return 0
            if total in cache:
                return cache[total]
            cache[total] = 0
            for n in nums:
                cache[total] += dfs(total + n)
            return cache[total]
        return dfs(0)
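# --- Usage sketch (illustrative; assumes the masked class is named Solution,
# as the target column indicates): nums = [1, 2, 3], target = 4 has 7 ordered
# combinations (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2, 1+3, 3+1).
# Solution().combinationSum4([1, 2, 3], 4)  # -> 7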
|
Solution
|
python
|
faif__python-patterns
|
patterns/structural/decorator.py
|
{
"start": 950,
"end": 1130
}
|
class ____:
"""Represents a base text tag"""
def __init__(self, text: str) -> None:
self._text = text
def render(self) -> str:
return self._text
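# --- Hedged sketch of the decorator pattern this class anchors (the wrapper
# name below is illustrative, in the spirit of the classic BoldWrapper):
class BoldWrapper:
    """Wraps another tag and bolds its rendered output."""
    def __init__(self, wrapped) -> None:
        self._wrapped = wrapped
    def render(self) -> str:
        return f"<b>{self._wrapped.render()}</b>"
# BoldWrapper(TextTag("hello, world!")).render()  # -> '<b>hello, world!</b>'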
|
TextTag
|
python
|
matplotlib__matplotlib
|
lib/mpl_toolkits/axisartist/grid_finder.py
|
{
"start": 4644,
"end": 10779
}
|
class ____:
"""
Internal helper for `~.grid_helper_curvelinear.GridHelperCurveLinear`, with
the same constructor parameters; should not be directly instantiated.
"""
def __init__(self,
transform,
extreme_finder=None,
grid_locator1=None,
grid_locator2=None,
tick_formatter1=None,
tick_formatter2=None):
if extreme_finder is None:
extreme_finder = ExtremeFinderSimple(20, 20)
if grid_locator1 is None:
grid_locator1 = MaxNLocator()
if grid_locator2 is None:
grid_locator2 = MaxNLocator()
if tick_formatter1 is None:
tick_formatter1 = FormatterPrettyPrint()
if tick_formatter2 is None:
tick_formatter2 = FormatterPrettyPrint()
self.extreme_finder = extreme_finder
self.grid_locator1 = grid_locator1
self.grid_locator2 = grid_locator2
self.tick_formatter1 = tick_formatter1
self.tick_formatter2 = tick_formatter2
self.set_transform(transform)
def _format_ticks(self, idx, direction, factor, levels):
"""
Helper to support both standard formatters (inheriting from
`.mticker.Formatter`) and axisartist-specific ones; should be called instead of
directly calling ``self.tick_formatter1`` and ``self.tick_formatter2``. This
method should be considered as a temporary workaround which will be removed in
the future at the same time as axisartist-specific formatters.
"""
fmt = _api.check_getitem(
{1: self.tick_formatter1, 2: self.tick_formatter2}, idx=idx)
return (fmt.format_ticks(levels) if isinstance(fmt, mticker.Formatter)
else fmt(direction, factor, levels))
def get_grid_info(self, *args, **kwargs):
"""
Compute positioning information for grid lines and ticks, given the
axes' data *bbox*.
"""
params = _api.select_matching_signature(
[lambda x1, y1, x2, y2: locals(), lambda bbox: locals()], *args, **kwargs)
if "x1" in params:
_api.warn_deprecated("3.11", message=(
"Passing extents as separate arguments to get_grid_info is deprecated "
"since %(since)s and support will be removed %(removal)s; pass a "
"single bbox instead."))
bbox = Bbox.from_extents(
params["x1"], params["y1"], params["x2"], params["y2"])
else:
bbox = params["bbox"]
tbbox = self.extreme_finder._find_transformed_bbox(
self.get_transform().inverted(), bbox)
lon_levs, lon_n, lon_factor = self.grid_locator1(*tbbox.intervalx)
lat_levs, lat_n, lat_factor = self.grid_locator2(*tbbox.intervaly)
lon_values = np.asarray(lon_levs[:lon_n]) / lon_factor
lat_values = np.asarray(lat_levs[:lat_n]) / lat_factor
lon_lines, lat_lines = self._get_raw_grid_lines(lon_values, lat_values, tbbox)
bbox_expanded = bbox.expanded(1 + 2e-10, 1 + 2e-10)
grid_info = {"extremes": tbbox} # "lon", "lat" keys filled below.
for idx, lon_or_lat, levs, factor, values, lines in [
(1, "lon", lon_levs, lon_factor, lon_values, lon_lines),
(2, "lat", lat_levs, lat_factor, lat_values, lat_lines),
]:
grid_info[lon_or_lat] = gi = {
"lines": lines,
"ticks": {"left": [], "right": [], "bottom": [], "top": []},
}
for xys, v, level in zip(lines, values, levs):
all_crossings = _find_line_box_crossings(xys, bbox_expanded)
for side, crossings in zip(
["left", "right", "bottom", "top"], all_crossings):
for crossing in crossings:
gi["ticks"][side].append({"level": level, "loc": crossing})
for side in gi["ticks"]:
levs = [tick["level"] for tick in gi["ticks"][side]]
labels = self._format_ticks(idx, side, factor, levs)
for tick, label in zip(gi["ticks"][side], labels):
tick["label"] = label
return grid_info
def _get_raw_grid_lines(self, lon_values, lat_values, bbox):
trans = self.get_transform()
lons = np.linspace(bbox.x0, bbox.x1, 100) # for interpolation
lats = np.linspace(bbox.y0, bbox.y1, 100)
lon_lines = [trans.transform(np.column_stack([np.full_like(lats, lon), lats]))
for lon in lon_values]
lat_lines = [trans.transform(np.column_stack([lons, np.full_like(lons, lat)]))
for lat in lat_values]
return lon_lines, lat_lines
def set_transform(self, aux_trans):
if isinstance(aux_trans, Transform):
self._aux_transform = aux_trans
elif len(aux_trans) == 2 and all(map(callable, aux_trans)):
self._aux_transform = _User2DTransform(*aux_trans)
else:
raise TypeError("'aux_trans' must be either a Transform "
"instance or a pair of callables")
def get_transform(self):
return self._aux_transform
update_transform = set_transform # backcompat alias.
@_api.deprecated("3.11", alternative="grid_finder.get_transform()")
def transform_xy(self, x, y):
return self._aux_transform.transform(np.column_stack([x, y])).T
@_api.deprecated("3.11", alternative="grid_finder.get_transform().inverted()")
def inv_transform_xy(self, x, y):
return self._aux_transform.inverted().transform(
np.column_stack([x, y])).T
def update(self, **kwargs):
for k, v in kwargs.items():
if k in ["extreme_finder",
"grid_locator1",
"grid_locator2",
"tick_formatter1",
"tick_formatter2"]:
setattr(self, k, v)
else:
raise ValueError(f"Unknown update property {k!r}")
|
GridFinder
|
python
|
python__mypy
|
mypy/meet.py
|
{
"start": 30644,
"end": 53959
}
|
class ____(TypeVisitor[ProperType]):
def __init__(self, s: ProperType) -> None:
self.s = s
def visit_unbound_type(self, t: UnboundType) -> ProperType:
if isinstance(self.s, NoneType):
if state.strict_optional:
return UninhabitedType()
else:
return self.s
elif isinstance(self.s, UninhabitedType):
return self.s
else:
return AnyType(TypeOfAny.special_form)
def visit_any(self, t: AnyType) -> ProperType:
return self.s
def visit_union_type(self, t: UnionType) -> ProperType:
if isinstance(self.s, UnionType):
meets: list[Type] = []
for x in t.items:
for y in self.s.items:
meets.append(meet_types(x, y))
else:
meets = [meet_types(x, self.s) for x in t.items]
return make_simplified_union(meets)
def visit_none_type(self, t: NoneType) -> ProperType:
if state.strict_optional:
if isinstance(self.s, NoneType) or (
isinstance(self.s, Instance) and self.s.type.fullname == "builtins.object"
):
return t
else:
return UninhabitedType()
else:
return t
def visit_uninhabited_type(self, t: UninhabitedType) -> ProperType:
return t
def visit_deleted_type(self, t: DeletedType) -> ProperType:
if isinstance(self.s, NoneType):
if state.strict_optional:
return t
else:
return self.s
elif isinstance(self.s, UninhabitedType):
return self.s
else:
return t
def visit_erased_type(self, t: ErasedType) -> ProperType:
return self.s
def visit_type_var(self, t: TypeVarType) -> ProperType:
if isinstance(self.s, TypeVarType) and self.s.id == t.id:
if self.s.upper_bound == t.upper_bound:
return self.s
return self.s.copy_modified(upper_bound=self.meet(self.s.upper_bound, t.upper_bound))
else:
return self.default(self.s)
def visit_param_spec(self, t: ParamSpecType) -> ProperType:
if self.s == t:
return self.s
else:
return self.default(self.s)
def visit_type_var_tuple(self, t: TypeVarTupleType) -> ProperType:
if isinstance(self.s, TypeVarTupleType) and self.s.id == t.id:
return self.s if self.s.min_len > t.min_len else t
else:
return self.default(self.s)
def visit_unpack_type(self, t: UnpackType) -> ProperType:
raise NotImplementedError
def visit_parameters(self, t: Parameters) -> ProperType:
if isinstance(self.s, Parameters):
if len(t.arg_types) != len(self.s.arg_types):
return self.default(self.s)
from mypy.join import join_types
return t.copy_modified(
arg_types=[join_types(s_a, t_a) for s_a, t_a in zip(self.s.arg_types, t.arg_types)]
)
else:
return self.default(self.s)
def visit_instance(self, t: Instance) -> ProperType:
if isinstance(self.s, Instance):
if t.type == self.s.type:
if is_subtype(t, self.s) or is_subtype(self.s, t):
# Combine type arguments. We could have used join below
# equivalently.
args: list[Type] = []
# N.B: We use zip instead of indexing because the lengths might have
# mismatches during daemon reprocessing.
if t.type.has_type_var_tuple_type:
# We handle meet of variadic instances by simply creating correct mapping
# for type arguments and compute the individual meets same as for regular
# instances. All the heavy lifting is done in the meet of tuple types.
s = self.s
assert s.type.type_var_tuple_prefix is not None
assert s.type.type_var_tuple_suffix is not None
prefix = s.type.type_var_tuple_prefix
suffix = s.type.type_var_tuple_suffix
tvt = s.type.defn.type_vars[prefix]
assert isinstance(tvt, TypeVarTupleType)
fallback = tvt.tuple_fallback
s_prefix, s_middle, s_suffix = split_with_prefix_and_suffix(
s.args, prefix, suffix
)
t_prefix, t_middle, t_suffix = split_with_prefix_and_suffix(
t.args, prefix, suffix
)
s_args = s_prefix + (TupleType(list(s_middle), fallback),) + s_suffix
t_args = t_prefix + (TupleType(list(t_middle), fallback),) + t_suffix
else:
t_args = t.args
s_args = self.s.args
for ta, sa, tv in zip(t_args, s_args, t.type.defn.type_vars):
meet = self.meet(ta, sa)
if isinstance(tv, TypeVarTupleType):
# Correctly unpack possible outcomes of meets of tuples: it can be
# either another tuple type or Never (normalized as *tuple[Never, ...])
if isinstance(meet, TupleType):
args.extend(meet.items)
continue
else:
assert isinstance(meet, UninhabitedType)
meet = UnpackType(tv.tuple_fallback.copy_modified(args=[meet]))
args.append(meet)
return Instance(t.type, args)
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
else:
alt_promote = t.type.alt_promote
if alt_promote and alt_promote.type is self.s.type:
return t
alt_promote = self.s.type.alt_promote
if alt_promote and alt_promote.type is t.type:
return self.s
if is_subtype(t, self.s):
return t
elif is_subtype(self.s, t):
# See also above comment.
return self.s
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
elif isinstance(self.s, FunctionLike) and t.type.is_protocol:
call = join.unpack_callback_protocol(t)
if call:
return meet_types(call, self.s)
elif isinstance(self.s, FunctionLike) and self.s.is_type_obj() and t.type.is_metaclass():
if is_subtype(self.s.fallback, t):
return self.s
return self.default(self.s)
elif isinstance(self.s, TypeType):
return meet_types(t, self.s)
elif isinstance(self.s, TupleType):
return meet_types(t, self.s)
elif isinstance(self.s, LiteralType):
return meet_types(t, self.s)
elif isinstance(self.s, TypedDictType):
return meet_types(t, self.s)
return self.default(self.s)
def visit_callable_type(self, t: CallableType) -> ProperType:
if isinstance(self.s, CallableType) and join.is_similar_callables(t, self.s):
if is_equivalent(t, self.s):
return join.combine_similar_callables(t, self.s)
result = meet_similar_callables(t, self.s)
# We set the from_type_type flag to suppress error when a collection of
# concrete class objects gets inferred as their common abstract superclass.
if not (
(t.is_type_obj() and t.type_object().is_abstract)
or (self.s.is_type_obj() and self.s.type_object().is_abstract)
):
result.from_type_type = True
if isinstance(get_proper_type(result.ret_type), UninhabitedType):
# Return a plain None or <uninhabited> instead of a weird function.
return self.default(self.s)
return result
elif isinstance(self.s, TypeType) and t.is_type_obj() and not t.is_generic():
# In this case we are able to potentially produce a better meet.
res = meet_types(self.s.item, t.ret_type)
if not isinstance(res, (NoneType, UninhabitedType)):
return TypeType.make_normalized(res)
return self.default(self.s)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = join.unpack_callback_protocol(self.s)
if call:
return meet_types(t, call)
return self.default(self.s)
def visit_overloaded(self, t: Overloaded) -> ProperType:
# TODO: Implement a better algorithm that covers at least the same cases
# as TypeJoinVisitor.visit_overloaded().
s = self.s
if isinstance(s, FunctionLike):
if s.items == t.items:
return Overloaded(t.items)
elif is_subtype(s, t):
return s
elif is_subtype(t, s):
return t
else:
return meet_types(t.fallback, s.fallback)
elif isinstance(self.s, Instance) and self.s.type.is_protocol:
call = join.unpack_callback_protocol(self.s)
if call:
return meet_types(t, call)
return meet_types(t.fallback, s)
def meet_tuples(self, s: TupleType, t: TupleType) -> list[Type] | None:
"""Meet two tuple types while handling variadic entries.
This is surprisingly tricky, and we don't handle some tricky corner cases.
Most of the trickiness comes from the variadic tuple items like *tuple[X, ...]
since they can have arbitrary partial overlaps (while *Ts can't be split). This
function is roughly a mirror of join_tuples() w.r.t. the fact that fixed
tuples are subtypes of variadic ones but not vice versa.
"""
s_unpack_index = find_unpack_in_list(s.items)
t_unpack_index = find_unpack_in_list(t.items)
if s_unpack_index is None and t_unpack_index is None:
if s.length() == t.length():
items: list[Type] = []
for i in range(t.length()):
items.append(self.meet(t.items[i], s.items[i]))
return items
return None
if s_unpack_index is not None and t_unpack_index is not None:
# The only simple case we can handle if both tuples are variadic
# is when their structure fully matches. Other cases are tricky because
# a variadic item is effectively a union of tuples of all length, thus
# potentially causing overlap between a suffix in `s` and a prefix
# in `t` (see how this is handled in is_subtype() for details).
# TODO: handle more cases (like when both prefix/suffix are shorter in s or t).
if s.length() == t.length() and s_unpack_index == t_unpack_index:
unpack_index = s_unpack_index
s_unpack = s.items[unpack_index]
assert isinstance(s_unpack, UnpackType)
s_unpacked = get_proper_type(s_unpack.type)
t_unpack = t.items[unpack_index]
assert isinstance(t_unpack, UnpackType)
t_unpacked = get_proper_type(t_unpack.type)
if not (isinstance(s_unpacked, Instance) and isinstance(t_unpacked, Instance)):
return None
meet = self.meet(s_unpacked, t_unpacked)
if not isinstance(meet, Instance):
return None
m_prefix: list[Type] = []
for si, ti in zip(s.items[:unpack_index], t.items[:unpack_index]):
m_prefix.append(meet_types(si, ti))
m_suffix: list[Type] = []
for si, ti in zip(s.items[unpack_index + 1 :], t.items[unpack_index + 1 :]):
m_suffix.append(meet_types(si, ti))
return m_prefix + [UnpackType(meet)] + m_suffix
return None
if s_unpack_index is not None:
variadic = s
unpack_index = s_unpack_index
fixed = t
else:
assert t_unpack_index is not None
variadic = t
unpack_index = t_unpack_index
fixed = s
# If one tuple is variadic and the other one is fixed, the meet will be fixed.
unpack = variadic.items[unpack_index]
assert isinstance(unpack, UnpackType)
unpacked = get_proper_type(unpack.type)
if not isinstance(unpacked, Instance):
return None
if fixed.length() < variadic.length() - 1:
return None
prefix_len = unpack_index
suffix_len = variadic.length() - prefix_len - 1
prefix, middle, suffix = split_with_prefix_and_suffix(
tuple(fixed.items), prefix_len, suffix_len
)
items = []
for fi, vi in zip(prefix, variadic.items[:prefix_len]):
items.append(self.meet(fi, vi))
for mi in middle:
items.append(self.meet(mi, unpacked.args[0]))
if suffix_len:
for fi, vi in zip(suffix, variadic.items[-suffix_len:]):
items.append(self.meet(fi, vi))
return items
def visit_tuple_type(self, t: TupleType) -> ProperType:
if isinstance(self.s, TupleType):
items = self.meet_tuples(self.s, t)
if items is None:
return self.default(self.s)
# TODO: What if the fallbacks are different?
return TupleType(items, tuple_fallback(t))
elif isinstance(self.s, Instance):
# meet(Tuple[t1, t2, <...>], Tuple[s, ...]) == Tuple[meet(t1, s), meet(t2, s), <...>].
if self.s.type.fullname in TUPLE_LIKE_INSTANCE_NAMES and self.s.args:
return t.copy_modified(items=[meet_types(it, self.s.args[0]) for it in t.items])
elif is_proper_subtype(t, self.s):
# A named tuple that inherits from a normal class
return t
elif self.s.type.has_type_var_tuple_type and is_subtype(t, self.s):
# This is a bit ad-hoc but more principled handling is tricky, and this
# special case is important for type narrowing in binder to work.
return t
return self.default(self.s)
def visit_typeddict_type(self, t: TypedDictType) -> ProperType:
if isinstance(self.s, TypedDictType):
for name, l, r in self.s.zip(t):
if not is_equivalent(l, r) or (name in t.required_keys) != (
name in self.s.required_keys
):
return self.default(self.s)
item_list: list[tuple[str, Type]] = []
for item_name, s_item_type, t_item_type in self.s.zipall(t):
if s_item_type is not None:
item_list.append((item_name, s_item_type))
else:
# at least one of s_item_type and t_item_type is not None
assert t_item_type is not None
item_list.append((item_name, t_item_type))
items = dict(item_list)
fallback = self.s.create_anonymous_fallback()
required_keys = t.required_keys | self.s.required_keys
readonly_keys = t.readonly_keys | self.s.readonly_keys
return TypedDictType(items, required_keys, readonly_keys, fallback)
elif isinstance(self.s, Instance) and is_subtype(t, self.s):
return t
else:
return self.default(self.s)
def visit_literal_type(self, t: LiteralType) -> ProperType:
if isinstance(self.s, LiteralType) and self.s == t:
return t
elif isinstance(self.s, Instance) and is_subtype(t.fallback, self.s):
return t
else:
return self.default(self.s)
def visit_partial_type(self, t: PartialType) -> ProperType:
# We can't determine the meet of partial types. We should never get here.
assert False, "Internal error"
def visit_type_type(self, t: TypeType) -> ProperType:
if isinstance(self.s, TypeType):
typ = self.meet(t.item, self.s.item)
if not isinstance(typ, NoneType):
typ = TypeType.make_normalized(
typ, line=t.line, is_type_form=self.s.is_type_form and t.is_type_form
)
return typ
elif isinstance(self.s, Instance) and self.s.type.fullname == "builtins.type":
return t
elif isinstance(self.s, CallableType):
return self.meet(t, self.s)
else:
return self.default(self.s)
def visit_type_alias_type(self, t: TypeAliasType) -> ProperType:
assert False, f"This should be never called, got {t}"
def meet(self, s: Type, t: Type) -> ProperType:
return meet_types(s, t)
def default(self, typ: Type) -> ProperType:
if isinstance(typ, UnboundType):
return AnyType(TypeOfAny.special_form)
else:
if state.strict_optional:
return UninhabitedType()
else:
return NoneType()
def meet_similar_callables(t: CallableType, s: CallableType) -> CallableType:
from mypy.join import match_generic_callables, safe_join
t, s = match_generic_callables(t, s)
arg_types: list[Type] = []
for i in range(len(t.arg_types)):
arg_types.append(safe_join(t.arg_types[i], s.arg_types[i]))
# TODO in combine_similar_callables also applies here (names and kinds)
# The fallback type can be either 'function' or 'type'. The result should have 'function' as
# fallback only if both operands have it as 'function'.
if t.fallback.type.fullname != "builtins.function":
fallback = t.fallback
else:
fallback = s.fallback
return t.copy_modified(
arg_types=arg_types,
ret_type=meet_types(t.ret_type, s.ret_type),
fallback=fallback,
name=None,
)
def meet_type_list(types: list[Type]) -> Type:
if not types:
# This should probably be builtins.object but that is hard to get and
# it doesn't matter for any current users.
return AnyType(TypeOfAny.implementation_artifact)
met = types[0]
for t in types[1:]:
met = meet_types(met, t)
return met
def typed_dict_mapping_pair(left: Type, right: Type) -> bool:
"""Is this a pair where one type is a TypedDict and another one is an instance of Mapping?
This case requires a precise/principled consideration because there are two use cases
that push the boundary the opposite ways: we need to avoid spurious overlaps to avoid
false positives for overloads, but we also need to avoid spuriously non-overlapping types
to avoid false positives with --strict-equality.
"""
left, right = get_proper_types((left, right))
assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
if isinstance(left, TypedDictType):
_, other = left, right
elif isinstance(right, TypedDictType):
_, other = right, left
else:
return False
return isinstance(other, Instance) and other.type.has_base("typing.Mapping")
def typed_dict_mapping_overlap(
left: Type, right: Type, overlapping: Callable[[Type, Type], bool]
) -> bool:
"""Check if a TypedDict type is overlapping with a Mapping.
The basic logic here consists of two rules:
* A TypedDict with some required keys is overlapping with Mapping[str, <some type>]
if and only if every key type is overlapping with <some type>. For example:
- TypedDict(x=int, y=str) overlaps with Dict[str, Union[str, int]]
- TypedDict(x=int, y=str) doesn't overlap with Dict[str, int]
Note that any additional non-required keys can't change the above result.
* A TypedDict with no required keys overlaps with Mapping[str, <some type>] if and
only if at least one of key types overlaps with <some type>. For example:
- TypedDict(x=str, y=str, total=False) overlaps with Dict[str, str]
- TypedDict(x=str, y=str, total=False) doesn't overlap with Dict[str, int]
- TypedDict(x=int, y=str, total=False) overlaps with Dict[str, str]
* A TypedDict with at least one ReadOnly[] key does not overlap
with Dict or MutableMapping, because they assume mutable data.
As usual, empty dictionaries lie in a gray area. In general, List[str] and List[int]
are considered non-overlapping despite the empty list belonging to both. However, List[int]
and List[Never] are considered overlapping.
So here we follow the same logic: a TypedDict with no required keys is considered
non-overlapping with Mapping[str, <some type>], but is considered overlapping with
Mapping[Never, Never]. This way we avoid false positives for overloads, and also
avoid false positives for comparisons like SomeTypedDict == {} under --strict-equality.
"""
left, right = get_proper_types((left, right))
assert not isinstance(left, TypedDictType) or not isinstance(right, TypedDictType)
if isinstance(left, TypedDictType):
assert isinstance(right, Instance)
typed, other = left, right
else:
assert isinstance(left, Instance)
assert isinstance(right, TypedDictType)
typed, other = right, left
mutable_mapping = next(
(base for base in other.type.mro if base.fullname == "typing.MutableMapping"), None
)
if mutable_mapping is not None and typed.readonly_keys:
return False
mapping = next(base for base in other.type.mro if base.fullname == "typing.Mapping")
other = map_instance_to_supertype(other, mapping)
key_type, value_type = get_proper_types(other.args)
# TODO: is there a cleaner way to get str_type here?
fallback = typed.as_anonymous().fallback
str_type = fallback.type.bases[0].args[0] # typing._TypedDict inherits Mapping[str, object]
# Special case: a TypedDict with no required keys overlaps with an empty dict.
if isinstance(key_type, UninhabitedType) and isinstance(value_type, UninhabitedType):
return not typed.required_keys
if typed.required_keys:
if not overlapping(key_type, str_type):
return False
return all(overlapping(typed.items[k], value_type) for k in typed.required_keys)
else:
if not overlapping(key_type, str_type):
return False
non_required = set(typed.items.keys()) - typed.required_keys
return any(overlapping(typed.items[k], value_type) for k in non_required)
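# --- Toy model (illustrative only, not mypy internals) of the two overlap
# rules documented above, with a type modelled as a set of runtime types and
# `overlapping` meaning non-empty intersection.
def overlapping(a: set, b: set) -> bool:
    return bool(a & b)
def typed_dict_overlaps_mapping(items, required, value_type) -> bool:
    if required:
        # Rule 1: every required key's type must overlap the value type.
        return all(overlapping(items[k], value_type) for k in required)
    # Rule 2: with no required keys, one overlapping key type suffices.
    return any(overlapping(t, value_type) for t in items.values())
# TypedDict(x=int, y=str) vs Mapping[str, int | str]: overlapping.
assert typed_dict_overlaps_mapping({"x": {int}, "y": {str}}, {"x", "y"}, {int, str})
# TypedDict(x=int, y=str) vs Mapping[str, int]: not overlapping.
assert not typed_dict_overlaps_mapping({"x": {int}, "y": {str}}, {"x", "y"}, {int})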
|
TypeMeetVisitor
|
python
|
plotly__plotly.py
|
plotly/graph_objs/waterfall/increasing/_marker.py
|
{
"start": 233,
"end": 3311
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall.increasing"
_path_str = "waterfall.increasing.marker"
_valid_props = {"color", "line"}
@property
def color(self):
"""
Sets the marker color of all increasing values.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.waterfall.increasing.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.waterfall.increasing.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of all increasing values.
line
:class:`plotly.graph_objects.waterfall.increasing.marker.Line`
instance or dict with compatible properties
"""
def __init__(self, arg=None, color=None, line=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.increasing.Marker`
color
Sets the marker color of all increasing values.
line
:class:`plotly.graph_objects.waterfall.increasing.marker.Line`
instance or dict with compatible properties
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.increasing.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.increasing.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("line", arg, line)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
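# --- Usage sketch (illustrative): this generated class is normally reached
# through a waterfall trace rather than constructed directly.
# import plotly.graph_objects as go
# fig = go.Figure(go.Waterfall(
#     y=[10, -4, 6],
#     increasing={"marker": {"color": "green",
#                            "line": {"color": "darkgreen", "width": 2}}},
# ))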
|
Marker
|
python
|
numba__llvmlite
|
llvmlite/ir/instructions.py
|
{
"start": 16361,
"end": 17418
}
|
class ____(Instruction):
def __init__(self, parent, ptr, ordering, align, name='', typ=None):
if typ is None:
if isinstance(ptr, AllocaInstr):
typ = ptr.allocated_type
# For compatibility with typed pointers. Eventually this should
# probably be removed (when typed pointers are fully removed).
elif not ptr.type.is_opaque:
typ = ptr.type.pointee
else:
raise ValueError("Load atomic lacks type.")
super(LoadAtomicInstr, self).__init__(parent, typ, "load atomic",
[ptr], name=name)
self.ordering = ordering
self.align = align
def descr(self, buf):
[val] = self.operands
buf.append("load atomic {0}, {1} {2} {3}, align {4}{5}\n".format(
self.type,
val.type,
val.get_reference(),
self.ordering,
self.align,
self._stringify_metadata(leading_comma=True),
))
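# --- Emission sketch (illustrative): llvmlite's public IRBuilder.load_atomic
# is the usual way to construct the instruction class above.
from llvmlite import ir
mod = ir.Module()
i32 = ir.IntType(32)
fn = ir.Function(mod, ir.FunctionType(i32, [i32.as_pointer()]), name="get")
builder = ir.IRBuilder(fn.append_basic_block())
# Atomic loads require an ordering and an explicit alignment.
val = builder.load_atomic(fn.args[0], ordering="seq_cst", align=4)
builder.ret(val)
# str(mod) now contains: load atomic i32, i32* %".1" seq_cst, align 4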
|
LoadAtomicInstr
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/declarative/test_reflection.py
|
{
"start": 12322,
"end": 15798
}
|
class ____(DeferredInhReflectBase):
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(32)),
Column("data", String(30)),
Column("bar_data", String(30)),
)
def test_basic(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
DeferredReflection.prepare(testing.db)
self._roundtrip()
def test_add_subclass_column(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
bar_data = Column(String(30))
DeferredReflection.prepare(testing.db)
self._roundtrip()
def test_add_subclass_mapped_column(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
bar_data: Mapped[str]
DeferredReflection.prepare(testing.db)
self._roundtrip()
def test_subclass_mapped_column_no_existing(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
with expect_raises_message(
exc.ArgumentError,
"Can't use use_existing_column with deferred mappers",
):
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
bar_data: Mapped[str] = mapped_column(use_existing_column=True)
def test_add_pk_column(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
id = Column(Integer, primary_key=True)
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
DeferredReflection.prepare(testing.db)
self._roundtrip()
def test_add_pk_mapped_column(self, decl_base):
class Foo(DeferredReflection, ComparableEntity, decl_base):
__tablename__ = "foo"
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "foo",
}
id: Mapped[int] = mapped_column(primary_key=True)
class Bar(Foo):
__mapper_args__ = {"polymorphic_identity": "bar"}
DeferredReflection.prepare(testing.db)
self._roundtrip()
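# --- Usage sketch (illustrative, outside the test harness): the pattern
# under test declares mappings first and reflects table columns later.
# from sqlalchemy.ext.declarative import DeferredReflection
# from sqlalchemy.orm import DeclarativeBase
#
# class Base(DeclarativeBase): ...
#
# class Foo(DeferredReflection, Base):
#     __tablename__ = "foo"
#
# DeferredReflection.prepare(engine)  # columns are filled in at prepare time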
|
DeferredSingleInhReflectionTest
|
python
|
Pylons__pyramid
|
tests/test_csrf.py
|
{
"start": 7138,
"end": 7758
}
|
class ____(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def _callFUT(self, *args, **kwargs):
from pyramid.csrf import new_csrf_token
return new_csrf_token(*args, **kwargs)
def test_no_override_csrf_utility_registered(self):
request = testing.DummyRequest()
self._callFUT(request)
def test_success(self):
self.config.set_csrf_storage_policy(DummyCSRF())
request = testing.DummyRequest()
csrf_token = self._callFUT(request)
self.assertEqual(csrf_token, 'e5e9e30a08b34ff9842ff7d2b958c14b')
|
Test_new_csrf_token
|
python
|
openai__gym
|
gym/envs/classic_control/mountain_car.py
|
{
"start": 286,
"end": 9826
}
|
class ____(gym.Env):
"""
### Description
The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically
at the bottom of a sinusoidal valley, with the only possible actions being the accelerations
that can be applied to the car in either direction. The goal of the MDP is to strategically
accelerate the car to reach the goal state on top of the right hill. There are two versions
of the mountain car domain in gym: one with discrete actions and one with continuous.
This version is the one with discrete actions.
This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf)
```
@TECHREPORT{Moore90efficientmemory-based,
author = {Andrew William Moore},
title = {Efficient Memory-based Learning for Robot Control},
institution = {University of Cambridge},
year = {1990}
}
```
### Observation Space
The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Unit |
|-----|--------------------------------------|------|-----|--------------|
| 0 | position of the car along the x-axis | -Inf | Inf | position (m) |
| 1 | velocity of the car | -Inf | Inf | velocity (v) |
### Action Space
There are 3 discrete deterministic actions:
| Num | Action |
|-----|-------------------------|
| 0 | Accelerate to the left |
| 1 | Don't accelerate |
| 2 | Accelerate to the right |
### Transition Dynamics:
Given an action, the mountain car follows the following transition dynamics:
*velocity<sub>t+1</sub> = velocity<sub>t</sub> + (action - 1) * force - cos(3 * position<sub>t</sub>) * gravity*
*position<sub>t+1</sub> = position<sub>t</sub> + velocity<sub>t+1</sub>*
where force = 0.001 and gravity = 0.0025. The collisions at either end are inelastic with the velocity set to 0
upon collision with the wall. The position is clipped to the range `[-1.2, 0.6]` and
velocity is clipped to the range `[-0.07, 0.07]`.
### Reward:
The goal is to reach the flag placed on top of the right hill as quickly as possible, as such the agent is
penalised with a reward of -1 for each timestep.
### Starting State
The position of the car is assigned a uniform random value in *[-0.6 , -0.4]*.
The starting velocity of the car is always assigned to 0.
### Episode End
The episode ends if either of the following happens:
1. Termination: The position of the car is greater than or equal to 0.5 (the goal position on top of the right hill)
2. Truncation: The length of the episode is 200.
### Arguments
```
gym.make('MountainCar-v0')
```
### Version History
* v0: Initial version release (1.0.0)
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 30,
}
def __init__(self, render_mode: Optional[str] = None, goal_velocity=0):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.goal_velocity = goal_velocity
self.force = 0.001
self.gravity = 0.0025
self.low = np.array([self.min_position, -self.max_speed], dtype=np.float32)
self.high = np.array([self.max_position, self.max_speed], dtype=np.float32)
self.render_mode = render_mode
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.isopen = True
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high, dtype=np.float32)
def step(self, action: int):
assert self.action_space.contains(
action
), f"{action!r} ({type(action)}) invalid"
position, velocity = self.state
velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if position == self.min_position and velocity < 0:
velocity = 0
terminated = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = -1.0
self.state = (position, velocity)
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), reward, terminated, False, {}
def reset(
self,
*,
seed: Optional[int] = None,
options: Optional[dict] = None,
):
super().reset(seed=seed)
# Note that if you use custom reset bounds, it may lead to out-of-bound
# state/observations.
low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4)
self.state = np.array([self.np_random.uniform(low=low, high=high), 0])
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), {}
def _height(self, xs):
return np.sin(3 * xs) * 0.45 + 0.55
def render(self):
if self.render_mode is None:
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError:
raise DependencyNotInstalled(
"pygame is not installed, run `pip install gym[classic_control]`"
)
if self.screen is None:
pygame.init()
if self.render_mode == "human":
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height)
)
else: # mode in "rgb_array"
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.max_position - self.min_position
scale = self.screen_width / world_width
carwidth = 40
carheight = 20
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
pos = self.state[0]
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0))
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
coords = []
for c in [(l, b), (l, t), (r, t), (r, b)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
coords.append(
(
c[0] + (pos - self.min_position) * scale,
c[1] + clearance + self._height(pos) * scale,
)
)
gfxdraw.aapolygon(self.surf, coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0))
for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
wheel = (
int(c[0] + (pos - self.min_position) * scale),
int(c[1] + clearance + self._height(pos) * scale),
)
gfxdraw.aacircle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
gfxdraw.filled_circle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
flagx = int((self.goal_position - self.min_position) * scale)
flagy1 = int(self._height(self.goal_position) * scale)
flagy2 = flagy1 + 50
gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0))
gfxdraw.aapolygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
gfxdraw.filled_polygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
if self.render_mode == "human":
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def get_keys_to_action(self):
# Control with left and right arrow keys.
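# pygame key codes (assumed from the classic pygame 1.x constants:
# 276 = K_LEFT, 275 = K_RIGHT); no key or both keys map to the
# "no thrust" action 1.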
return {(): 1, (276,): 0, (275,): 2, (275, 276): 1}
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
|
MountainCarEnv
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_default_format04.py
|
{
"start": 315,
"end": 1074
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_format04.xlsx")
def test_create_file(self):
"""Test the creation of a file with user defined default format"""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 12},
"default_row_height": 20,
"default_column_width": 80,
},
)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_docs_tests/test_enhanced_section_headers.py
|
{
"start": 346,
"end": 6705
}
|
class ____:
"""Test enhanced detection of malformed section headers."""
# Using function-based validation approach
def test_missing_colon_detection(self):
"""Test detection of section headers missing colons."""
docstring = '''"""Function with missing colon in section header.
Args
param1: Description of parameter
param2: Another parameter
Returns
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect missing colons as errors
assert result.has_errors()
errors = " ".join(result.errors)
assert "Malformed section header: 'Args' is missing colon (should be 'Args:')" in errors
assert (
"Malformed section header: 'Returns' is missing colon (should be 'Returns:')" in errors
)
def test_incorrect_capitalization_detection(self):
"""Test detection of incorrectly capitalized section headers."""
docstring = '''"""Function with incorrect capitalization.
args:
param1: Description of parameter
returns:
Description of return value
raises:
ValueError: When something goes wrong
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect capitalization errors
assert result.has_errors()
errors = " ".join(result.errors)
assert (
"Malformed section header: 'args:' has incorrect capitalization (should be 'Args:')"
in errors
)
assert (
"Malformed section header: 'returns:' has incorrect capitalization (should be 'Returns:')"
in errors
)
assert (
"Malformed section header: 'raises:' has incorrect capitalization (should be 'Raises:')"
in errors
)
def test_incorrect_spacing_detection(self):
"""Test detection of section headers with incorrect spacing."""
docstring = '''"""Function with spacing issues in headers.
Args :
param1: Description (space before colon)
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect spacing issues
assert result.has_errors()
errors = " ".join(result.errors)
assert (
"Malformed section header: 'Args :' has incorrect spacing (should be 'Args:')" in errors
)
def test_corrupted_section_header_detection(self):
"""Test detection of completely corrupted section headers."""
docstring = '''"""Function with corrupted section header.
Argsjdkfjdkjfdk:
param1: Description of parameter
param2: Another parameter
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect corrupted header
assert result.has_errors()
errors = " ".join(result.errors)
assert (
"Corrupted section header detected: 'Argsjdkfjdkjfdk:' (possibly should be 'Args:')"
in errors
)
def test_multiple_header_errors(self):
"""Test detection of multiple different header errors in one docstring."""
docstring = '''"""Function with multiple header errors.
args
param1: Missing colon above
RETURNS:
Wrong capitalization above
Examplesjdkfjdk:
Corrupted header above
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect all three types of errors (some might be warnings)
assert result.has_errors() or result.has_warnings()
all_messages = " ".join(result.errors + result.warnings)
assert "missing colon" in all_messages or "'args'" in all_messages
assert "incorrect capitalization" in all_messages or "RETURNS:" in all_messages
assert "Corrupted section header" in all_messages
def test_all_standard_section_headers(self):
"""Test that all standard section headers are recognized when malformed."""
test_cases = [
("args:", "Args:"),
("arguments:", "Arguments:"),
("parameters:", "Parameters:"),
("returns:", "Returns:"),
("return:", "Return:"),
("yields:", "Yields:"),
("yield:", "Yield:"),
("raises:", "Raises:"),
("examples:", "Examples:"),
("example:", "Example:"),
("note:", "Note:"),
("notes:", "Notes:"),
("see also:", "See Also:"),
("attributes:", "Attributes:"),
]
for malformed, correct in test_cases:
docstring = f'''"""Function with malformed {correct} header.
{malformed}
content: Description
"""'''
result = validate_docstring_text(docstring, "test.function")
assert result.has_errors(), f"Should detect error in '{malformed}'"
errors = " ".join(result.errors)
assert f"has incorrect capitalization (should be '{correct}')" in errors
def test_valid_section_headers_pass(self):
"""Test that correctly formatted section headers don't trigger errors."""
docstring = '''"""Function with all correctly formatted headers.
Args:
param1: Description of parameter
param2: Another parameter
Returns:
Description of return value
Raises:
ValueError: When something goes wrong
Examples:
>>> function_call()
'result'
Note:
This is a note section.
See Also:
other_function: Related function
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should not have any section header errors
if result.has_errors():
# Filter out non-section-header errors for this test
section_errors = [e for e in result.errors if "section header" in e.lower()]
assert len(section_errors) == 0, (
f"Should not have section header errors, got: {section_errors}"
)
|
TestEnhancedSectionHeaderDetection
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_mutable.py
|
{
"start": 39551,
"end": 39699
}
|
class ____(MutableCompositeColumnDefaultTest):
@classmethod
def _type_fixture(cls):
return DCPoint
|
MutableDCCompositeColumnDefaultTest
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/qinterpolate_test.py
|
{
"start": 1123,
"end": 2107
}
|
class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, dtype, mode, scale, contig):
f_input = (torch.rand(1, M, N, K) - 0.5) * 256
scale = 0.1
zero_point = 42
self.q_input = torch.quantize_per_tensor(
f_input, scale=scale, zero_point=zero_point, dtype=dtype
)
if not contig:
permute_dims = list(range(self.q_input.ndim))[::-1]
self.q_input = self.q_input.permute(permute_dims)
self.inputs = {"q_input": self.q_input, "scale_factor": scale, "mode": mode}
self.set_module_name("q_interpolate")
def forward(self, q_input, scale_factor: float, mode: str):
return torch.nn.functional.interpolate(
q_input, scale_factor=scale_factor, mode=mode
)
op_bench.generate_pt_test(
qinterpolate_short_configs + qinterpolate_long_configs, QInterpolateBenchmark
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
|
QInterpolateBenchmark
|
python
|
Textualize__textual
|
src/textual/_queue.py
|
{
"start": 179,
"end": 1211
}
|
class ____(Generic[QueueType]):
"""A cut-down version of asyncio.Queue
This has just enough functionality to run the message pumps.
"""
def __init__(self) -> None:
self.values: deque[QueueType] = deque()
self.ready_event = Event()
def put_nowait(self, value: QueueType) -> None:
self.values.append(value)
self.ready_event.set()
def qsize(self) -> int:
return len(self.values)
def empty(self) -> bool:
return not self.values
def task_done(self) -> None:
pass
async def get(self) -> QueueType:
if not self.ready_event.is_set():
await self.ready_event.wait()
value = self.values.popleft()
if not self.values:
self.ready_event.clear()
return value
def get_nowait(self) -> QueueType:
if not self.values:
raise asyncio.QueueEmpty()
value = self.values.popleft()
if not self.values:
self.ready_event.clear()
return value
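# Illustrative usage sketch (not part of the original module); shows the
# minimal round trip this cut-down queue supports:
#
#     queue: Queue[int] = Queue()
#     queue.put_nowait(1)
#     value = await queue.get()  # -> 1, clears the ready event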
|
Queue
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/build_env.py
|
{
"start": 1206,
"end": 2736
}
|
class ____:
def __init__(self, path: str) -> None:
self.path = path
self.setup = False
scheme = get_scheme("", prefix=path)
self.bin_dir = scheme.scripts
self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)
def get_runnable_pip() -> str:
"""Get a file to pass to a Python executable, to run the currently-running pip.
This is used to run a pip subprocess, for installing requirements into the build
environment.
"""
source = pathlib.Path(pip_location).resolve().parent
if not source.is_dir():
# This would happen if someone is using pip from inside a zip file. In that
# case, we can use that directly.
return str(source)
return os.fsdecode(source / "__pip-runner__.py")
def _get_system_sitepackages() -> Set[str]:
"""Get system site packages
Usually from site.getsitepackages,
but fallback on `get_purelib()/get_platlib()` if unavailable
(e.g. in a virtualenv created by virtualenv<20)
Returns normalized set of strings.
"""
if hasattr(site, "getsitepackages"):
system_sites = site.getsitepackages()
else:
# virtualenv < 20 overwrites site.py without getsitepackages
# fallback on get_purelib/get_platlib.
# this is known to miss things, but shouldn't in the cases
# where getsitepackages() has been removed (inside a virtualenv)
system_sites = [get_purelib(), get_platlib()]
return {os.path.normcase(path) for path in system_sites}
|
_Prefix
|
python
|
walkccc__LeetCode
|
solutions/51. N-Queens/51.py
|
{
"start": 0,
"end": 602
}
|
class ____:
def solveNQueens(self, n: int) -> list[list[str]]:
ans = []
cols = [False] * n
diag1 = [False] * (2 * n - 1)
diag2 = [False] * (2 * n - 1)
def dfs(i: int, board: list[int]) -> None:
if i == n:
ans.append(board)
return
for j in range(n):
if cols[j] or diag1[i + j] or diag2[j - i + n - 1]:
continue
cols[j] = diag1[i + j] = diag2[j - i + n - 1] = True
dfs(i + 1, board + ['.' * j + 'Q' + '.' * (n - j - 1)])
cols[j] = diag1[i + j] = diag2[j - i + n - 1] = False
dfs(0, [])
return ans
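# Illustrative example (not part of the original solution file):
# Solution().solveNQueens(4) returns the two distinct boards
# [".Q..", "...Q", "Q...", "..Q."] and ["..Q.", "Q...", "...Q", ".Q.."].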
|
Solution
|
python
|
pytorch__pytorch
|
torch/_dynamo/aot_compile.py
|
{
"start": 986,
"end": 1571
}
|
class ____:
signature: inspect.Signature
guard_manager: Optional["GuardManagerWrapper"]
guards_state: bytes
backend_id: str
compiled_fn: SerializableCallable
original_code: types.CodeType
runtime_env: GraphRuntimeEnv
source_info: "SourceInfo"
device_type: str
backend_name: str
system_info: SystemInfo = dataclasses.field(default_factory=SystemInfo.current)
def check_compatibility(self) -> None:
current_system = SystemInfo.current()
current_system.check_compatibility(self.system_info, self.device_type)
|
CompileArtifacts
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-shared/dagster_shared_tests/test_checked.py
|
{
"start": 555,
"end": 3597
}
|
class ____: ...
def test_many():
@checked
def big(
name: str,
nick_names: list[str],
age: int,
cool: bool,
thing: Optional[Thing],
other_thing: Thing,
percent: float,
o_s: Optional[str],
o_n: Optional[int],
o_f: Optional[float],
o_b: Optional[bool],
foos: list[Annotated["TestType", ImportFrom("dagster_shared.utils.test")]],
):
return True
assert big(
name="dude",
nick_names=[
"foo",
"bar",
"biz",
],
age=42,
cool=False,
thing=None,
other_thing=Thing(),
percent=0.5,
o_s="x",
o_n=3,
o_f=None,
o_b=None,
foos=[],
)
with pytest.raises(CheckError):
assert big(
name="dude",
nick_names=[
"foo",
"bar",
"biz",
],
age=42,
cool=False,
thing=None,
other_thing=Thing(),
percent=0.5,
o_s="x",
o_n=3,
o_f="surprise_not_float", # type: ignore
o_b=None,
foos=[],
)
def test_no_op():
def foo(): ...
c_foo = checked(foo)
assert c_foo is foo
def bar() -> None: ...
c_bar = checked(bar)
assert c_bar is bar
def test_star():
@checked
def baz(*, i: int): ...
baz(i=1)
with pytest.raises(CheckError):
baz(i="1") # type: ignore
def test_partial():
@checked
def foo(a, b, c: int): ...
foo(1, 2, 3)
def test_class():
class Foo:
@checked
def me(self):
return self
@checked
def yell(self, word: str):
return word
@staticmethod
@checked
def holler(word: str):
return word
@classmethod
@checked
def scream(cls, word: str):
return word
f = Foo()
f.me()
f.yell("HI")
with pytest.raises(CheckError):
f.yell(3) # type: ignore
Foo.holler("hi")
with pytest.raises(CheckError):
Foo.holler(3) # type: ignore
Foo.scream("hi")
with pytest.raises(CheckError):
Foo.scream(3) # type: ignore
def test_defaults():
@checked
def foo(a: int, b: int = 1):
return a + b
assert foo(0) == 1
@checked
def bar(private_list: list = []):
private_list.append(1)
return private_list
assert bar() == [1]
assert bar() == [1] # @checked re-creates the default, so the container is not shared between calls
class GlobalThing:
def __init__(self):
self._things = []
def bump(self):
self._things.append(1)
def total(self):
return sum(self._things)
_global = GlobalThing()
@checked
def baz(shared_inst: GlobalThing = _global):
_global.bump()
return _global.total()
assert baz() == 1
assert baz() == 2
|
Thing
|
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 3936,
"end": 4007
}
|
class ____(TestModel):
c = ForeignKeyField(DiC)
d = TextField()
|
DiD
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/variables.py
|
{
"start": 2483,
"end": 3496
}
|
class ____(enum.Enum):
"""Indicates when a distributed variable will be synced.
* `AUTO`: Indicates that the synchronization will be determined by the current
`DistributionStrategy` (eg. With `MirroredStrategy` this would be
`ON_WRITE`).
* `NONE`: Indicates that there will only be one copy of the variable, so
there is no need to sync.
* `ON_WRITE`: Indicates that the variable will be updated across devices
every time it is written.
* `ON_READ`: Indicates that the variable will be aggregated across devices
when it is read (eg. when checkpointing or when evaluating an op that uses
the variable).
Example:
>>> temp_grad=[tf.Variable([0.], trainable=False,
... synchronization=tf.VariableSynchronization.ON_READ,
... aggregation=tf.VariableAggregation.MEAN
... )]
"""
AUTO = 0
NONE = 1
ON_WRITE = 2
ON_READ = 3
# LINT.IfChange
@tf_export("VariableAggregation", v1=[])
|
VariableSynchronization
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/storage/chat_store/base.py
|
{
"start": 242,
"end": 2718
}
|
class ____(BaseComponent):
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "BaseChatStore"
@abstractmethod
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Set messages for a key."""
...
@abstractmethod
def get_messages(self, key: str) -> List[ChatMessage]:
"""Get messages for a key."""
...
@abstractmethod
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
...
@abstractmethod
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Delete messages for a key."""
...
@abstractmethod
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
...
@abstractmethod
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
...
@abstractmethod
def get_keys(self) -> List[str]:
"""Get all keys."""
...
async def aset_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Async version of Get messages for a key."""
await asyncio.to_thread(self.set_messages, key, messages)
async def aget_messages(self, key: str) -> List[ChatMessage]:
"""Async version of Get messages for a key."""
return await asyncio.to_thread(self.get_messages, key)
async def async_add_message(self, key: str, message: ChatMessage) -> None:
"""Async version of Add a message for a key."""
await asyncio.to_thread(self.add_message, key, message)
async def adelete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Async version of Delete messages for a key."""
return await asyncio.to_thread(self.delete_messages, key)
async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Async version of Delete specific message for a key."""
return await asyncio.to_thread(self.delete_message, key, idx)
async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Async version of Delete last message for a key."""
return await asyncio.to_thread(self.delete_last_message, key)
async def aget_keys(self) -> List[str]:
"""Async version of Get all keys."""
return await asyncio.to_thread(self.get_keys)
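# Illustrative sketch (not from this module): a concrete store only needs
# the abstract sync methods; the async variants fall back to
# asyncio.to_thread automatically. For example, a minimal in-memory store
# might hold a dict of key -> List[ChatMessage] and implement
# set_messages / get_messages / add_message / delete_* / get_keys against it.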
|
BaseChatStore
|
python
|
getsentry__sentry
|
src/sentry/grouping/component.py
|
{
"start": 14192,
"end": 15029
}
|
class ____(BaseGroupingComponent[ExceptionGroupingComponent]):
id: str = "chained_exception"
frame_counts: Counter[str]
reverse_when_serializing: bool = False
def __init__(
self,
values: Sequence[ExceptionGroupingComponent] | None = None,
hint: str | None = None,
contributes: bool | None = None,
frame_counts: Counter[str] | None = None,
):
super().__init__(hint=hint, contributes=contributes, values=values)
self.frame_counts = frame_counts or Counter()
def as_dict(self) -> dict[str, Any]:
result = super().as_dict()
if self.reverse_when_serializing:
result["values"].reverse()
return result
@property
def key(self) -> str:
return _get_exception_component_key(self)
|
ChainedExceptionGroupingComponent
|
python
|
fluentpython__example-code-2e
|
21-async/mojifinder/bottle.py
|
{
"start": 112582,
"end": 112739
}
|
class ____(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
|
WaitressServer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 542330,
"end": 542993
}
|
class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestTimelineItemEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("PullRequestTimelineItem"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
|
PullRequestTimelineConnection
|
python
|
facebookresearch__faiss
|
tests/test_fast_scan_ivf.py
|
{
"start": 6392,
"end": 7966
}
|
class ____(unittest.TestCase):
""" Verify implem 2 (search with original invlists with uint8 LUTs)
against IndexIVFPQ. Entails some loss in accuracy. """
def eval_quant_loss(self, by_residual, metric=faiss.METRIC_L2):
ds = datasets.SyntheticDataset(32, 2000, 5000, 1000)
index = faiss.index_factory(32, "IVF32,PQ16x4np", metric)
index.train(ds.get_train())
index.add(ds.get_database())
index.nprobe = 4
index.by_residual = by_residual
Da, Ia = index.search(ds.get_queries(), 10)
# loss due to int8 quantization of LUTs
index2 = faiss.IndexIVFPQFastScan(index)
index2.implem = 2
Db, Ib = index2.search(ds.get_queries(), 10)
m3 = three_metrics(Da, Ia, Db, Ib)
ref_results = {
(True, 1): [0.985, 1.0, 9.872],
(True, 0): [ 0.987, 1.0, 9.914],
(False, 1): [0.991, 1.0, 9.907],
(False, 0): [0.986, 1.0, 9.917],
}
ref = ref_results[(by_residual, metric)]
self.assertGreaterEqual(m3[0], ref[0] * 0.995)
self.assertGreaterEqual(m3[1], ref[1] * 0.995)
self.assertGreaterEqual(m3[2], ref[2] * 0.995)
def test_qloss_no_residual(self):
self.eval_quant_loss(False)
def test_qloss_by_residual(self):
self.eval_quant_loss(True)
def test_qloss_no_residual_ip(self):
self.eval_quant_loss(False, faiss.METRIC_INNER_PRODUCT)
def test_qloss_by_residual_ip(self):
self.eval_quant_loss(True, faiss.METRIC_INNER_PRODUCT)
|
TestIVFImplem2
|
python
|
ray-project__ray
|
python/ray/serve/schema.py
|
{
"start": 45348,
"end": 45634
}
|
class ____(ServeActorDetails, frozen=True):
"""Detailed info about a Ray Serve ProxyActor.
Attributes:
status: The current status of the proxy.
"""
status: ProxyStatus = Field(description="Current status of the proxy.")
@PublicAPI(stability="alpha")
|
ProxyDetails
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
|
{
"start": 2724,
"end": 6230
}
|
class ____:
"""State of the composite argument parser."""
mode: ParserMode
remainder: str = ''
consumed: str = ''
boundaries: list[ParserBoundary] = dataclasses.field(default_factory=list)
namespaces: list[t.Any] = dataclasses.field(default_factory=list)
parts: list[str] = dataclasses.field(default_factory=list)
@property
def incomplete(self) -> bool:
"""True if parsing is incomplete (unparsed input remains), otherwise False."""
return self.remainder is not None
def match(self, value: str, choices: list[str]) -> bool:
"""Return True if the given value matches the provided choices, taking into account parsing boundaries, otherwise return False."""
if self.current_boundary:
delimiters, delimiter = self.current_boundary.delimiters, self.current_boundary.match
else:
delimiters, delimiter = '', None
for choice in choices:
if choice.rstrip(delimiters) == choice:
# choice is not delimited
if value == choice:
return True # value matched
else:
# choice is delimited
if f'{value}{delimiter}' == choice:
return True # value and delimiter matched
return False
def read(self) -> str:
"""Read and return the next input segment, taking into account parsing boundaries."""
delimiters = "".join(boundary.delimiters for boundary in self.boundaries)
if delimiters:
pattern = '([' + re.escape(delimiters) + '])'
regex = re.compile(pattern)
parts = regex.split(self.remainder, 1)
else:
parts = [self.remainder]
if len(parts) > 1:
value, delimiter, remainder = parts
else:
value, delimiter, remainder = parts[0], None, None
for boundary in reversed(self.boundaries):
if delimiter and delimiter in boundary.delimiters:
boundary.match = delimiter
self.consumed += value + delimiter
break
boundary.match = None
boundary.ready = False
if boundary.required:
break
self.remainder = remainder
return value
@property
def root_namespace(self) -> t.Any:
"""THe root namespace."""
return self.namespaces[0]
@property
def current_namespace(self) -> t.Any:
"""The current namespace."""
return self.namespaces[-1]
@property
def current_boundary(self) -> t.Optional[ParserBoundary]:
"""The current parser boundary, if any, otherwise None."""
return self.boundaries[-1] if self.boundaries else None
def set_namespace(self, namespace: t.Any) -> None:
"""Set the current namespace."""
self.namespaces.append(namespace)
@contextlib.contextmanager
def delimit(self, delimiters: str, required: bool = True) -> c.Iterator[ParserBoundary]:
"""Context manager for delimiting parsing of input."""
boundary = ParserBoundary(delimiters=delimiters, required=required)
self.boundaries.append(boundary)
try:
yield boundary
finally:
self.boundaries.pop()
if boundary.required and not boundary.match:
raise ParserError('required delimiter not found, hit up-level delimiter or end of input instead')
@dataclasses.dataclass
|
ParserState
|
python
|
pallets__itsdangerous
|
src/itsdangerous/exc.py
|
{
"start": 436,
"end": 882
}
|
class ____(BadData):
"""Raised if a signature does not match."""
def __init__(self, message: str, payload: t.Any | None = None):
super().__init__(message)
#: The payload that failed the signature test. In some
#: situations you might still want to inspect this, even if
#: you know it was tampered with.
#:
#: .. versionadded:: 0.14
self.payload: t.Any | None = payload
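# Illustrative usage sketch (assumed typical itsdangerous flow, not from
# this file): catch BadSignature to inspect the tampered payload:
#
#     try:
#         data = serializer.loads(token)
#     except BadSignature as e:
#         inspect_tampered(e.payload)  # hypothetical handler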
|
BadSignature
|
python
|
huggingface__transformers
|
src/transformers/models/auto/modeling_auto.py
|
{
"start": 87245,
"end": 87500
}
|
class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TIME_SERIES_PREDICTION_MAPPING
AutoModelForTimeSeriesPrediction = auto_class_update(
AutoModelForTimeSeriesPrediction, head_doc="time-series prediction"
)
|
AutoModelForTimeSeriesPrediction
|
python
|
doocs__leetcode
|
solution/2100-2199/2120.Execution of All Suffix Instructions Staying in a Grid/Solution.py
|
{
"start": 0,
"end": 541
}
|
class ____:
def executeInstructions(self, n: int, startPos: List[int], s: str) -> List[int]:
ans = []
m = len(s)
mp = {"L": [0, -1], "R": [0, 1], "U": [-1, 0], "D": [1, 0]}
for i in range(m):
x, y = startPos
t = 0
for j in range(i, m):
a, b = mp[s[j]]
if 0 <= x + a < n and 0 <= y + b < n:
x, y, t = x + a, y + b, t + 1
else:
break
ans.append(t)
return ans
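# Illustrative example (LeetCode 2120, not part of the original file):
# executeInstructions(3, [0, 1], "RRDDLU") -> [1, 5, 4, 3, 1, 0]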
|
Solution
|
python
|
coleifer__peewee
|
tests/model_save.py
|
{
"start": 4175,
"end": 5101
}
|
class ____(ModelTestCase):
requires = [T5]
def test_save_no_data(self):
t5 = T5.create()
self.assertTrue(t5.id >= 1)
t5.val = 3
t5.save()
t5_db = T5.get(T5.id == t5.id)
self.assertEqual(t5_db.val, 3)
t5.val = None
t5.save()
t5_db = T5.get(T5.id == t5.id)
self.assertTrue(t5_db.val is None)
def test_save_no_data2(self):
t5 = T5.create()
t5_db = T5.get(T5.id == t5.id)
t5_db.save()
t5_db = T5.get(T5.id == t5.id)
self.assertTrue(t5_db.val is None)
def test_save_no_data3(self):
t5 = T5.create()
self.assertRaises(ValueError, t5.save)
def test_save_only_no_data(self):
t5 = T5.create(val=1)
t5.val = 2
self.assertRaises(ValueError, t5.save, only=[])
t5_db = T5.get(T5.id == t5.id)
self.assertEqual(t5_db.val, 1)
|
TestSaveNoData
|
python
|
jazzband__django-oauth-toolkit
|
oauth2_provider/views/mixins.py
|
{
"start": 11754,
"end": 12643
}
|
class ____(OIDCOnlyMixin):
"""
Mixin for views that should only be accessible when OIDC and OIDC RP-Initiated Logout are enabled.
If either is not enabled:
* if DEBUG is True, raises an ImproperlyConfigured exception explaining why
* otherwise, returns a 404 response, logging the same warning
"""
debug_error_message = (
"The django-oauth-toolkit OIDC RP-Initiated Logout view is not enabled unless you "
"have configured OIDC_RP_INITIATED_LOGOUT_ENABLED in the settings"
)
def dispatch(self, *args, **kwargs):
if not oauth2_settings.OIDC_RP_INITIATED_LOGOUT_ENABLED:
if settings.DEBUG:
raise ImproperlyConfigured(self.debug_error_message)
log.warning(self.debug_error_message)
return HttpResponseNotFound()
return super().dispatch(*args, **kwargs)
|
OIDCLogoutOnlyMixin
|
python
|
keon__algorithms
|
algorithms/graph/minimum_spanning_tree.py
|
{
"start": 327,
"end": 4809
}
|
class ____:
"""
The disjoint set is represented with a list <n> of integers where
<n[i]> is the parent of the node at position <i>.
If <n[i]> = <i>, then <i> is a root, or head, of a set.
"""
def __init__(self, size):
"""
Args:
size (int): Number of vertices in the graph
"""
self.parent = [None] * size # Which node is the parent of the node at position <i>
self.size = [1] * size # Size of the set rooted at index <i>, used to optimize merge
for i in range(size):
self.parent[i] = i # Make each node its own parent, creating n singleton sets.
def merge_set(self, node1, node2):
"""
Args:
node1, node2 (int): Indexes of nodes whose sets will be merged.
"""
# Get the roots of the sets containing <node1> and <node2>
# If <node1> and <node2> are already roots, this is constant O(1)
node1 = self.find_set(node1)
node2 = self.find_set(node2)
# Join the smaller set to the larger one, keeping trees shallow (faster find)
if self.size[node1] < self.size[node2]:
self.parent[node1] = node2 # Merge set(a) and set(b)
self.size[node2] += self.size[node1] # Add size of old set(a) to set(b)
else:
self.parent[node2] = node1 # Merge set(b) and set(a)
self.size[node1] += self.size[node2] # Add size of old set(b) to set(a)
def find_set(self, node):
"""
Get the root element of the set containing <node>
"""
if self.parent[node] != node:
# Path compression: memoize the result of the recursion
# in the list to optimize subsequent calls and make this
# operation practically constant, O(1)
self.parent[node] = self.find_set(self.parent[node])
# <node> is the set root, so we can return that index
return self.parent[node]
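# Illustrative sketch (not part of the original module) of the union-find
# operations in isolation:
#
#     ds = DisjointSet(4)
#     ds.merge_set(0, 1)
#     ds.merge_set(2, 3)
#     assert ds.find_set(0) == ds.find_set(1)
#     assert ds.find_set(0) != ds.find_set(2)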
def kruskal(vertex_count, edges, forest):
"""
Args:
vertex_count (int): Number of vertices in the graph
edges (list of Edge): Edges of the graph
forest (DisjointSet): DisjointSet of the vertices
Returns:
int: sum of weights of the minimum spanning tree
Kruskal algorithm:
This algorithm finds the subgraph with the fewest edges and the least
total weight that connects all vertices (the MST); the MST always
contains n-1 edges, because that is the minimum required to connect
n vertices.
Procedure:
Sort the edges by ascending weight.
Only take edges whose endpoints are in different sets.
Whenever we take an edge, merge the two sets so later edges inside
the merged set are discarded.
Repeat until n-1 edges are selected; the MST is then complete.
"""
edges.sort(key=lambda edge: edge.weight)
mst = [] # List of edges taken, minimum spanning tree
for edge in edges:
set_u = forest.find_set(edge.u) # Set of the node <u>
set_v = forest.find_set(edge.v) # Set of the node <v>
if set_u != set_v:
forest.merge_set(set_u, set_v)
mst.append(edge)
if len(mst) == vertex_count-1:
# If we have selected n-1 edges, all the other
# edges would be discarded, so we can stop here
break
return sum([edge.weight for edge in mst])
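# Worked example for the first sample graph in main() below: the sorted
# edge weights are 2 (3-4), 3 (1-2), 4 (3-5), 5 (2-4), 6 (4-5), 8 (1-3);
# Kruskal takes 2, 3, 4 and 5 (the rest would close a cycle), so the MST
# weight is 2 + 3 + 4 + 5 = 14, matching the expected output.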
def main():
"""
Test. How input works:
Input consists of different weighted, connected, undirected graphs.
line 1:
integers n, m
lines 2..m+2:
edge with the format -> node index u, node index v, integer weight
Samples of input:
5 6
1 2 3
1 3 8
2 4 5
3 4 2
3 5 4
4 5 6
3 3
2 1 20
3 1 20
2 3 100
Sum of weights of the optimal paths:
14, 40
"""
for size in sys.stdin:
vertex_count, edge_count = map(int, size.split())
forest = DisjointSet(vertex_count)
edges = [None] * edge_count # Create list of size <edge_count>
# Read <edge_count> edges from input
for i in range(edge_count):
source, target, weight = map(int, input().split())
source -= 1 # Convert from 1-indexed to 0-indexed
target -= 1 # Convert from 1-indexed to 0-indexed
edges[i] = Edge(source, target, weight)
# After finish input and graph creation, use Kruskal algorithm for MST:
print("MST weights sum:", kruskal(vertex_count, edges, forest))
if __name__ == "__main__":
main()
|
DisjointSet
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_glue.py
|
{
"start": 1823,
"end": 16324
}
|
class ____:
@pytest.mark.db_test
def test_render_template(self, create_task_instance_of_operator, session):
ti: TaskInstance = create_task_instance_of_operator(
GlueJobOperator,
dag_id=DAG_ID,
task_id=TASK_ID,
script_location="{{ dag.dag_id }}",
script_args="{{ dag.dag_id }}",
create_job_kwargs="{{ dag.dag_id }}",
iam_role_name="{{ dag.dag_id }}",
iam_role_arn="{{ dag.dag_id }}",
s3_bucket="{{ dag.dag_id }}",
job_name="{{ dag.dag_id }}",
)
session.add(ti)
session.commit()
rendered_template: GlueJobOperator = ti.render_templates()
assert rendered_template.script_location == DAG_ID
assert rendered_template.script_args == DAG_ID
assert rendered_template.create_job_kwargs == DAG_ID
assert rendered_template.iam_role_name == DAG_ID
assert rendered_template.iam_role_arn == DAG_ID
assert rendered_template.s3_bucket == DAG_ID
assert rendered_template.job_name == DAG_ID
@pytest.mark.parametrize(
"script_location",
[
"s3://glue-examples/glue-scripts/sample_aws_glue_job.py",
"/glue-examples/glue-scripts/sample_aws_glue_job.py",
],
)
@mock.patch.object(GlueJobHook, "print_job_logs")
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_without_failure(
self,
mock_load_file,
mock_get_conn,
mock_initialize_job,
mock_get_job_state,
mock_print_job_logs,
script_location,
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location=script_location,
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_name="my_test_role",
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
mock_initialize_job.assert_called_once_with({}, {})
mock_print_job_logs.assert_not_called()
assert glue.job_name == JOB_NAME
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
def test_role_arn_execute_deferrable(self, _, mock_initialize_job):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://folder/file",
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_arn="test_role",
deferrable=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
with pytest.raises(TaskDeferred) as defer:
glue.execute(mock.MagicMock())
assert defer.value.trigger.job_name == JOB_NAME
assert defer.value.trigger.run_id == JOB_RUN_ID
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
def test_execute_deferrable(self, _, mock_initialize_job):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://folder/file",
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_name="my_test_role",
deferrable=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
with pytest.raises(TaskDeferred) as defer:
glue.execute(mock.MagicMock())
assert defer.value.trigger.job_name == JOB_NAME
assert defer.value.trigger.run_id == JOB_RUN_ID
assert defer.value.trigger.region_name == "us-west-2"
assert not defer.value.trigger.verbose
assert defer.value.trigger.waiter_delay == 60
assert defer.value.trigger.attempts == 75
assert defer.value.trigger.aws_conn_id == "aws_default"
@mock.patch.object(GlueJobHook, "print_job_logs")
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_with_verbose_logging(
self, mock_load_file, mock_get_conn, mock_initialize_job, mock_get_job_state, mock_print_job_logs
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3_uri",
s3_bucket="bucket_name",
iam_role_name="role_arn",
verbose=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
mock_initialize_job.assert_called_once_with({}, {})
mock_print_job_logs.assert_called_once_with(
job_name=JOB_NAME, run_id=JOB_RUN_ID, continuation_tokens=mock.ANY
)
assert glue.job_name == JOB_NAME
@mock.patch.object(GlueJobHook, "print_job_logs")
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_without_verbose_logging(
self, mock_load_file, mock_get_conn, mock_initialize_job, mock_get_job_state, mock_print_job_logs
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3_uri",
s3_bucket="bucket_name",
iam_role_name="role_arn",
verbose=False,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
mock_initialize_job.assert_called_once_with({}, {})
mock_print_job_logs.assert_not_called()
assert glue.job_name == JOB_NAME
@mock.patch.object(GlueJobHook, "print_job_logs")
@mock.patch.object(GlueJobHook, "job_completion")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_without_waiting_for_completion(
self, mock_load_file, mock_get_conn, mock_initialize_job, mock_job_completion, mock_print_job_logs
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://glue-examples/glue-scripts/sample_aws_glue_job.py",
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_name="my_test_role",
wait_for_completion=False,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
job_run_id = glue.execute(mock.MagicMock())
mock_initialize_job.assert_called_once_with({}, {})
mock_job_completion.assert_not_called()
mock_print_job_logs.assert_not_called()
assert glue.job_name == JOB_NAME
assert job_run_id == JOB_RUN_ID
@mock.patch.object(GlueJobHook, "print_job_logs")
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_log_correct_url(
self, mock_load_file, mock_get_conn, mock_initialize_job, mock_get_job_state, mock_print_job_logs
):
region = "us-west-2"
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://glue-examples/glue-scripts/sample_aws_glue_job.py",
aws_conn_id="aws_default",
region_name=region,
s3_bucket="some_bucket",
iam_role_name="my_test_role",
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
aws_domain = GlueJobRunDetailsLink.get_aws_domain("aws")
glue_job_run_url = (
f"https://console.{aws_domain}/gluestudio/home?region="
f"{region}#/job/test_job_name%2Fwith_slash/run/{JOB_RUN_ID}"
)
with mock.patch.object(glue.log, "info") as mock_log_info:
job_run_id = glue.execute(mock.MagicMock())
assert job_run_id == JOB_RUN_ID
mock_log_info.assert_any_call("You can monitor this Glue Job run at: %s", glue_job_run_url)
@mock.patch.object(GlueJobHook, "conn")
@mock.patch.object(GlueJobHook, "get_conn")
def test_killed_without_stop_job_run_on_kill(
self,
_,
mock_glue_hook,
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://folder/file",
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_name="my_test_role",
)
glue.on_kill()
mock_glue_hook.batch_stop_job_run.assert_not_called()
@mock.patch.object(GlueJobHook, "conn")
@mock.patch.object(GlueJobHook, "get_conn")
def test_killed_with_stop_job_run_on_kill(
self,
_,
mock_glue_hook,
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://folder/file",
aws_conn_id="aws_default",
region_name="us-west-2",
s3_bucket="some_bucket",
iam_role_name="my_test_role",
stop_job_run_on_kill=True,
)
glue._job_run_id = JOB_RUN_ID
glue.on_kill()
mock_glue_hook.batch_stop_job_run.assert_called_once_with(
JobName=JOB_NAME,
JobRunIds=[JOB_RUN_ID],
)
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(GlueJobHook, "conn")
@mock.patch.object(S3Hook, "load_file")
def test_replace_script_file(
self, mock_load_file, mock_conn, mock_get_connection, mock_initialize_job, mock_get_job_state
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="folder/file",
s3_bucket="bucket_name",
iam_role_name="role_arn",
replace_script_file=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
mock_load_file.assert_called_once_with(
"folder/file", "artifacts/glue-scripts/file", bucket_name="bucket_name", replace=True
)
assert glue.s3_script_location == "s3://bucket_name/artifacts/glue-scripts/file"
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(GlueJobHook, "conn")
@mock.patch.object(S3Hook, "load_file")
@mock.patch.object(GlueJobOperator, "upload_etl_script_to_s3")
def test_upload_script_to_s3_no_upload(
self,
mock_upload,
mock_load_file,
mock_conn,
mock_get_connection,
mock_initialize_job,
mock_get_job_state,
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="s3://my_bucket/folder/file",
s3_bucket="bucket_name",
iam_role_name="role_arn",
replace_script_file=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
assert glue.s3_script_location == "s3://my_bucket/folder/file"
mock_load_file.assert_not_called()
mock_upload.assert_not_called()
@mock.patch.object(GlueJobHook, "get_job_state")
@mock.patch.object(GlueJobHook, "initialize_job")
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(GlueJobHook, "conn")
@mock.patch.object(S3Hook, "load_file")
@mock.patch.object(GlueJobOperator, "upload_etl_script_to_s3")
def test_no_script_file(
self,
mock_upload,
mock_load_file,
mock_conn,
mock_get_connection,
mock_initialize_job,
mock_get_job_state,
):
glue = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
iam_role_name="role_arn",
replace_script_file=True,
)
mock_initialize_job.return_value = {"JobRunState": "RUNNING", "JobRunId": JOB_RUN_ID}
mock_get_job_state.return_value = "SUCCEEDED"
glue.execute(mock.MagicMock())
assert glue.s3_script_location is None
mock_upload.assert_not_called()
def test_template_fields(self):
operator = GlueJobOperator(
task_id=TASK_ID,
job_name=JOB_NAME,
script_location="folder/file",
s3_bucket="bucket_name",
iam_role_name="role_arn",
replace_script_file=True,
)
validate_template_fields(operator)
def test_overwritten_conn_passed_to_hook(self):
OVERWRITTEN_CONN = "new-conn-id"
op = GlueJobOperator(
task_id=TASK_ID,
aws_conn_id=OVERWRITTEN_CONN,
iam_role_name="role_arn",
replace_script_file=True,
)
assert op.hook.aws_conn_id == OVERWRITTEN_CONN
def test_default_conn_passed_to_hook(self):
DEFAULT_CONN = "aws_default"
op = GlueJobOperator(
task_id=TASK_ID,
iam_role_name="role_arn",
replace_script_file=True,
)
assert op.hook.aws_conn_id == DEFAULT_CONN
|
TestGlueJobOperator
|
python
|
simonw__datasette
|
datasette/events.py
|
{
"start": 724,
"end": 888
}
|
class ____(Event):
"""
Event name: ``logout``
A user (represented by ``event.actor``) has logged out.
"""
name = "logout"
@dataclass
|
LogoutEvent
|
python
|
pola-rs__polars
|
py-polars/src/polars/datatypes/classes.py
|
{
"start": 27085,
"end": 30107
}
|
class ____(DataType):
"""
A fixed categorical encoding of a unique set of strings.
Parameters
----------
categories
The categories in the dataset; must be a unique set of strings, or an
existing Python string-valued enum.
Examples
--------
Explicitly define enumeration categories:
>>> pl.Enum(["north", "south", "east", "west"])
Enum(categories=['north', 'south', 'east', 'west'])
Initialise from an existing Python enumeration:
>>> from http import HTTPMethod
>>> pl.Enum(HTTPMethod)
Enum(categories=['CONNECT', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT', 'TRACE'])
""" # noqa: W505
categories: Series
def __init__(self, categories: Series | Iterable[str] | type[enum.Enum]) -> None:
if isclass(categories) and issubclass(categories, enum.Enum):
for enum_subclass in (enum.Flag, enum.IntEnum):
if issubclass(categories, enum_subclass):
enum_type_name = categories.__name__
msg = f"Enum categories must be strings; `{enum_type_name}` values are integers"
raise TypeError(msg)
enum_values = [
getattr(v, "value", v) for v in categories.__members__.values()
]
categories = pl.Series(values=enum_values)
elif not isinstance(categories, pl.Series):
categories = pl.Series(values=categories)
if categories.is_empty():
self.categories = pl.Series(name="category", dtype=String)
return
if categories.has_nulls():
msg = "Enum categories must not contain null values"
raise TypeError(msg)
if (dtype := categories.dtype) != String:
msg = f"Enum categories must be strings; found data of type {dtype}"
raise TypeError(msg)
if categories.n_unique() != categories.len():
duplicate = categories.filter(categories.is_duplicated())[0]
msg = f"Enum categories must be unique; found duplicate {duplicate!r}"
raise ValueError(msg)
self.categories = categories.rechunk().alias("category")
def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override]
# allow comparing object instances to class
if type(other) is DataTypeClass and issubclass(other, Enum):
return True
elif isinstance(other, Enum):
return self.categories.equals(other.categories)
else:
return False
def __hash__(self) -> int:
return hash((self.__class__, tuple(self.categories)))
def __repr__(self) -> str:
class_name = self.__class__.__name__
return f"{class_name}(categories={self.categories.to_list()!r})"
def union(self, other: Enum) -> Enum:
"""Union of two Enums."""
return Enum(
F.concat((self.categories, other.categories)).unique(maintain_order=True)
)
__or__ = union
|
Enum
|
python
|
Textualize__textual
|
tests/option_list/test_option_list_option_subclass.py
|
{
"start": 251,
"end": 451
}
|
class ____(Option):
"""An example subclass of a option."""
def __init__(self, test: int) -> None:
super().__init__(str(test), str(test), False)
self.test = test
|
OptionWithExtras
|
python
|
OmkarPathak__pygorithm
|
tests/test_greedy_algorithm.py
|
{
"start": 383,
"end": 724
}
|
class ____(unittest.TestCase):
def test_activity_selection(self):
start_times = [1, 3, 0, 5, 8, 5]
finish_times = [2, 4, 6, 7, 9, 9]
self.assertEqual(activity_selection.activity_selection(start_times, finish_times), [0, 1, 3, 4])
if __name__ == '__main__':
unittest.main()
|
TestActivitySelectionProblem
|
python
|
huggingface__transformers
|
src/transformers/models/mobilevit/modeling_mobilevit.py
|
{
"start": 11919,
"end": 12605
}
|
class ____(nn.Module):
def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int) -> None:
super().__init__()
self.layer = nn.ModuleList()
for _ in range(num_stages):
transformer_layer = MobileViTTransformerLayer(
config,
hidden_size=hidden_size,
intermediate_size=int(hidden_size * config.mlp_ratio),
)
self.layer.append(transformer_layer)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer_module in self.layer:
hidden_states = layer_module(hidden_states)
return hidden_states
|
MobileViTTransformer
|
python
|
plotly__plotly.py
|
plotly/graph_objs/ohlc/_increasing.py
|
{
"start": 233,
"end": 2375
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "ohlc"
_path_str = "ohlc.increasing"
_valid_props = {"line"}
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.increasing.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.ohlc.increasing.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def _prop_descriptions(self):
return """\
line
:class:`plotly.graph_objects.ohlc.increasing.Line`
instance or dict with compatible properties
"""
def __init__(self, arg=None, line=None, **kwargs):
"""
Construct a new Increasing object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.ohlc.Increasing`
line
:class:`plotly.graph_objects.ohlc.increasing.Line`
instance or dict with compatible properties
Returns
-------
Increasing
"""
super().__init__("increasing")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.ohlc.Increasing
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.Increasing`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("line", arg, line)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Increasing
|
python
|
spyder-ide__spyder
|
spyder/plugins/explorer/confpage.py
|
{
"start": 571,
"end": 3849
}
|
class ____(PluginConfigPage):
def setup_page(self):
# Variables
newcb = self.create_checkbox
# Widgets
general_widget = QWidget()
# General options group
basic_group = QGroupBox(_("General options"))
check_show_hidden_files = newcb(_("Show hidden files"), 'show_hidden')
check_single_click = newcb(
_("Single click to open files"), 'single_click_to_open')
basic_layout = QVBoxLayout()
basic_layout.addWidget(check_show_hidden_files)
basic_layout.addWidget(check_single_click)
basic_group.setLayout(basic_layout)
# Filter options group
filter_group = QGroupBox(_("Filter settings"))
description_label = QLabel(
_('Filter files by name, extension, or more using '
'<a href="https://en.wikipedia.org/wiki/Glob_(programming)">glob '
'patterns.</a> Please enter the glob patterns of the files you '
'want to show, separated by commas.'))
description_label.setOpenExternalLinks(True)
description_label.setWordWrap(True)
self.edit_filename_filters = self.create_textedit(
'',
'name_filters',
tip=("Enter values separated by commas"),
content_type=list,
)
self.edit_filename_filters.setEnabled(True)
reset_btn = QPushButton(_("Reset to default values"))
reset_btn.clicked.connect(self.reset_to_default)
filter_layout = QVBoxLayout()
filter_layout.addWidget(description_label)
filter_layout.addWidget(self.edit_filename_filters)
filter_layout.addWidget(reset_btn)
filter_group.setLayout(filter_layout)
associations_widget = QWidget()
self.edit_file_associations = self.create_textedit(
'',
'file_associations',
content_type=dict,
)
file_associations = FileAssociationsWidget()
# Widget setup
file_associations.load_values(self.get_option('file_associations', {}))
# The actual config data is stored on this text edit set to invisible
self.edit_file_associations.setVisible(False)
layout = QVBoxLayout()
layout.addWidget(basic_group)
layout.addWidget(filter_group)
general_widget.setLayout(layout)
layout_file = QVBoxLayout()
layout_file.addWidget(file_associations)
layout_file.addWidget(self.edit_file_associations)
associations_widget.setLayout(layout_file)
self.create_tab(_("General"), general_widget)
self.create_tab(_("File associations"), associations_widget)
# Signals
file_associations.sig_data_changed.connect(self.update_associations)
def update_associations(self, data):
"""
Update the content of the text edit used to store the config data.
"""
textedit = self.edit_file_associations.textbox
textedit.setPlainText(str(data))
def reset_to_default(self):
"""Reset the filter settings to default."""
self.set_option('name_filters', NAME_FILTERS)
textedit = self.edit_filename_filters.textbox
textedit.setPlainText(", ".join(NAME_FILTERS))
|
ExplorerConfigPage
|
python
|
numba__llvmlite
|
llvmlite/binding/typeref.py
|
{
"start": 5519,
"end": 7805
}
|
class ____(_TypeIterator):
def _dispose(self):
self._capi.LLVMPY_DisposeElementIter(self)
def _next(self):
return ffi.lib.LLVMPY_ElementIterNext(self)
# FFI
ffi.lib.LLVMPY_PrintType.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_PrintType.restype = c_void_p
ffi.lib.LLVMPY_TypeIsPointer.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_TypeIsPointer.restype = c_bool
ffi.lib.LLVMPY_TypeIsArray.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_TypeIsArray.restype = c_bool
ffi.lib.LLVMPY_TypeIsVector.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_TypeIsVector.restype = c_bool
ffi.lib.LLVMPY_TypeIsStruct.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_TypeIsStruct.restype = c_bool
ffi.lib.LLVMPY_TypeIsFunction.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_TypeIsFunction.restype = c_bool
ffi.lib.LLVMPY_IsPackedStruct.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_IsPackedStruct.restype = c_bool
ffi.lib.LLVMPY_IsOpaqueStruct.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_IsOpaqueStruct.restype = c_bool
ffi.lib.LLVMPY_IsLiteralStruct.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_IsLiteralStruct.restype = c_bool
ffi.lib.LLVMPY_GetReturnType.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_GetReturnType.restype = ffi.LLVMTypeRef
ffi.lib.LLVMPY_CountParamTypes.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_CountParamTypes.restype = c_uint
ffi.lib.LLVMPY_GetParamTypes.argtypes = [ffi.LLVMTypeRef,
POINTER(ffi.LLVMTypeRef)]
ffi.lib.LLVMPY_GetParamTypes.restype = None
ffi.lib.LLVMPY_IsFunctionVararg.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_IsFunctionVararg.restype = c_bool
ffi.lib.LLVMPY_GetTypeKind.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_GetTypeKind.restype = c_int
ffi.lib.LLVMPY_GetTypeElementCount.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_GetTypeElementCount.restype = c_int
ffi.lib.LLVMPY_GetTypeBitWidth.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_GetTypeBitWidth.restype = c_uint64
ffi.lib.LLVMPY_ElementIter.argtypes = [ffi.LLVMTypeRef]
ffi.lib.LLVMPY_ElementIter.restype = ffi.LLVMElementIterator
ffi.lib.LLVMPY_ElementIterNext.argtypes = [ffi.LLVMElementIterator]
ffi.lib.LLVMPY_ElementIterNext.restype = ffi.LLVMTypeRef
ffi.lib.LLVMPY_DisposeElementIter.argtypes = [ffi.LLVMElementIterator]
|
_TypeListIterator
|
python
|
getsentry__sentry
|
src/sentry/relay/types/rule_condition.py
|
{
"start": 776,
"end": 913
}
|
class ____(TypedDict):
"""Greater than or equal condition"""
op: Literal["gte"]
name: str
value: Value | None
|
GteCondition
|
python
|
django__django
|
tests/view_tests/tests/test_debug.py
|
{
"start": 21030,
"end": 50578
}
|
class ____(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get("/test_view/")
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">Can't find my keys</pre>', html
)
self.assertIn('<th scope="row">Request Method:</th>', html)
self.assertIn('<th scope="row">Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn("<p>jacob</p>", html)
self.assertIn('<th scope="row">Exception Type:</th>', html)
self.assertIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
self.assertIn("<p>No POST data</p>", html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError</h1>", html)
self.assertIn(
'<pre class="exception_value">Can't find my keys</pre>', html
)
self.assertNotIn('<th scope="row">Request Method:</th>', html)
self.assertNotIn('<th scope="row">Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th scope="row">Exception Type:</th>', html)
self.assertIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
def test_sharing_traceback(self):
try:
raise ValueError("Oops")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
'<form action="https://dpaste.com/" name="pasteform" '
'id="pasteform" method="post">',
html,
)
def test_eol_support(self):
"""
The ExceptionReporter supports Unix, Windows and Macintosh EOL markers
"""
LINES = ["print %d" % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ["\n", "\r\n", "\r"]:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:]),
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">No exception message supplied</pre>', html
)
self.assertIn('<th scope="row">Request Method:</th>', html)
self.assertIn('<th scope="row">Request URL:</th>', html)
self.assertNotIn('<th scope="row">Exception Type:</th>', html)
self.assertNotIn('<th scope="row">Exception Value:</th>', html)
self.assertNotIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
def test_suppressed_context(self):
try:
try:
raise RuntimeError("Can't find my keys")
except RuntimeError:
raise ValueError("Can't find my keys") from None
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError</h1>", html)
self.assertIn(
'<pre class="exception_value">Can't find my keys</pre>', html
)
self.assertIn('<th scope="row">Exception Type:</th>', html)
self.assertIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
self.assertNotIn("During handling of the above exception", html)
def test_innermost_exception_without_traceback(self):
try:
try:
raise RuntimeError("Oops")
except Exception as exc:
new_exc = RuntimeError("My context")
exc.__context__ = new_exc
raise
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
self.assertEqual(len(frames), 2)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>RuntimeError</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<th scope="row">Exception Type:</th>', html)
self.assertIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
self.assertIn(
"During handling of the above exception (My context), another "
"exception occurred",
html,
)
self.assertInHTML('<li class="frame user">None</li>', html)
self.assertIn("Traceback (most recent call last):\n None", html)
text = reporter.get_traceback_text()
self.assertIn("Exception Type: RuntimeError", text)
self.assertIn("Exception Value: Oops", text)
self.assertIn("Traceback (most recent call last):\n None", text)
self.assertIn(
"During handling of the above exception (My context), another "
"exception occurred",
text,
)
def test_exception_with_notes(self):
request = self.rf.get("/test_view/")
try:
try:
raise RuntimeError("Oops")
except Exception as err:
err.add_note("First Note")
err.add_note("Second Note")
err.add_note(mark_safe("<script>alert(1);</script>"))
raise err
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
'<pre class="exception_value">Oops\nFirst Note\nSecond Note\n'
"<script>alert(1);</script></pre>",
html,
)
self.assertIn(
"Exception Value: Oops\nFirst Note\nSecond Note\n"
"<script>alert(1);</script>",
html,
)
text = reporter.get_traceback_text()
self.assertIn(
"Exception Value: Oops\nFirst Note\nSecond Note\n"
"<script>alert(1);</script>",
text,
)
def test_mid_stack_exception_without_traceback(self):
try:
try:
raise RuntimeError("Inner Oops")
except Exception as exc:
new_exc = RuntimeError("My context")
new_exc.__context__ = exc
raise RuntimeError("Oops") from new_exc
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>RuntimeError</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<th scope="row">Exception Type:</th>', html)
self.assertIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertInHTML('<li class="frame user">Traceback: None</li>', html)
self.assertIn(
"During handling of the above exception (Inner Oops), another "
"exception occurred:\n Traceback: None",
html,
)
text = reporter.get_traceback_text()
self.assertIn("Exception Type: RuntimeError", text)
self.assertIn("Exception Value: Oops", text)
self.assertIn("Traceback (most recent call last):", text)
self.assertIn(
"During handling of the above exception (Inner Oops), another "
"exception occurred:\n Traceback: None",
text,
)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get("/test_view/")
try:
try:
raise AttributeError(mark_safe("<p>Top level</p>"))
except AttributeError as explicit:
try:
raise ValueError(mark_safe("<p>Second exception</p>")) from explicit
except ValueError:
raise IndexError(mark_safe("<p>Final exception</p>"))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = (
"The above exception ({0}) was the direct cause of the following exception:"
)
implicit_exc = (
"During handling of the above exception ({0}), another exception occurred:"
)
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
            self.assertEqual(
                2, html.count(explicit_exc.format("<p>Top level</p>"))
            )
            self.assertEqual(
                2, html.count(implicit_exc.format("<p>Second exception</p>"))
            )
            self.assertEqual(10, html.count("<p>Final exception</p>"))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format("<p>Top level</p>"), text)
self.assertIn(implicit_exc.format("<p>Second exception</p>"), text)
self.assertEqual(3, text.count("<p>Final exception</p>"))
@skipIf(
sys._xoptions.get("no_debug_ranges", False)
or os.environ.get("PYTHONNODEBUGRANGES", False),
"Fine-grained error locations are disabled.",
)
def test_highlight_error_position(self):
request = self.rf.get("/test_view/")
try:
try:
raise AttributeError("Top level")
except AttributeError as explicit:
try:
raise ValueError(mark_safe("<p>2nd exception</p>")) from explicit
except ValueError:
raise IndexError("Final exception")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
"<pre> raise AttributeError("Top level")\n"
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^</pre>",
html,
)
self.assertIn(
"<pre> raise ValueError(mark_safe("
""<p>2nd exception</p>")) from explicit\n"
" "
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^</pre>",
html,
)
self.assertIn(
"<pre> raise IndexError("Final exception")\n"
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^</pre>",
html,
)
# Pastebin.
self.assertIn(
" raise AttributeError("Top level")\n"
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
html,
)
self.assertIn(
" raise ValueError(mark_safe("
""<p>2nd exception</p>")) from explicit\n"
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
html,
)
self.assertIn(
" raise IndexError("Final exception")\n"
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
html,
)
# Text traceback.
text = reporter.get_traceback_text()
self.assertIn(
' raise AttributeError("Top level")\n'
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
text,
)
self.assertIn(
' raise ValueError(mark_safe("<p>2nd exception</p>")) from explicit\n'
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
text,
)
self.assertIn(
' raise IndexError("Final exception")\n'
" ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
text,
)
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, "generated", "exec")
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame["context_line"], "<source code not available>")
self.assertEqual(last_frame["filename"], "generated")
self.assertEqual(last_frame["function"], "funcName")
self.assertEqual(last_frame["lineno"], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n <source code not available>',
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n <source code not available>',
text,
)
def test_reporting_frames_source_not_match(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, "generated", "exec")
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
with mock.patch(
"django.views.debug.ExceptionReporter._get_source",
return_value=["wrong source"],
):
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame["context_line"], "<source code not available>")
self.assertEqual(last_frame["filename"], "generated")
self.assertEqual(last_frame["function"], "funcName")
self.assertEqual(last_frame["lineno"], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
" <source code not available>",
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError("outer") from RuntimeError("inner")
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
msg = (
"Cycle in the exception chain detected: exception 'inner' "
"encountered again."
)
with self.assertWarnsMessage(ExceptionCycleWarning, msg):
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail("Cyclic reference in Exception Reporter.get_traceback_frames()")
if tb_frames is None:
            # Can happen if the thread generating the traceback was killed
            # or raised an exception while generating it.
self.fail("Traceback generation failed")
last_frame = tb_frames[-1]
self.assertIn("raise exc.__cause__", last_frame["context_line"])
self.assertEqual(last_frame["filename"], __file__)
self.assertEqual(last_frame["function"], "test_func")
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">I'm a little teapot</pre>', html
)
self.assertIn('<th scope="row">Request Method:</th>', html)
self.assertIn('<th scope="row">Request URL:</th>', html)
self.assertNotIn('<th scope="row">Exception Type:</th>', html)
self.assertNotIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report</h1>", html)
self.assertIn(
'<pre class="exception_value">I'm a little teapot</pre>', html
)
self.assertNotIn('<th scope="row">Request Method:</th>', html)
self.assertNotIn('<th scope="row">Request URL:</th>', html)
self.assertNotIn('<th scope="row">Exception Type:</th>', html)
self.assertNotIn('<th scope="row">Exception Value:</th>', html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
def test_non_utf8_values_handling(self):
"""
Non-UTF-8 exceptions/values should not make the output generation
choke.
"""
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b"EXC\xe9EXC"
somevar = b"VAL\xe9VAL" # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn("VAL\\xe9VAL", html)
self.assertIn("EXC\\xe9EXC", html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe("<p>Local variable</p>")
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn(
                '<td class="code"><pre>'<p>Local variable</p>'</pre>'
"</td>",
html,
)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError("OOM")
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(""))
try:
class LargeOutput:
def __repr__(self):
return repr("A" * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn(
"<trimmed %d bytes string>" % (large + repr_of_str_adds,), html
)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe("abcdefghijkl<p>mnὀp</p>qrstuwxyz").encode("ascii")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn("<h2>Unicode error hint</h2>", html)
self.assertIn("The string that could not be encoded/decoded was: ", html)
self.assertIn("<strong><p>mnὀp</p></strong>", html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get("/test_view/")
importlib.import_module("abc.def.invalid.name")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ModuleNotFoundError at /test_view/</h1>", html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get("/test_view/")
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback",
)
@override_settings(ALLOWED_HOSTS="example.com")
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get("/", headers={"host": "evil.com"})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>'Oops'</pre></td>'
# GET
request = self.rf.get("/test_view/?items=Oops")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post("/test_view/", data={"items": "Oops"})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO("filecontent")
request = self.rf.post("/test_view/", data={"name": "filename", "items": fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
            '<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
            "items (application/octet-stream)></pre></td>",
html,
)
# COOKIES
rf = RequestFactory()
rf.cookies["items"] = "Oops"
request = rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre>'Oops'</pre></td>', html
)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get("/test_view/")
request.user = ExceptionUser()
try:
raise ValueError("Oops")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError at /test_view/</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn("<p>[unable to retrieve the current user]</p>", html)
text = reporter.get_traceback_text()
self.assertIn("USER: [unable to retrieve the current user]", text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened as utf-8 charset as is the default specified on
template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, "open") as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding="utf-8")
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding="utf-8")
@override_settings(ALLOWED_HOSTS=["example.com"])
def test_get_raw_insecure_uri(self):
factory = RequestFactory(headers={"host": "evil.com"})
tests = [
("////absolute-uri", "http://evil.com//absolute-uri"),
("/?foo=bar", "http://evil.com/?foo=bar"),
("/path/with:colons", "http://evil.com/path/with:colons"),
]
for url, expected in tests:
with self.subTest(url=url):
request = factory.get(url)
reporter = ExceptionReporter(request, None, None, None)
self.assertEqual(reporter._get_raw_insecure_uri(), expected)
|
ExceptionReporterTests
|
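Every test above follows the same capture-and-report pattern. A minimal sketch of that pattern on its own, assuming a configured Django settings module:

import sys

from django.views.debug import ExceptionReporter

try:
    raise ValueError("Can't find my keys")
except ValueError:
    exc_type, exc_value, tb = sys.exc_info()
    reporter = ExceptionReporter(None, exc_type, exc_value, tb)
    html = reporter.get_traceback_html()  # full HTML debug page
    text = reporter.get_traceback_text()  # plain-text variant (pastebin body)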
python
|
scikit-learn__scikit-learn
|
sklearn/neural_network/_stochastic_optimizers.py
|
{
"start": 1971,
"end": 6085
}
|
class ____(BaseOptimizer):
"""Stochastic gradient descent optimizer with momentum
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, default=0.1
The initial learning rate used. It controls the step-size in updating
the weights
lr_schedule : {'constant', 'adaptive', 'invscaling'}, default='constant'
Learning rate schedule for weight updates.
        - 'constant': a constant learning rate given by 'learning_rate_init'.
        - 'invscaling': gradually decreases the learning rate 'learning_rate_'
          at each time step 't' using an inverse scaling exponent of 'power_t'.
          learning_rate_ = learning_rate_init / pow(t, power_t)
        - 'adaptive': keeps the learning rate constant at 'learning_rate_init'
          as long as the training loss keeps decreasing. Each time 2
          consecutive epochs fail to decrease the training loss by tol, or
          fail to increase the validation score by tol if 'early_stopping' is
          on, the current learning rate is divided by 5.
momentum : float, default=0.9
Value of momentum used, must be larger than or equal to 0
nesterov : bool, default=True
Whether to use nesterov's momentum or not. Use nesterov's if True
power_t : float, default=0.5
Power of time step 't' in inverse scaling. See `lr_schedule` for
more details.
Attributes
----------
learning_rate : float
the current learning rate
velocities : list, length = len(params)
velocities that are used to update params
"""
def __init__(
self,
params,
learning_rate_init=0.1,
lr_schedule="constant",
momentum=0.9,
nesterov=True,
power_t=0.5,
):
super().__init__(learning_rate_init)
self.lr_schedule = lr_schedule
self.momentum = momentum
self.nesterov = nesterov
self.power_t = power_t
self.velocities = [np.zeros_like(param) for param in params]
def iteration_ends(self, time_step):
"""Perform updates to learning rate and potential other states at the
end of an iteration
Parameters
----------
time_step : int
number of training samples trained on so far, used to update
learning rate for 'invscaling'
"""
if self.lr_schedule == "invscaling":
self.learning_rate = (
float(self.learning_rate_init) / (time_step + 1) ** self.power_t
)
def trigger_stopping(self, msg, verbose):
if self.lr_schedule != "adaptive":
if verbose:
print(msg + " Stopping.")
return True
if self.learning_rate <= 1e-6:
if verbose:
print(msg + " Learning rate too small. Stopping.")
return True
self.learning_rate /= 5.0
if verbose:
print(msg + " Setting learning rate to %f" % self.learning_rate)
return False
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
updates = [
self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)
]
self.velocities = updates
if self.nesterov:
updates = [
self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)
]
return updates
|
SGDOptimizer
|
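The update computed by _get_updates above is classic momentum SGD. A minimal NumPy sketch of one step, with illustrative values:

import numpy as np

lr, momentum = 0.1, 0.9
param = np.array([1.0, -2.0])
velocity = np.zeros_like(param)
grad = np.array([0.5, -0.5])

# velocity <- momentum * velocity - lr * grad, then param <- param + velocity
velocity = momentum * velocity - lr * grad
param = param + velocity  # param is now [0.95, -1.95]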
python
|
catalyst-team__catalyst
|
catalyst/contrib/layers/lama.py
|
{
"start": 153,
"end": 403
}
|
class ____(nn.Module):
"""@TODO: Docs. Contribution is welcome."""
def forward(self, x: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
"""Forward call."""
x_out = x[:, -1:, :]
return x_out
|
TemporalLastPooling
|
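A quick usage sketch for the module above, assuming the class is in scope and the (batch, time, features) layout implied by the slicing:

import torch

pooling = TemporalLastPooling()
x = torch.randn(8, 10, 32)      # (batch, time, features)
out = pooling(x)
assert out.shape == (8, 1, 32)  # only the last time step is kept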
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/input_lib.py
|
{
"start": 26845,
"end": 40685
}
|
class ____(_IterableInput, composite_tensor.CompositeTensor):
"""Distributed dataset that supports prefetching to multiple devices."""
def __init__(
self,
input_workers,
strategy,
dataset=None,
num_replicas_in_sync=None,
input_context=None,
components=None,
element_spec=None,
enable_get_next_as_optional=None,
build=True,
options=None,
replica_order=None,
):
"""Distribute the dataset on all workers.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
dataset: `tf.data.Dataset` that will be used as the input source. Either
dataset or components field should be passed when constructing
DistributedDataset. Use this when constructing DistributedDataset from a
new `tf.data.Dataset`. Use components when constructing using
DistributedDatasetSpec.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that the
total batch size for each step (across all workers and replicas) adds up
to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
components: datasets when DistributedDataset is constructed from
DistributedDatasetSpec. Either field dataset or components should be
passed.
element_spec: element spec for DistributedDataset when constructing from
DistributedDatasetSpec. This will be used to set the element_spec for
DistributedDataset and verified against element_spec from components.
enable_get_next_as_optional: this is required when components is passed
instead of dataset.
build: whether to build underlying datasets when this object is created.
This is only useful for `ParameterServerStrategy` now.
options: `tf.distribute.InputOptions` used to control options on how this
dataset is distributed.
replica_order: the order of the replicas, which will be used to reorder
the iterators to match the device order.
"""
super(DistributedDataset, self).__init__(input_workers=input_workers)
if input_workers is None or strategy is None:
raise ValueError("input_workers and strategy are required arguments")
if dataset is not None and components is not None:
raise ValueError("Only one of dataset or components should be present")
if dataset is None and components is None:
raise ValueError("At least one of dataset or components should be passed")
self._input_workers = input_workers
self._strategy = strategy
self._options = options
self._input_context = input_context
self._num_replicas_in_sync = num_replicas_in_sync
self._replica_order = replica_order
if dataset is not None:
self._original_dataset = dataset
self._built = False
if build:
self.build()
else:
if not build:
raise ValueError(
"When constructing DistributedDataset with components, build "
"should not be False. This is an internal error. Please file a "
"bug.")
if enable_get_next_as_optional is None:
raise ValueError(
"When constructing DistributedDataset with components, " +
"enable_get_next_as_optional should also be passed")
self._cloned_datasets = components
self._cardinality = _cardinality(self._cloned_datasets[0])
self._enable_get_next_as_optional = enable_get_next_as_optional
assert element_spec is not None
if element_spec != _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec):
raise ValueError("Mismatched element_spec from the passed components")
self._element_spec = element_spec
self._built = True
def build(self, dataset_to_replace=None):
assert not self._built
dataset = dataset_to_replace or self._original_dataset
self._cardinality = _cardinality(dataset)
self._enable_get_next_as_optional = _enable_get_next_as_optional(
self._strategy, dataset, self._cardinality)
distribute_start_time_ns = time.time_ns()
self._create_cloned_datasets_from_dataset(dataset, self._input_context,
self._input_workers,
self._strategy,
self._num_replicas_in_sync)
if context.executing_eagerly():
# Records the time to initialize the distributed dataset.
context.async_wait()
distribute_duration_ms = (time.time_ns() -
distribute_start_time_ns) // 1_000_000
_distributed_dataset_initialization_time_milliseconds.get_cell(
self._strategy.__class__.__name__,
str(self._input_workers.num_workers)).add(distribute_duration_ms)
self._element_spec = _create_distributed_tensor_spec(
self._strategy, self._cloned_datasets[0].element_spec)
self._built = True
def auto_shard(self, num_shards, shard_ix):
assert (
len(self._cloned_datasets) == len(self._input_workers.worker_devices)
), (
f"datasets: {len(self._cloned_datasets)}, "
f"input workers: {len(self._input_workers.worker_devices)}"
)
sharded_datasets = []
for i in range(len(self._input_workers.worker_devices)):
with ops.colocate_with(self._cloned_datasets[i]._variant_tensor): # pylint:disable=protected-access
sharded_datasets.append(
input_ops.auto_shard_dataset(
self._cloned_datasets[i], num_shards, shard_ix,
self._num_replicas_in_sync
))
return DistributedDataset(
self._input_workers,
self._strategy,
components=sharded_datasets,
element_spec=self._element_spec,
options=self._options,
enable_get_next_as_optional=self._enable_get_next_as_optional)
@property
def cardinality(self):
if not self._built:
raise ValueError(
"Cannot get the cardinality of a dataset that is not built")
return self._cardinality
def _create_cloned_datasets_from_dataset(self, dataset, input_context,
input_workers, strategy,
num_replicas_in_sync):
# We clone and shard the dataset on each worker. The current setup tries to
# shard the dataset by files if possible so that each worker sees a
# different subset of files. If that is not possible, will attempt to shard
# the final input such that each worker will run the entire preprocessing
# pipeline and only receive its own shard of the dataset.
# Additionally, we rebatch the dataset on each worker into
# `num_replicas_in_sync` smaller batches to be distributed among that
# worker's replicas, so that the batch size for a global step (across all
# workers and replicas) adds up to the original dataset's batch size.
if num_replicas_in_sync is not None and num_replicas_in_sync > 1:
num_workers = input_context.num_input_pipelines if input_context else len(
input_workers.worker_devices)
rebatch_fn = self._make_rebatch_fn(dataset, num_workers,
num_replicas_in_sync)
else:
rebatch_fn = None
self._cloned_datasets = []
if input_context:
# Between-graph where we rely on the input_context for sharding
assert input_workers.num_workers == 1
if rebatch_fn is not None:
dataset = rebatch_fn(dataset, input_context.input_pipeline_id)
dataset = input_ops.auto_shard_dataset(dataset,
input_context.num_input_pipelines,
input_context.input_pipeline_id,
num_replicas_in_sync)
self._cloned_datasets.append(dataset)
else:
replicated_ds = distribute.replicate(dataset,
input_workers.worker_devices)
for i, worker in enumerate(input_workers.worker_devices):
with ops.device(worker):
cloned_dataset = replicated_ds[worker]
if rebatch_fn is not None:
cloned_dataset = rebatch_fn(cloned_dataset, i)
cloned_dataset = input_ops.auto_shard_dataset(
cloned_dataset, len(input_workers.worker_devices), i,
num_replicas_in_sync)
self._cloned_datasets.append(cloned_dataset)
def _make_rebatch_fn(self, dataset, num_workers, num_replicas_in_sync):
"""Returns a callable that rebatches the input dataset.
Args:
dataset: A `tf.data.Dataset` representing the dataset to be distributed.
num_workers: An integer representing the number of workers to distribute
`dataset` among.
num_replicas_in_sync: An integer representing the number of replicas in
sync across all workers.
"""
if num_replicas_in_sync % num_workers:
raise ValueError(
"tf.distribute expects every worker to have the same number of "
"replicas. However, encountered `num_replicas_in_sync` ({}) that "
"cannot be divided by `num_workers` ({})".format(
num_replicas_in_sync, num_workers))
num_replicas_per_worker = num_replicas_in_sync // num_workers
with ops.colocate_with(dataset._variant_tensor): # pylint: disable=protected-access
batch_size = distribute.compute_batch_size(dataset)
def rebatch_fn(dataset, worker_index):
try:
def apply_rebatch():
batch_sizes = distribute.batch_sizes_for_worker(
batch_size, num_workers, num_replicas_per_worker, worker_index)
return dataset.rebatch(batch_sizes).prefetch(num_replicas_per_worker)
# pylint: disable=protected-access
def apply_legacy_rebatch():
return distribute._LegacyRebatchDataset(
dataset, num_replicas_in_sync).prefetch(num_replicas_per_worker)
with ops.colocate_with(dataset._variant_tensor):
return tf_cond.cond(
math_ops.not_equal(batch_size, -1),
true_fn=apply_rebatch,
false_fn=apply_legacy_rebatch)
except errors.InvalidArgumentError as e:
if "without encountering a batch" in str(e):
six.reraise(
ValueError,
ValueError(
"Call the `batch` method on the input Dataset in order to be "
"able to split your input across {} replicas.\n Please see "
"the tf.distribute.Strategy guide. {}".format(
num_replicas_in_sync, e)),
sys.exc_info()[2])
else:
raise
return rebatch_fn
def __iter__(self):
if not (context.executing_eagerly() or
ops.get_default_graph().building_function):
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
if not self._built:
raise ValueError("To use this dataset, you need to pass this dataset to "
"ClusterCoordinator.create_per_worker_dataset.")
canonicalize_devices = getattr(self._strategy, "_canonicalize_devices",
True)
worker_iterators = _create_iterators_per_worker(
self._cloned_datasets,
self._input_workers,
options=self._options,
canonicalize_devices=canonicalize_devices)
iterator = DistributedIterator(
self._input_workers,
worker_iterators,
self._strategy,
cardinality=self._cardinality,
enable_get_next_as_optional=self._enable_get_next_as_optional,
options=self._options,
replica_order=self._replica_order,
)
iterator._element_spec = self._element_spec # pylint: disable=protected-access
# When async eager is enabled, sometimes the iterator may not finish
# initialization before passing to a multi device function, add a sync point
# here to make sure all underlying iterators are initialized.
if context.executing_eagerly():
context.async_wait()
return iterator
@property
def element_spec(self):
"""The type specification of an element of this dataset."""
# When partial batch handling is enabled, always set the batch dimension to
# None, otherwise we just follow element_spec of the underlying dataset
# (whose batch dimension may also be None). This is because with partial
# batching handling we could always produce empty batches.
if (self._enable_get_next_as_optional and
self._strategy.extended._in_multi_worker_mode()): # pylint: disable=protected-access
return nest.map_structure(
_rebatch_as_dynamic, self._element_spec, expand_composites=False)
return self._element_spec
@property
def _type_spec(self):
return DistributedDatasetSpec(
self._input_workers,
self._element_spec,
self._strategy,
self._options,
enable_get_next_as_optional=self._enable_get_next_as_optional)
|
DistributedDataset
|
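DistributedDataset is rarely constructed by hand; it is what a strategy hands back when you distribute a regular dataset. A minimal sketch:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.range(16).batch(4)

# Wraps the dataset in a distributed variant that shards and rebatches
# per replica, as described in the constructor docstring above.
dist_dataset = strategy.experimental_distribute_dataset(dataset)

@tf.function
def step(x):
    return x * 2

for batch in dist_dataset:
    strategy.run(step, args=(batch,))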
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/scroll_visible.py
|
{
"start": 359,
"end": 611
}
|
class ____(App):
def compose(self) -> ComposeResult:
with VerticalScroll():
yield MyCustomWidget()
def key_t(self) -> None:
self.query_one("#target").scroll_visible()
if __name__ == "__main__":
MyApp().run()
|
MyApp
|
python
|
huggingface__transformers
|
tests/models/prompt_depth_anything/test_image_processing_prompt_depth_anything.py
|
{
"start": 2811,
"end": 9180
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = PromptDepthAnythingImageProcessor if is_vision_available() else None
fast_image_processing_class = PromptDepthAnythingImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = PromptDepthAnythingImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
self.assertTrue(hasattr(image_processing, "prompt_scale_to_meter"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_keep_aspect_ratio(self):
size = {"height": 512, "width": 512}
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(size=size, keep_aspect_ratio=True, ensure_multiple_of=32)
image = np.zeros((489, 640, 3))
pixel_values = image_processor(image, return_tensors="pt").pixel_values
self.assertEqual(list(pixel_values.shape), [1, 3, 512, 672])
def test_prompt_depth_processing(self):
size = {"height": 756, "width": 756}
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(size=size, keep_aspect_ratio=True, ensure_multiple_of=32)
image = np.zeros((756, 1008, 3))
prompt_depth = np.random.random((192, 256))
outputs = image_processor(image, prompt_depth=prompt_depth, return_tensors="pt")
pixel_values = outputs.pixel_values
prompt_depth_values = outputs.prompt_depth
self.assertEqual(list(pixel_values.shape), [1, 3, 768, 1024])
self.assertEqual(list(prompt_depth_values.shape), [1, 1, 192, 256])
@require_torch
@require_vision
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
image = np.zeros((756, 1008, 3))
prompt_depth = np.random.random((192, 256))
size = {"height": 756, "width": 756}
image_processor_slow = self.image_processing_class(
size=size, keep_aspect_ratio=True, ensure_multiple_of=32, do_pad=True, size_divisor=51
)
image_processor_fast = self.fast_image_processing_class(
size=size, keep_aspect_ratio=True, ensure_multiple_of=32, do_pad=True, size_divisor=51
)
encoding_slow = image_processor_slow(image, prompt_depth=prompt_depth, return_tensors="pt")
encoding_fast = image_processor_fast(image, prompt_depth=prompt_depth, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self.assertEqual(encoding_slow.prompt_depth.dtype, encoding_fast.prompt_depth.dtype)
self._assert_slow_fast_tensors_equivalence(encoding_slow.prompt_depth, encoding_fast.prompt_depth)
@require_torch
@require_vision
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
batch_size = self.image_processor_tester.batch_size
images = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=True)
prompt_depths = [np.random.random((192, 256)) for _ in range(batch_size)]
size = {"height": 756, "width": 756}
image_processor_slow = self.image_processing_class(size=size, keep_aspect_ratio=False, ensure_multiple_of=32)
image_processor_fast = self.fast_image_processing_class(
size=size, keep_aspect_ratio=False, ensure_multiple_of=32
)
encoding_slow = image_processor_slow(images, prompt_depth=prompt_depths, return_tensors="pt")
encoding_fast = image_processor_fast(images, prompt_depth=prompt_depths, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self.assertEqual(encoding_slow.prompt_depth.dtype, encoding_fast.prompt_depth.dtype)
self._assert_slow_fast_tensors_equivalence(encoding_slow.prompt_depth, encoding_fast.prompt_depth)
|
PromptDepthAnythingImageProcessingTest
|
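The flow exercised by test_prompt_depth_processing can be reproduced directly; a minimal sketch, assuming transformers is installed with vision support:

import numpy as np
from transformers import PromptDepthAnythingImageProcessor

processor = PromptDepthAnythingImageProcessor(
    size={"height": 756, "width": 756}, keep_aspect_ratio=True, ensure_multiple_of=32
)
image = np.zeros((756, 1008, 3))
prompt_depth = np.random.random((192, 256))
outputs = processor(image, prompt_depth=prompt_depth, return_tensors="pt")
# Per the test above: pixel_values has shape [1, 3, 768, 1024],
# prompt_depth has shape [1, 1, 192, 256].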
python
|
getsentry__sentry
|
tests/snuba/rules/conditions/test_event_frequency.py
|
{
"start": 32548,
"end": 43492
}
|
class ____(StandardIntervalTestBase):
__test__ = Abstract(__module__, __qualname__)
rule_cls = EventUniqueUserFrequencyConditionWithConditions
def increment(self, event, count, environment=None, timestamp=None):
timestamp = timestamp if timestamp else before_now(minutes=1)
data = {"fingerprint": event.data["fingerprint"]}
if environment:
data["environment"] = environment
for _ in range(count):
event_data = deepcopy(data)
event_data["user"] = {"id": uuid4().hex}
self.add_event(
data=event_data,
project_id=self.project.id,
timestamp=timestamp,
)
def test_comparison(self) -> None:
# Test data is 4 events in the current period and 2 events in the comparison period, so
# a 100% increase.
event = self.add_event(
data={
"fingerprint": ["something_random"],
"user": {"id": uuid4().hex},
},
project_id=self.project.id,
timestamp=before_now(minutes=1),
)
self.increment(
event,
3,
timestamp=timezone.now() - timedelta(minutes=1),
)
self.increment(
event,
2,
timestamp=timezone.now() - timedelta(days=1, minutes=20),
)
data = {
"interval": "1h",
"value": 99,
"comparisonType": "percent",
"comparisonInterval": "1d",
"id": "EventFrequencyConditionWithConditions",
}
rule = self.get_rule(
data=data,
rule=Rule(
environment_id=None,
project_id=self.project.id,
data={
"conditions": [data],
"filter_match": "all",
},
),
)
self.assertPasses(rule, event, is_new=False)
data = {
"interval": "1h",
"value": 101,
"comparisonType": "percent",
"comparisonInterval": "1d",
"id": "EventFrequencyConditionWithConditions",
}
rule = self.get_rule(
data=data,
rule=Rule(
environment_id=None,
project_id=self.project.id,
data={
"conditions": [data],
"filter_match": "all",
},
),
)
self.assertDoesNotPass(rule, event, is_new=False)
def test_comparison_empty_comparison_period(self) -> None:
# Test data is 1 event in the current period and 0 events in the comparison period. This
# should always result in 0 and never fire.
event = self.add_event(
data={
"fingerprint": ["something_random"],
"user": {"id": uuid4().hex},
},
project_id=self.project.id,
timestamp=before_now(minutes=1),
)
data = {
"filter_match": "all",
"conditions": [
{
"interval": "1h",
"value": 0,
"comparisonType": "percent",
"comparisonInterval": "1d",
}
],
}
rule = self.get_rule(
data=data, rule=Rule(environment_id=None, project_id=self.project.id, data=data)
)
self.assertDoesNotPass(rule, event, is_new=False)
data = {
"filter_match": "all",
"conditions": [
{
"interval": "1h",
"value": 100,
"comparisonType": "percent",
"comparisonInterval": "1d",
}
],
}
rule = self.get_rule(
data=data, rule=Rule(environment_id=None, project_id=self.project.id, data=data)
)
self.assertDoesNotPass(rule, event, is_new=False)
def _run_test(self, minutes, data, passes, add_events=False):
data["filter_match"] = "all"
data["conditions"] = data.get("conditions", [])
rule = self.get_rule(
data=data,
rule=Rule(environment_id=None, project_id=self.project.id, data=data),
)
environment_rule = self.get_rule(
data=data,
rule=Rule(
environment_id=self.environment.id,
project_id=self.project.id,
data=data,
),
)
event = self.add_event(
data={
"fingerprint": ["something_random"],
"user": {"id": uuid4().hex},
},
project_id=self.project.id,
timestamp=before_now(minutes=minutes),
)
if add_events:
self.increment(
event,
data["value"] + 1,
environment=self.environment.name,
timestamp=timezone.now() - timedelta(minutes=minutes),
)
self.increment(
event,
data["value"] + 1,
timestamp=timezone.now() - timedelta(minutes=minutes),
)
if passes:
self.assertPasses(rule, event, is_new=False)
self.assertPasses(environment_rule, event, is_new=False)
else:
self.assertDoesNotPass(rule, event, is_new=False)
self.assertDoesNotPass(environment_rule, event, is_new=False)
def test_convert_rule_condition_to_snuba_condition() -> None:
# Test non-TaggedEventFilter condition
condition = {"id": "some.other.condition"}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
condition
)
is None
)
# Test TaggedEventFilter conditions
base_condition = {
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "test_key",
"value": "test_value",
}
# Test equality
eq_condition = {**base_condition, "match": MatchType.EQUAL}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
eq_condition
)
== (
"tags[test_key]",
Op.EQ.value,
"test_value",
)
)
# Test inequality
ne_condition = {**base_condition, "match": MatchType.NOT_EQUAL}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
ne_condition
)
== (
"tags[test_key]",
Op.NEQ.value,
"test_value",
)
)
# Test starts with
sw_condition = {**base_condition, "match": MatchType.STARTS_WITH}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
sw_condition
)
== (
"tags[test_key]",
Op.LIKE.value,
"test_value%",
)
)
# Test not starts with
nsw_condition = {**base_condition, "match": MatchType.NOT_STARTS_WITH}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
nsw_condition
)
== (
"tags[test_key]",
Op.NOT_LIKE.value,
"test_value%",
)
)
# Test ends with
ew_condition = {**base_condition, "match": MatchType.ENDS_WITH}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
ew_condition
)
== (
"tags[test_key]",
Op.LIKE.value,
"%test_value",
)
)
# Test not ends with
new_condition = {**base_condition, "match": MatchType.NOT_ENDS_WITH}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
new_condition
)
== (
"tags[test_key]",
Op.NOT_LIKE.value,
"%test_value",
)
)
# Test contains
co_condition = {**base_condition, "match": MatchType.CONTAINS}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
co_condition
)
== (
"tags[test_key]",
Op.LIKE.value,
"%test_value%",
)
)
# Test not contains
nc_condition = {**base_condition, "match": MatchType.NOT_CONTAINS}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
nc_condition
)
== (
"tags[test_key]",
Op.NOT_LIKE.value,
"%test_value%",
)
)
# Test is set
is_condition = {**base_condition, "match": MatchType.IS_SET}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
is_condition
)
== (
"tags[test_key]",
Op.IS_NOT_NULL.value,
None,
)
)
# Test not set
ns_condition = {**base_condition, "match": MatchType.NOT_SET}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
ns_condition
)
== (
"tags[test_key]",
Op.IS_NULL.value,
None,
)
)
# Test is in
in_condition = {
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "test_key",
"value": "test_value_1,test_value_2",
"match": MatchType.IS_IN,
}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
in_condition
)
== (
"tags[test_key]",
Op.IN.value,
["test_value_1", "test_value_2"],
)
)
# Test not in
not_in_condition = {
"id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
"key": "test_key",
"value": "test_value_1,test_value_2",
"match": MatchType.NOT_IN,
}
assert (
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
not_in_condition
)
== (
"tags[test_key]",
Op.NOT_IN.value,
["test_value_1", "test_value_2"],
)
)
# Test unsupported match type
with pytest.raises(ValueError, match="Unsupported match type: unsupported"):
EventUniqueUserFrequencyConditionWithConditions.convert_rule_condition_to_snuba_condition(
{**base_condition, "match": "unsupported"}
)
|
EventUniqueUserFrequencyConditionWithConditionsTestCase
|
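For reference, every assertion in test_convert_rule_condition_to_snuba_condition checks a (column, operator, value) triple. A plain-data sketch of one input/output pair, written out without importing Sentry (the "eq" and "=" literals mirror MatchType.EQUAL and Op.EQ.value as used in the tests):

tagged_event_condition = {
    "id": "sentry.rules.filters.tagged_event.TaggedEventFilter",
    "key": "test_key",
    "value": "test_value",
    "match": "eq",  # MatchType.EQUAL
}
expected_snuba_condition = ("tags[test_key]", "=", "test_value")  # Op.EQ.value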
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_syntax_extensions.py
|
{
"start": 1321,
"end": 1940
}
|
class ____(SyntaxExtension, ClauseElement):
_traverse_internals = []
def apply_to_select(self, select_stmt):
select_stmt.apply_syntax_extension_point(
lambda existing: [*existing, self],
"post_criteria",
)
def apply_to_update(self, update_stmt: Update) -> None:
update_stmt.apply_syntax_extension_point(
lambda existing: [self], "post_criteria"
)
def apply_to_delete(self, delete_stmt: Delete) -> None:
delete_stmt.apply_syntax_extension_point(
lambda existing: [self], "post_criteria"
)
|
PostCriteriaClause
|
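A sketch of attaching the extension above to a statement, assuming the SQLAlchemy 2.1-style generative ext() hook (which invokes apply_to_select() on the extension); actually rendering the clause would additionally require a compiler rule, omitted here:

from sqlalchemy import column, select

stmt = select(column("x")).ext(PostCriteriaClause())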
python
|
django__django
|
django/db/models/fields/related_lookups.py
|
{
"start": 5874,
"end": 5958
}
|
class ____(RelatedLookupMixin, GreaterThanOrEqual):
pass
|
RelatedGreaterThanOrEqual
|
python
|
getsentry__sentry
|
src/sentry/api/permissions.py
|
{
"start": 2609,
"end": 2984
}
|
class ____(BasePermission):
def has_permission(self, request: Request, view: object) -> bool:
enforce_staff_permission = has_staff_option(request.user)
if enforce_staff_permission:
return StaffPermission().has_permission(request, view)
return SuperuserPermission().has_permission(request, view)
|
SuperuserOrStaffFeatureFlaggedPermission
|
python
|
Pylons__pyramid
|
src/pyramid/interfaces.py
|
{
"start": 55832,
"end": 58058
}
|
class ____(Interface):
"""
A cache buster modifies the URL generation machinery for
:meth:`~pyramid.request.Request.static_url`. See :ref:`cache_busting`.
.. versionadded:: 1.6
"""
def __call__(request, subpath, kw):
"""
Modifies a subpath and/or keyword arguments from which a static asset
URL will be computed during URL generation.
The ``subpath`` argument is a path of ``/``-delimited segments that
represent the portion of the asset URL which is used to find the asset.
The ``kw`` argument is a dict of keywords that are to be passed
eventually to :meth:`~pyramid.request.Request.static_url` for URL
generation. The return value should be a two-tuple of
``(subpath, kw)`` where ``subpath`` is the relative URL from where the
file is served and ``kw`` is the same input argument. The return value
should be modified to include the cache bust token in the generated
URL.
The ``kw`` dictionary contains extra arguments passed to
:meth:`~pyramid.request.Request.static_url` as well as some extra
        items that may be useful, including:
- ``pathspec`` is the path specification for the resource
to be cache busted.
- ``rawspec`` is the original location of the file, ignoring
any calls to :meth:`pyramid.config.Configurator.override_asset`.
The ``pathspec`` and ``rawspec`` values are only different in cases
where an asset has been mounted into a virtual location using
:meth:`pyramid.config.Configurator.override_asset`. For example, with
        a call to ``request.static_url('myapp:static/foo.png')``, the
``pathspec`` is ``myapp:static/foo.png`` whereas the ``rawspec`` may
be ``themepkg:bar.png``, assuming a call to
``config.override_asset('myapp:static/foo.png', 'themepkg:bar.png')``.
"""
# configuration phases: a lower phase number means the actions associated
# with this phase will be executed earlier than those with later phase
# numbers. The default phase number is 0, FTR.
PHASE0_CONFIG = -30
PHASE1_CONFIG = -20
PHASE2_CONFIG = -10
PHASE3_CONFIG = 0
|
ICacheBuster
|
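A minimal sketch of a class honoring the ICacheBuster contract above: it folds a fixed token into the query string via the standard _query keyword (the class name and token are illustrative):

class QueryStringCacheBuster:
    """Append ?x=<token> to URLs generated for static assets."""

    def __init__(self, token):
        self.token = token

    def __call__(self, request, subpath, kw):
        # Per the interface: return the (subpath, kw) pair that static_url()
        # will use; here only the query-string keywords are modified.
        kw.setdefault("_query", {})
        kw["_query"]["x"] = self.token
        return subpath, kw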
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/base.py
|
{
"start": 5207,
"end": 14943
}
|
class ____:
provider_id: str
def get_mocked_response(self):
pass
def get_expected_to_str(self):
raise NotImplementedError
def get_access_token(self) -> str:
return "testac"
def get_refresh_token(self) -> str:
return "testrf"
def get_login_response_json(self, with_refresh_token=True):
response = {
"uid": uuid.uuid4().hex,
"access_token": self.get_access_token(),
}
if with_refresh_token:
response["refresh_token"] = self.get_refresh_token()
return json.dumps(response)
def mocked_response(self, *responses):
return mocked_response(*responses)
def setUp(self):
super(OAuth2TestsMixin, self).setUp()
self.setup_provider()
def setup_provider(self):
self.app = setup_app(self.provider_id)
self.request = RequestFactory().get("/")
self.provider = self.app.get_provider(self.request)
def test_provider_has_no_pkce_params(self):
provider_settings = app_settings.PROVIDERS.get(self.app.provider, {})
provider_settings_with_pkce_set = provider_settings.copy()
provider_settings_with_pkce_set["OAUTH_PKCE_ENABLED"] = False
with self.settings(
SOCIALACCOUNT_PROVIDERS={self.app.provider: provider_settings_with_pkce_set}
):
self.assertEqual(self.provider.get_pkce_params(), {})
def test_provider_has_pkce_params(self):
provider_settings = app_settings.PROVIDERS.get(self.app.provider, {})
provider_settings_with_pkce_set = provider_settings.copy()
provider_settings_with_pkce_set["OAUTH_PKCE_ENABLED"] = True
with self.settings(
SOCIALACCOUNT_PROVIDERS={self.app.provider: provider_settings_with_pkce_set}
):
pkce_params = self.provider.get_pkce_params()
self.assertEqual(
set(pkce_params.keys()),
{"code_challenge", "code_challenge_method", "code_verifier"},
)
hashed_verifier = hashlib.sha256(
pkce_params["code_verifier"].encode("ascii")
)
code_challenge = base64.urlsafe_b64encode(hashed_verifier.digest())
code_challenge_without_padding = code_challenge.rstrip(b"=")
assert pkce_params["code_challenge"] == code_challenge_without_padding
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login(self):
resp_mock = self.get_mocked_response()
if not resp_mock:
warnings.warn("Cannot test provider %s, no oauth mock" % self.provider.id)
return
resp = self.login(
resp_mock,
)
self.assertRedirects(resp, reverse("socialaccount_signup"))
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login_with_pkce_disabled(self):
provider_settings = app_settings.PROVIDERS.get(self.app.provider, {})
provider_settings_with_pkce_disabled = provider_settings.copy()
provider_settings_with_pkce_disabled["OAUTH_PKCE_ENABLED"] = False
with self.settings(
SOCIALACCOUNT_PROVIDERS={
self.app.provider: provider_settings_with_pkce_disabled
}
):
resp_mock = self.get_mocked_response()
if not resp_mock:
warnings.warn(
"Cannot test provider %s, no oauth mock" % self.provider.id
)
return
resp = self.login(
resp_mock,
)
self.assertRedirects(resp, reverse("socialaccount_signup"))
@override_settings(SOCIALACCOUNT_AUTO_SIGNUP=False)
def test_login_with_pkce_enabled(self):
provider_settings = app_settings.PROVIDERS.get(self.app.provider, {})
provider_settings_with_pkce_enabled = provider_settings.copy()
provider_settings_with_pkce_enabled["OAUTH_PKCE_ENABLED"] = True
with self.settings(
SOCIALACCOUNT_PROVIDERS={
self.app.provider: provider_settings_with_pkce_enabled
}
):
resp_mock = self.get_mocked_response()
if not resp_mock:
warnings.warn(
"Cannot test provider %s, no oauth mock" % self.provider.id
)
return
resp = self.login(
resp_mock,
)
self.assertRedirects(resp, reverse("socialaccount_signup"))
@override_settings(SOCIALACCOUNT_STORE_TOKENS=True)
def test_account_tokens(self, multiple_login=False):
email = "user@example.com"
user = get_user_model()(is_active=True)
user_email(user, email)
user_username(user, "user")
user.set_password("test")
user.save()
EmailAddress.objects.create(user=user, email=email, primary=True, verified=True)
self.client.login(username=user.username, password="test")
self.login(self.get_mocked_response(), process="connect")
if multiple_login:
self.login(
self.get_mocked_response(),
with_refresh_token=False,
process="connect",
)
# get account
sa = SocialAccount.objects.filter(
user=user, provider=self.provider.app.provider_id or self.provider.id
).get()
provider_account = sa.get_provider_account()
self.assertEqual(provider_account.to_str(), self.get_expected_to_str())
# The following lines don't actually test that much, but at least
# we make sure that the code is hit.
provider_account.get_avatar_url()
provider_account.get_profile_url()
provider_account.get_brand()
# get token
if self.app:
t = sa.socialtoken_set.get()
# verify access_token and refresh_token
self.assertEqual(self.get_access_token(), t.token)
resp = json.loads(self.get_login_response_json(with_refresh_token=True))
if "refresh_token" in resp:
refresh_token = resp.get("refresh_token")
elif "refreshToken" in resp:
refresh_token = resp.get("refreshToken")
else:
refresh_token = ""
self.assertEqual(t.token_secret, refresh_token)
@override_settings(SOCIALACCOUNT_STORE_TOKENS=True)
def test_account_refresh_token_saved_next_login(self):
"""
Fails if a login that is missing a refresh token deletes the previously
saved refresh token. Systems such as Google's OAuth only send
a refresh token on the first login.
"""
self.test_account_tokens(multiple_login=True)
def login(self, resp_mock=None, process="login", with_refresh_token=True):
with self.mocked_response():
resp = self.client.post(
self.provider.get_login_url(self.request, process=process)
)
p = urlparse(resp["location"])
q = parse_qs(p.query)
pkce_enabled = app_settings.PROVIDERS.get(self.app.provider, {}).get(
"OAUTH_PKCE_ENABLED", self.provider.pkce_enabled_default
)
self.assertEqual("code_challenge" in q, pkce_enabled)
self.assertEqual("code_challenge_method" in q, pkce_enabled)
if pkce_enabled:
code_challenge = q["code_challenge"][0]
self.assertEqual(q["code_challenge_method"][0], "S256")
complete_url = self.provider.get_callback_url()
self.assertGreater(q["redirect_uri"][0].find(complete_url), 0)
response_json = self.get_login_response_json(
with_refresh_token=with_refresh_token
)
if isinstance(resp_mock, list):
resp_mocks = resp_mock
elif resp_mock is None:
resp_mocks = []
else:
resp_mocks = [resp_mock]
with self.mocked_response(
MockedResponse(
HTTPStatus.OK, response_json, {"content-type": "application/json"}
),
*resp_mocks,
):
resp = self.client.get(complete_url, self.get_complete_parameters(q))
# Find the access token POST request, and assert that it contains
# the correct code_verifier if and only if PKCE is enabled
request_calls = requests.Session.request.call_args_list
for args, kwargs in request_calls:
data = kwargs.get("data", {})
if (
args[0] == "POST"
and isinstance(data, dict)
and data.get("redirect_uri", "").endswith(complete_url)
):
self.assertEqual("code_verifier" in data, pkce_enabled)
if pkce_enabled:
hashed_code_verifier = hashlib.sha256(
data["code_verifier"].encode("ascii")
)
expected_code_challenge = (
base64.urlsafe_b64encode(hashed_code_verifier.digest())
.rstrip(b"=")
.decode()
)
self.assertEqual(code_challenge, expected_code_challenge)
return resp
def get_complete_parameters(self, q):
return {"code": "test", "state": q["state"][0]}
def test_authentication_error(self):
resp = self.client.get(self.provider.get_callback_url())
self.assertTemplateUsed(
resp,
"socialaccount/authentication_error.%s"
% getattr(settings, "ACCOUNT_TEMPLATE_EXTENSION", "html"),
)
|
OAuth2TestsMixin
|
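The mixin above verifies RFC 7636's S256 code-challenge transform in two places (get_pkce_params and the token-request assertion). A minimal standalone sketch of that derivation using only the stdlib; the helper name is illustrative, not part of the test suite:

import base64
import hashlib
import secrets

def make_pkce_pair():
    # High-entropy verifier; RFC 7636 allows 43-128 URL-safe characters.
    code_verifier = secrets.token_urlsafe(64)
    digest = hashlib.sha256(code_verifier.encode("ascii")).digest()
    # S256 transform: base64url-encode the SHA-256 digest, strip '=' padding.
    code_challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return code_verifier, code_challenge

The challenge travels in the authorization request and the verifier only in the later token request, which is exactly what the login() helper asserts.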
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 8403,
"end": 8799
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
if scipy.sparse.issparse(X):
return float(helper_functions.get_value("MissingValues").sum())
else:
return float(np.count_nonzero(helper_functions.get_value("MissingValues")))
@metafeatures.define("PercentageOfMissingValues", dependency="NumberOfMissingValues")
|
NumberOfMissingValues
|
python
|
facebook__pyre-check
|
client/language_server/tests/daemon_connection_test.py
|
{
"start": 841,
"end": 1248
}
|
class ____(AsyncContextManager[T]):
def __init__(self, value: T) -> None:
self.value: T = value
async def __aenter__(self) -> T:
return self.value
async def __aexit__(
self,
typ: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> Optional[bool]:
return False
|
MockAsyncContextManager
|
python
|
mlflow__mlflow
|
mlflow/projects/databricks.py
|
{
"start": 3469,
"end": 22005
}
|
class ____:
"""
Helper class for running an MLflow project as a Databricks Job.
Args:
databricks_profile: Optional Databricks CLI profile to use to fetch hostname &
authentication information when making Databricks API requests.
"""
def __init__(self, databricks_profile_uri):
self.databricks_profile_uri = databricks_profile_uri
def _databricks_api_request(self, endpoint, method, **kwargs):
host_creds = databricks_utils.get_databricks_host_creds(self.databricks_profile_uri)
return rest_utils.http_request_safe(
host_creds=host_creds, endpoint=endpoint, method=method, **kwargs
)
def _jobs_runs_submit(self, req_body):
response = self._databricks_api_request(
endpoint="/api/2.0/jobs/runs/submit", method="POST", json=req_body
)
return json.loads(response.text)
def _upload_to_dbfs(self, src_path, dbfs_fuse_uri):
"""
Upload the file at `src_path` to the specified DBFS URI within the Databricks workspace
corresponding to the default Databricks CLI profile.
"""
_logger.info("=== Uploading project to DBFS path %s ===", dbfs_fuse_uri)
http_endpoint = dbfs_fuse_uri
with open(src_path, "rb") as f:
try:
self._databricks_api_request(endpoint=http_endpoint, method="POST", data=f)
except MlflowException as e:
if "Error 409" in e.message and "File already exists" in e.message:
_logger.info("=== Did not overwrite existing DBFS path %s ===", dbfs_fuse_uri)
else:
raise e
def _dbfs_path_exists(self, dbfs_path):
"""
Return True if the passed-in path exists in DBFS for the workspace corresponding to the
default Databricks CLI profile. The path is expected to be a relative path to the DBFS root
directory, e.g. 'path/to/file'.
"""
host_creds = databricks_utils.get_databricks_host_creds(self.databricks_profile_uri)
response = rest_utils.http_request(
host_creds=host_creds,
endpoint="/api/2.0/dbfs/get-status",
method="GET",
json={"path": f"/{dbfs_path}"},
)
try:
json_response_obj = json.loads(response.text)
except Exception:
raise MlflowException(
f"API request to check existence of file at DBFS path {dbfs_path} failed with "
f"status code {response.status_code}. Response body: {response.text}"
)
# If request fails with a RESOURCE_DOES_NOT_EXIST error, the file does not exist on DBFS
error_code_field = "error_code"
if error_code_field in json_response_obj:
if json_response_obj[error_code_field] == "RESOURCE_DOES_NOT_EXIST":
return False
raise ExecutionException(
f"Got unexpected error response when checking whether file {dbfs_path} "
f"exists in DBFS: {json_response_obj}"
)
return True
def _upload_project_to_dbfs(self, project_dir, experiment_id):
"""
Tars a project directory into an archive in a temp dir and uploads it to DBFS, returning
the HDFS-style URI of the tarball in DBFS (e.g. dbfs:/path/to/tar).
Args:
project_dir: Path to a directory containing an MLflow project to upload to DBFS (e.g.
a directory containing an MLproject file).
"""
with tempfile.TemporaryDirectory() as temp_tarfile_dir:
temp_tar_filename = os.path.join(temp_tarfile_dir, "project.tar.gz")
def custom_filter(x):
return None if os.path.basename(x.name) == "mlruns" else x
directory_size = file_utils._get_local_project_dir_size(project_dir)
_logger.info(
f"=== Creating tarball from {project_dir} in temp directory {temp_tarfile_dir} ==="
)
_logger.info(f"=== Total file size to compress: {directory_size} KB ===")
file_utils.make_tarfile(
temp_tar_filename, project_dir, DB_TARFILE_ARCHIVE_NAME, custom_filter=custom_filter
)
with open(temp_tar_filename, "rb") as tarred_project:
tarfile_hash = hashlib.sha256(tarred_project.read()).hexdigest()
# TODO: Get subdirectory for experiment from the tracking server
dbfs_path = posixpath.join(
DBFS_EXPERIMENT_DIR_BASE,
str(experiment_id),
"projects-code",
f"{tarfile_hash}.tar.gz",
)
tar_size = file_utils._get_local_file_size(temp_tar_filename)
dbfs_fuse_uri = posixpath.join("/dbfs", dbfs_path)
if not self._dbfs_path_exists(dbfs_path):
_logger.info(
f"=== Uploading project tarball (size: {tar_size} KB) to {dbfs_fuse_uri} ==="
)
self._upload_to_dbfs(temp_tar_filename, dbfs_fuse_uri)
_logger.info("=== Finished uploading project to %s ===", dbfs_fuse_uri)
else:
_logger.info("=== Project already exists in DBFS ===")
return dbfs_fuse_uri
def _run_shell_command_job(self, project_uri, command, env_vars, cluster_spec):
"""
Run the specified shell command on a Databricks cluster.
Args:
project_uri: URI of the project from which the shell command originates.
command: Shell command to run.
env_vars: Environment variables to set in the process running ``command``.
cluster_spec: Dictionary containing a `Databricks cluster specification
<https://docs.databricks.com/dev-tools/api/latest/jobs.html#clusterspec>`_
or a `Databricks new cluster specification
<https://docs.databricks.com/dev-tools/api/latest/jobs.html#jobsclusterspecnewcluster>`_
to use when launching a run. If you specify libraries, this function
will add MLflow to the library list. This function does not support
installation of conda environment libraries on the workers.
Returns:
ID of the Databricks job run. Can be used to query the run's status via the
Databricks `Runs Get <https://docs.databricks.com/api/latest/jobs.html#runs-get>`_ API.
"""
if is_release_version():
mlflow_lib = {"pypi": {"package": f"mlflow=={VERSION}"}}
else:
# When running a non-release version as the client the same version will not be
# available within Databricks.
_logger.warning(
"Your client is running a non-release version of MLflow. "
"This version is not available on the databricks runtime. "
"MLflow will fallback the MLflow version provided by the runtime. "
"This might lead to unforeseen issues. "
)
mlflow_lib = {"pypi": {"package": f"'mlflow<={VERSION}'"}}
# Check syntax of JSON - if it contains libraries and new_cluster, pull those out
if "new_cluster" in cluster_spec:
# Libraries are optional, so we don't require that this be specified
cluster_spec_libraries = cluster_spec.get("libraries", [])
libraries = (
# This is for development purposes only. If the cluster spec already includes
# an MLflow Git URI, then we don't append `mlflow_lib` to avoid having
# two different pip requirements for mlflow.
cluster_spec_libraries
if _contains_mlflow_git_uri(cluster_spec_libraries)
else cluster_spec_libraries + [mlflow_lib]
)
cluster_spec = cluster_spec["new_cluster"]
else:
libraries = [mlflow_lib]
# Make jobs API request to launch run.
req_body_json = {
"run_name": f"MLflow Run for {project_uri}",
"new_cluster": cluster_spec,
"shell_command_task": {"command": command, "env_vars": env_vars},
"libraries": libraries,
}
_logger.info("=== Submitting a run to execute the MLflow project... ===")
run_submit_res = self._jobs_runs_submit(req_body_json)
return run_submit_res["run_id"]
def run_databricks_spark_job(
self,
project_uri,
work_dir,
experiment_id,
cluster_spec,
run_id,
project_spec,
entry_point,
parameters,
):
from mlflow.utils.file_utils import get_or_create_tmp_dir
dbfs_fuse_uri = self._upload_project_to_dbfs(work_dir, experiment_id)
env_vars = {
MLFLOW_TRACKING_URI.name: "databricks",
MLFLOW_EXPERIMENT_ID.name: experiment_id,
MLFLOW_RUN_ID.name: run_id,
}
_logger.info(
"=== Running databricks spark job of project %s on Databricks ===", project_uri
)
if project_spec.databricks_spark_job_spec.python_file is not None:
if entry_point != "main" or parameters:
_logger.warning(
"You configured Databricks spark job python_file and parameters within the "
"MLProject file's databricks_spark_job section. '--entry-point' "
"and '--param-list' arguments specified in the 'mlflow run' command are "
"ignored."
)
job_code_file = project_spec.databricks_spark_job_spec.python_file
job_parameters = project_spec.databricks_spark_job_spec.parameters
else:
command = project_spec.get_entry_point(entry_point).compute_command(parameters, None)
command_splits = command.split(" ")
if command_splits[0] != "python":
raise MlflowException(
"Databricks spark job only supports 'python' command in the entry point "
"configuration."
)
job_code_file = command_splits[1]
job_parameters = command_splits[2:]
tmp_dir = Path(get_or_create_tmp_dir())
origin_job_code = (Path(work_dir) / job_code_file).read_text()
job_code_filename = f"{uuid.uuid4().hex}.py"
new_job_code_file = tmp_dir / job_code_filename
project_dir, extracting_tar_command = _get_project_dir_and_extracting_tar_command(
dbfs_fuse_uri
)
env_vars_str = json.dumps(env_vars)
new_job_code_file.write_text(
f"""
import os
import subprocess
os.environ.update({env_vars_str})
extracting_tar_command = \"\"\"
{extracting_tar_command}
\"\"\"
subprocess.check_call(extracting_tar_command, shell=True)
os.chdir('{project_dir}')
{origin_job_code}
"""
)
dbfs_job_code_file_path = posixpath.join(
DBFS_EXPERIMENT_DIR_BASE,
str(experiment_id),
"projects-code",
job_code_filename,
)
job_code_file_dbfs_fuse_uri = posixpath.join("/dbfs", dbfs_job_code_file_path)
if not self._dbfs_path_exists(dbfs_job_code_file_path):
self._upload_to_dbfs(str(new_job_code_file), job_code_file_dbfs_fuse_uri)
libraries_config = [
{"pypi": {"package": python_lib}}
for python_lib in project_spec.databricks_spark_job_spec.python_libraries
]
# Make Databricks Spark jobs API request to launch run.
req_body_json = {
"run_name": f"MLflow Run for {project_uri}",
"new_cluster": cluster_spec,
"libraries": libraries_config,
"spark_python_task": {
"python_file": f"dbfs:/{dbfs_job_code_file_path}",
"parameters": job_parameters,
},
}
_logger.info("=== Submitting a run to execute the MLflow project... ===")
run_submit_res = self._jobs_runs_submit(req_body_json)
return run_submit_res["run_id"]
def run_databricks(
self,
uri,
entry_point,
work_dir,
parameters,
experiment_id,
cluster_spec,
run_id,
env_manager,
):
tracking_uri = _get_tracking_uri_for_run()
dbfs_fuse_uri = self._upload_project_to_dbfs(work_dir, experiment_id)
env_vars = {
MLFLOW_TRACKING_URI.name: tracking_uri,
MLFLOW_EXPERIMENT_ID.name: experiment_id,
}
_logger.info("=== Running entry point %s of project %s on Databricks ===", entry_point, uri)
# Launch run on Databricks
command = _get_databricks_run_cmd(
dbfs_fuse_uri, run_id, entry_point, parameters, env_manager
)
return self._run_shell_command_job(uri, command, env_vars, cluster_spec)
def _get_status(self, databricks_run_id):
run_state = self.get_run_result_state(databricks_run_id)
if run_state is None:
return RunStatus.RUNNING
if run_state == "SUCCESS":
return RunStatus.FINISHED
return RunStatus.FAILED
def get_status(self, databricks_run_id):
return RunStatus.to_string(self._get_status(databricks_run_id))
def get_run_result_state(self, databricks_run_id):
"""
Get the run result state (string) of a Databricks job run.
Args:
databricks_run_id: Integer Databricks job run ID.
Returns:
`RunResultState <https://docs.databricks.com/api/latest/jobs.html#runresultstate>`_ or
None if the run is still active.
"""
res = self.jobs_runs_get(databricks_run_id)
return res["state"].get("result_state", None)
def jobs_runs_cancel(self, databricks_run_id):
response = self._databricks_api_request(
endpoint="/api/2.0/jobs/runs/cancel", method="POST", json={"run_id": databricks_run_id}
)
return json.loads(response.text)
def jobs_runs_get(self, databricks_run_id):
response = self._databricks_api_request(
endpoint="/api/2.0/jobs/runs/get", method="GET", params={"run_id": databricks_run_id}
)
return json.loads(response.text)
def _get_tracking_uri_for_run():
uri = tracking.get_tracking_uri()
if uri.startswith("databricks"):
return "databricks"
return uri
def _get_cluster_mlflow_run_cmd(project_dir, run_id, entry_point, parameters, env_manager):
cmd = [
"mlflow",
"run",
project_dir,
"--entry-point",
entry_point,
]
if env_manager:
cmd += ["--env-manager", env_manager]
mlflow_run_arr = list(map(quote, cmd))
if run_id:
mlflow_run_arr.extend(["-c", json.dumps({MLFLOW_LOCAL_BACKEND_RUN_ID_CONFIG: run_id})])
if parameters:
for key, value in parameters.items():
mlflow_run_arr.extend(["-P", f"{key}={value}"])
return mlflow_run_arr
def _get_project_dir_and_extracting_tar_command(dbfs_fuse_tar_uri):
# Strip ".gz" and ".tar" file extensions from base filename of the tarfile
tar_hash = posixpath.splitext(posixpath.splitext(posixpath.basename(dbfs_fuse_tar_uri))[0])[0]
container_tar_path = posixpath.abspath(
posixpath.join(DB_TARFILE_BASE, posixpath.basename(dbfs_fuse_tar_uri))
)
project_dir = posixpath.join(DB_PROJECTS_BASE, tar_hash)
command = textwrap.dedent(
f"""
# Make local directories in the container into which to copy/extract the tarred project
mkdir -p {DB_TARFILE_BASE} {DB_PROJECTS_BASE} &&
# Rsync from DBFS FUSE to avoid copying archive into local filesystem if it already exists
rsync -a -v --ignore-existing {dbfs_fuse_tar_uri} {DB_TARFILE_BASE} &&
# Extract project into a temporary directory. We don't extract directly into the desired
# directory as tar extraction isn't guaranteed to be atomic
cd $(mktemp -d) &&
tar --no-same-owner -xzvf {container_tar_path} &&
# Atomically move the extracted project into the desired directory
mv -T {DB_TARFILE_ARCHIVE_NAME} {project_dir}"""
)
return project_dir, command
def _get_databricks_run_cmd(dbfs_fuse_tar_uri, run_id, entry_point, parameters, env_manager):
"""
Generate MLflow CLI command to run on Databricks cluster in order to launch a run on Databricks.
"""
project_dir, extracting_tar_command = _get_project_dir_and_extracting_tar_command(
dbfs_fuse_tar_uri
)
mlflow_run_arr = _get_cluster_mlflow_run_cmd(
project_dir,
run_id,
entry_point,
parameters,
env_manager,
)
mlflow_run_cmd = " ".join([quote(elem) for elem in mlflow_run_arr])
shell_command = textwrap.dedent(
f"""
export PATH=$PATH:$DB_HOME/python/bin &&
mlflow --version &&
{extracting_tar_command} &&
{mlflow_run_cmd}
"""
)
return ["bash", "-c", shell_command]
def run_databricks(
remote_run, uri, entry_point, work_dir, parameters, experiment_id, cluster_spec, env_manager
):
"""
Run the project at the specified URI on Databricks, returning a ``SubmittedRun`` that can be
used to query the run's status or wait for the resulting Databricks Job run to terminate.
"""
run_id = remote_run.info.run_id
db_job_runner = DatabricksJobRunner(databricks_profile_uri=tracking.get_tracking_uri())
db_run_id = db_job_runner.run_databricks(
uri, entry_point, work_dir, parameters, experiment_id, cluster_spec, run_id, env_manager
)
submitted_run = DatabricksSubmittedRun(db_run_id, run_id, db_job_runner)
submitted_run._print_description_and_log_tags()
return submitted_run
def run_databricks_spark_job(
remote_run,
uri,
work_dir,
experiment_id,
cluster_spec,
project_spec,
entry_point,
parameters,
):
run_id = remote_run.info.run_id
db_job_runner = DatabricksJobRunner(databricks_profile_uri=tracking.get_tracking_uri())
db_run_id = db_job_runner.run_databricks_spark_job(
uri,
work_dir,
experiment_id,
cluster_spec,
run_id,
project_spec,
entry_point,
parameters,
)
submitted_run = DatabricksSubmittedRun(db_run_id, run_id, db_job_runner)
submitted_run._print_description_and_log_tags()
return submitted_run
|
DatabricksJobRunner
|
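A sketch of how this runner is typically driven, using only the methods defined above; the profile URI, project URI, and cluster-spec values are illustrative:

runner = DatabricksJobRunner(databricks_profile_uri="databricks://my-profile")
db_run_id = runner.run_databricks(
    uri="https://github.com/org/mlflow-project",  # hypothetical project URI
    entry_point="main",
    work_dir="/tmp/project-checkout",
    parameters={"alpha": "0.5"},
    experiment_id="0",
    cluster_spec={"spark_version": "13.3.x-scala2.12", "num_workers": 2},
    run_id="abc123",
    env_manager="virtualenv",
)
print(runner.get_status(db_run_id))  # "RUNNING" until the job terminates

Note that run_databricks uploads the project tarball to DBFS content-addressed by its SHA-256, so resubmitting an unchanged project skips the upload.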
python
|
getsentry__sentry
|
src/sentry/services/eventstore/reprocessing/redis.py
|
{
"start": 721,
"end": 7083
}
|
class ____(ReprocessingStore):
def __init__(self, **options: dict[str, Any]) -> None:
cluster = options.pop("cluster", "default")
assert isinstance(cluster, str), "cluster option must be a string"
self.redis = redis_clusters.get(cluster)
def event_count_for_hashes(
self, project_id: int, group_id: int, old_primary_hashes: set[str]
) -> int:
# Events for a group are split and bucketed by their primary hashes. If flushing is to be
# performed on a per-group basis, the event count needs to be summed up across all buckets
# belonging to a single group.
event_count = 0
for primary_hash in old_primary_hashes:
key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
event_count += self.redis.llen(key)
return event_count
def pop_batched_events(
self, project_id: int, group_id: int, primary_hash: str
) -> tuple[list[str], datetime | None, datetime | None]:
"""
For redis key pointing to a list of buffered events structured like
`event id;datetime of event`, returns a list of event IDs, the
earliest datetime, and the latest datetime.
"""
key = _get_old_primary_hash_subset_key(project_id, group_id, primary_hash)
return self.pop_batched_events_by_key(key)
def pop_batched_events_by_key(
self, key: str
) -> tuple[list[str], datetime | None, datetime | None]:
event_ids_batch = []
min_datetime: datetime | None = None
max_datetime: datetime | None = None
for row in self.redis.lrange(key, 0, -1):
datetime_raw, event_id = row.split(";")
parsed_datetime = to_datetime(float(datetime_raw))
assert parsed_datetime is not None
if min_datetime is None or parsed_datetime < min_datetime:
min_datetime = parsed_datetime
if max_datetime is None or parsed_datetime > max_datetime:
max_datetime = parsed_datetime
event_ids_batch.append(event_id)
self.redis.delete(key)
return event_ids_batch, min_datetime, max_datetime
def get_old_primary_hashes(self, project_id: int, group_id: int) -> set[Any]:
# This is a meta key that contains old primary hashes. These hashes are then
# combined with other values to construct a key that points to a list of
# tombstonable events.
primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
return self.redis.smembers(primary_hash_set_key)
def expire_hash(
self,
project_id: int,
group_id: int,
event_id: str,
date_val: datetime,
old_primary_hash: str,
) -> None:
event_key = _get_old_primary_hash_subset_key(project_id, group_id, old_primary_hash)
self.redis.lpush(event_key, f"{date_val.timestamp()};{event_id}")
self.redis.expire(event_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
def add_hash(self, project_id: int, group_id: int, hash: str) -> None:
primary_hash_set_key = f"re2:tombstone-primary-hashes:{project_id}:{group_id}"
self.redis.sadd(primary_hash_set_key, hash)
self.redis.expire(primary_hash_set_key, settings.SENTRY_REPROCESSING_TOMBSTONES_TTL)
def get_remaining_event_count(
self, project_id: int, old_group_id: int, datetime_to_event: list[tuple[datetime, str]]
) -> int:
# We explicitly cluster by only project_id and group_id here such that our
# RENAME command later succeeds.
key = _get_remaining_key(project_id, old_group_id)
if datetime_to_event:
llen = self.redis.lpush(
key,
*(f"{datetime.timestamp()};{event_id}" for datetime, event_id in datetime_to_event),
)
self.redis.expire(key, settings.SENTRY_REPROCESSING_SYNC_TTL)
else:
llen = self.redis.llen(key)
return llen
def rename_key(self, project_id: int, old_group_id: int) -> str | None:
key = _get_remaining_key(project_id, old_group_id)
new_key = f"{key}:{uuid.uuid4().hex}"
try:
# Rename `key` to a new temp key that is passed to a task. We
# use `renamenx` instead of `rename` only to detect UUID collisions.
assert self.redis.renamenx(key, new_key), "UUID collision for new_key?"
return new_key
except redis.exceptions.ResponseError:
# `key` does not exist in Redis. `ResponseError` is a bit too broad
# but it seems we'd have to do string matching on error message
# otherwise.
return None
def mark_event_reprocessed(self, group_id: int, num_events: int) -> bool:
# refresh the TTL of the metadata:
pipe = self.redis.pipeline()
pipe.expire(
name=_get_info_reprocessed_key(group_id), time=settings.SENTRY_REPROCESSING_SYNC_TTL
)
sync_counter_key = _get_sync_counter_key(group_id)
pipe.expire(name=sync_counter_key, time=settings.SENTRY_REPROCESSING_SYNC_TTL)
pipe.decrby(name=sync_counter_key, amount=num_events)
new_decremented_value = pipe.execute()[2]
return new_decremented_value == 0
def start_reprocessing(
self, group_id: int, date_created: Any, sync_count: int, event_count: int
) -> None:
self.redis.setex(
_get_sync_counter_key(group_id), settings.SENTRY_REPROCESSING_SYNC_TTL, sync_count
)
self.redis.setex(
_get_info_reprocessed_key(group_id),
settings.SENTRY_REPROCESSING_SYNC_TTL,
orjson.dumps(
{"dateCreated": date_created, "syncCount": sync_count, "totalEvents": event_count},
).decode(),
)
def get_pending(self, group_id: int) -> tuple[str | None, int]:
pending_key = _get_sync_counter_key(group_id)
pending = self.redis.get(pending_key)
ttl = self.redis.ttl(pending_key)
return pending, ttl
def get_progress(self, group_id: int) -> dict[str, Any] | None:
info = self.redis.get(_get_info_reprocessed_key(group_id))
if info is None:
return None
return orjson.loads(info)
|
RedisReprocessingStore
|
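Each buffered event is stored as a "timestamp;event_id" string (see expire_hash and pop_batched_events_by_key). A self-contained sketch of that round-trip with stdlib datetimes; the store itself parses via Sentry's to_datetime helper:

from datetime import datetime, timezone

def encode(event_id: str, when: datetime) -> str:
    # Mirrors expire_hash: one string payload, split on ';' when read back.
    return f"{when.timestamp()};{event_id}"

def decode(row: str) -> tuple[str, datetime]:
    raw_ts, event_id = row.split(";")
    return event_id, datetime.fromtimestamp(float(raw_ts), tz=timezone.utc)

row = encode("deadbeef", datetime(2024, 1, 1, tzinfo=timezone.utc))
assert decode(row) == ("deadbeef", datetime(2024, 1, 1, tzinfo=timezone.utc))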
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/definitions/_internal/expandinput.py
|
{
"start": 1723,
"end": 2995
}
|
class ____(RuntimeError):
"""
Raised when ``get_map_lengths`` cannot populate all mapping metadata.
This generally happens because not all upstream tasks have finished
when the function is called.
"""
def __init__(self, missing: set[str]) -> None:
self.missing = missing
def __str__(self) -> str:
keys = ", ".join(repr(k) for k in sorted(self.missing))
return f"Failed to populate all mapping metadata; missing: {keys}"
# To replace tedious isinstance() checks.
def is_mappable(v: Any) -> TypeGuard[OperatorExpandArgument]:
from airflow.sdk.definitions.xcom_arg import XComArg
return isinstance(v, (MappedArgument, XComArg, Mapping, Sequence)) and not isinstance(v, str)
# To replace tedious isinstance() checks.
def _is_parse_time_mappable(v: OperatorExpandArgument) -> TypeGuard[Mapping | Sequence]:
from airflow.sdk.definitions.xcom_arg import XComArg
return not isinstance(v, (MappedArgument, XComArg))
# To replace tedious isinstance() checks.
def _needs_run_time_resolution(v: OperatorExpandArgument) -> TypeGuard[MappedArgument | XComArg]:
from airflow.sdk.definitions.xcom_arg import XComArg
return isinstance(v, (MappedArgument, XComArg))
@attrs.define(kw_only=True)
|
NotFullyPopulated
|
python
|
numba__numba
|
numba/tests/test_extending.py
|
{
"start": 47813,
"end": 53063
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
many = base_dummy_type_factory("mydummy2")
self.DynTypeType, self.DynType, self.dyn_type_type = many
self.dyn_type = self.DynType()
def test_unboxer_basic(self):
# Implements an unboxer on DynType that calls an intrinsic into the
# unboxer code.
magic_token = 0xCAFE
magic_offset = 123
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
# proof that this is a jit'ed context by calling jit only
# intrinsic
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
out = passthru(self.dyn_type)
self.assertEqual(out, magic_token + magic_offset)
def test_unboxer_raise(self):
# Testing exception raising in jitcode called from unboxing.
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
# The unboxer that calls some jitcode
def bridge(x):
if x > 0:
raise ValueError("cannot be x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.voidptr, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return NativeValue(res, is_error=is_error)
@box(self.DynTypeType)
def boxer(typ, val, c):
# The boxer that returns an integer representation
res = c.builder.ptrtoint(val, cgutils.intp_t)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot be x > 0", str(raises.exception),
)
def test_boxer(self):
# Call jitcode inside the boxer
magic_token = 0xCAFE
magic_offset = 312
@intrinsic
def my_intrinsic(typingctx, val):
# An intrinsic that returns `val + magic_offset`
def impl(context, builder, sig, args):
[val] = args
return builder.add(val, val.type(magic_offset))
sig = signature(val, val)
return sig, impl
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
# Note: this doesn't do proper error handling
def bridge(x):
return my_intrinsic(x)
args = [c.context.get_constant(types.intp, magic_token)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
return c.pyapi.long_from_ssize_t(res)
@njit
def passthru(x):
return x
r = passthru(self.dyn_type)
self.assertEqual(r, magic_token + magic_offset)
def test_boxer_raise(self):
# Call jitcode inside the boxer
@unbox(self.DynTypeType)
def unboxer(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@box(self.DynTypeType)
def boxer(typ, val, c):
def bridge(x):
if x > 0:
raise ValueError("cannot do x > 0")
return x
args = [c.context.get_constant(types.intp, 1)]
sig = signature(types.intp, types.intp)
is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
# The error handling
retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True)
with c.builder.if_then(c.builder.not_(is_error)):
obj = c.pyapi.long_from_ssize_t(res)
c.builder.store(obj, retval)
return c.builder.load(retval)
@njit
def passthru(x):
return x
with self.assertRaises(ValueError) as raises:
passthru(self.dyn_type)
self.assertIn(
"cannot do x > 0", str(raises.exception),
)
def with_objmode_cache_ov_example(x):
# This is the function stub for overloading inside
# TestCachingOverloadObjmode.test_caching_overload_objmode
pass
@skip_if_typeguard
|
TestBoxingCallingJIT
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 108. 单词演变/Solution.py
|
{
"start": 0,
"end": 816
}
|
class ____:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
words = set(wordList)
q = deque([beginWord])
ans = 1
while q:
n = len(q)
for _ in range(n):
s = q.popleft()
s = list(s)
for i in range(len(s)):
ch = s[i]
for j in range(26):
s[i] = chr(ord('a') + j)
t = ''.join(s)
if t not in words:
continue
if t == endWord:
return ans + 1
q.append(t)
words.remove(t)
s[i] = ch
ans += 1
return 0
|
Solution
|
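The BFS counts the words on the path, so the classic LeetCode 127 example (hit -> hot -> dot -> dog -> cog) yields 5. A quick check, assuming the module's own imports (deque, List):

assert Solution().ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]) == 5
# without "cog" in the word list there is no ladder:
assert Solution().ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log"]) == 0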
python
|
fluentpython__example-code-2e
|
24-class-metaprog/slots/slots_timing.py
|
{
"start": 612,
"end": 711
}
|
class ____(type):
def __prepare__(name, bases):
return dict(__slots__=('x', 'y'))
|
Correct2
|
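Because __prepare__ seeds the class namespace with __slots__ before the body runs, any class built with this metaclass gets fixed slots. A hypothetical illustration:

class Point(metaclass=Correct2):
    pass

p = Point()
p.x, p.y = 1, 2   # allowed: 'x' and 'y' are slots
try:
    p.z = 3       # rejected: slotted instances have no __dict__
except AttributeError as exc:
    print(exc)    # no such slot on Point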
python
|
facebook__pyre-check
|
client/command_arguments.py
|
{
"start": 8046,
"end": 8791
}
|
class ____:
working_directory: Path
annotate_attributes: bool = False
annotate_from_existing_stubs: bool = False
debug_infer: bool = False
quote_annotations: bool = False
dequalify: bool = False
enable_memory_profiling: bool = False
enable_profiling: bool = False
log_identifier: Optional[str] = None
logging_sections: Optional[str] = None
use_future_annotations: bool = False
in_place: bool = False
simple_annotations: bool = False
paths_to_modify: Optional[Set[Path]] = None
print_only: bool = False
read_stdin: bool = False
sequential: bool = False
kill_buck_after_build: bool = False
number_of_buck_threads: Optional[int] = None
@dataclass(frozen=True)
|
InferArguments
|
python
|
walkccc__LeetCode
|
solutions/2381. Shifting Letters II/2381.py
|
{
"start": 0,
"end": 471
}
|
class ____:
def shiftingLetters(self, s: str, shifts: list[list[int]]) -> str:
ans = []
currShift = 0
line = [0] * (len(s) + 1)
for start, end, direction in shifts:
diff = 1 if direction else -1
line[start] += diff
line[end + 1] -= diff
for i, c in enumerate(s):
currShift = (currShift + line[i]) % 26
num = (ord(c) - ord('a') + currShift + 26) % 26
ans.append(chr(ord('a') + num))
return ''.join(ans)
|
Solution
|
python
|
PyCQA__pylint
|
tests/functional/g/generic_alias/generic_alias_typing.py
|
{
"start": 3660,
"end": 3713
}
|
class ____(A[str]): # [unsubscriptable-object]
...
|
B
|
python
|
gevent__gevent
|
src/gevent/tests/test__pywsgi.py
|
{
"start": 46958,
"end": 47417
}
|
class ____(TestCase):
@staticmethod
def application(env, start_response):
start_response('304 Not modified', [])
yield b""
yield b""
def test_err(self):
with self.makefile() as fd:
fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
read_http(fd, code=304, body='', chunks=False)
garbage = fd.read()
self.assertEqual(garbage, b"")
|
TestEmptyYield304
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_methods_invocations.py
|
{
"start": 407734,
"end": 407886
}
|
class ____(enum.Enum):
TensorList = enum.auto()
ScalarList = enum.auto()
Scalar = enum.auto()
Tensor = enum.auto()
|
ForeachRightmostArgType
|
python
|
pypa__warehouse
|
warehouse/admin/services.py
|
{
"start": 397,
"end": 1580
}
|
class ____:
def __init__(self, base):
# This class should not be used in production, it's trivial for it to
# be used to read arbitrary files from the disk. It is intended ONLY
# for local development with trusted users. To make this clear, we'll
# raise a warning.
warnings.warn(
"LocalSponsorLogoStorage is intended only for use in development, you "
"should not use it in production due to the lack of safe guards "
"for safely locating files on disk.",
InsecureStorageWarning,
)
self.base = base
@classmethod
def create_service(cls, context, request):
return cls(request.registry.settings["sponsorlogos.path"])
def store(self, path, file_path, content_type=None, *, meta=None):
destination = os.path.join(self.base, path)
os.makedirs(os.path.dirname(destination), exist_ok=True)
with open(destination, "wb") as dest_fp:
with open(file_path, "rb") as src_fp:
dest_fp.write(src_fp.read())
dest_fp.flush()
return f"http://files:9001/sponsorlogos/{path}"
|
LocalSponsorLogoStorage
|
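A sketch of the store() round-trip under an assumed base directory (all paths illustrative; constructing the class emits InsecureStorageWarning by design):

storage = LocalSponsorLogoStorage("/tmp/sponsorlogos")
url = storage.store("acme/logo.png", "/path/to/local/logo.png")
assert url == "http://files:9001/sponsorlogos/acme/logo.png"
# the bytes are now at /tmp/sponsorlogos/acme/logo.png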
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 52748,
"end": 54126
}
|
class ____(ModelTestCase):
requires = [User]
@skip_if(IS_MYSQL) # mysql can't do anything normally.
def test_weird_aliases(self):
User.create(username='huey')
def assertAlias(s, expected):
query = User.select(s).dicts()
row = query[0]
self.assertEqual(list(row)[0], expected)
# When we explicitly provide an alias, use that.
assertAlias(User.username.alias('"username"'), '"username"')
assertAlias(User.username.alias('(username)'), '(username)')
assertAlias(User.username.alias('user(name)'), 'user(name)')
assertAlias(User.username.alias('(username"'), '(username"')
assertAlias(User.username.alias('"username)'), '"username)')
assertAlias(fn.LOWER(User.username).alias('user (name)'), 'user (name)')
# Here peewee cannot tell that an alias was given, so it will attempt
# to clean-up the column name returned by the cursor description.
assertAlias(SQL('"t1"."username" AS "user name"'), 'user name')
assertAlias(SQL('"t1"."username" AS "user (name)"'), 'user (name')
assertAlias(SQL('"t1"."username" AS "(username)"'), 'username')
assertAlias(SQL('"t1"."username" AS "x.y.(username)"'), 'username')
if IS_SQLITE:
assertAlias(SQL('LOWER("t1"."username")'), 'username')
|
TestWeirdAliases
|
python
|
great-expectations__great_expectations
|
tests/metrics/test_metric.py
|
{
"start": 768,
"end": 929
}
|
class ____(ColumnMetric[ColumnValuesAboveResult]):
name = FULLY_QUALIFIED_METRIC_NAME
min_value: Comparable
strict_min: bool = False
|
ColumnValuesAbove
|
python
|
getsentry__sentry
|
src/sentry/ingest/transaction_clusterer/rules.py
|
{
"start": 726,
"end": 876
}
|
class ____(Protocol):
def read(self, project: Project) -> RuleSet: ...
def write(self, project: Project, rules: RuleSet) -> None: ...
|
RuleStore
|
python
|
joke2k__faker
|
tests/providers/test_ssn.py
|
{
"start": 35061,
"end": 36523
}
|
class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("no_NO")
Faker.seed(0)
def test_no_NO_ssn_checksum(self):
assert no_checksum([0, 1, 0, 2, 0, 3, 9, 8, 7], no_Provider.scale1) == 6
assert no_checksum([0, 1, 0, 2, 0, 3, 9, 8, 7, 6], no_Provider.scale2) == 7
def test_no_NO_ssn(self):
for _ in range(100):
ssn = self.fake.ssn()
assert ssn.isdigit()
assert len(ssn) == 11
def test_no_NO_ssn_dob_passed(self):
test_data = [("20010203", "030201"), ("19991231", "311299")]
for date_of_birth, expected_dob_part in test_data:
ssn = self.fake.ssn(dob=date_of_birth)
assert ssn[:6] == expected_dob_part
def test_no_NO_ssn_invalid_dob_passed(self):
with pytest.raises(ValueError):
self.fake.ssn(dob="010401")
with pytest.raises(ValueError):
self.fake.ssn(dob="hello_world")
with pytest.raises(ValueError):
self.fake.ssn(dob="001301")
def test_no_NO_ssn_gender_passed(self):
# Females have even number at index 8
ssn = self.fake.ssn(gender="F")
assert int(ssn[8]) % 2 == 0
# Males have odd number at index 8
ssn = self.fake.ssn(gender="M")
assert int(ssn[8]) % 2 == 1
def test_no_NO_ssn_invalid_gender_passed(self):
with pytest.raises(ValueError):
self.fake.ssn(gender="A")
|
TestNoNO
|
python
|
bottlepy__bottle
|
bottle.py
|
{
"start": 139131,
"end": 139428
}
|
class ____(ServerAdapter):
""" Untested. """
def run(self, handler):
depr(0, 13, "Diesel is not tested or supported and will be removed.")
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
|
DieselServer
|
python
|
ray-project__ray
|
python/ray/dashboard/modules/job/tests/test_cli.py
|
{
"start": 18048,
"end": 18652
}
|
class ____:
def test_address(self, mock_sdk_client):
_job_cli_group_test_address(mock_sdk_client, "status", "fake_job_id")
def test_status(self, mock_sdk_client):
runner = CliRunner()
mock_client_instance = mock_sdk_client.return_value
with set_env_var("RAY_ADDRESS", "env_addr"):
result = runner.invoke(job_cli_group, ["status", "job_id"])
check_exit_code(result, 0)
mock_client_instance.get_job_info.assert_called_with("job_id")
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
TestStatus
|
python
|
pydantic__pydantic
|
pydantic/functional_validators.py
|
{
"start": 18831,
"end": 19218
}
|
class ____(core_schema.ValidatorFunctionWrapHandler, Protocol[_ModelTypeCo]):
"""`@model_validator` decorated function handler argument type. This is used when `mode='wrap'`."""
def __call__( # noqa: D102
self,
value: Any,
outer_location: str | int | None = None,
/,
) -> _ModelTypeCo: # pragma: no cover
...
|
ModelWrapValidatorHandler
|
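A minimal sketch of a mode='wrap' model validator whose handler parameter is typed with this protocol; the model and field names are illustrative:

from pydantic import BaseModel, model_validator

class Event(BaseModel):
    name: str

    @model_validator(mode="wrap")
    @classmethod
    def strip_name(cls, data, handler):
        # 'handler' runs the inner validation and returns the built model.
        model = handler(data)
        model.name = model.name.strip()
        return model

assert Event(name="  launch  ").name == "launch"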
python
|
jazzband__django-model-utils
|
tests/models.py
|
{
"start": 3928,
"end": 4068
}
|
class ____(models.Model):
name = models.CharField(max_length=25)
name_changed = MonitorField(monitor="name", when=[])
|
MonitorWhenEmpty
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/externaltest/package.py
|
{
"start": 217,
"end": 544
}
|
class ____(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/test-1.0.tar.gz"
version("1.0", md5="1234567890abcdef1234567890abcdef")
depends_on("stuff")
depends_on("externaltool")
def install(self, spec, prefix):
touch(join_path(prefix, "an_installation_file"))
|
Externaltest
|
python
|
Textualize__textual
|
src/textual/demo/widgets.py
|
{
"start": 7491,
"end": 11514
}
|
class ____(containers.VerticalGroup):
"""Demonstrates Logs."""
DEFAULT_CLASSES = "column"
LOGS_MD = """\
## Logs and Rich Logs
A Log widget to efficiently display a scrolling view of text, with optional highlighting.
And a RichLog widget to display Rich renderables.
"""
DEFAULT_CSS = """
Logs {
Log, RichLog {
width: 1fr;
height: 20;
padding: 1;
overflow-x: auto;
border: wide transparent;
&:focus {
border: wide $border;
}
}
TabPane { padding: 0; }
TabbedContent.-maximized {
height: 1fr;
Log, RichLog { height: 1fr; }
}
}
"""
TEXT = """I must not fear.
Fear is the mind-killer.
Fear is the little-death that brings total obliteration.
I will face my fear.
I will permit it to pass over me and through me.
And when it has gone past, I will turn the inner eye to see its path.
Where the fear has gone there will be nothing. Only I will remain.""".splitlines()
CSV = """lane,swimmer,country,time
4,Joseph Schooling,Singapore,50.39
2,Michael Phelps,United States,51.14
5,Chad le Clos,South Africa,51.14
6,László Cseh,Hungary,51.14
3,Li Zhuhao,China,51.26
8,Mehdy Metella,France,51.58
7,Tom Shields,United States,51.73
1,Aleksandr Sadovnikov,Russia,51.84"""
CSV_ROWS = list(csv.reader(io.StringIO(CSV)))
CODE = '''\
def loop_first_last(values: Iterable[T]) -> Iterable[tuple[bool, bool, T]]:
"""Iterate and generate a tuple with a flag for first and last value."""
iter_values = iter(values)
try:
previous_value = next(iter_values)
except StopIteration:
return
first = True
for value in iter_values:
yield first, False, previous_value
first = False
previous_value = value
yield first, True, previous_value\
'''
log_count = var(0)
rich_log_count = var(0)
def compose(self) -> ComposeResult:
yield Markdown(self.LOGS_MD)
with TabbedContent("Log", "RichLog"):
yield Log(max_lines=10_000, highlight=True)
yield RichLog(max_lines=10_000)
def on_mount(self) -> None:
log = self.query_one(Log)
rich_log = self.query_one(RichLog)
log.write("I am a Log Widget")
rich_log.write("I am a Rich Log Widget")
self.set_interval(0.25, self.update_log)
self.set_interval(1, self.update_rich_log)
def update_log(self) -> None:
"""Update the Log with new content."""
log = self.query_one(Log)
if self.is_scrolling:
return
if not self.app.screen.can_view_entire(log) and not log.is_in_maximized_view:
return
self.log_count += 1
line_no = self.log_count % len(self.TEXT)
line = self.TEXT[line_no]
log.write_line(f"fear[{line_no}] = {line!r}")
def update_rich_log(self) -> None:
"""Update the Rich Log with content."""
rich_log = self.query_one(RichLog)
if self.is_scrolling:
return
if (
not self.app.screen.can_view_entire(rich_log)
and not rich_log.is_in_maximized_view
):
return
self.rich_log_count += 1
log_option = self.rich_log_count % 3
if log_option == 0:
rich_log.write("Syntax highlighted code", animate=True)
rich_log.write(Syntax(self.CODE, lexer="python"), animate=True)
elif log_option == 1:
rich_log.write("A Rich Table", animate=True)
table = Table(*self.CSV_ROWS[0])
for row in self.CSV_ROWS[1:]:
table.add_row(*row)
rich_log.write(table, animate=True)
elif log_option == 2:
rich_log.write("A Rich Traceback", animate=True)
try:
1 / 0
except Exception:
traceback = Traceback()
rich_log.write(traceback, animate=True)
|
Logs
|
python
|
pytorch__pytorch
|
tools/test/heuristics/test_interface.py
|
{
"start": 10909,
"end": 14726
}
|
class ____(TestTD):
def check(
self,
tests: list[str],
test_prioritizations: list[dict[TestRun, float]],
expected: dict[TestRun, float],
) -> None:
aggregated_heuristics = interface.AggregatedHeuristics(tests)
for i, test_prioritization in enumerate(test_prioritizations):
heuristic = self.make_heuristic(f"H{i}")
aggregated_heuristics.add_heuristic_results(
heuristic(), interface.TestPrioritizations(tests, test_prioritization)
)
final_prioritizations = aggregated_heuristics.get_aggregated_priorities()
self.assert_test_scores_almost_equal(
final_prioritizations._test_scores,
expected,
)
def test_get_aggregated_priorities_mix_1(self) -> None:
tests = ["test_a", "test_b", "test_c"]
self.check(
tests,
[
{TestRun("test_a"): 0.5},
{TestRun("test_a::TestA"): 0.25},
{TestRun("test_c"): 0.8},
],
{
TestRun("test_a", excluded=["TestA"]): 0.5,
TestRun("test_a", included=["TestA"]): 0.75,
TestRun("test_b"): 0.0,
TestRun("test_c"): 0.8,
},
)
def test_get_aggregated_priorities_mix_2(self) -> None:
tests = ["test_a", "test_b", "test_c"]
self.check(
tests,
[
{
TestRun("test_a", included=["TestC"]): 0.5,
TestRun("test_b"): 0.25,
TestRun("test_a", excluded=["TestA", "TestB", "TestC"]): 0.8,
},
{
TestRun("test_a::TestA"): 0.25,
TestRun("test_b::TestB"): 0.5,
TestRun("test_a::TestB"): 0.75,
TestRun("test_a", excluded=["TestA", "TestB"]): 0.8,
},
{TestRun("test_c"): 0.8},
],
{
TestRun("test_a", included=["TestA"]): 0.25,
TestRun("test_a", included=["TestB"]): 0.75,
TestRun("test_a", included=["TestC"]): 1.3,
TestRun("test_a", excluded=["TestA", "TestB", "TestC"]): 1.6,
TestRun("test_b", included=["TestB"]): 0.75,
TestRun("test_b", excluded=["TestB"]): 0.25,
TestRun("test_c"): 0.8,
},
)
def test_get_aggregated_priorities_mix_3(self) -> None:
tests = ["test_a"]
self.check(
tests,
[
{
TestRun("test_a", included=["TestA"]): 0.1,
TestRun("test_a", included=["TestC"]): 0.1,
TestRun("test_a", excluded=["TestA", "TestB", "TestC"]): 0.1,
},
{
TestRun("test_a", excluded=["TestD"]): 0.1,
},
{
TestRun("test_a", included=["TestC"]): 0.1,
},
{
TestRun("test_a", included=["TestB", "TestC"]): 0.1,
},
{
TestRun("test_a", included=["TestC"]): 0.1,
TestRun("test_a", included=["TestD"]): 0.1,
},
{
TestRun("test_a"): 0.1,
},
],
{
TestRun("test_a", included=["TestA"]): 0.3,
TestRun("test_a", included=["TestB"]): 0.3,
TestRun("test_a", included=["TestC"]): 0.6,
TestRun("test_a", included=["TestD"]): 0.3,
TestRun("test_a", excluded=["TestA", "TestB", "TestC", "TestD"]): 0.3,
},
)
|
TestAggregatedHeuristics
|
python
|
mlflow__mlflow
|
mlflow/store/tracking/dbmodels/models.py
|
{
"start": 22556,
"end": 25472
}
|
class ____(Base):
__tablename__ = "trace_info"
request_id = Column(String(50), nullable=False)
"""
Trace ID: `String` (limit 50 characters). *Primary Key* for ``trace_info`` table.
Named as "trace_id" in V3 format.
"""
experiment_id = Column(Integer, ForeignKey("experiments.experiment_id"), nullable=False)
"""
Experiment ID to which this trace belongs: *Foreign Key* into ``experiments`` table.
"""
timestamp_ms = Column(BigInteger, nullable=False)
"""
Start time of the trace, in milliseconds. Named as "request_time" in V3 format.
"""
execution_time_ms = Column(BigInteger, nullable=True)
"""
Duration of the trace, in milliseconds. Could be *null* if the trace is still in progress
or not ended correctly for some reason. Named as "execution_duration" in V3 format.
"""
status = Column(String(50), nullable=False)
"""
State of the trace. The values are defined in
:py:class:`mlflow.entities.trace_status.TraceStatus` enum but we don't enforce
constraint at DB level. Named as "state" in V3 format.
"""
client_request_id = Column(String(50), nullable=True)
"""
Client request ID: `String` (limit 50 characters). Could be *null*. Newly added in V3 format.
"""
request_preview = Column(String(1000), nullable=True)
"""
Request preview: `String` (limit 1000 characters). Could be *null*. Newly added in V3 format.
"""
response_preview = Column(String(1000), nullable=True)
"""
Response preview: `String` (limit 1000 characters). Could be *null*. Newly added in V3 format.
"""
__table_args__ = (
PrimaryKeyConstraint("request_id", name="trace_info_pk"),
# The most frequent query will be get all traces in an experiment sorted by timestamp desc,
# which is the default view in the UI. Also every search query should have experiment_id(s)
# in the where clause.
Index(f"index_{__tablename__}_experiment_id_timestamp_ms", "experiment_id", "timestamp_ms"),
)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
:py:class:`mlflow.entities.TraceInfo` object.
"""
return TraceInfo(
trace_id=self.request_id,
trace_location=TraceLocation.from_experiment_id(str(self.experiment_id)),
request_time=self.timestamp_ms,
execution_duration=self.execution_time_ms,
state=TraceState(self.status),
tags={t.key: t.value for t in self.tags},
trace_metadata={m.key: m.value for m in self.request_metadata},
client_request_id=self.client_request_id,
request_preview=self.request_preview,
response_preview=self.response_preview,
assessments=[a.to_mlflow_entity() for a in self.assessments],
)
|
SqlTraceInfo
|
python
|
doocs__leetcode
|
solution/0300-0399/0376.Wiggle Subsequence/Solution.py
|
{
"start": 0,
"end": 440
}
|
class ____:
def wiggleMaxLength(self, nums: List[int]) -> int:
n = len(nums)
ans = 1
f = [1] * n
g = [1] * n
for i in range(1, n):
for j in range(i):
if nums[j] < nums[i]:
f[i] = max(f[i], g[j] + 1)
elif nums[j] > nums[i]:
g[i] = max(g[i], f[j] + 1)
ans = max(ans, f[i], g[i])
return ans
|
Solution
|
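Checking the O(n^2) DP against the LeetCode 376 examples ([1,7,4,9,2,5] alternates fully; the longer array's longest wiggle subsequence has length 7):

s = Solution()
assert s.wiggleMaxLength([1, 7, 4, 9, 2, 5]) == 6
assert s.wiggleMaxLength([1, 17, 5, 10, 13, 15, 10, 5, 16, 8]) == 7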
python
|
getsentry__sentry
|
tests/sentry/releases/endpoints/test_project_release_details.py
|
{
"start": 1818,
"end": 7586
}
|
class ____(APITestCase):
def test_simple(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.put(url, {"ref": "master"})
assert response.status_code == 200, response.content
assert response.data["version"] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == "master"
def test_commits(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.put(url, data={"commits": [{"id": "a" * 40}, {"id": "b" * 40}]})
assert response.status_code == 200, (response.status_code, response.content)
rc_list = list(
ReleaseCommit.objects.filter(release=release)
.select_related("commit", "commit__author")
.order_by("order")
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
def test_activity_generation(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.put(url, data={"dateReleased": datetime.now(UTC).isoformat()})
assert response.status_code == 200, (response.status_code, response.content)
release = Release.objects.get(id=release.id)
assert release.date_released
activity = Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project, ident=release.version
)
assert activity.exists()
def test_activity_generation_long_version(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
release = Release.objects.create(organization_id=project.organization_id, version="x" * 65)
release.add_project(project)
release.add_project(project2)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.put(url, data={"dateReleased": datetime.now(UTC).isoformat()})
assert response.status_code == 200, (response.status_code, response.content)
release = Release.objects.get(id=release.id)
assert release.date_released
activity = Activity.objects.filter(
type=ActivityType.RELEASE.value, project=project, ident=release.version[:64]
)
assert activity.exists()
def test_org_auth_token(self) -> None:
project = self.create_project(name="foo")
project2 = self.create_project(name="bar", organization=project.organization)
good_token_str = generate_token(project.organization.slug, "")
self.create_org_auth_token(
organization_id=project.organization.id,
name="token 1",
token_hashed=hash_token(good_token_str),
token_last_characters="ABCD",
scope_list=["org:ci"],
date_last_used=None,
)
release = Release.objects.create(organization_id=project.organization_id, version="1")
release.add_project(project)
release.add_project(project2)
url = reverse(
"sentry-api-0-project-release-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"version": release.version,
},
)
response = self.client.put(
url,
data={"ref": "master"},
HTTP_AUTHORIZATION=f"Bearer {good_token_str}",
)
assert response.status_code == 200, response.content
assert response.data["version"] == release.version
release = Release.objects.get(id=release.id)
assert release.ref == "master"
|
UpdateReleaseDetailsTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-building-where-alice-and-bob-can-meet.py
|
{
"start": 2166,
"end": 3064
}
|
class ____(object):
def leftmostBuildingQueries(self, heights, queries):
"""
:type heights: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
result = [-1]*len(queries)
qs = [[] for _ in xrange(len(heights))]
for i, (a, b) in enumerate(queries):
if a > b:
a, b = b, a
if a == b or heights[a] < heights[b]:
result[i] = b
else:
qs[b].append((heights[a], i))
min_heap = []
for i, h in enumerate(heights):
for q in qs[i]:
heapq.heappush(min_heap, q)
while min_heap and min_heap[0][0] < h:
_, j = heapq.heappop(min_heap)
result[j] = i
return result
# Time: O(n + qlogn)
# Space: O(n + q)
# offline solution, mono stack, binary search
|
Solution2
|
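This snippet targets Python 2 (note xrange) and assumes heapq is imported at module level. Against the LeetCode 2940 example, the offline heap sweep gives:

heights = [6, 4, 8, 5, 2, 7]
queries = [[0, 1], [0, 3], [2, 4], [3, 4], [2, 2]]
assert Solution2().leftmostBuildingQueries(heights, queries) == [2, 5, -1, 5, 2]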
python
|
rapidsai__cudf
|
python/cudf_polars/cudf_polars/dsl/ir.py
|
{
"start": 110159,
"end": 111604
}
|
class ____(IR):
"""Concatenate dataframes vertically."""
__slots__ = ("zlice",)
_non_child = ("schema", "zlice")
zlice: Zlice | None
"""Optional slice to apply to the result."""
def __init__(self, schema: Schema, zlice: Zlice | None, *children: IR):
self.schema = schema
self.zlice = zlice
self._non_child_args = (zlice,)
self.children = children
schema = self.children[0].schema
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Union")
def do_evaluate(
cls, zlice: Zlice | None, *dfs: DataFrame, context: IRExecutionContext
) -> DataFrame:
"""Evaluate and return a dataframe."""
stream = get_joined_cuda_stream(
context.get_cuda_stream, upstreams=[df.stream for df in dfs]
)
# TODO: only evaluate what we need if we have a slice?
result = DataFrame.from_table(
plc.concatenate.concatenate([df.table for df in dfs], stream=stream),
dfs[0].column_names,
dfs[0].dtypes,
stream=stream,
).slice(zlice)
# now join the original streams *back* to the new result stream
# to ensure that the deallocations (on the original streams)
# happen after the result is ready
join_cuda_streams(
downstreams=[df.stream for df in dfs], upstreams=(result.stream,)
)
return result
|
Union
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-fish-in-a-grid.py
|
{
"start": 1146,
"end": 2131
}
|
class ____(object):
def findMaxFish(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
def dfs(i, j):
result = grid[i][j]
grid[i][j] = 0
stk = [(i, j)]
while stk:
i, j = stk.pop()
for di, dj in reversed(DIRECTIONS):
ni, nj = i+di, j+dj
if not (0 <= ni < len(grid) and
0 <= nj < len(grid[0]) and
grid[ni][nj]):
continue
result += grid[ni][nj]
grid[ni][nj] = 0
stk.append((ni, nj))
return result
result = 0
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
if grid[i][j]:
result = max(result, dfs(i, j))
return result
|
Solution2
|
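A check against the LeetCode 2658 example (Python 2 again, per xrange); note that findMaxFish zeroes the grid in place as it visits cells:

grid = [[0, 2, 1, 0],
        [4, 0, 0, 3],
        [1, 0, 0, 4],
        [0, 3, 2, 0]]
assert Solution2().findMaxFish(grid) == 7  # component (1,3)-(2,3): 3 + 4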
python
|
TheAlgorithms__Python
|
data_structures/suffix_tree/suffix_tree.py
|
{
"start": 354,
"end": 2005
}
|
class ____:
def __init__(self, text: str) -> None:
"""
Initializes the suffix tree with the given text.
Args:
text (str): The text for which the suffix tree is to be built.
"""
self.text: str = text
self.root: SuffixTreeNode = SuffixTreeNode()
self.build_suffix_tree()
def build_suffix_tree(self) -> None:
"""
Builds the suffix tree for the given text by adding all suffixes.
"""
text = self.text
n = len(text)
for i in range(n):
suffix = text[i:]
self._add_suffix(suffix, i)
def _add_suffix(self, suffix: str, index: int) -> None:
"""
Adds a suffix to the suffix tree.
Args:
suffix (str): The suffix to add.
index (int): The starting index of the suffix in the original text.
"""
node = self.root
for char in suffix:
if char not in node.children:
node.children[char] = SuffixTreeNode()
node = node.children[char]
node.is_end_of_string = True
node.start = index
node.end = index + len(suffix) - 1
def search(self, pattern: str) -> bool:
"""
Searches for a pattern in the suffix tree.
Args:
pattern (str): The pattern to search for.
Returns:
bool: True if the pattern is found, False otherwise.
"""
node = self.root
for char in pattern:
if char not in node.children:
return False
node = node.children[char]
return True
|
SuffixTree
|
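Since every suffix is inserted, search() matches any substring (each substring is a prefix of some suffix). A usage sketch, assuming the SuffixTreeNode class from the same module:

tree = SuffixTree("banana")
assert tree.search("ana")      # prefix of the suffix "anana"
assert tree.search("banana")
assert not tree.search("apple")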
python
|
getsentry__sentry
|
tests/sentry/api/endpoints/test_seer_models.py
|
{
"start": 239,
"end": 3911
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-seer-models"
def setUp(self) -> None:
super().setUp()
self.url = "/api/0/seer/models/"
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_successful(self, mock_get: MagicMock) -> None:
"""Test successful retrieval of models from Seer."""
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {
"models": ["gpt-4", "claude-3", "gemini-pro"],
}
mock_get.return_value = mock_response
response = self.client.get(self.url)
assert response.status_code == status.HTTP_200_OK
assert response.data == {"models": ["gpt-4", "claude-3", "gemini-pro"]}
expected_url = f"{settings.SEER_AUTOFIX_URL}/v1/models"
expected_headers = {
"content-type": "application/json;charset=utf-8",
**sign_with_seer_secret(b""),
}
mock_get.assert_called_once_with(
expected_url,
headers=expected_headers,
timeout=5,
)
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_no_authentication_required(self, mock_get: MagicMock) -> None:
"""Test that the endpoint works without authentication."""
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"models": ["gpt-4"]}
mock_get.return_value = mock_response
response = self.client.get(self.url)
assert response.status_code == status.HTTP_200_OK
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_timeout(self, mock_get: MagicMock) -> None:
"""Test handling of timeout errors."""
mock_get.side_effect = requests.exceptions.Timeout("Request timed out")
response = self.client.get(self.url)
assert response.status_code == status.HTTP_504_GATEWAY_TIMEOUT
assert response.data == {"detail": "Request to Seer timed out"}
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_request_exception(self, mock_get: MagicMock) -> None:
"""Test handling of request exceptions."""
mock_get.side_effect = requests.exceptions.RequestException("Connection error")
response = self.client.get(self.url)
assert response.status_code == status.HTTP_502_BAD_GATEWAY
assert response.data == {"detail": "Failed to fetch models from Seer"}
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_http_error(self, mock_get: MagicMock) -> None:
"""Test handling of HTTP errors from Seer."""
mock_response = MagicMock()
mock_response.status_code = 500
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("Server error")
mock_get.return_value = mock_response
response = self.client.get(self.url)
assert response.status_code == status.HTTP_502_BAD_GATEWAY
assert response.data == {"detail": "Failed to fetch models from Seer"}
@patch("sentry.api.endpoints.seer_models.requests.get")
def test_get_models_empty_response(self, mock_get: MagicMock) -> None:
"""Test handling of empty models list."""
mock_response = MagicMock()
mock_response.status_code = 200
mock_response.json.return_value = {"models": []}
mock_get.return_value = mock_response
response = self.client.get(self.url)
assert response.status_code == status.HTTP_200_OK
assert response.data == {"models": []}
|
TestSeerModels
|
python
|
RobertCraigie__pyright-python
|
src/pyright/_mureq.py
|
{
"start": 8309,
"end": 8942
}
|
class ____(HTTPException):
"""HTTPErrorStatus is raised by Response.raise_for_status() to indicate an
HTTP error code (a 40x or a 50x). Note that a well-formed response with an
error code does not result in an exception unless raise_for_status() is
called explicitly.
"""
def __init__(self, status_code):
self.status_code = status_code
def __str__(self):
return f"HTTP response returned error code {self.status_code:d}"
# end public API, begin internal implementation details
_JSON_CONTENTTYPE = 'application/json'
_FORM_CONTENTTYPE = 'application/x-www-form-urlencoded'
|
HTTPErrorStatus
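A self-contained sketch of the raise_for_status() behavior the docstring describes. HTTPException here stands in for mureq's base class, and the free function is a hypothetical stand-in for Response.raise_for_status():

class HTTPException(Exception):
    pass

class HTTPErrorStatus(HTTPException):  # name taken from the target column
    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        return f"HTTP response returned error code {self.status_code:d}"

def raise_for_status(status_code: int) -> None:
    # hypothetical helper: an error code only raises when checked explicitly,
    # as the docstring notes
    if status_code >= 400:
        raise HTTPErrorStatus(status_code)

try:
    raise_for_status(404)
except HTTPErrorStatus as exc:
    print(exc)  # HTTP response returned error code 404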
|
python
|
catalyst-team__catalyst
|
catalyst/core/callback.py
|
{
"start": 4802,
"end": 5014
}
|
class ____(Callback):
"""Scheduler callback interface, abstraction over scheduler step."""
def __init__(self):
"""Init."""
super().__init__(order=CallbackOrder.Scheduler)
|
ISchedulerCallback
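A hedged sketch of a concrete subclass; the on_epoch_end(runner) hook and the runner.scheduler attribute follow common Catalyst conventions but are assumptions here, not taken from the record:

class StepSchedulerCallback(ISchedulerCallback):
    def on_epoch_end(self, runner) -> None:
        runner.scheduler.step()  # assumed runner attribute, for illustration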
|
python
|
astropy__astropy
|
astropy/modeling/projections.py
|
{
"start": 22318,
"end": 22743
}
|
class ____(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi)
y = np.array(theta)
return x, y
|
Sky2Pix_PlateCarree
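Because plate carrée maps longitude/latitude straight to x/y, the model is effectively the identity on its inputs; a small check, assuming a standard astropy install:

import numpy as np
from astropy.modeling.projections import Sky2Pix_PlateCarree

proj = Sky2Pix_PlateCarree()
x, y = proj(np.array([0.0, 45.0]), np.array([-30.0, 60.0]))
print(x, y)  # [ 0. 45.] [-30.  60.] -- the angles pass through unchanged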
|
python
|
getsentry__sentry
|
src/sentry/release_health/base.py
|
{
"start": 5059,
"end": 5235
}
|
class ____(TypedDict):
date: datetime
total_users: int
crash_free_users: float | None
total_sessions: int
crash_free_sessions: float | None
|
CrashFreeBreakdown
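An illustrative instance of the TypedDict above, redefined inline so the snippet runs standalone (the name CrashFreeBreakdown comes from the target column; None models buckets with no recorded data):

from datetime import datetime, timezone
from typing import TypedDict

class CrashFreeBreakdown(TypedDict):  # mirrors the record's fields
    date: datetime
    total_users: int
    crash_free_users: float | None
    total_sessions: int
    crash_free_sessions: float | None

row: CrashFreeBreakdown = {
    "date": datetime(2024, 1, 1, tzinfo=timezone.utc),
    "total_users": 120,
    "crash_free_users": 98.3,
    "total_sessions": 4500,
    "crash_free_sessions": None,  # no session data for this bucket
}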
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/higher_order_ops.py
|
{
"start": 120444,
"end": 123109
}
|
class ____(WrapHigherOrderVariable):
def install_subgraph_in_output_graph(
self, tx, fn_vt, fn_args_vt, kwargs, body_gmod, attr_name="wrap_body"
):
return tx.output.install_subgraph(
"hints_wrapper_body",
body_gmod,
)
@raise_hard_error_if_graph_break(
reason="hints_wrapper doesn't work unless it is captured completely with torch.compile."
)
def _call_function(
self, tx, args: "list[VariableTracker]", kwargs: "dict[str, VariableTracker]"
) -> "VariableTracker":
_check_supported_callable_arg(tx, args[0], "body_fn")
# inputs
if (
len(args) != 3
or not isinstance(args[1], (ListVariable, TupleVariable))
or not isinstance(args[2], ConstDictVariable)
or len(kwargs) != 1
or "hints" not in kwargs
):
unimplemented(
gb_type="hints_wrapper: improper args/kwargs",
context=f"args: {args}, kwargs: {kwargs}",
explanation=f"hints_wrapper expects 3 positional arguments (got {len(args)}) "
f"and 1 keyword argument (got {len(kwargs)}). "
"Usage: hints_wrapper(body_fn, args, kwargs, hints=...). "
"args is expected to be list/tuple and kwargs is expected to be a dict.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
operands = args[1].unpack_var_sequence(tx)
fn_kwargs = args[2].as_python_constant()
# Use create_wrapped_node from WrapHigherOrderVariable
(
p_args,
_,
example_value,
body_r,
body_gmod,
_,
body_graph_output_vts,
) = self.create_wrapped_node(
tx,
args[0], # function
operands,
fn_kwargs,
"hints_wrapper",
)
# hints_wrapper expects (body_node, args, kwargs) as positional args
# So we need to restructure p_args from (body_node, *lifted_args)
# to (body_node, lifted_args_tuple, {})
body_node = p_args[0]
lifted_args = p_args[1:]
p_args = (body_node, tuple(lifted_args), {})
# add hints into p_kwargs
p_kwargs = {}
p_kwargs["hints"] = kwargs["hints"].as_python_constant()
return _call_function_with_auto_output_flattening(
tx,
self.value,
p_args,
p_kwargs,
example_value,
body_r,
body_graph_output_vts,
)
|
HintsWrapperHigherOrderVariable
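The call shape below is reconstructed from the record's own graph-break message, hints_wrapper(body_fn, args, kwargs, hints=...); the import path is an assumption and may differ across torch versions:

import torch
from torch._higher_order_ops.hints_wrap import hints_wrapper  # assumed path

def body_fn(x, y):
    return torch.sin(x) + y

@torch.compile(fullgraph=True)  # the op hard-errors on graph breaks
def f(x, y):
    return hints_wrapper(body_fn, (x, y), {}, hints={"stage": "pipeline"})

out = f(torch.randn(4), torch.randn(4))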
|