Columns:
  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6–201
  class_span   dict
  source       stringlengths   21–2.38M
  target       stringlengths   1–96
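The rows below follow this schema, one field per line, in column order. As a minimal sketch of how a dataset shaped like this could be loaded and inspected with the `datasets` library (the dataset ID is a hypothetical placeholder, not taken from this dump):

from datasets import load_dataset

# Hypothetical dataset ID; substitute the real one.
ds = load_dataset("org/masked-class-names", split="train")

row = ds[0]
# `source` holds the class code with its name masked as `____`,
# `target` holds the masked name, and `class_span` gives the character
# offsets of the class within the original file at `path`.
reconstructed = row["source"].replace("____", row["target"], 1)
span = row["class_span"]
print(row["repo"], row["path"], span["end"] - span["start"], "chars")
print(reconstructed.splitlines()[0])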
python
conda__conda
conda/exceptions.py
{ "start": 44462, "end": 47478 }
class ____(CondaError):
    def __init__(self, msg: str, *args, **kwargs):
        super().__init__(msg, *args, **kwargs)


def maybe_raise(error: BaseException, context: Context):
    if isinstance(error, CondaMultiError):
        groups = groupby(lambda e: isinstance(e, ClobberError), error.errors)
        clobber_errors = groups.get(True, ())
        groups = groupby(lambda e: isinstance(e, SafetyError), groups.get(False, ()))
        safety_errors = groups.get(True, ())
        other_errors = groups.get(False, ())

        if (
            (safety_errors and context.safety_checks == SafetyChecks.enabled)
            or (
                clobber_errors
                and context.path_conflict == PathConflict.prevent
                and not context.clobber
            )
            or other_errors
        ):
            raise error
        elif (safety_errors and context.safety_checks == SafetyChecks.warn) or (
            clobber_errors
            and context.path_conflict == PathConflict.warn
            and not context.clobber
        ):
            print_conda_exception(error)
    elif isinstance(error, ClobberError):
        if context.path_conflict == PathConflict.prevent and not context.clobber:
            raise error
        elif context.path_conflict == PathConflict.warn and not context.clobber:
            print_conda_exception(error)
    elif isinstance(error, SafetyError):
        if context.safety_checks == SafetyChecks.enabled:
            raise error
        elif context.safety_checks == SafetyChecks.warn:
            print_conda_exception(error)
    else:
        raise error


def print_conda_exception(exc_val: CondaError, exc_tb: TracebackType | None = None):
    from .base.context import context

    rc = getattr(exc_val, "return_code", None)
    if context.debug or (not isinstance(exc_val, DryRunExit) and context.info):
        print(_format_exc(exc_val, exc_tb), file=sys.stderr)
    elif context.json:
        if isinstance(exc_val, DryRunExit):
            return
        logger = getLogger("conda.stdout" if rc else "conda.stderr")
        exc_json = json_dumps(exc_val.dump_map(), sort_keys=True)
        logger.info(f"{exc_json}\n")
    else:
        stderrlog = getLogger("conda.stderr")
        stderrlog.error("\n%r\n", exc_val)
        # An alternative which would allow us not to reload sys with newly
        # setdefaultencoding() is to not use `%r`, e.g.:
        # stderrlog.error("\n" + exc_val.__repr__() + "\n")
        # Still, not being able to use `%r` seems too great a price to pay.


def _format_exc(
    exc_val: BaseException | None = None, exc_tb: TracebackType | None = None
):
    if exc_val is None:
        exc_type, exc_val, exc_tb = sys.exc_info()
    else:
        exc_type = type(exc_val)
    if exc_tb:
        formatted_exception = format_exception(exc_type, exc_val, exc_tb)
    else:
        formatted_exception = format_exception_only(exc_type, exc_val)
    return "".join(formatted_exception)
SpecNotFoundInPackageCache
python
pallets__jinja
src/jinja2/nativetypes.py
{ "start": 2861, "end": 4205 }
class ____(Template):
    environment_class = NativeEnvironment

    def render(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Render the template to produce a native Python type. If the
        result is a single node, its value is returned. Otherwise, the
        nodes are concatenated as strings. If the result can be parsed
        with :func:`ast.literal_eval`, the parsed value is returned.
        Otherwise, the string is returned.
        """
        ctx = self.new_context(dict(*args, **kwargs))

        try:
            return self.environment_class.concat(  # type: ignore
                self.root_render_func(ctx)
            )
        except Exception:
            return self.environment.handle_exception()

    async def render_async(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        if not self.environment.is_async:
            raise RuntimeError(
                "The environment was not created with async mode enabled."
            )

        ctx = self.new_context(dict(*args, **kwargs))

        try:
            return self.environment_class.concat(  # type: ignore
                [n async for n in self.root_render_func(ctx)]  # type: ignore
            )
        except Exception:
            return self.environment.handle_exception()


NativeEnvironment.template_class = NativeTemplate
NativeTemplate
python
apache__airflow
providers/common/messaging/tests/unit/common/messaging/triggers/test_msg_queue.py
{ "start": 2821, "end": 8722 }
class ____:
    """Test cases for MessageQueueTrigger error handling and provider matching."""

    def test_no_providers_available(self):
        """Test error when no message queue providers are available."""
        trigger = MessageQueueTrigger(queue=TEST_QUEUE)
        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, []):
            with pytest.raises(ValueError, match=NO_PROVIDERS_ERROR):
                _ = trigger.trigger

    def test_queue_not_recognized_by_any_provider(self):
        """Test error when queue is not recognized by any provider."""
        # Create mock providers that don't match the queue
        provider1 = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        provider2 = MockProvider(PROVIDER_2_NAME, PROVIDER_2_PATTERN)

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider1, provider2]):
            trigger = MessageQueueTrigger(queue=UNKNOWN_QUEUE)
            with pytest.raises(
                ValueError,
                match=NOT_RECOGNIZED_ERROR.format(queue_name=UNKNOWN_QUEUE, match_type="queue"),
            ):
                _ = trigger.trigger

    def test_queue_recognized_by_multiple_providers(self):
        """Test error when queue is recognized by multiple providers (collision)."""
        # Create mock providers that both match the same queue pattern
        provider1 = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        provider2 = MockProvider(PROVIDER_2_NAME, PROVIDER_1_PATTERN)

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider1, provider2]):
            trigger = MessageQueueTrigger(queue=PROVIDER_1_QUEUE)
            with pytest.raises(
                ValueError,
                match=MULTIPLE_PROVIDERS_ERROR.format(queue=PROVIDER_1_QUEUE, match_type="queue"),
            ):
                _ = trigger.trigger

    def test_successful_provider_matching(self):
        """Test successful provider matching and trigger creation."""
        provider1 = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        provider2 = MockProvider(PROVIDER_2_NAME, PROVIDER_2_PATTERN)

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider1, provider2]):
            trigger = MessageQueueTrigger(queue=PROVIDER_1_QUEUE, extra_param="value")
            result_trigger = trigger.trigger
            assert result_trigger is not None

    def test_provider_class_names_in_logging(self):
        """Test that provider class names (not objects) are logged in error messages."""
        provider1 = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        provider2 = MockProvider(PROVIDER_2_NAME, PROVIDER_2_PATTERN)

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider1, provider2]):
            trigger = MessageQueueTrigger(queue=UNSUPPORTED_QUEUE)
            with pytest.raises(ValueError, match=UNSUPPORTED_QUEUE):
                _ = trigger.trigger

    def test_trigger_kwargs_passed_correctly(self):
        """Test that kwargs are passed correctly to the selected provider."""
        provider = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        mock_trigger_class = MagicMock()
        mock_trigger_instance = MagicMock(spec=BaseEventTrigger)
        mock_trigger_class.return_value = mock_trigger_instance
        provider.trigger_class = MagicMock(return_value=mock_trigger_class)
        provider.trigger_kwargs = MagicMock(return_value={"processed_queue": "test://processed"})

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider]):
            trigger = MessageQueueTrigger(queue=PROVIDER_1_QUEUE, param1="value1", param2="value2")
            result = trigger.trigger

            provider.trigger_kwargs.assert_called_once_with(
                PROVIDER_1_QUEUE, param1="value1", param2="value2"
            )
            # Verify trigger class was instantiated with combined kwargs
            mock_trigger_class.assert_called_once_with(
                processed_queue="test://processed", param1="value1", param2="value2"
            )
            assert result == mock_trigger_instance

    def test_serialize_delegates_to_underlying_trigger(self):
        """Test that serialize method delegates to the underlying trigger."""
        provider = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        mock_trigger_instance = MagicMock(spec=BaseEventTrigger)
        mock_trigger_instance.serialize.return_value = ("test.module.TestTrigger", {"param": "value"})
        mock_trigger_class = MagicMock(return_value=mock_trigger_instance)
        provider.trigger_class = MagicMock(return_value=mock_trigger_class)
        provider.trigger_kwargs = MagicMock(return_value={})

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider]):
            trigger = MessageQueueTrigger(queue=PROVIDER_1_QUEUE)
            result = trigger.serialize()

            mock_trigger_instance.serialize.assert_called_once()
            assert result == ("test.module.TestTrigger", {"param": "value"})

    @pytest.mark.asyncio
    async def test_run_delegates_to_underlying_trigger(self):
        """Test that run method delegates to the underlying trigger."""
        provider = MockProvider(PROVIDER_1_NAME, PROVIDER_1_PATTERN)
        mock_trigger_instance = MagicMock(spec=BaseEventTrigger)

        async def mock_run():
            yield MagicMock()

        mock_trigger_instance.run.return_value = mock_run()
        mock_trigger_class = MagicMock(return_value=mock_trigger_instance)
        provider.trigger_class = MagicMock(return_value=mock_trigger_class)
        provider.trigger_kwargs = MagicMock(return_value={})

        with mock.patch(MESSAGE_QUEUE_PROVIDERS_PATH, [provider]):
            trigger = MessageQueueTrigger(queue=PROVIDER_1_QUEUE)
            async_gen = trigger.run()
            event = await async_gen.__anext__()

            mock_trigger_instance.run.assert_called_once()
            assert event is not None
TestMessageQueueTrigger
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py
{ "start": 519, "end": 864 }
class ____(ResolverException):
    def __init__(self, criterion):
        super(RequirementsConflicted, self).__init__(criterion)
        self.criterion = criterion

    def __str__(self):
        return "Requirements conflict: {}".format(
            ", ".join(repr(r) for r in self.criterion.iter_requirement()),
        )
RequirementsConflicted
python
facelessuser__soupsieve
tests/test_level4/test_playing.py
{ "start": 52, "end": 940 }
class ____(util.TestCase):
    """Test playing selectors."""

    MARKUP = """
    <!DOCTYPE html>
    <html>
    <body>
    <video id="vid" width="320" height="240" controls>
        <source src="movie.mp4" type="video/mp4">
        <source src="movie.ogg" type="video/ogg">
        Your browser does not support the video tag.
    </video>
    </body>
    </html>
    """

    def test_playing(self):
        """Test playing (matches nothing)."""

        # Not actually sure how this is used, but it won't match anything anyways
        self.assert_selector(
            self.MARKUP,
            "video:playing",
            [],
            flags=util.HTML
        )

    def test_not_playing(self):
        """Test not playing."""

        self.assert_selector(
            self.MARKUP,
            "video:not(:playing)",
            ["vid"],
            flags=util.HTML
        )
TestPlaying
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 921730, "end": 922470 }
class ____(sgqlc.types.relay.Connection):
    """The connection type for ReleaseAsset."""

    __schema__ = github_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    edges = sgqlc.types.Field(sgqlc.types.list_of("ReleaseAssetEdge"), graphql_name="edges")
    """A list of edges."""

    nodes = sgqlc.types.Field(sgqlc.types.list_of("ReleaseAsset"), graphql_name="nodes")
    """A list of nodes."""

    page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
    """Information to aid in pagination."""

    total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
    """Identifies the total count of items in the connection."""
ReleaseAssetConnection
python
django__django
tests/sessions_tests/models.py
{ "start": 379, "end": 640 }
class ____(AbstractBaseSession):
    """
    A session model with a column for an account ID.
    """

    account_id = models.IntegerField(null=True, db_index=True)

    @classmethod
    def get_session_store_class(cls):
        return SessionStore
CustomSession
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_kronecker_test.py
{ "start": 2157, "end": 3133 }
class ____(test.TestCase):
  """Test of `_kronecker_dense` function."""

  def test_kronecker_dense_matrix(self):
    x = ops.convert_to_tensor([[2., 3.], [1., 2.]], dtype=dtypes.float32)
    y = ops.convert_to_tensor([[1., 2.], [5., -1.]], dtype=dtypes.float32)
    # From explicitly writing out the kronecker product of x and y.
    z = ops.convert_to_tensor([
        [2., 4., 3., 6.],
        [10., -2., 15., -3.],
        [1., 2., 2., 4.],
        [5., -1., 10., -2.]], dtype=dtypes.float32)
    # From explicitly writing out the kronecker product of y and x.
    w = ops.convert_to_tensor([
        [2., 3., 4., 6.],
        [1., 2., 2., 4.],
        [10., 15., -2., -3.],
        [5., 10., -1., -2.]], dtype=dtypes.float32)

    self.assertAllClose(
        self.evaluate(_kronecker_dense([x, y])), self.evaluate(z))
    self.assertAllClose(
        self.evaluate(_kronecker_dense([y, x])), self.evaluate(w))


@test_util.run_all_in_graph_and_eager_modes
KroneckerDenseTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/index1.py
{ "start": 1983, "end": 2155 }
class ____:
    __getitem__ = ClassH()


reveal_type(ClassI()[0], expected_text="ClassH")


def func4(l: list[Literal["a", "b"]]):
    l[0] = "a"
    l[0:0] = ["a", "b"]
ClassI
python
scikit-learn__scikit-learn
sklearn/utils/_param_validation.py
{ "start": 10105, "end": 10522 }
class ____(_Constraint):
    """Constraint representing instances of a given type.

    Parameters
    ----------
    type : type
        The valid type.
    """

    def __init__(self, type):
        super().__init__()
        self.type = type

    def is_satisfied_by(self, val):
        return isinstance(val, self.type)

    def __str__(self):
        return f"an instance of {_type_name(self.type)!r}"
_InstancesOf
python
ray-project__ray
python/ray/tests/test_node_manager.py
{ "start": 10214, "end": 14092 }
class ____(RuntimeEnvPlugin):
    """
    The first worker will start up normally, but all subsequent workers will
    hang at start up indefinitely. How it works: Ray RuntimeEnvAgent caches
    the modified context so we can't do it in modify_context. Instead, we use
    a bash command to read a file and hang forever. We don't have a good file
    lock mechanism in bash (flock is not installed by default in macos), so
    we also serialize the worker startup.
    """

    name = MyPlugin

    def __init__(self):
        # Each URI has a temp dir, a counter file, and a hang.sh script.
        self.uris = collections.defaultdict(dict)

    def get_uris(self, runtime_env: "RuntimeEnv") -> List[str]:  # noqa: F821
        return [runtime_env[self.name]]

    async def create(
        self,
        uri: Optional[str],
        runtime_env,
        context: RuntimeEnvContext,
        logger: logging.Logger,
    ) -> float:
        d = self.uris[uri]
        d["temp_dir"] = tempfile.mkdtemp()
        logger.info(f"caching temp dir {d['temp_dir']} for uri {uri}")
        d["counter_file"] = os.path.join(d["temp_dir"], "script_run_count")
        with open(d["counter_file"], "w+") as f:
            f.write("0")
        d["hang_sh"] = os.path.join(d["temp_dir"], "hang.sh")
        with open(d["hang_sh"], "w+") as f:
            f.write(
                f"""#!/bin/bash
counter_file="{d['counter_file']}"

count=$(cat "$counter_file")

if [ "$count" -eq "0" ]; then
    echo "1" > "$counter_file"
    echo "first time run"
    exit 0
elif [ "$count" -eq "1" ]; then
    echo "2" > "$counter_file"
    echo "second time run, sleeping..."
    sleep 1000
fi
"""
            )
        os.chmod(d["hang_sh"], 0o755)
        return 0.1

    def modify_context(
        self,
        uris: List[str],
        runtime_env: "RuntimeEnv",  # noqa: F821
        ctx: RuntimeEnvContext,
        logger: logging.Logger,
    ) -> None:
        logger.info(f"Starting worker: {uris}, {runtime_env}")
        if self.name not in runtime_env:
            return
        assert len(uris) == 1
        uri = uris[0]
        hang_sh = self.uris[uri]["hang_sh"]
        ctx.command_prefix += ["bash", hang_sh, "&&"]

    def delete_uri(self, uri: str, logger: logging.Logger) -> float:
        temp_dir = self.uris[uri]["temp_dir"]
        shutil.rmtree(temp_dir)
        del self.uris[uri]
        logger.info(f"temp_dir removed: {temp_dir}")


@pytest.fixture
def serialize_worker_startup(monkeypatch):
    """Only one worker starts up each time, since our bash script is not process-safe"""
    monkeypatch.setenv("RAY_worker_maximum_startup_concurrency", "1")
    yield


@pytest.mark.parametrize(
    "set_runtime_env_plugins",
    [
        '[{"class":"' + MY_PLUGIN_CLASS_PATH + '"}]',
    ],
    indirect=True,
)
def test_can_reuse_released_workers(
    serialize_worker_startup, set_runtime_env_plugins, ray_start_cluster
):
    """
    Uses a runtime env plugin to make sure only 1 worker can start and all
    subsequent workers will hang in runtime start up forever. We issue 10
    tasks and test that all the following tasks can still be scheduled on the
    first worker released from the first task, i.e. tasks are not bound to
    the workers that they requested to start.
    """
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=2)
    ray.init(address=cluster.address)

    @ray.remote(runtime_env={"env_vars": {"HELLO": "WORLD"}, MyPlugin: "key"})
    def f():
        # Sleep for a while to make sure other tasks also request workers.
        time.sleep(1)
        print(f"pid={os.getpid()}, env HELLO={os.environ.get('HELLO')}")
        return os.getpid()

    objs = [f.remote() for i in range(10)]
    pids = ray.get(objs)
    for pid in pids:
        assert pid == pids[0]


if __name__ == "__main__":
    sys.exit(pytest.main(["-sv", __file__]))
HangOnSecondWorkerPlugin
python
great-expectations__great_expectations
great_expectations/metrics/column/sample_values.py
{ "start": 215, "end": 556 }
class ____(ColumnMetric[ColumnSampleValuesResult]):
    """
    This metric returns a list of sample values from the column.
    It is only supported for SQLAlchemy execution engines at this time.

    Args:
        count: The number of sample values to return.
    """

    name = "column.sample_values"
    count: int = 20
ColumnSampleValues
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_contextlib.py
{ "start": 673, "end": 1812 }
class ____(importlib.abc.MetaPathFinder):
    def find_spec(self, fullname, path, target=None):
        # Check if the import is the problematic one
        if fullname in redirect_imports:
            try:
                # Attempt to import the standalone module
                name = fullname.removeprefix("test.")
                r = importlib.import_module(name)

                # Redirect the module in sys.modules
                sys.modules[fullname] = r

                # Return a module spec from the found module
                return importlib.util.find_spec(name)
            except ImportError:
                return None
        return None


# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())

# ======= END DYNAMO PATCH =======

"""Unit tests for contextlib.py, and other context managers."""

import io
import os
import sys
import tempfile
import threading
import traceback
import unittest
from contextlib import *  # Tests __all__
from test import support
from test.support import os_helper
from test.support.testcase import ExceptionIsLikeMixin
import weakref
RedirectImportFinder
python
coleifer__peewee
playhouse/postgres_ext.py
{ "start": 11042, "end": 12127 }
class ____(object):
    __slots__ = ('cursor', 'array_size', 'exhausted', 'iterable')

    def __init__(self, cursor, array_size=None):
        self.cursor = cursor
        self.array_size = array_size or cursor.itersize
        self.exhausted = False
        self.iterable = self.row_gen()

    def __del__(self):
        if self.cursor and not self.cursor.closed:
            try:
                self.cursor.close()
            except Exception:
                pass

    @property
    def description(self):
        return self.cursor.description

    def close(self):
        self.cursor.close()

    def row_gen(self):
        try:
            while True:
                rows = self.cursor.fetchmany(self.array_size)
                if not rows:
                    return
                for row in rows:
                    yield row
        finally:
            self.close()

    def fetchone(self):
        if self.exhausted:
            return
        try:
            return next(self.iterable)
        except StopIteration:
            self.exhausted = True
FetchManyCursor
python
ray-project__ray
python/ray/llm/tests/serve/cpu/deployments/test_prefix_tree.py
{ "start": 4569, "end": 11772 }
class ____:
    def test_insert_non_existent_tenant(self, tree: PrefixTree) -> None:
        """Test inserting a string for a non-existent tenant fails."""
        # Insert without adding tenant first
        tree.insert("hello", "nonexistent", 1)

        # Verify insert did nothing since tenant doesn't exist
        assert "nonexistent" not in tree.tenant_to_char_count
        assert get_lru_texts_from_tree(tree, "nonexistent") == []
        assert "h" not in tree.root.edge_label_to_child

    def test_insert_single_string(self, tree: PrefixTree) -> None:
        """Test inserting a single string after adding a tenant."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)

        assert tree.tenant_to_char_count == {"tenant_1": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]

        root_node = tree.root
        assert root_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert set(root_node.edge_label_to_child.keys()) == {"h"}

        hello_node = root_node.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.parent == root_node
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}
        assert hello_node.edge_label_to_child == {}

    def test_insert_duplicate_string(self, tree: PrefixTree) -> None:
        """Test inserting a duplicate string for the same tenant."""
        tree.add_tenants(["tenant_1"], 0)
        tree.insert("hello", "tenant_1", 1)  # Initial insert
        tree.insert("hello", "tenant_1", 1)  # Duplicate insert with the same timestamp

        assert tree.tenant_to_char_count == {"tenant_1": 5}  # Char count unchanged
        assert get_lru_texts_from_tree(tree, "tenant_1") == [
            "",
            "hello",
        ]  # LRU order same

        hello_node = tree.root.edge_label_to_child["h"]
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 1}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1}

        tree.insert("hello", "tenant_1", 2)  # Duplicate insert with new timestamp

        assert tree.tenant_to_char_count == {"tenant_1": 5}  # Char count unchanged
        assert get_lru_texts_from_tree(tree, "tenant_1") == [
            "",
            "hello",
        ]  # LRU order same

        hello_node = tree.root.edge_label_to_child["h"]
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 2}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 2}

    def test_insert_multiple_tenants(self, tree: PrefixTree) -> None:
        """Test inserting the same string for different tenants."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("hello", "tenant_2", 2)

        assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "hello"]

        hello_node = tree.root.edge_label_to_child["h"]
        assert tree.root.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}

    def test_insert_node_split(self, tree: PrefixTree) -> None:
        """Test insertion that causes an existing node to split due to differing suffixes."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert("hellothere", "tenant_2", 2)  # "hello" is common prefix

        assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 10}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "there", "hello"]

        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w", "t"}

        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}

        there_node = hello_node.edge_label_to_child["t"]
        assert there_node.text == "there"
        assert there_node.tenant_to_last_access_time == {"tenant_2": 2}

    def test_insert_longer_string_with_shared_prefix(self, tree: PrefixTree) -> None:
        """Test inserting a longer string that shares a prefix with an existing node string."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("hello", "tenant_1", 1)
        tree.insert("helloworld", "tenant_2", 2)  # "hello" is prefix of "helloworld"

        assert tree.tenant_to_char_count == {"tenant_1": 5, "tenant_2": 10}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "world", "hello"]

        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w"}

        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.tenant_to_last_access_time == {"tenant_2": 2}

        # Ensure no empty non-root nodes created
        empty_text_nodes: List[Node] = []
        nodes_to_check: List[Node] = [tree.root]
        visited_nodes: Set[Node] = {tree.root}
        while nodes_to_check:
            node: Node = nodes_to_check.pop()
            if node.text == "" and node != tree.root:  # check for non-root empty nodes
                empty_text_nodes.append(node)
            for child in node.edge_label_to_child.values():
                if child not in visited_nodes:
                    nodes_to_check.append(child)
                    visited_nodes.add(child)
        assert not empty_text_nodes

    def test_insert_shorter_string_with_shared_prefix(self, tree: PrefixTree) -> None:
        """Test inserting a shorter string that is a prefix of an existing longer string, causing split."""
        tree.add_tenants(["tenant_1", "tenant_2"], 0)
        tree.insert("helloworld", "tenant_1", 1)
        tree.insert(
            "hello", "tenant_2", 2
        )  # "hello" is prefix, causes "helloworld" to split

        assert tree.tenant_to_char_count == {"tenant_1": 10, "tenant_2": 5}
        assert get_lru_texts_from_tree(tree, "tenant_1") == ["", "hello", "world"]
        assert get_lru_texts_from_tree(tree, "tenant_2") == ["", "hello"]

        hello_node = tree.root.edge_label_to_child["h"]
        assert hello_node.text == "hello"
        assert hello_node.tenant_to_last_access_time == {"tenant_1": 1, "tenant_2": 2}
        assert set(hello_node.edge_label_to_child.keys()) == {"w"}

        world_node = hello_node.edge_label_to_child["w"]
        assert world_node.text == "world"
        assert world_node.tenant_to_last_access_time == {"tenant_1": 1}
TestPrefixTreeInsert
python
ray-project__ray
python/ray/autoscaler/v2/tests/util.py
{ "start": 1015, "end": 2120 }
class ____:
    def __init__(self):
        self.events = []

    def notify(self, events):
        self.events.extend(events)

    def clear(self):
        self.events.clear()

    def events_by_id(self, instance_id):
        return [e for e in self.events if e.instance_id == instance_id]


def make_autoscaler_instance(
    im_instance: Optional[Instance] = None,
    ray_node: Optional[autoscaler_pb2.NodeState] = None,
    cloud_instance_id: Optional[str] = None,
) -> AutoscalerInstance:
    if cloud_instance_id:
        if im_instance:
            im_instance.cloud_instance_id = cloud_instance_id
        if ray_node:
            ray_node.instance_id = cloud_instance_id

    return AutoscalerInstance(
        im_instance=im_instance,
        ray_node=ray_node,
        cloud_instance_id=cloud_instance_id,
    )


def get_cluster_resource_state(stub) -> autoscaler_pb2.ClusterResourceState:
    request = autoscaler_pb2.GetClusterResourceStateRequest(
        last_seen_cluster_resource_state_version=0
    )
    return stub.GetClusterResourceState(request).cluster_resource_state
MockSubscriber
python
numba__numba
numba/core/types/abstract.py
{ "start": 2694, "end": 7733 }
class ____(metaclass=_TypeMetaclass):
    """
    The base class for all Numba types.

    It is essential that proper equality comparison is implemented. The
    default implementation uses the "key" property (overridable in
    subclasses) for both comparison and hashing, to ensure sane behaviour.
    """

    mutable = False
    # Whether the type is reflected at the python<->nopython boundary
    reflected = False

    def __init__(self, name):
        self.name = name

    @property
    def key(self):
        """
        A property used for __eq__, __ne__ and __hash__. Can be overridden
        in subclasses.
        """
        return self.name

    @property
    def mangling_args(self):
        """
        Returns `(basename, args)` where `basename` is the name of the type
        and `args` is a sequence of parameters of the type.

        Subclass should override to specialize the behavior.
        By default, this returns `(self.name, ())`.
        """
        return self.name, ()

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(self.key)

    def __eq__(self, other):
        return self.__class__ is other.__class__ and self.key == other.key

    def __ne__(self, other):
        return not (self == other)

    def __reduce__(self):
        reconstructor, args, state = super(Type, self).__reduce__()
        return (_type_reconstructor, (reconstructor, args, state))

    def unify(self, typingctx, other):
        """
        Try to unify this type with the *other*. A third type must
        be returned, or None if unification is not possible.
        Only override this if the coercion logic cannot be expressed
        as simple casting rules.
        """
        return None

    def can_convert_to(self, typingctx, other):
        """
        Check whether this type can be converted to the *other*.
        If successful, must return a string describing the conversion, e.g.
        "exact", "promote", "unsafe", "safe"; otherwise None is returned.
        """
        return None

    def can_convert_from(self, typingctx, other):
        """
        Similar to *can_convert_to*, but in reverse. Only needed if
        the type provides conversion from other types.
        """
        return None

    def is_precise(self):
        """
        Whether this type is precise, i.e. can be part of a successful
        type inference. Default implementation returns True.
        """
        return True

    def augment(self, other):
        """
        Augment this type with the *other*. Return the augmented type,
        or None if not supported.
        """
        return None

    # User-facing helpers. These are not part of the core Type API but
    # are provided so that users can write e.g. `numba.boolean(1.5)`
    # (returns True) or `types.int32(types.int32[:])` (returns something
    # usable as a function signature).

    def __call__(self, *args):
        from numba.core.typing import signature
        if len(args) == 1 and not isinstance(args[0], Type):
            return self.cast_python_value(args[0])
        return signature(self,  # return_type
                         *args)

    def __getitem__(self, args):
        """
        Return an array of this type.
        """
        from numba.core.types import Array
        ndim, layout = self._determine_array_spec(args)
        return Array(dtype=self, ndim=ndim, layout=layout)

    def _determine_array_spec(self, args):
        # XXX non-contiguous by default, even for 1d arrays,
        # doesn't sound very intuitive
        def validate_slice(s):
            return isinstance(s, slice) and s.start is None and s.stop is None

        if isinstance(args, (tuple, list)) and all(map(validate_slice, args)):
            ndim = len(args)
            if args[0].step == 1:
                layout = 'F'
            elif args[-1].step == 1:
                layout = 'C'
            else:
                layout = 'A'
        elif validate_slice(args):
            ndim = 1
            if args.step == 1:
                layout = 'C'
            else:
                layout = 'A'
        else:
            # Raise a KeyError to not be handled by collection constructors (e.g. list).
            raise KeyError(f"Can only index numba types with slices with no start "
                           f"or stop, got {args}.")

        return ndim, layout

    def cast_python_value(self, args):
        raise NotImplementedError

    @property
    def is_internal(self):
        """ Returns True if this class is an internally defined Numba type by
        virtue of the module in which it is instantiated, False else."""
        return self._is_internal

    def dump(self, tab=''):
        print(f'{tab}DUMP {type(self).__name__}[code={self._code}, name={self.name}]')


# XXX we should distinguish between Dummy (no meaningful
# representation, e.g. None or a builtin function) and Opaque (has a
# meaningful representation, e.g. ExternalFunctionPointer)
Type
python
scipy__scipy
scipy/special/tests/test_spherical_bessel.py
{ "start": 10200, "end": 10399 }
class ____(SphericalDerivativesTestCase):
    def f(self, n, z):
        return spherical_yn(n, z)

    def df(self, n, z):
        return spherical_yn(n, z, derivative=True)
TestSphericalYnDerivatives
python
huggingface__transformers
src/transformers/models/unispeech/modeling_unispeech.py
{ "start": 17418, "end": 20348 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0

        attention_mask = create_bidirectional_mask(
            config=self.config,
            input_embeds=hidden_states,
            attention_mask=attention_mask,
        )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = self.training and dropout_probability < self.config.layerdrop
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                layer_outputs = layer(
                    hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
UniSpeechEncoder
python
kamyu104__LeetCode-Solutions
Python/second-minimum-time-to-reach-destination.py
{ "start": 107, "end": 1933 }
class ____(object):
    def secondMinimum(self, n, edges, time, change):
        """
        :type n: int
        :type edges: List[List[int]]
        :type time: int
        :type change: int
        :rtype: int
        """
        # Template:
        # https://github.com/kamyu104/LeetCode-Solutions/blob/master/Python/find-if-path-exists-in-graph.py
        def bi_bfs(adj, start, target):
            left, right = {start}, {target}
            lookup = set()
            result = steps = 0
            while left and (not result or result+2 > steps):  # modified
                for u in left:
                    lookup.add(u)
                new_left = set()
                for u in left:
                    if u in right:
                        if not result:  # modified
                            result = steps
                        elif result < steps:  # modified
                            return result+1
                    for v in adj[u]:
                        if v in lookup:
                            continue
                        new_left.add(v)
                left = new_left
                steps += 1
                if len(left) > len(right):
                    left, right = right, left
            return result+2  # modified

        def calc_time(time, change, dist):
            result = 0
            for _ in xrange(dist):
                if result//change%2:
                    result = (result//change+1)*change
                result += time
            return result

        adj = [[] for _ in xrange(n)]
        for u, v in edges:
            adj[u-1].append(v-1)
            adj[v-1].append(u-1)
        return calc_time(time, change, bi_bfs(adj, 0, n-1))

# Time:  O(|V| + |E|) = O(|E|) since graph is connected, O(|E|) >= O(|V|)
# Space: O(|V| + |E|) = O(|E|)
Solution
python
openai__openai-python
src/openai/resources/chat/completions/messages.py
{ "start": 7570, "end": 7783 }
class ____:
    def __init__(self, messages: Messages) -> None:
        self._messages = messages

        self.list = to_streamed_response_wrapper(
            messages.list,
        )
MessagesWithStreamingResponse
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py
{ "start": 6853, "end": 10673 }
class ____(test.TestCase):
  """Test that the order of addition is done as specified by tiers."""

  def test_tier_0_additions_done_in_tier_0(self):
    diag1 = linalg.LinearOperatorDiag([1.])
    diag2 = linalg.LinearOperatorDiag([1.])
    diag3 = linalg.LinearOperatorDiag([1.])
    addition_tiers = [
        [linear_operator_addition._AddAndReturnDiag()],
        [_BadAdder()],
    ]
    # Should not raise since all were added in tier 0, and tier 1 (with the
    # _BadAdder) was never reached.
    op_sum = add_operators([diag1, diag2, diag3], addition_tiers=addition_tiers)
    self.assertEqual(1, len(op_sum))
    self.assertIsInstance(op_sum[0], linalg.LinearOperatorDiag)

  def test_tier_1_additions_done_by_tier_1(self):
    diag1 = linalg.LinearOperatorDiag([1.])
    diag2 = linalg.LinearOperatorDiag([1.])
    tril = linalg.LinearOperatorLowerTriangular([[1.]])
    addition_tiers = [
        [linear_operator_addition._AddAndReturnDiag()],
        [linear_operator_addition._AddAndReturnTriL()],
        [_BadAdder()],
    ]
    # Should not raise since all were added by tier 1, and the
    # _BadAdder was never reached.
    op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
    self.assertEqual(1, len(op_sum))
    self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)

  def test_tier_1_additions_done_by_tier_1_with_order_flipped(self):
    diag1 = linalg.LinearOperatorDiag([1.])
    diag2 = linalg.LinearOperatorDiag([1.])
    tril = linalg.LinearOperatorLowerTriangular([[1.]])
    addition_tiers = [
        [linear_operator_addition._AddAndReturnTriL()],
        [linear_operator_addition._AddAndReturnDiag()],
        [_BadAdder()],
    ]
    # Tier 0 could convert to TriL, and this converted everything to TriL,
    # including the Diags.
    # Tier 1 was never used.
    # Tier 2 was never used (therefore, _BadAdder didn't raise).
    op_sum = add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
    self.assertEqual(1, len(op_sum))
    self.assertIsInstance(op_sum[0], linalg.LinearOperatorLowerTriangular)

  @test_util.run_deprecated_v1
  def test_cannot_add_everything_so_return_more_than_one_operator(self):
    diag1 = linalg.LinearOperatorDiag([1.])
    diag2 = linalg.LinearOperatorDiag([2.])
    tril5 = linalg.LinearOperatorLowerTriangular([[5.]])
    addition_tiers = [
        [linear_operator_addition._AddAndReturnDiag()],
    ]
    # Tier 0 (the only tier) can only convert to Diag, so it combines the two
    # diags, but the TriL is unchanged.
    # Result should contain two operators, one Diag, one TriL.
    op_sum = add_operators([diag1, diag2, tril5], addition_tiers=addition_tiers)
    self.assertEqual(2, len(op_sum))
    found_diag = False
    found_tril = False
    with self.cached_session():
      for op in op_sum:
        if isinstance(op, linalg.LinearOperatorDiag):
          found_diag = True
          self.assertAllClose([[3.]], op.to_dense())
        if isinstance(op, linalg.LinearOperatorLowerTriangular):
          found_tril = True
          self.assertAllClose([[5.]], op.to_dense())
      self.assertTrue(found_diag and found_tril)

  def test_intermediate_tier_is_not_skipped(self):
    diag1 = linalg.LinearOperatorDiag([1.])
    diag2 = linalg.LinearOperatorDiag([1.])
    tril = linalg.LinearOperatorLowerTriangular([[1.]])
    addition_tiers = [
        [linear_operator_addition._AddAndReturnDiag()],
        [_BadAdder()],
        [linear_operator_addition._AddAndReturnTriL()],
    ]
    # tril cannot be added in tier 0, and the intermediate tier 1 with the
    # BadAdder will catch it and raise.
    with self.assertRaisesRegex(AssertionError, "BadAdder.can_add called"):
      add_operators([diag1, diag2, tril], addition_tiers=addition_tiers)
LinearOperatorOrderOfAdditionTest
python
pytorch__pytorch
torch/fx/experimental/migrate_gradual_types/constraint.py
{ "start": 1609, "end": 1816 }
class ____(Constraint):
    """
    True
    """

    def __init__(self) -> None:
        pass

    def __eq__(self, other):
        return isinstance(other, T)

    def __repr__(self):
        return "True"
T
python
tensorflow__tensorflow
tensorflow/lite/python/interpreter_test.py
{ "start": 17278, "end": 19859 }
class ____(test_util.TensorFlowTestCase):

  def setUp(self):
    super(InterpreterTensorAccessorTest, self).setUp()
    self.interpreter = interpreter_wrapper.Interpreter(
        model_path=resource_loader.get_path_to_datafile(
            'testdata/permute_float.tflite'))
    self.interpreter.allocate_tensors()
    self.input0 = self.interpreter.get_input_details()[0]['index']
    self.initial_data = np.array([[-1., -2., -3., -4.]], np.float32)

  def testTensorAccessor(self):
    """Check that tensor returns a reference."""
    array_ref = self.interpreter.tensor(self.input0)
    np.copyto(array_ref(), self.initial_data)
    self.assertAllEqual(array_ref(), self.initial_data)
    self.assertAllEqual(
        self.interpreter.get_tensor(self.input0), self.initial_data)

  def testGetTensorAccessor(self):
    """Check that get_tensor returns a copy."""
    self.interpreter.set_tensor(self.input0, self.initial_data)
    array_initial_copy = self.interpreter.get_tensor(self.input0)
    new_value = np.add(1., array_initial_copy)
    self.interpreter.set_tensor(self.input0, new_value)
    self.assertAllEqual(array_initial_copy, self.initial_data)
    self.assertAllEqual(self.interpreter.get_tensor(self.input0), new_value)

  def testBase(self):
    self.assertTrue(self.interpreter._safe_to_run())
    _ = self.interpreter.tensor(self.input0)
    self.assertTrue(self.interpreter._safe_to_run())
    in0 = self.interpreter.tensor(self.input0)()
    self.assertFalse(self.interpreter._safe_to_run())
    in0b = self.interpreter.tensor(self.input0)()
    self.assertFalse(self.interpreter._safe_to_run())
    # Now get rid of the buffers so that we can evaluate.
    del in0
    del in0b
    self.assertTrue(self.interpreter._safe_to_run())

  def testBaseProtectsFunctions(self):
    in0 = self.interpreter.tensor(self.input0)()
    # Make sure we get an exception if we try to run an unsafe operation
    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
      _ = self.interpreter.allocate_tensors()
    # Make sure we get an exception if we try to run an unsafe operation
    with self.assertRaisesRegex(RuntimeError, 'There is at least 1 reference'):
      _ = self.interpreter.invoke()  # pylint: disable=assignment-from-no-return
    # Now test that we can run
    del in0  # this is our only buffer reference, so now it is safe to change
    in0safe = self.interpreter.tensor(self.input0)
    _ = self.interpreter.allocate_tensors()
    del in0safe  # make sure in0Safe is held but lint doesn't complain
InterpreterTensorAccessorTest
python
ray-project__ray
python/ray/data/_internal/datasource/torch_datasource.py
{ "start": 308, "end": 2199 }
class ____(Datasource):
    """Torch datasource, for reading from `Torch datasets
    <https://pytorch.org/docs/stable/data.html/>`_.

    This datasource implements a streaming read using a single read task.
    """

    def __init__(
        self,
        dataset: "torch.utils.data.Dataset",
    ):
        self._dataset = dataset

    def get_read_tasks(
        self, parallelism: int, per_task_row_limit: Optional[int] = None
    ):
        assert parallelism == 1
        meta = BlockMetadata(
            # Note: avoid len(self._dataset) because it will trigger
            # iterating through IterableDataset, which can cause OOM.
            num_rows=None,
            size_bytes=None,
            input_files=None,
            exec_stats=None,
        )
        read_task = ReadTask(
            lambda subset=self._dataset: _read_subset(
                subset,
            ),
            metadata=meta,
            per_task_row_limit=per_task_row_limit,
        )

        return [read_task]

    def estimate_inmemory_data_size(self):
        return None


def _read_subset(subset: "torch.utils.data.Subset"):
    batch = []

    # Get items from dataset based on its type
    if hasattr(subset, "__iter__"):
        # IterableDataset: Use the iterator directly
        items = subset
    else:
        # Map-style dataset: Respect __len__
        items = (subset[i] for i in range(len(subset)))

    # Process items in batches
    for item in items:
        batch.append(item)
        if len(batch) == TORCH_DATASOURCE_READER_BATCH_SIZE:
            builder = DelegatingBlockBuilder()
            builder.add_batch({"item": batch})
            yield builder.build()
            batch.clear()

    # Handle any remaining items
    if len(batch) > 0:
        builder = DelegatingBlockBuilder()
        builder.add_batch({"item": batch})
        yield builder.build()
TorchDatasource
python
django__django
tests/template_tests/test_partials.py
{ "start": 5527, "end": 7753 }
class ____(TestCase):
    def override_get_template(self, **kwargs):
        class TemplateWithCustomAttrs:
            def __init__(self, **kwargs):
                for k, v in kwargs.items():
                    setattr(self, k, v)

            def render(self, context):
                return "rendered content"

        template = TemplateWithCustomAttrs(**kwargs)
        origin = self.id()
        return mock.patch.object(
            engine.engine,
            "find_template",
            return_value=(template, origin),
        )

    def test_template_without_extra_data_attribute(self):
        partial_name = "some_partial_name"
        with (
            self.override_get_template(),
            self.assertRaisesMessage(TemplateDoesNotExist, partial_name),
        ):
            engine.get_template(f"some_template.html#{partial_name}")

    def test_template_extract_extra_data_robust(self):
        partial_name = "some_partial_name"
        for extra_data in (
            None,
            0,
            [],
            {},
            {"wrong-key": {}},
            {"partials": None},
            {"partials": {}},
            {"partials": []},
            {"partials": 0},
        ):
            with (
                self.subTest(extra_data=extra_data),
                self.override_get_template(extra_data=extra_data),
                self.assertRaisesMessage(TemplateDoesNotExist, partial_name),
            ):
                engine.get_template(f"template.html#{partial_name}")

    def test_nested_partials_rendering_with_context(self):
        template_source = """
        {% partialdef outer inline %}
        Hello {{ name }}!
        {% partialdef inner inline %}
        Your age is {{ age }}.
        {% endpartialdef inner %}
        Nice to meet you.
        {% endpartialdef outer %}
        """
        template = Template(template_source, origin=Origin(name="template.html"))
        context = Context({"name": "Alice", "age": 25})
        rendered = template.render(context)

        self.assertIn("Hello Alice!", rendered)
        self.assertIn("Your age is 25.", rendered)
        self.assertIn("Nice to meet you.", rendered)
RobustPartialHandlingTests
python
dateutil__dateutil
tests/test_rrule.py
{ "start": 214327, "end": 216396 }
class ____(unittest.TestCase):
    def testInvalidNthWeekday(self):
        with self.assertRaises(ValueError):
            FR(0)

    def testWeekdayCallable(self):
        # Calling a weekday instance generates a new weekday instance with the
        # value of n changed.
        from dateutil.rrule import weekday
        self.assertEqual(MO(1), weekday(0, 1))

        # Calling a weekday instance with the identical n returns the original
        # object
        FR_3 = weekday(4, 3)
        self.assertIs(FR_3(3), FR_3)

    def testWeekdayEquality(self):
        # Two weekday objects are not equal if they have different values for n
        self.assertNotEqual(TH, TH(-1))
        self.assertNotEqual(SA(3), SA(2))

    def testWeekdayEqualitySubclass(self):
        # Two weekday objects are equal if their "weekday" and "n" attributes
        # are available and the same
        class BasicWeekday(object):
            def __init__(self, weekday):
                self.weekday = weekday

        class BasicNWeekday(BasicWeekday):
            def __init__(self, weekday, n=None):
                super(BasicNWeekday, self).__init__(weekday)
                self.n = n

        MO_Basic = BasicWeekday(0)

        self.assertNotEqual(MO, MO_Basic)
        self.assertNotEqual(MO(1), MO_Basic)

        TU_BasicN = BasicNWeekday(1)

        self.assertEqual(TU, TU_BasicN)
        self.assertNotEqual(TU(3), TU_BasicN)

        WE_Basic3 = BasicNWeekday(2, 3)

        self.assertEqual(WE(3), WE_Basic3)
        self.assertNotEqual(WE(2), WE_Basic3)

    def testWeekdayReprNoN(self):
        no_n_reprs = ('MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU')
        no_n_wdays = (MO, TU, WE, TH, FR, SA, SU)

        for repstr, wday in zip(no_n_reprs, no_n_wdays):
            self.assertEqual(repr(wday), repstr)

    def testWeekdayReprWithN(self):
        with_n_reprs = ('WE(+1)', 'TH(-2)', 'SU(+3)')
        with_n_wdays = (WE(1), TH(-2), SU(+3))

        for repstr, wday in zip(with_n_reprs, with_n_wdays):
            self.assertEqual(repr(wday), repstr)
WeekdayTest
python
python__mypy
test-data/unit/plugins/add_overloaded_method.py
{ "start": 236, "end": 1447 }
class ____(Plugin):
    def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
        if "AddOverloadedMethod" in fullname:
            return add_overloaded_method_hook
        return None


def add_overloaded_method_hook(ctx: ClassDefContext) -> None:
    add_overloaded_method_to_class(ctx.api, ctx.cls, "method", _generate_method_specs(ctx))
    add_overloaded_method_to_class(
        ctx.api, ctx.cls, "clsmethod", _generate_method_specs(ctx), is_classmethod=True
    )
    add_overloaded_method_to_class(
        ctx.api, ctx.cls, "stmethod", _generate_method_specs(ctx), is_staticmethod=True
    )


def _generate_method_specs(ctx: ClassDefContext) -> list[MethodSpec]:
    return [
        MethodSpec(
            args=[Argument(Var("arg"), ctx.api.named_type("builtins.int"), None, ARG_POS)],
            return_type=ctx.api.named_type("builtins.str"),
        ),
        MethodSpec(
            args=[Argument(Var("arg"), ctx.api.named_type("builtins.str"), None, ARG_POS)],
            return_type=ctx.api.named_type("builtins.int"),
        ),
    ]


def plugin(version: str) -> type[OverloadedMethodPlugin]:
    return OverloadedMethodPlugin
OverloadedMethodPlugin
python
getsentry__sentry
tests/sentry/users/models/test_user.py
{ "start": 8218, "end": 20825 }
class ____(BackupTestCase, HybridCloudTestMixin):
    def verify_model_existence_by_user(
        self, models: list[type[Model]], *, present: list[User], absent: list[User]
    ) -> None:
        for model in sorted(models, key=lambda x: get_model_name(x)):
            model_relations = dependencies()[get_model_name(model)]
            user_refs = [k for k, v in model_relations.foreign_keys.items() if v.model == User]
            is_region_model = SiloMode.REGION in model_relations.silos
            with assume_test_silo_mode(SiloMode.REGION if is_region_model else SiloMode.CONTROL):
                for present_user in present:
                    q = Q()
                    for ref in user_refs:
                        args = {}
                        args[f"{ref}"] = present_user.id
                        q |= Q(**args)
                    assert model.objects.filter(q).count() > 0, (
                        "There seems to be an issue with merging objects from one user to "
                        "another. This can be fixed by adding the model to the model_list in "
                        "merge_users() in src/sentry/organizations/services/organization/impl.py, "
                        "which then takes care of merging objects that have a foreign key on "
                        "the user_id."
                    )
                for absent_user in absent:
                    q = Q()
                    for ref in user_refs:
                        args = {}
                        args[f"{ref}"] = absent_user.id
                        q |= Q(**args)
                    assert not model.objects.filter(q).exists()

    def test_simple(self) -> None:
        from_user = self.create_exhaustive_user("foo@example.com")
        self.create_exhaustive_api_keys_for_user(from_user)
        to_user = self.create_exhaustive_user("bar@example.com")
        self.create_exhaustive_api_keys_for_user(to_user)
        org = self.create_organization(name="simple-org")
        proj = self.create_project(name="simple-proj", organization=org)
        self.create_exhaustive_organization_auth(from_user, org, proj)

        Authenticator.objects.get(user=from_user, type=1)
        to_auth_dup = Authenticator.objects.get(user=to_user, type=1)
        from_auth_uniq = Authenticator.objects.create(user=from_user, type=2, config={})
        to_auth_uniq = Authenticator.objects.create(user=to_user, type=3, config={})

        from_user.merge_to(to_user)

        with assume_test_silo_mode(SiloMode.REGION):
            assert not OrganizationMember.objects.filter(user_id=from_user.id).exists()
            for member in OrganizationMember.objects.filter(user_id=to_user.id):
                self.assert_org_member_mapping(org_member=member)

        assert UserEmail.objects.filter(
            user=to_user, email=to_user.email, is_verified=True
        ).exists()
        assert UserEmail.objects.filter(
            user=to_user, email=from_user.email, is_verified=True
        ).exists()

        # dupes shouldn't get merged.
        assert Authenticator.objects.filter(user=to_user, id=to_auth_dup.id).exists()
        assert Authenticator.objects.filter(user=to_user, id=from_auth_uniq.id).exists()
        assert Authenticator.objects.filter(user=to_user, id=to_auth_uniq.id).exists()

        assert AuthIdentity.objects.filter(user=to_user).count() == 1
        assert not AuthIdentity.objects.filter(user=from_user).exists()

    def test_merge_handles_groupseen_conflicts(self) -> None:
        from_user = self.create_user("from-user@example.com")
        to_user = self.create_user("to-user@example.com")
        org = self.create_organization(name="conflict-org")
        with outbox_runner():
            with assume_test_silo_mode(SiloMode.REGION):
                self.create_member(user=from_user, organization=org)

        with assume_test_silo_mode(SiloMode.REGION):
            project = self.create_project(organization=org)
            group = self.create_group(project=project)

            # Create conflicting GroupSeen entries for both users on the same group
            GroupSeen.objects.create(project=project, group=group, user_id=from_user.id)
            GroupSeen.objects.create(project=project, group=group, user_id=to_user.id)

        # Execute the merge; should not raise and should dedupe GroupSeen
        with outbox_runner():
            from_user.merge_to(to_user)

        with assume_test_silo_mode(SiloMode.REGION):
            assert not GroupSeen.objects.filter(group=group, user_id=from_user.id).exists()
            assert GroupSeen.objects.filter(group=group, user_id=to_user.id).count() == 1

    @expect_models(
        ORG_MEMBER_MERGE_TESTED,
        OrgAuthToken,
        OrganizationMember,
        OrganizationMemberMapping,
    )
    def test_duplicate_memberships(self, expected_models: list[type[Model]]) -> None:
        from_user = self.create_user("foo@example.com")
        to_user = self.create_user("bar@example.com")

        org_slug = "org-with-duplicate-members-being-merged"
        org = self.create_organization(name=org_slug)
        team_1 = self.create_team(organization=org)
        team_2 = self.create_team(organization=org)
        team_3 = self.create_team(organization=org)
        all_teams = [team_1, team_2, team_3]
        from_user_member = self.create_member(
            organization=org, user=from_user, role="owner", teams=[team_1, team_2]
        )
        # to_user should have less roles
        to_user_member = self.create_member(
            organization=org, user=to_user, role="member", teams=[team_2, team_3]
        )
        OrgAuthToken.objects.create(
            created_by=from_user,
            organization_id=org.id,
            name=f"token 1 for {org.slug}",
            token_hashed=f"ABCDEF{org.slug}{from_user.id}",
            token_last_characters="xyz1",
            scope_list=["org:ci"],
            date_last_used=None,
        )
        OrgAuthToken.objects.create(
            created_by=to_user,
            organization_id=org.id,
            name=f"token 1 for {org.slug}",
            token_hashed=f"ABCDEF{org.slug}{to_user.id}",
            token_last_characters="xyz1",
            scope_list=["org:ci"],
            date_last_used=None,
        )

        # Access requests should cancel out once users are merged.
        with assume_test_silo_mode(SiloMode.REGION):
            OrganizationAccessRequest.objects.create(
                team=team_1, member=from_user_member, requester_id=to_user.id
            )
            OrganizationAccessRequest.objects.create(
                team=team_3, member=to_user_member, requester_id=from_user.id
            )
            assert OrganizationAccessRequest.objects.filter(team__in=all_teams).count() == 2

        self.verify_model_existence_by_user(
            expected_models, present=[from_user, to_user], absent=[]
        )
        with outbox_runner():
            from_user.merge_to(to_user)
        self.verify_model_existence_by_user(expected_models, present=[to_user], absent=[from_user])

        with assume_test_silo_mode(SiloMode.REGION):
            for member in OrganizationMember.objects.filter(user_id__in=[from_user.id, to_user.id]):
                self.assert_org_member_mapping(org_member=member)

            member = OrganizationMember.objects.get(user_id=to_user.id)
            assert member.role == "owner"
            assert list(member.teams.all().order_by("pk")) == all_teams

        with assume_test_silo_mode(SiloMode.REGION):
            assert not OrganizationAccessRequest.objects.filter(team__in=all_teams).exists()

    @expect_models(
        ORG_MEMBER_MERGE_TESTED,
        Activity,
        AlertRule,
        AlertRuleActivity,
        CustomDynamicSamplingRule,
        Dashboard,
        DashboardFavoriteUser,
        GroupAssignee,
        GroupBookmark,
        GroupSeen,
        GroupShare,
        GroupSearchView,
        GroupSearchViewLastVisited,
        GroupSearchViewStarred,
        GroupSubscription,
        IncidentActivity,
        Monitor,
        OrganizationAccessRequest,
        OrganizationMember,
        OrgAuthToken,
        ProjectBookmark,
        RecentSearch,
        Rule,
        RuleActivity,
        RuleSnooze,
        SavedSearch,
    )
    def test_only_source_user_is_member_of_organization(
        self, expected_models: list[type[Model]]
    ) -> None:
        from_user = self.create_exhaustive_user("foo@example.com")
        to_user = self.create_exhaustive_user("bar@example.com")
        org_slug = "org-only-from-user-is-member-of"
        self.create_exhaustive_organization(
            slug=org_slug, owner=from_user, member=self.create_user("random@example.com")
        )

        self.verify_model_existence_by_user(expected_models, present=[from_user], absent=[to_user])
        with outbox_runner():
            from_user.merge_to(to_user)
        self.verify_model_existence_by_user(expected_models, present=[to_user], absent=[from_user])

    @expect_models(
        ORG_MEMBER_MERGE_TESTED,
        Activity,
        AlertRule,
        AlertRuleActivity,
        CustomDynamicSamplingRule,
        Dashboard,
        DashboardFavoriteUser,
        GroupAssignee,
        GroupBookmark,
        GroupSeen,
        GroupShare,
        GroupSearchView,
        GroupSearchViewLastVisited,
        GroupSearchViewStarred,
        GroupSubscription,
        IncidentActivity,
        Monitor,
        OrganizationAccessRequest,
        OrganizationMember,
        OrgAuthToken,
        ProjectBookmark,
        RecentSearch,
        Rule,
        RuleActivity,
        RuleSnooze,
        SavedSearch,
    )
    def test_both_users_are_members_of_organization(
        self, expected_models: list[type[Model]]
    ) -> None:
        from_user = self.create_exhaustive_user("foo@example.com")
        to_user = self.create_exhaustive_user("bar@example.com")
        random_user = self.create_user("random@example.com")
        org_slug = "org-both-users-are-member-of"
        org = self.create_exhaustive_organization(
            slug=org_slug, owner=from_user, member=to_user, other_members=[random_user]
        )
        with assume_test_silo_mode(SiloMode.REGION):
            from_member = OrganizationMember.objects.get(organization=org, user_id=from_user.id)
            rand_member = OrganizationMember.objects.get(organization=org, user_id=random_user.id)
            team_1 = self.create_team(organization=org, members=[from_member])
            team_2 = self.create_team(organization=org, members=[rand_member])
            OrganizationAccessRequest.objects.create(
                member=from_member,
                team=team_1,
                requester_id=random_user.id,
            )
            OrganizationAccessRequest.objects.create(
                member=rand_member,
                team=team_2,
                requester_id=from_user.id,
            )

        self.verify_model_existence_by_user(expected_models, present=[from_user], absent=[])
        with outbox_runner():
            from_user.merge_to(to_user)
        self.verify_model_existence_by_user(expected_models, present=[to_user], absent=[from_user])

        with assume_test_silo_mode(SiloMode.REGION):
            to_member = OrganizationMember.objects.get(organization=org, user_id=to_user.id)
            assert OrganizationAccessRequest.objects.filter(
                member=to_member,
                requester_id=random_user.id,
            ).exists()
            assert OrganizationAccessRequest.objects.filter(
                requester_id=to_user.id,
            ).exists()

    @expect_models(ORG_MEMBER_MERGE_TESTED, OrganizationMemberInvite)
    def test_member_invite(self, expected_models: list[type[Model]]) -> None:
        """
        Member invite only depends on email and thus should not be transferred to the to user.
        """
        from_user = self.create_exhaustive_user("foo@example.com")
        to_user = self.create_exhaustive_user("bar@example.com")
        org_slug = "hojicha"
        org = self.create_organization(name=org_slug)
        with assume_test_silo_mode(SiloMode.REGION):
            self.create_member_invite(organization=org, email=from_user.email)

        with outbox_runner():
            from_user.merge_to(to_user)

        with assume_test_silo_mode(SiloMode.REGION):
            assert OrganizationMemberInvite.objects.filter(
                organization=org, email=from_user.email
            ).exists()
            assert not OrganizationMemberInvite.objects.filter(
                organization=org, email=to_user.email
            ).exists()
UserMergeToTest
python
readthedocs__readthedocs.org
readthedocs/projects/validators.py
{ "start": 1141, "end": 8148 }
class ____: disallow_relative_url = True # Pattern for ``git@github.com:user/repo`` pattern re_git_user = re.compile(r"^[\w]+@.+") def __call__(self, value): public_schemes = ["https", "http", "git", "ftps", "ftp"] private_schemes = ["ssh", "ssh+git"] local_schemes = ["file"] valid_schemes = public_schemes if settings.ALLOW_PRIVATE_REPOS: valid_schemes += private_schemes if settings.DEBUG: # allow `file://` urls in dev valid_schemes += local_schemes url = urlparse(value) # Malicious characters go first if "&&" in value or "|" in value: raise ValidationError(_("Invalid character in the URL")) if url.scheme in valid_schemes: return value # Repo URL is not a supported scheme at this point, but there are # several cases where we might support it # Launchpad if value.startswith("lp:"): return value # Relative paths are conditionally supported if value.startswith(".") and not self.disallow_relative_url: return value # SSH cloning and ``git@github.com:user/project.git`` if self.re_git_user.search(value) or url.scheme in private_schemes: if settings.ALLOW_PRIVATE_REPOS: return value # Throw a more helpful error message raise ValidationError("Manual cloning via SSH is not supported") # No more valid URLs without supported URL schemes raise ValidationError(_("Invalid scheme for URL")) validate_repository_url = RepositoryURLValidator() def validate_build_config_file(path): """ Validate that user input is a good relative repository path. By 'good', we mean that it's a valid unix path, but not all valid unix paths are good repository paths. This validator checks for common mistakes. """ invalid_characters = "[]{}()`'\"\\%&<>|," valid_filenames = [".readthedocs.yaml"] if path.startswith("/"): raise ValidationError( _( "Use a relative path. It should not begin with '/'. " "The path is relative to the root of your repository." ), code="path_invalid", ) if path.endswith("/"): raise ValidationError( _("The path cannot end with '/', as it cannot be a directory."), code="path_invalid", ) if ".." in path: raise ValidationError( _("Found invalid sequence in path: '..'"), code="path_invalid", ) if any(ch in path for ch in invalid_characters): raise ValidationError( format_html( _( "Found invalid character. Avoid these characters: " "<code>{invalid_characters}</code>" ), invalid_characters=invalid_characters, ), code="path_invalid", ) is_valid = any(fn == path for fn in valid_filenames) or any( path.endswith(f"/{fn}") for fn in valid_filenames ) if not is_valid and len(valid_filenames) == 1: raise ValidationError( format_html( _("The only allowed filename is <code>{filename}</code>."), filename=valid_filenames[0], ), code="path_invalid", ) if not is_valid: raise ValidationError( format_html( _("The only allowed filenames are <code>{filenames}</code>."), filenames=", ".join(valid_filenames), ), code="path_invalid", ) def validate_custom_prefix(project, prefix): """ Validate and clean the custom path prefix for a project. We validate that the prefix is defined in the correct project. Prefixes must be defined in the main project if the project is a translation. Raises ``ValidationError`` if the prefix is invalid. 
:param project: Project to validate the prefix :param prefix: Prefix to validate """ if not prefix: return if project.main_language_project: raise ValidationError( "This project is a translation of another project, " "the custom prefix must be defined in the main project.", code="invalid_project", ) return _clean_prefix(prefix) def validate_custom_subproject_prefix(project, prefix): """ Validate and clean the custom subproject prefix for a project. We validate that the subproject prefix is defined in a super project, not in a subproject. Raises ``ValidationError`` if the prefix is invalid. :param project: Project to validate the prefix :param prefix: Subproject prefix to validate """ if not prefix: return main_project = project.main_language_project or project if main_project.is_subproject: raise ValidationError( "This project is a subproject, the subproject prefix must " 'be defined in the parent project "custom_subproject_prefix" attribute.', code="invalid_project", ) prefix = _clean_prefix(prefix) project_prefix = project.custom_prefix or "/" # If the custom project prefix and subproject prefix overlap, # we need to check that the first non-overlapping component isn't a valid language. # Since this will result in an ambiguous path that can't be resolved as a subproject. # This check is only needed if the project supports translations. if project.supports_translations and prefix.startswith(project_prefix): first_component = prefix.removeprefix(project_prefix).split("/")[0] valid_languages = [language[0] for language in LANGUAGES] if first_component in valid_languages: raise ValidationError( "Ambiguous path from overlapping prefixes. The component after " f"{project_prefix} from the custom subproject prefix can't be a language.", code="ambiguous_path", ) return prefix def _clean_prefix(prefix): """ Validate and clean a prefix. Prefixes must: - Start and end with a slash :param prefix: Prefix to clean and validate """ # TODO we could validate that only alphanumeric characters are used? prefix = prefix.strip("/") if not prefix: return "/" return f"/{prefix}/" def validate_environment_variable_size(project, new_env_value, error_class=ValidationError): existing_size = ( project.environmentvariable_set.annotate(size=Length("value")).aggregate( total_size=Sum("size") )["total_size"] or 0 ) if existing_size + len(new_env_value) > MAX_SIZE_ENV_VARS_PER_PROJECT: raise error_class( _("The total size of all environment variables in the project cannot exceed 256 KB.") )
RepositoryURLValidator
python
Pylons__pyramid
tests/test_predicates.py
{ "start": 5517, "end": 6862 }
class ____(unittest.TestCase): def _makeOne(self, val): from pyramid.predicates import MatchParamPredicate return MatchParamPredicate(val, None) def test___call___true_single(self): inst = self._makeOne('abc=1') request = Dummy() request.matchdict = {'abc': '1'} result = inst(None, request) self.assertTrue(result) def test___call___true_multi(self): inst = self._makeOne(('abc=1', 'def=2')) request = Dummy() request.matchdict = {'abc': '1', 'def': '2'} result = inst(None, request) self.assertTrue(result) def test___call___false(self): inst = self._makeOne('abc=1') request = Dummy() request.matchdict = {} result = inst(None, request) self.assertFalse(result) def test___call___matchdict_is_None(self): inst = self._makeOne('abc=1') request = Dummy() request.matchdict = None result = inst(None, request) self.assertFalse(result) def test_text(self): inst = self._makeOne(('def= 1', 'abc =2')) self.assertEqual(inst.text(), 'match_param abc=2,def=1') def test_phash(self): inst = self._makeOne(('def= 1', 'abc =2')) self.assertEqual(inst.phash(), 'match_param abc=2,def=1')
TestMatchParamPredicate
python
astropy__astropy
astropy/cosmology/_src/tests/flrw/test_flrw.py
{ "start": 589, "end": 3834 }
class ____(FLRWTest): """Test :class:`astropy.cosmology.FLRW`.""" abstract_w = True def setup_class(self): """ Setup for testing. FLRW is abstract, so tests are done on a subclass. """ super().setup_class(self) # make sure SubCosmology is known _COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW self.cls = SubFLRW def teardown_class(self): super().teardown_class(self) _COSMOLOGY_CLASSES.pop("SubFLRW", None) # =============================================================== # Method & Attribute Tests # --------------------------------------------------------------- # Methods def test_w(self, cosmo): """Test abstract :meth:`astropy.cosmology.FLRW.w`.""" with pytest.raises(NotImplementedError, match="not implemented"): cosmo.w(1) def test_Otot(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.Otot`.""" exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): assert cosmo.Otot(1) def test_efunc_vs_invefunc(self, cosmo): """ Test that efunc and inv_efunc give inverse values. Here they just fail b/c no ``w(z)`` or no scipy. """ exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError with pytest.raises(exception): cosmo.efunc(0.5) with pytest.raises(exception): cosmo.inv_efunc(0.5) @pytest.mark.skip(reason="w(z) is abstract") def test_luminosity_distance_pandas(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.luminosity_distance`.""" _FLRW_redshift_methods = get_redshift_methods( FLRW, include_private=True, include_z2=False ) - {"w"} @pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed") @pytest.mark.parametrize("z, exc", invalid_zs) @pytest.mark.parametrize("method", sorted(_FLRW_redshift_methods)) def test_redshift_method_bad_input(self, cosmo, method, z, exc): """Test all the redshift methods for bad input.""" with pytest.raises(exc): getattr(cosmo, method)(z) # =============================================================== # Usage Tests @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") @pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale")) def test_distance_broadcast(self, cosmo, method): with pytest.raises(NotImplementedError): super().test_distance_broadcast(cosmo, method) @pytest.mark.skip(reason="w(z) is abstract") def test_comoving_distance_1arg_equal_to_2arg(self, cosmo): """Test :meth:`astropy.cosmology.FLRW.luminosity_distance`.""" @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.") @pytest.mark.parametrize( ("args", "kwargs", "expected"), [((70, 0.27, 0.73), {"Tcmb0": 3.0, "Ob0": 0.03}, None)], ) def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected): with pytest.raises(NotImplementedError): super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected)
TestFLRW
python
dagster-io__dagster
python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/source_position.py
{ "start": 294, "end": 460 }
class ____(NamedTuple): filename: str start: LineCol end: LineCol def __str__(self): return f"{self.filename}:{self.start.line}"
SourcePosition
python
davidhalter__parso
parso/python/tree.py
{ "start": 15795, "end": 19347 }
class ____(ClassOrFunc): """ Used to store the parsed contents of a python function. Children:: 0. <Keyword: def> 1. <Name> 2. parameter list (including open-paren and close-paren <Operator>s) 3. or 5. <Operator: :> 4. or 6. Node() representing function body 3. -> (if annotation is also present) 4. annotation (if present) """ type = 'funcdef' __slots__ = () def __init__(self, children): super().__init__(children) parameters = self.children[2] # After `def foo` parameters_children = parameters.children[1:-1] # If input parameters list already has Param objects, keep it as is; # otherwise, convert it to a list of Param objects. if not any(isinstance(child, Param) for child in parameters_children): parameters.children[1:-1] = _create_params(parameters, parameters_children) def _get_param_nodes(self): return self.children[2].children def get_params(self): """ Returns a list of `Param()`. """ return [p for p in self._get_param_nodes() if p.type == 'param'] @property def name(self): return self.children[1] # First token after `def` def iter_yield_exprs(self): """ Returns a generator of `yield_expr`. """ def scan(children): for element in children: if element.type in ('classdef', 'funcdef', 'lambdef'): continue try: nested_children = element.children except AttributeError: if element.value == 'yield': if element.parent.type == 'yield_expr': yield element.parent else: yield element else: yield from scan(nested_children) return scan(self.children) def iter_return_stmts(self): """ Returns a generator of `return_stmt`. """ def scan(children): for element in children: if element.type == 'return_stmt' \ or element.type == 'keyword' and element.value == 'return': yield element if element.type in _RETURN_STMT_CONTAINERS: yield from scan(element.children) return scan(self.children) def iter_raise_stmts(self): """ Returns a generator of `raise_stmt`. Includes raise statements inside try-except blocks """ def scan(children): for element in children: if element.type == 'raise_stmt' \ or element.type == 'keyword' and element.value == 'raise': yield element if element.type in _RETURN_STMT_CONTAINERS: yield from scan(element.children) return scan(self.children) def is_generator(self): """ :return bool: Checks if a function is a generator or not. """ return next(self.iter_yield_exprs(), None) is not None @property def annotation(self): """ Returns the test node after `->` or `None` if there is no annotation. """ try: if self.children[3] == "->": return self.children[4] assert self.children[3] == ":" return None except IndexError: return None
Function
python
facebook__pyre-check
tools/generate_taint_models/model.py
{ "start": 1204, "end": 5133 }
class ____(Model): callable_name: str parameters: List[Parameter] annotations: AnnotationSpecification whitelist: WhitelistSpecification returns: Optional[str] = None def __init__( self, parameter_annotation: Optional[ParameterAnnotation] = None, returns: Optional[str] = None, parameter_type_whitelist: Optional[Iterable[str]] = None, parameter_name_whitelist: Optional[Set[str]] = None, annotations: Optional[AnnotationSpecification] = None, whitelist: Optional[WhitelistSpecification] = None, ) -> None: if annotations: self.annotations = annotations else: self.annotations = AnnotationSpecification( parameter_annotation=parameter_annotation, returns=returns ) if whitelist: self.whitelist = whitelist else: self.whitelist = WhitelistSpecification( parameter_type=set(parameter_type_whitelist) if parameter_type_whitelist else None, parameter_name=parameter_name_whitelist, ) callable_name = self._get_fully_qualified_callable_name() # Object construction should fail if any child class passes in a None. if not callable_name or "-" in callable_name: raise UnsupportedCallable("The callable is not supported") self.callable_name = callable_name self.parameters = self._generate_parameters() @abc.abstractmethod def _generate_parameters(self) -> List["Parameter"]: ... @abc.abstractmethod def _get_fully_qualified_callable_name(self) -> Optional[str]: ... def __str__(self) -> str: serialized_parameters = [] name_whitelist = self.whitelist.parameter_name type_whitelist = self.whitelist.parameter_type for parameter in self.parameters: should_annotate = True if name_whitelist is not None and parameter.name in name_whitelist: should_annotate = False if type_whitelist is not None and parameter.annotation in type_whitelist: should_annotate = False if should_annotate: parameter_annotation = self.annotations.parameter_annotation if parameter_annotation is not None: taint = parameter_annotation.get(parameter) else: taint = None else: taint = None # * parameters indicate kwargs after the parameter position, and can't be # tainted. Example: `def foo(x, *, y): ...` if parameter.name != "*" and taint: serialized_parameters.append(f"{parameter.name}: {taint}") else: serialized_parameters.append(parameter.name) returns = self.annotations.returns if returns: return_annotation = f" -> {returns}" else: return_annotation = "" return ( f"def {self.callable_name}({', '.join(serialized_parameters)})" f"{return_annotation}: ..." ) def __eq__(self, other: object) -> bool: if not isinstance(other, RawCallableModel): return False return ( self.callable_name == other.callable_name and self.parameters == other.parameters ) # Need to explicitly define this(despite baseclass) as we are overriding eq def __hash__(self) -> int: parameter_names_string = ",".join( map( lambda parameter: f"{parameter.name}:{parameter.annotation}" if parameter.annotation else f"{parameter.name}:_empty", self.parameters, ) ) return hash((self.callable_name, parameter_names_string))
RawCallableModel
python
huggingface__transformers
tests/models/exaone4/test_modeling_exaone4.py
{ "start": 1224, "end": 1357 }
class ____(CausalLMModelTester): if is_torch_available(): base_model_class = Exaone4Model @require_torch
Exaone4ModelTester
python
openai__openai-python
src/openai/types/audio/transcription_word.py
{ "start": 155, "end": 367 }
class ____(BaseModel): end: float """End time of the word in seconds.""" start: float """Start time of the word in seconds.""" word: str """The text content of the word."""
TranscriptionWord
python
google__pytype
pytype/overlays/special_builtins.py
{ "start": 2879, "end": 3909 }
class ____(abstract.PyTDFunction): """Implementation of functions in builtins.pytd.""" _NAME: str = None @classmethod def make(cls, ctx): assert cls._NAME return super().make(cls._NAME, ctx, "builtins") @classmethod def make_alias(cls, name, ctx, module): return super().make(name, ctx, module) def get_underlying_method(self, node, receiver, method_name): """Get the bound method that a built-in function delegates to.""" results = [] for b in receiver.bindings: node, result = self.ctx.attribute_handler.get_attribute( node, b.data, method_name, valself=b ) if result is not None: results.append(result) if results: return node, self.ctx.join_variables(node, results) else: return node, None def get_file_mode(sig, args): callargs = {name: var for name, var, _ in sig.signature.iter_args(args)} if "mode" in callargs: return abstract_utils.get_atomic_python_constant(callargs["mode"]) else: return ""
BuiltinFunction
python
django__django
tests/custom_lookups/tests.py
{ "start": 27442, "end": 27974 }
class ____(TestCase): def test_subquery_usage(self): with register_lookup(models.IntegerField, Div3Transform): Author.objects.create(name="a1", age=1) a2 = Author.objects.create(name="a2", age=2) Author.objects.create(name="a3", age=3) Author.objects.create(name="a4", age=4) qs = Author.objects.order_by("name").filter( id__in=Author.objects.filter(age__div3=2) ) self.assertSequenceEqual(qs, [a2])
SubqueryTransformTests
python
nedbat__coveragepy
tests/test_json.py
{ "start": 452, "end": 27739 }
class ____(UsingModulesMixin, CoverageTest): """Tests of the JSON reports from coverage.py.""" def _assert_expected_json_report( self, cov: Coverage, expected_result: dict[str, Any], ) -> None: """ Helper that creates an example file for most tests. """ self.make_file( "a.py", """\ a = {'b': 1} if a.get('a'): b = 3 elif a.get('b'): b = 5 else: b = 7 if not a: b = 9 """, ) self._compare_json_reports(cov, expected_result, "a") def _assert_expected_json_report_with_regions( self, cov: Coverage, expected_result: dict[str, Any], ) -> None: """ Helper that creates an example file for regions tests. """ self.make_file( "b.py", """\ a = {"b": 1} def c(): return 4 class C: pass class D: def e(self): if a.get("a"): return 12 return 13 def f(self): return 15 """, ) self._compare_json_reports(cov, expected_result, "b") def _compare_json_reports( self, cov: Coverage, expected_result: dict[str, Any], mod_name: str, ) -> None: """ Helper that handles common ceremonies, comparing JSON reports that it creates to expected results, so tests can clearly show the consequences of setting various arguments. """ mod = self.start_import_stop(cov, mod_name) output_path = os.path.join(self.temp_dir, f"{mod_name}.json") cov.json_report(mod, outfile=output_path) with open(output_path, encoding="utf-8") as result_file: parsed_result = json.load(result_file) self.assert_recent_datetime( datetime.strptime(parsed_result["meta"]["timestamp"], "%Y-%m-%dT%H:%M:%S.%f"), ) del parsed_result["meta"]["timestamp"] expected_result["meta"].update( { "version": coverage.__version__, } ) assert parsed_result == expected_result def test_branch_coverage(self) -> None: cov = coverage.Coverage(branch=True) a_py_result = { "executed_lines": [1, 2, 4, 5, 8], "missing_lines": [3, 7, 9], "excluded_lines": [], "executed_branches": [ [2, 4], [4, 5], [8, -1], ], "missing_branches": [ [2, 3], [4, 7], [8, 9], ], "summary": { "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "num_branches": 6, "excluded_lines": 0, "num_partial_branches": 3, "covered_branches": 3, "missing_branches": 3, "percent_covered": 57.142857142857146, "percent_covered_display": "57", "percent_statements_covered": 62.5, "percent_statements_covered_display": "62", "percent_branches_covered": 50.0, "percent_branches_covered_display": "50", }, } expected_result = { "meta": { "branch_coverage": True, "format": 3, "show_contexts": False, }, "files": { "a.py": copy.deepcopy(a_py_result), }, "totals": { "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "num_branches": 6, "excluded_lines": 0, "num_partial_branches": 3, "percent_covered": 57.142857142857146, "percent_covered_display": "57", "covered_branches": 3, "missing_branches": 3, "percent_statements_covered": 62.5, "percent_statements_covered_display": "62", "percent_branches_covered": 50.0, "percent_branches_covered_display": "50", }, } # With regions, a lot of data is duplicated. 
expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_simple_line_coverage(self) -> None: cov = coverage.Coverage() a_py_result = { "executed_lines": [1, 2, 4, 5, 8], "missing_lines": [3, 7, 9], "excluded_lines": [], "summary": { "excluded_lines": 0, "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "percent_covered": 62.5, "percent_covered_display": "62", "percent_statements_covered": 62.5, "percent_statements_covered_display": "62", }, } expected_result = { "meta": { "branch_coverage": False, "format": 3, "show_contexts": False, }, "files": { "a.py": copy.deepcopy(a_py_result), }, "totals": { "excluded_lines": 0, "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "percent_covered": 62.5, "percent_covered_display": "62", "percent_statements_covered": 62.5, "percent_statements_covered_display": "62", }, } # With regions, a lot of data is duplicated. expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_regions_coverage(self) -> None: cov = coverage.Coverage() expected_result = { "files": { "b.py": { "classes": { "": { "excluded_lines": [], "executed_lines": [1, 3, 6, 7, 9, 10, 14], "missing_lines": [4], "summary": { "covered_lines": 7, "excluded_lines": 0, "missing_lines": 1, "num_statements": 8, "percent_covered": 87.5, "percent_covered_display": "88", "percent_statements_covered": 87.5, "percent_statements_covered_display": "88", }, }, "C": { "excluded_lines": [], "executed_lines": [], "missing_lines": [], "summary": { "covered_lines": 0, "excluded_lines": 0, "missing_lines": 0, "num_statements": 0, "percent_covered": 100.0, "percent_covered_display": "100", "percent_statements_covered": 100.0, "percent_statements_covered_display": "100", }, }, "D": { "executed_lines": [], "excluded_lines": [], "missing_lines": [11, 12, 13, 15], "summary": { "covered_lines": 0, "excluded_lines": 0, "missing_lines": 4, "num_statements": 4, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", }, }, }, "executed_lines": [1, 3, 6, 7, 9, 10, 14], "excluded_lines": [], "functions": { "": { "excluded_lines": [], "executed_lines": [1, 3, 6, 7, 9, 10, 14], "missing_lines": [], "summary": { "covered_lines": 7, "excluded_lines": 0, "missing_lines": 0, "num_statements": 7, "percent_covered": 100.0, "percent_covered_display": "100", "percent_statements_covered": 100.0, "percent_statements_covered_display": "100", }, }, "c": { "executed_lines": [], "excluded_lines": [], "missing_lines": [4], "summary": { "covered_lines": 0, "excluded_lines": 0, "missing_lines": 1, "num_statements": 1, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", }, }, "D.e": { "executed_lines": [], "excluded_lines": [], "missing_lines": [11, 12, 13], "summary": { "covered_lines": 0, "excluded_lines": 0, "missing_lines": 3, "num_statements": 3, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", }, }, "D.f": { "executed_lines": [], "excluded_lines": [], "missing_lines": [15], "summary": { "covered_lines": 0, 
"excluded_lines": 0, "missing_lines": 1, "num_statements": 1, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", }, }, }, "missing_lines": [4, 11, 12, 13, 15], "summary": { "covered_lines": 7, "excluded_lines": 0, "missing_lines": 5, "num_statements": 12, "percent_covered": 58.333333333333336, "percent_covered_display": "58", "percent_statements_covered": 58.333333333333336, "percent_statements_covered_display": "58", }, }, }, "meta": { "branch_coverage": False, "format": 3, "show_contexts": False, }, "totals": { "covered_lines": 7, "excluded_lines": 0, "missing_lines": 5, "num_statements": 12, "percent_covered": 58.333333333333336, "percent_covered_display": "58", "percent_statements_covered": 58.333333333333336, "percent_statements_covered_display": "58", }, } self._assert_expected_json_report_with_regions(cov, expected_result) def test_branch_regions_coverage(self) -> None: cov = coverage.Coverage(branch=True) expected_result = { "files": { "b.py": { "classes": { "": { "excluded_lines": [], "executed_branches": [], "executed_lines": [1, 3, 6, 7, 9, 10, 14], "missing_branches": [], "missing_lines": [4], "summary": { "covered_branches": 0, "covered_lines": 7, "excluded_lines": 0, "missing_branches": 0, "missing_lines": 1, "num_branches": 0, "num_partial_branches": 0, "num_statements": 8, "percent_covered": 87.5, "percent_covered_display": "88", "percent_statements_covered": 87.5, "percent_statements_covered_display": "88", "percent_branches_covered": 100.0, "percent_branches_covered_display": "100", }, }, "C": { "excluded_lines": [], "executed_branches": [], "executed_lines": [], "missing_branches": [], "missing_lines": [], "summary": { "covered_branches": 0, "covered_lines": 0, "excluded_lines": 0, "missing_branches": 0, "missing_lines": 0, "num_branches": 0, "num_partial_branches": 0, "num_statements": 0, "percent_covered": 100.0, "percent_covered_display": "100", "percent_statements_covered": 100.0, "percent_statements_covered_display": "100", "percent_branches_covered": 100.0, "percent_branches_covered_display": "100", }, }, "D": { "excluded_lines": [], "executed_branches": [], "executed_lines": [], "missing_branches": [[11, 12], [11, 13]], "missing_lines": [11, 12, 13, 15], "summary": { "covered_branches": 0, "covered_lines": 0, "excluded_lines": 0, "missing_branches": 2, "missing_lines": 4, "num_branches": 2, "num_partial_branches": 0, "num_statements": 4, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", "percent_branches_covered": 0.0, "percent_branches_covered_display": "0", }, }, }, "excluded_lines": [], "executed_branches": [], "executed_lines": [1, 3, 6, 7, 9, 10, 14], "functions": { "": { "excluded_lines": [], "executed_branches": [], "executed_lines": [1, 3, 6, 7, 9, 10, 14], "missing_branches": [], "missing_lines": [], "summary": { "covered_branches": 0, "covered_lines": 7, "excluded_lines": 0, "missing_branches": 0, "missing_lines": 0, "num_branches": 0, "num_partial_branches": 0, "num_statements": 7, "percent_covered": 100.0, "percent_covered_display": "100", "percent_statements_covered": 100.0, "percent_statements_covered_display": "100", "percent_branches_covered": 100.0, "percent_branches_covered_display": "100", }, }, "D.e": { "excluded_lines": [], "executed_branches": [], "executed_lines": [], "missing_branches": [[11, 12], [11, 13]], "missing_lines": [11, 12, 13], "summary": { "covered_branches": 0, 
"covered_lines": 0, "excluded_lines": 0, "missing_branches": 2, "missing_lines": 3, "num_branches": 2, "num_partial_branches": 0, "num_statements": 3, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", "percent_branches_covered": 0.0, "percent_branches_covered_display": "0", }, }, "D.f": { "excluded_lines": [], "executed_branches": [], "executed_lines": [], "missing_branches": [], "missing_lines": [15], "summary": { "covered_branches": 0, "covered_lines": 0, "excluded_lines": 0, "missing_branches": 0, "missing_lines": 1, "num_branches": 0, "num_partial_branches": 0, "num_statements": 1, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", "percent_branches_covered": 100.0, "percent_branches_covered_display": "100", }, }, "c": { "excluded_lines": [], "executed_branches": [], "executed_lines": [], "missing_branches": [], "missing_lines": [4], "summary": { "covered_branches": 0, "covered_lines": 0, "excluded_lines": 0, "missing_branches": 0, "missing_lines": 1, "num_branches": 0, "num_partial_branches": 0, "num_statements": 1, "percent_covered": 0.0, "percent_covered_display": "0", "percent_statements_covered": 0.0, "percent_statements_covered_display": "0", "percent_branches_covered": 100.0, "percent_branches_covered_display": "100", }, }, }, "missing_branches": [[11, 12], [11, 13]], "missing_lines": [4, 11, 12, 13, 15], "summary": { "covered_branches": 0, "covered_lines": 7, "excluded_lines": 0, "missing_branches": 2, "missing_lines": 5, "num_branches": 2, "num_partial_branches": 0, "num_statements": 12, "percent_covered": 50.0, "percent_covered_display": "50", "percent_statements_covered": 58.333333333333336, "percent_statements_covered_display": "58", "percent_branches_covered": 0.0, "percent_branches_covered_display": "0", }, }, }, "meta": { "branch_coverage": True, "format": 3, "show_contexts": False, }, "totals": { "covered_branches": 0, "covered_lines": 7, "excluded_lines": 0, "missing_branches": 2, "missing_lines": 5, "num_branches": 2, "num_partial_branches": 0, "num_statements": 12, "percent_covered": 50.0, "percent_covered_display": "50", "percent_statements_covered": 58.333333333333336, "percent_statements_covered_display": "58", "percent_branches_covered": 0.0, "percent_branches_covered_display": "0", }, } self._assert_expected_json_report_with_regions(cov, expected_result) def run_context_test(self, relative_files: bool) -> None: """A helper for two tests below.""" self.make_file( "config", f"""\ [run] relative_files = {relative_files} [report] precision = 2 [json] show_contexts = True """, ) cov = coverage.Coverage(context="cool_test", config_file="config") a_py_result = { "executed_lines": [1, 2, 4, 5, 8], "missing_lines": [3, 7, 9], "excluded_lines": [], "contexts": { "1": ["cool_test"], "2": ["cool_test"], "4": ["cool_test"], "5": ["cool_test"], "8": ["cool_test"], }, "summary": { "excluded_lines": 0, "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "percent_covered": 62.5, "percent_covered_display": "62.50", "percent_statements_covered": 62.5, "percent_statements_covered_display": "62.50", }, } expected_result = { "meta": { "branch_coverage": False, "format": 3, "show_contexts": True, }, "files": { "a.py": copy.deepcopy(a_py_result), }, "totals": { "excluded_lines": 0, "missing_lines": 3, "covered_lines": 5, "num_statements": 8, "percent_covered": 62.5, "percent_covered_display": "62.50", 
"percent_statements_covered": 62.5, "percent_statements_covered_display": "62.50", }, } # With regions, a lot of data is duplicated. expected_result["files"]["a.py"]["classes"] = {"": a_py_result} # type: ignore[index] expected_result["files"]["a.py"]["functions"] = {"": a_py_result} # type: ignore[index] self._assert_expected_json_report(cov, expected_result) def test_context_non_relative(self) -> None: self.run_context_test(relative_files=False) def test_context_relative(self) -> None: self.run_context_test(relative_files=True) def test_l1_equals_l2(self) -> None: # In results.py, we had a line checking `if l1 == l2` that was never # true. This test makes it true. The annotations are essential, I # don't know why. self.make_file( "wtf.py", """\ def function( x: int, y: int, ) -> None: return x + y assert function(3, 5) == 8 """, ) cov = coverage.Coverage(branch=True) mod = self.start_import_stop(cov, "wtf") cov.json_report(mod)
JsonReportTest
python
walkccc__LeetCode
solutions/1792. Maximum Average Pass Ratio/1792.py
{ "start": 0, "end": 681 }
class ____: def maxAverageRatio( self, classes: list[list[int]], extraStudents: int, ) -> float: def extraPassRatio(pas: int, total: int) -> float: """Returns the extra pass ratio if a brilliant student joins.""" return (pas + 1) / (total + 1) - pas / total maxHeap = [(-extraPassRatio(pas, total), pas, total) for pas, total in classes] heapq.heapify(maxHeap) for _ in range(extraStudents): _, pas, total = heapq.heappop(maxHeap) heapq.heappush( maxHeap, (-extraPassRatio(pas + 1, total + 1), pas + 1, total + 1)) return sum(pas / total for _, pas, total in maxHeap) / len(maxHeap)
Solution
python
spack__spack
lib/spack/spack/package_base.py
{ "start": 19802, "end": 19929 }
class ____: def __init__(self, source, binary): self.source = source self.binary = binary
DisableRedistribute
python
keras-team__keras
keras/src/ops/numpy_test.py
{ "start": 334232, "end": 339331 }
class ____(testing.TestCase): def test_histogram_default_args(self): hist_op = knp.histogram input_tensor = np.random.rand(8) # Expected output expected_counts, expected_edges = np.histogram(input_tensor) counts, edges = hist_op(input_tensor) self.assertEqual(counts.shape, expected_counts.shape) self.assertAllClose(counts, expected_counts) self.assertEqual(edges.shape, expected_edges.shape) self.assertAllClose(edges, expected_edges) def test_histogram_custom_bins(self): hist_op = knp.histogram input_tensor = np.random.rand(8) bins = 5 # Expected output expected_counts, expected_edges = np.histogram(input_tensor, bins=bins) counts, edges = hist_op(input_tensor, bins=bins) self.assertEqual(counts.shape, expected_counts.shape) self.assertAllClose(counts, expected_counts) self.assertEqual(edges.shape, expected_edges.shape) self.assertAllClose(edges, expected_edges) def test_histogram_custom_range(self): hist_op = knp.histogram input_tensor = np.random.rand(10) range_specified = (2, 8) # Expected output expected_counts, expected_edges = np.histogram( input_tensor, range=range_specified ) counts, edges = hist_op(input_tensor, range=range_specified) self.assertEqual(counts.shape, expected_counts.shape) self.assertAllClose(counts, expected_counts) self.assertEqual(edges.shape, expected_edges.shape) self.assertAllClose(edges, expected_edges) def test_histogram_symbolic_input(self): hist_op = knp.histogram input_tensor = KerasTensor(shape=(None,), dtype="float32") counts, edges = hist_op(input_tensor) self.assertEqual(counts.shape, (10,)) self.assertEqual(edges.shape, (11,)) def test_histogram_non_integer_bins_raises_error(self): hist_op = knp.histogram input_tensor = np.random.rand(8) with self.assertRaisesRegex( ValueError, "Argument `bins` should be a non-negative integer" ): hist_op(input_tensor, bins=-5) def test_histogram_range_validation(self): hist_op = knp.histogram input_tensor = np.random.rand(8) with self.assertRaisesRegex( ValueError, "Argument `range` must be a tuple of two elements" ): hist_op(input_tensor, range=(1,)) with self.assertRaisesRegex( ValueError, "The second element of `range` must be greater than the first", ): hist_op(input_tensor, range=(5, 1)) def test_histogram_large_values(self): hist_op = knp.histogram input_tensor = np.array([1e10, 2e10, 3e10, 4e10, 5e10]) counts, edges = hist_op(input_tensor, bins=5) expected_counts, expected_edges = np.histogram(input_tensor, bins=5) self.assertAllClose(counts, expected_counts) self.assertAllClose(edges, expected_edges) def test_histogram_float_input(self): hist_op = knp.histogram input_tensor = np.random.rand(8) counts, edges = hist_op(input_tensor, bins=5) expected_counts, expected_edges = np.histogram(input_tensor, bins=5) self.assertAllClose(counts, expected_counts) self.assertAllClose(edges, expected_edges) def test_histogram_high_dimensional_input(self): hist_op = knp.histogram input_tensor = np.random.rand(3, 4, 5) with self.assertRaisesRegex( ValueError, "Input tensor must be 1-dimensional" ): hist_op(input_tensor) def test_histogram_values_on_edges(self): hist_op = knp.histogram input_tensor = np.array([0.0, 2.0, 4.0, 8.0, 10.0]) bins = 5 expected_counts, expected_edges = np.histogram(input_tensor, bins=bins) counts, edges = hist_op(input_tensor, bins=bins) self.assertAllClose(counts, expected_counts) self.assertAllClose(edges, expected_edges) # TODO: Fix predict for NumPy. 
@parameterized.named_parameters( ("jit_compile_false", False), ("jit_compile_true", True), ) @pytest.mark.skipif( backend.backend() == "numpy", reason=( "`predict` errors out with 'autodetected range of [nan, nan] is " "not finite' on the NumPy backend. To be fixed." ), ) def test_histogram_predict(self, jit_compile): class HistogramLayer(keras.layers.Layer): def call(self, x): shape = ops.shape(x) # Flatten, because the op does not work with >1-dim inputs. x = ops.reshape(x, (shape[0] * shape[1],)) return knp.histogram(x, bins=5) inputs = keras.Input(shape=(8,)) counts, edges = HistogramLayer()(inputs) model = keras.Model(inputs, (counts, edges)) model.compile(jit_compile=jit_compile) model.predict(np.random.randn(1, 8))
HistogramTest
python
tornadoweb__tornado
tornado/test/web_test.py
{ "start": 26064, "end": 26450 }
class ____(RequestHandler): def prepare(self): if self.get_argument("source", None) == "query": method = self.get_query_argument elif self.get_argument("source", None) == "body": method = self.get_body_argument else: method = self.get_argument # type: ignore self.finish(method("foo", "default"))
GetArgumentHandler
python
allegroai__clearml
clearml/backend_api/services/v2_9/projects.py
{ "start": 88259, "end": 90634 }
class ____(Response): """ Response of projects.update endpoint. :param updated: Number of projects updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "projects" _action = "update" _version = "2.9" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of projects updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None: super(UpdateResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value response_mapping = { CreateRequest: CreateResponse, GetByIdRequest: GetByIdResponse, GetAllRequest: GetAllResponse, UpdateRequest: UpdateResponse, DeleteRequest: DeleteResponse, GetUniqueMetricVariantsRequest: GetUniqueMetricVariantsResponse, GetHyperParametersRequest: GetHyperParametersResponse, GetTaskTagsRequest: GetTaskTagsResponse, GetModelTagsRequest: GetModelTagsResponse, MakePublicRequest: MakePublicResponse, MakePrivateRequest: MakePrivateResponse, }
UpdateResponse
python
joke2k__faker
faker/providers/person/el_GR/__init__.py
{ "start": 44, "end": 47359 }
class ____(PersonProvider): formats_male = ( "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}} {{last_name_male}}", "{{first_name_male}}-{{first_name_male}} {{last_name_male}}", ) formats_female = ( "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}} {{last_name_female}}", "{{first_name_female}}-{{first_name_female}} {{last_name_female}}", ) formats = formats_male + formats_female first_names_male = ( "Άγγελος", "Άνθιμος", "Άρης", "Άριστος", "Έκτορας", "Έξαρχος", "Ίκαρος", "Ίων", "Αίας", "Αβραάμ", "Αγάπιος", "Αγαθάγγελος", "Αγαθοκλής", "Αγαθόνικος", "Αγαμέμνων", "Αγαπητός", "Αγγελής", "Αγησίλαος", "Αδάμ", "Αδαμάντιος", "Αθανάσιος", "Αθηναγόρας", "Αθηνόδωρος", "Αιμίλιος", "Αιμιλιανός", "Ακρίτας", "Ακριβός", "Αλέξανδρος", "Αλέξιος", "Αλκιβιάδης", "Αμβρόσιος", "Ανάργυρος", "Ανέστης", "Αναγνώστης", "Ανανίας", "Αναξαγόρας", "Αναστάσιος", "Ανδρέας", "Ανδροκλής", "Ανδρόνικος", "Ανθούλης", "Αντίγονος", "Αντίπατρος", "Αντύπας", "Αντώνιος", "Απόλλων", "Απόστολος", "Αρίσταρχος", "Αργύριος", "Αριστείδης", "Αριστομένης", "Αριστοτέλης", "Αριστοφάνης", "Αριστόβουλος", "Αρτέμης", "Αρτέμιος", "Αρχέλαος", "Αρχιμήδης", "Ασημάκης", "Ασημής", "Ασκληπιός", "Αστέριος", "Αυγέρης", "Αυξέντιος", "Αφέντης", "Αχιλλέας", "Αύγουστος", "Βάιος", "Βαλάντης", "Βαλέριος", "Βαλεντίνος", "Βαρδής", "Βαρθολομαίος", "Βαρσάμος", "Βασίλειος", "Βασίλης", "Βελισσάριος", "Βενέτιος", "Βενιαμίν", "Βενιζέλος", "Βησσαρίων", "Βικέντιος", "Βλάσης", "Βλάσιος", "Βλαδίμηρος", "Βρασίδας", "Βύρων", "Γαβριήλ", "Γαλάτιος", "Γαληνός", "Γαρύφαλλος", "Γεράσιμος", "Γεώργιος", "Γιάννης", "Γιώργος", "Γκίκας", "Γρηγόριος", "Δήμος", "Δίκαιος", "Δαμασκηνός", "Δαμιανός", "Δανιήλ", "Δημήτρης", "Δημήτριος", "Δημοκράτης", "Δημοσθένης", "Δημόκριτος", "Διαμαντής", "Διογένης", "Διομήδης", "Διονύσιος", "Δομήνικος", "Δράκων", "Δρόσος", "Δωρόθεος", "Ειρηναίος", "Ελευθέριος", "Ελισσαίος", "Εμμανουήλ", "Επαμεινώνδας", "Ερμής", "Ερμόλαος", "Ερρίκος", "Ερωτόκριτος", "Ευάγγελος", "Ευγένιος", "Ευδόξιος", "Ευθύμιος", "Ευκλείδης", "Ευμένιος", "Ευριπίδης", "Ευσέβιος", "Ευστάθιος", "Ευστράτιος", "Ευτύχιος", "Εφραίμ", "Ζήνων", "Ζαφείρης", "Ζαφείριος", "Ζαχαρίας", "Ζηνόβιος", "Ηλίας", "Ηρακλής", "Ηρόδοτος", "Ησαΐας", "Θέμης", "Θαλής", "Θεμιστοκλής", "Θεοδόσης", "Θεοδόσιος", "Θεολόγος", "Θεοτόκης", "Θεοφάνης", "Θεοφύλακτος", "Θεοχάρης", "Θεόδουλος", "Θεόδωρος", "Θεόκλητος", "Θεόπιστος", "Θεόφιλος", "Θεόφραστος", "Θησεύς", "Θουκυδίδης", "Θρασύβουλος", "Θωμάς", "Ιάκωβος", "Ιάσονας", "Ιάσων", "Ιγνάτιος", "Ιερεμίας", "Ιερόθεος", "Ιερώνυμος", "Ιορδάνης", "Ιουλιανός", "Ιούλιος", "Ιπποκράτης", "Ιππόλυτος", "Ισίδωρος", "Ισαάκ", "Ιωάννης", "Ιωακείμ", "Ιωνάς", "Ιωσήφ", "Κάρολος", "Κίμων", "Καλλίνικος", "Κηρύκος", "Κλέαρχος", "Κλήμης", "Κλεάνθης", "Κλεομένης", "Κλεόβουλος", "Κλεόπας", "Κομνηνός", "Κορνήλιος", "Κοσμάς", "Κρυστάλλης", "Κυπριανός", "Κυριάκος", "Κυριαζής", "Κωνσταντίνος", "Κύρος", "Κώστας", "Λάζαρος", "Λάμπρος", "Λάσκαρης", "Λέανδρος", "Λέων", "Λαέρτης", "Λαοκράτης", "Λαυρέντιος", "Λεμονής", "Λεονάρδος", "Λεωνίδας", "Λογοθέτης", "Λουδοβίκος", "Λουκάς", "Λουκιανός", "Λυκούργος", "Λύσανδρος", "Μάριος", "Μάρκος", "Μένανδρος", "Μίνωας", "Μαγδαληνός", "Μακάριος", "Μαρίνος", "Μαρτίνος", "Ματθαίος", 
"Μαυρίκιος", "Μαυροειδής", "Μαυρούδης", "Μαύρος", "Μεγακλής", "Μεθόδιος", "Μελέτιος", "Μενέλαος", "Μερκούριος", "Μηνάς", "Μικές", "Μιλτιάδης", "Μιχάλης", "Μιχαήλ", "Νέαρχος", "Νίκανδρος", "Νίκος", "Νίκων", "Ναθαναήλ", "Ναπολέων", "Νεκτάριος", "Νεοκλής", "Νεόφυτος", "Νικήτας", "Νικηφόρος", "Νικόδημος", "Νικόλαος", "Ξανθός", "Ξενοφών", "Οδυσσέας", "Οδυσσεύς", "Ορέστης", "Ορφέας", "Πάρις", "Πάτροκλος", "Πέτρος", "Πίνδαρος", "Παναγής", "Παναγιώτης", "Πανορμίτης", "Πανταζής", "Παντελής", "Παντελεήμων", "Παράσχος", "Παρασκευάς", "Πασχάλης", "Παυσανίας", "Παύλος", "Πελοπίδας", "Περικλής", "Πλάτων", "Πλούταρχος", "Πολυζώης", "Πολυκράτης", "Πολυχρόνιος", "Πολύβιος", "Πολύδωρος", "Πολύκαρπος", "Πραξιτέλης", "Προκόπιος", "Προμηθέας", "Πρόδρομος", "Πυθαγόρας", "Πύρρος", "Ράλλης", "Ρήγας", "Ρίζος", "Ραφαήλ", "Ραχήλ", "Ροδόφλος", "Ρωμανός", "Σάββας", "Σέργιος", "Σαμουήλ", "Σαράντης", "Σεβαστιανός", "Σεραφείμ", "Σιρανούς", "Σολομών", "Σοφοκλής", "Σπήλιος", "Σπυρίδων", "Στέλλιος", "Στέργιος", "Στέφανος", "Σταμάτης", "Σταμάτιος", "Σταύρος", "Στυλιανός", "Συμεών", "Σωκράτης", "Σωτήρης", "Σωτήριος", "Σόλων", "Σώζων", "Τίμων", "Τίτος", "Ταξίαρχος", "Ταξιάρχης", "Τζανέτος", "Τηλέμαχος", "Τηλεμαχος", "Τιμολέων", "Τιμόθεος", "Τριαντάφυλλος", "Τρύφων", "Τσαμπίκος", "Υάκινθος", "Φίλιππος", "Φαίδων", "Φανούριος", "Φιλήμων", "Φλοριάντ", "Φοίβος", "Φρίξος", "Φραγκίσκος", "Φρειδερίκος", "Φωκάς", "Φωκίων", "Φωτεινός", "Φώτιος", "Χάρις", "Χαράλαμπος", "Χαρίλαος", "Χαρίτος", "Χρήστος", "Χρίστος", "Χριστιάν", "Χριστόδουλος", "Χριστόφορος", "Χρυσοβαλάντιος", "Χρυσόστομος", "Χρύσανθος", "Όθων", "Όμηρος", ) first_names_female = ( "Άλκηστις", "Άννα", "Άρτεμις", "Έλλη", "Ήβη", "Ήλια", "Ήρα", "Ίρις", "Αγάθη", "Αγάπη", "Αγαθή", "Αγγελική", "Αγλαΐα", "Αγνή", "Αγόρω", "Αδαμαντία", "Αηδόνα", "Αθανασία", "Αθηνά", "Αθηνοδώρα", "Αικατερίνη", "Αιμιλία", "Ακριβή", "Αλίκη", "Αλεξάνδρα", "Αλεξία", "Αλκινόη", "Αλκμήνη", "Αμαλία", "Αμβροσία", "Αμφιθέα", "Αμφιτρίτη", "Ανάργυρη", "Αναστασία", "Ανατολή", "Ανδριανή", "Ανδρομάχη", "Ανδρομέδα", "Ανδρονίκη", "Ανθή", "Ανθούλα", "Αννίκα", "Αντιγόνη", "Αντωνία", "Απολλωνία", "Αποστολία", "Αρέθα", "Αργυρούλα", "Αργυρώ", "Αρετή", "Αριάδνη", "Αριστέα", "Αρτεμισία", "Αρχοντία", "Ασημίνα", "Ασημούλα", "Ασπασία", "Αστέρω", "Αταλάντη", "Αυγή", "Αυγουστίνα", "Αφέντρα", "Αφροδίτη", "Αχιλλεία", "Βάγια", "Βέρα", "Βαγιανή", "Βαλάντω", "Βαλέρια", "Βαλεντίνα", "Βαρβάρα", "Βαρσαμία", "Βασιλεία", "Βασιλική", "Βεατρίκη", "Βελισσαρία", "Βενετία", "Βερονίκη", "Βερόνικα", "Βησσαρία", "Βικέντια", "Βικτωρία", "Βικτόρια", "Βιολέτα", "Βιργινία", "Βλασία", "Βρυσηίς", "Γαβριέλλα", "Γαλάτεια", "Γαλήνη", "Γαρυφαλλιά", "Γενοβέφα", "Γερακίνα", "Γερασιμούλα", "Γεσθημανή", "Γεωργία", "Γιαννούλα", "Γιασεμή", "Γιολάντα", "Γκόλφω", "Γλαύκη", "Γλυκερία", "Γραμματική", "Δάφνη", "Δέσποινα", "Δήμητρα", "Δαβιδούλα", "Δαμασκηνή", "Δαμιανή", "Δανάη", "Δημούλα", "Διαλεκτή", "Διαμάντω", "Διαμαντούλα", "Διδώ", "Δικαία", "Διονυσία", "Δούκισσα", "Δροσιά", "Δωροθέα", "Δόμνα", "Ειρήνη", "Ελένη", "Ελέσσα", "Ελεονόρα", "Ελευθερία", "Ελισάβετ", "Ελπίδα", "Ελπίς", "Εμμανουέλα", "Επιστήμη", "Ερασμία", "Ερατώ", "Εριέτα", "Εριφύλη", "Ερμιόνη", "Ερωφίλη", "Ευαγγελία", "Ευανθία", "Ευγενία", "Ευδοκία", "Ευδοξία", "Ευθαλία", "Ευθυμία", "Ευλαμπία", "Ευμορφία", "Ευπραξία", "Ευρυδίκη", "Ευρύκλεια", "Ευσεβία", "Ευσεβεία", "Ευσταθία", "Ευστρατία", "Ευτέρπη", "Ευτυχία", "Ευφροσύνη", "Εύα", "Εύκλεια", "Ζαμπέτα", "Ζαμπία", "Ζαφειρία", "Ζαχαρένια", "Ζαχαρούλα", "Ζηναϊς", "Ζηνοβία", "Ζησούλα", "Ζωή", "Ηλέκτρα", "Ηλιάνα", "Ηράκλεια", "Ηώ", "Θάλεια", "Θέκλα", "Θέμις", 
"Θέτις", "Θαλασσινή", "Θεανώ", "Θεμιστόκλεια", "Θεοδοσία", "Θεοδούλη", "Θεοδότη", "Θεοδώρα", "Θεολογία", "Θεοπίστη", "Θεοφίλη", "Θεοφανία", "Θεοφύλακτη", "Θεοχαρούλα", "Θεόκλεια", "Θεώνη", "Θηρεσία", "Θωμαίς", "Ιακωβίνα", "Ιγνατία", "Ινώ", "Ιοκάστη", "Ιορδανία", "Ιουλία", "Ιουλιανή", "Ιππολύτη", "Ισαβέλλα", "Ισιδώρα", "Ισμήνη", "Ιφιγένεια", "Ιωάννα", "Ιωσηφίνα", "Καλή", "Καλλίνικη", "Καλλιρρόη", "Καλλιρόη", "Καλλιόπη", "Καλομοίρα", "Καλυψώ", "Κανέλλα", "Καρυοφυλλιά", "Κασσάνδρα", "Κασσιανή", "Κατερίνα", "Κερασιά", "Κικιλία", "Κλαίρη", "Κλειώ", "Κλεονίκη", "Κλεοπάτρα", "Κλημεντίνη", "Κλυταιμνήστρα", "Κοκκώνα", "Κομνηνή", "Κονδυλία", "Κοραλία", "Κορνηλία", "Κρινιώ", "Κρυσταλλένια", "Κυβέλη", "Κυδωνία", "Κυπαρισσία", "Κυπριανή", "Κυράτσα", "Κυριακή", "Κωνσταντία", "Κωνσταντίνα", "Κωστούλα", "Κόσμια", "Λήδα", "Λαζαρία", "Λαμπρινή", "Λασκαρίνα", "Λαυρεντία", "Λεμονιά", "Λευκοθέα", "Λεωνιδιά", "Λεώνη", "Λητώ", "Λουίζα", "Λουκία", "Λουλουδένια", "Λυγερή", "Λυδία", "Μάνθα", "Μάρθα", "Μαγδαληνή", "Μακρίνα", "Μαλαμάτη", "Μαλαματένια", "Μαλβίνα", "Μαντώ", "Μαρία", "Μαρίνα", "Μαργαρίτα", "Μαργιέττα", "Μαριάνθη", "Μαριάννα", "Μαριγώ", "Μαριλένα", "Μαρκέλλα", "Μαρωτέσα", "Ματίνα", "Ματθίλδη", "Ματρώνη", "Μαύρα", "Μελένια", "Μελέτια", "Μελίνα", "Μελπομένη", "Μερόπη", "Μεταξία", "Μηλιά", "Μινέρβα", "Μιράντα", "Μιχαέλα", "Μυρσίνη", "Μυρτώ", "Μόσχα", "Νίκη", "Ναταλία", "Ναταλίνα", "Ναυσικά", "Νεκταρία", "Νερατζιά", "Νεφέλη", "Νεόκλεια", "Νικητία", "Νικολέτα", "Νικολίτσα", "Νομική", "Νταίζη", "Ντανιέλα", "Ξένη", "Ξανθή", "Ξανθίππη", "Οδύσσεια", "Ολύμπια", "Ουρανία", "Πέτρα", "Παγώνα", "Παναγία", "Παναγιώτα", "Πανδώρα", "Παντελία", "Παντούλα", "Πανωραία", "Παρέσσα", "Παρασκευή", "Παρθένα", "Πασχαλιά", "Παταπία", "Παυλίνα", "Πελαγία", "Περικλεία", "Περιστέρα", "Περσεφόνη", "Πηγή", "Πηνελόπη", "Πιερρίνα", "Ποθητή", "Πολυνίκη", "Πολυξένη", "Πολυτίμη", "Πολυχρονία", "Πολύβια", "Πολύδωρα", "Πολύμνια", "Πουλχερία", "Πούλια", "Προδρομία", "Πωλίνα", "Ρέα", "Ραλλία", "Ρεβέκα", "Ρεβέκκα", "Ρεγγίνα", "Ρηγούλα", "Ροδάνθη", "Ροδαμάνθη", "Ροδιά", "Ροδόκλεια", "Ρουμπίνη", "Ρούσα", "Ρωξάνη", "Ρόζα", "Σάρρα", "Σαββούλα", "Σαλώμη", "Σαπφώ", "Σεβαστή", "Σεβαστιανή", "Σελήνη", "Σεμίνα", "Σεραφεία", "Σμαράγδα", "Σουλτάνα", "Σουμέλα", "Σοφία", "Σπάρτη", "Σπυράννα", "Σπυριδούλα", "Στέλλα", "Σταματίνα", "Σταυρούλα", "Στεργιανή", "Στεργιαννώ", "Στεφανία", "Στυλιανή", "Συμέλα", "Συμεωνία", "Συμεώνη", "Σωζούσα", "Σωτηρία", "Σωφρονία", "Ταξιαρχία", "Τατιάνα", "Τερψιχόρη", "Τζένη", "Τιμοθέα", "Τριαντάφυλλη", "Τριανταφυλλιά", "Τρισεύγενη", "Τρυφωνία", "Τσαμπίκα", "Υακίνθη", "Υβόννη", "Υπαπαντή", "Φαίδρα", "Φανή", "Φανουρία", "Φεβρωνία", "Φερενίκη", "Φιλίππα", "Φιλαρέτη", "Φιλιππία", "Φιλιώ", "Φιλοθέη", "Φιλομήλα", "Φλωρίνα", "Φλωρεντία", "Φλώρα", "Φοίβη", "Φραντζέσκα", "Φρειδερίκη", "Φρύνη", "Φωτεινή", "Χάιδω", "Χάρη", "Χαρά", "Χαρίκλεια", "Χαραλαμπία", "Χιονιά", "Χλόη", "Χριστίνα", "Χριστιάνα", "Χριστοδούλα", "Χριστοφόρα", "Χριστόφιλη", "Χρυσάνθη", "Χρυσή", "Χρυσαυγή", "Χρυσαφένια", "Χρυσοβαλάντου", "Χρυσοβαλάντω", "Χρυσούλα", "Χρυσόστομη", "Όλγα", ) first_names = first_names_male + first_names_female last_names_male = ( "Αβαγιανός", "Αβραμίδης", "Αβραμιώτης", "Αγαλιώτης", "Αγγελάκος", "Αγγελής", "Αγγελίδης", "Αγγελίκας", "Αγγελόπουλος", "Αγκυρόπουλος", "Αδαμόπουλος", "Αετόπουλος", "Αθανασάκης", "Αθανασόπουλος", "Ακαλίδης", "Ακριτίδης", "Ακριώτης", "Αλατζάκης", "Αλαφάκης", "Αλεξανδρόπουλος", "Αλεξόπουλος", "Αμπατζιάνης", "Αμπλιάνιτης", "Αμυγδαλάκης", "Αναστασάκης", "Αναστασίου", "Αναστασόπουλος", "Αναστόπουλος", "Ανδρεανίδης", 
"Ανδριανός", "Ανδρικόπουλος", "Ανδριόπουλος", "Ανδριώτης", "Ανδρουλάκης", "Ανδρουλιδάκης", "Αντωνάκος", "Αντωνέας", "Αντωνίου", "Αντωνιάδης", "Αντωνόπουλος", "Αξαόπουλος", "Αξιώτης", "Αποστολάκης", "Αποστολίδης", "Αποστολακάκης", "Αποστολόπουλος", "Αραμπατζής", "Αργυρής", "Αργυρίου", "Αργυρόπουλος", "Αρμένης", "Αρχαυλής", "Ασαρίδης", "Ασημακόπουλος", "Ασημόπουλος", "Αυγουστής", "Αφρουδάκης", "Βάρσος", "Βάσσης", "Βαβουλίδης", "Βαβουράκης", "Βακαλάκης", "Βακαλής", "Βαλαβάνης", "Βαλασίδης", "Βαλτάς", "Βαμβακάς", "Βανδώρος", "Βαρακλής", "Βαρδάκης", "Βαρσάμης", "Βασιλάκης", "Βασιλείου", "Βασιλογιάννης", "Βαχουθιανάκης", "Βαϊτσόπουλος", "Βελεντζάς", "Βιδαλάκης", "Βιλδός", "Βιολάτος", "Βιτσαξής", "Βλάχος", "Βλαχονικολέας", "Βλησαρούλης", "Βολικάκης", "Βορλόκας", "Βουτσάς", "Βουτσελας", "Βούκας", "Βούλγαρης", "Βούλκος", "Βούρας", "Βώσσος", "Γάτος", "Γαβριδάκης", "Γαλατούλας", "Γαρουφαλής", "Γαρυφαλίδης", "Γενετζάκης", "Γεννηματάς", "Γεντίμης", "Γεράρδης", "Γεροδήμος", "Γερόπουλος", "Γεωργακόπουλος", "Γεωργαλάς", "Γεωργαλής", "Γεωργούδης", "Γεωργούλας", "Γιαννάκης", "Γιαννέλος", "Γιαννές", "Γιαννακέας", "Γιαννακίτσας", "Γιαννακουδάκης", "Γιαννακόπουλος", "Γιαννούκος", "Γιαννόπουλος", "Γιαπατζής", "Γιασημάκης", "Γιατρομανωλάκης", "Γιωτάκης", "Γιώτης", "Γκάγκας", "Γκίκας", "Γκίνης", "Γκαγκαουδάκης", "Γκαλίου", "Γκανάτσιος", "Γκατζογιάννης", "Γκικόπουλος", "Γκιολδελής", "Γκιτσάκης", "Γκουτζαμάνης", "Γκούβελος", "Γκούσκος", "Γκότσης", "Γουργουλής", "Γούσιος", "Γρίβας", "Γραμματικόπουλος", "Γραμματικός", "Γρατσιάς", "Γρεβενάρης", "Γρετός", "Γρηγορίου", "Γρηγοριάδης", "Γρηγορόπουλος", "Δάρας", "Δέλιος", "Δίγκας", "Δακαναλής", "Δανέζης", "Δανελής", "Δανιηλίδης", "Δερμιτζάκης", "Δεσύλλας", "Δημησκής", "Δημητρίου", "Δημητρόπουλος", "Δημοβελής", "Δημόπουλος", "Διακάκης", "Διαμαντής", "Δινεζάκης", "Δουβίκας", "Δουβρόπουλος", "Δουκάς", "Δουκατζής", "Δουλάμης", "Δουλγεράκης", "Δουμάς", "Δουράνης", "Δούνης", "Δραζιώτης", "Δρακάκης", "Δρακόπουλος", "Δρουλιάς", "Δόβας", "Δόσης", "Ελευθερίου", "Εμπορόπουλος", "Εσπερίδης", "Ευαγγελάτος", "Ευαγγελινός", "Ευκαρπίδης", "Ευστρατόπουλος", "Ευταξιάς", "Έψιμος", "Ζαραφέτας", "Ζαφείρης", "Ζαχαρίου", "Ζενεμπίσης", "Ζευγίτης", "Ζηδιανάκης", "Ζηματίκας", "Ζηρός", "Ζιάκας", "Ζιώγος", "Ζολώτας", "Ζορμπάς", "Ζουκής", "Ζουλούμης", "Ζούνης", "Ζτούκος", "Ζυγούρης", "Ζώνιος", "Ζώτος", "Θεοδοσίου", "Θεοδωρικάκος", "Θεοδωρόπουλος", "Θεολόγος", "Θεοχάρης", "Θηβαίος", "Θωμάς", "Θωμόπουλος", "Ιακωβάκης", "Ιατρόπουλος", "Ιντζές", "Ισάρης", "Ισέρης", "Ιωσηφίδης", "Κίσσας", "Καβούρης", "Καζανόπουλος", "Κακαβός", "Κακατσός", "Κακοσαίος", "Καλαμάρας", "Καλαμαράκης", "Καλαμπαλίκης", "Καλαπόδης", "Καλαργυρός", "Καλαϊτζάκης", "Καλιάμπος", "Καλιτσουνάκης", "Καμπάκας", "Καμπερίδης", "Καμποσάκης", "Καναβός", "Κανελλόπουλος", "Καπατσώρης", "Καπετάνιος", "Καπούνης", "Καράμπας", "Καράνης", "Καραΐσκος", "Καραγιάννης", "Καραγρηγορίου", "Καραδήμος", "Καρακάξης", "Καρακαστανιάς", "Καρακυρίου", "Καραλής", "Καραμάνος", "Καραμσαλής", "Καρανίκας", "Καρανασίου", "Καρανικόλας", "Καραντζόπουλος", "Καρατζίκος", "Καρατζαφέρης", "Καρούντζος", "Καρούσος", "Καρτάς", "Καρυπίδης", "Κασούτσας", "Καστάνης", "Κατάκης", "Καταραχιάς", "Κατεβάτης", "Κατιρτζής", "Κατσίπης", "Κατσίφος", "Κατσαβός", "Κατσαμάνης", "Κατσαντώνης", "Κατσαρός", "Κατσιγιάννης", "Κατσιλής", "Κατσογιάννης", "Καφίρης", "Καφαντάρης", "Καψάλης", "Κελαϊδώνης", "Κελλάρης", "Κερασοβίτης", "Κεσαλίδης", "Κεσεμίδης", "Κεχαγιάς", "Κιοσές", "Κιουπλιώτης", "Κιούσης", "Κιτινός", "Κλήμης", "Κοκορδέλης", "Κολοβός", "Κολοκάθης", "Κολχούρης", "Κομκούδης", "Κομπολιάς", 
"Κονδυλίδης", "Κονιδάρης", "Κοντάκος", "Κοντογιάννης", "Κοντοκώστας", "Κοντός", "Κορδατζής", "Κορμπόπουλος", "Κορομήλας", "Κοτσαρής", "Κοττίκας", "Κουβάς", "Κουκλατζής", "Κουκουβίνος", "Κουκουλιάντας", "Κουλίδης", "Κουλίζος", "Κουλουριώτης", "Κουμιώτης", "Κουράκος", "Κουρής", "Κουρελής", "Κουρκουτάς", "Κουρσάρης", "Κουρσουμίδης", "Κουταλιός", "Κουτελιέρης", "Κουτεντάκης", "Κουτσικόπουλος", "Κουτσογιάννης", "Κουτσογιάννόπουλος", "Κουτσουλής", "Κουτσουρέλης", "Κουτσούρας", "Κουτχιάς", "Κούρτης", "Κούτρης", "Κούτσικος", "Κραββαρίτης", "Κρεμμύδας", "Κριάλης", "Κριτσέλης", "Κτενίδης", "Κυμπάρης", "Κυπραίος", "Κυπριώτης", "Κυρίτσης", "Κυργιάκης", "Κυριαζής", "Κυριακάκης", "Κυριακίδης", "Κυριατσούλης", "Κωνσταντάς", "Κωνσταντακόπουλος", "Κωνσταντινίδης", "Κωνσταντόπουλος", "Κωστάκης", "Κωστίδης", "Κωσταλής", "Κωστούλας", "Κωστόπουλος", "Κωτούλας", "Κωτσίδης", "Κωτσικόρης", "Κόλκας", "Κώττας", "Λάκκας", "Λάλας", "Λάππας", "Λάτσκος", "Λέλεκας", "Λαγγούσης", "Λαγογιάννης", "Λαγοπάτης", "Λαζαρίδης", "Λαμπρινός", "Λαουρδέκης", "Λαφατζής", "Λεβέντης", "Λελεδάκης", "Λεμονής", "Λεντζίου", "Λιανάκης", "Λιβανός", "Λιθοξοΐδης", "Λιούτας", "Λιτίνας", "Λιόλιος", "Λουλάκης", "Λουπασάκης", "Λουράντος", "Λυγκούρας", "Λυμπέρης", "Λώλος", "Μάνδαλος", "Μάνδρος", "Μάνος", "Μάργαρης", "Μάρρας", "Μάστορας", "Μίσχος", "Μίχος", "Μαγκούφης", "Μαζαράκης", "Μακαριάδης", "Μακρής", "Μακρυγιάννης", "Μακρυκώστας", "Μαμμής", "Μανίκας", "Μανελίδης", "Μανιώτης", "Μανουσέλης", "Μανουσιάδης", "Μανούκας", "Μαντάρης", "Μαντάς", "Μαντζουράνης", "Μαντζώρος", "Μανωλάκης", "Μανωλέας", "Μαραγκός", "Μαρακάς", "Μαργαρώνης", "Μαρκόπουλος", "Μασγαλάς", "Μαστρογιάννης", "Μαστρογιώργης", "Μασόπουλος", "Ματσούκας", "Μαυρικάκης", "Μαυρογονάτος", "Μαυροειδάκος", "Μαυρομανωλάκης", "Μαυρομμάτης", "Μαυρουδής", "Μαυρούτσος", "Μελιτσόπουλος", "Μεταξάς", "Μεϊμάρης", "Μηλιώρης", "Μητρίδης", "Μητρόπουλος", "Μητσόπουλος", "Μιχαηλίδης", "Μιχαλάκης", "Μιχαλέλλης", "Μοσχόπουλος", "Μουσελίμης", "Μουτουσίδης", "Μπέκος", "Μπίλλας", "Μπαγανάς", "Μπακλάβας", "Μπακοστεργίου", "Μπαλάσκας", "Μπαλουκίδης", "Μπαλταγιάννης", "Μπαλτζής", "Μπαντές", "Μπαντής", "Μπερεδήμας", "Μπερμπατιώτης", "Μπλανάς", "Μπλιατσίου", "Μποζίκης", "Μποτσώλης", "Μποτώνης", "Μπουλούκος", "Μπουρλής", "Μποφός", "Μπούκος", "Μπούσιος", "Μπραζιώτης", "Μπότης", "Μυλωνάς", "Μυσιρλής", "Μυτελέτσης", "Μωραΐτης", "Μωρός", "Μόκας", "Μόναχας", "Νάστατος", "Νάστος", "Νέλος", "Νανούρης", "Νασίκας", "Νασιόπουλος", "Νικητόπουλος", "Νικολαΐδης", "Νικολόπουλος", "Νικουλής", "Νιτσοτόλης", "Νούσης", "Νταβάς", "Ντουλιάς", "Ντόβας", "Ντόκος", "Ντότης", "Ξανθάκης", "Ξηρογιάννης", "Ξηρός", "Ορφανάκης", "Ορφανιώτης", "Ότσος", "Πάγκαλος", "Πάνος", "Πάντος", "Πέππας", "Πέτσας", "Πίππας", "Πίσσιος", "Πίτσης", "Παλαιολόγος", "Παληός", "Παλλάς", "Παναγιωτακόπουλος", "Πανδής", "Πανούσης", "Πανταζής", "Παντζέκος", "Παντζαρτζίδης", "Παπαγεωργίου", "Παπαγιάννης", "Παπαγιαννακόπουλος", "Παπαγιαννόπουλος", "Παπαδάκης", "Παπαδημητράκης", "Παπαδημητριάδης", "Παπαδόπουλος", "Παπαευαγγελίου", "Παπαθανασίου", "Παπαθεοδοσίου", "Παπαπανός", "Παπαροϊδάμης", "Παπαστεργίου", "Παπατρέχας", "Παπαφώτης", "Παπουδής", "Παπουλής", "Παππάς", "Παρασκευόπουλος", "Παργανάς", "Παρούσης", "Πασσαλίδης", "Πατελής", "Πατμανίδης", "Πατσούρας", "Περάκης", "Περδίκης", "Περδικάκης", "Περιστερόπουλος", "Περπινιάς", "Περράκης", "Περρώτης", "Πετράκης", "Πετρίδης", "Πετριτάκης", "Πετρογιάννης", "Πετρόπουλος", "Πευκιανάκης", "Πιάγκος", "Πικούνης", "Πικρός", "Πιπεράκης", "Πιπερίγκος", "Πισχινάς", "Πιτερός", "Πιτσάκης", "Πιτσολής", "Πλακωτάρης", "Πλιάτσικας", 
"Πολυζωάκης", "Πολυχρονίδης", "Πολυχρονόπουλος", "Πολυχρόνης", "Πορίχης", "Πουλημένος", "Πουλιέζος", "Πουλογιαννόπουλος", "Πουφτσής", "Προβής", "Πυλαρινός", "Ράλλης", "Ράπτης", "Ρέγκας", "Ρέντας", "Ρέντζος", "Ρέππος", "Ρήγας", "Ρήνος", "Ρίγκος", "Ρίζος", "Ρίσβας", "Ραγκούσης", "Ραδοβάλης", "Ραφιός", "Ραχμανίδης", "Ραχωβίτσας", "Ριζούλης", "Ρουπακάς", "Ρουσιανός", "Ροϊδούλης", "Ρωμαίου", "Ρόγαρης", "Ρόδης", "Ρόκας", "Σάτλας", "Σίδερης", "Σακελλαρίου", "Σαλίχος", "Σαλταούρας", "Σαμακίδης", "Σαμανίδης", "Σαμαράς", "Σαουλίδης", "Σαρίκας", "Σαραντινός", "Σαραφίδης", "Σαρρός", "Σγουρός", "Σδραλλής", "Σεβδάς", "Σεφέκος", "Σιγανός", "Σιδηρόπουλος", "Σικαλίδης", "Σιτόπουλος", "Σιώμος", "Σιώρης", "Σκαρλάτος", "Σκαρπέτας", "Σκορδάκης", "Σκουλαρίδης", "Σκρέκας", "Σκρίμπας", "Σκόρδος", "Σμαρδάς", "Σμπονιάς", "Σμπρίνης", "Σμυρνιώτης", "Σολακούδης", "Σουλιντζής", "Σουρμπής", "Σοφιανός", "Σπάλας", "Σπαθόπουλος", "Σπανδωνίδης", "Σπανουδάκης", "Σπανός", "Σπασόπουλος", "Σπηλιώτης", "Σπορδιλής", "Σπυριδάκης", "Σπυρόπουλος", "Σταθάτος", "Σταθόπουλος", "Σταμάτης", "Σταμέλος", "Σταματιάδης", "Σταμούλης", "Σταμόπουλος", "Σταυριανός", "Σταυρόπουλος", "Στεργιαλής", "Στεργιούδης", "Στοφοριάδης", "Στραβοσνίχης", "Στόγιος", "Συλλίγαρδος", "Συργής", "Συρρής", "Σφούνης", "Σφύρλας", "Σωτηράλης", "Σύκας", "Τάρναρης", "Τάσιος", "Τάχας", "Ταβερναράκης", "Τακαντζάς", "Ταμιωλάκης", "Τασιούλας", "Ταχμαζίδης", "Ταχτσίδης", "Τεμουρτζίδης", "Τερζής", "Τερζίδης", "Τζέκος", "Τζήκας", "Τζαβέλλας", "Τζαλλας", "Τζανουδάκης", "Τζεβελέκος", "Τζιάβας", "Τζιάρας", "Τζινιέρης", "Τζιόρτζιος", "Τζιώτζης", "Τζουβέλης", "Τζουμάκης", "Τοδώρης", "Τολούδης", "Τορομίδης", "Τουρναβίτης", "Τραχίλης", "Τρεντσίου", "Τριαντακωνσταντής", "Τριβέλλας", "Τσάρκος", "Τσέας", "Τσέγας", "Τσέλιος", "Τσέλλος", "Τσαγκρασούλης", "Τσαγλιώτης", "Τσακανίκας", "Τσακμάκης", "Τσαλαμάνδρης", "Τσαμαδός", "Τσαμασλίδης", "Τσανδήλας", "Τσαπραλής", "Τσαραμιάδης", "Τσατσάνης", "Τσεμπερλίδης", "Τσεντούρος", "Τσιάκος", "Τσιάρας", "Τσιαμίτας", "Τσιαμούρας", "Τσιαντάς", "Τσιατής", "Τσιγαρίδας", "Τσικνιάς", "Τσιρώνης", "Τσιτούρας", "Τσομώκος", "Τσοπανίδης", "Τσουκνίδας", "Τσούμος", "Τσούπρας", "Τσόλκας", "Τσότρας", "Τσώνης", "Τσώτσης", "Τυμβίου", "Τυράλης", "Φαλιέρος", "Φανουργάκης", "Φειδερόπουλος", "Φερεντίνος", "Φιλιππάκης", "Φιλιππάτος", "Φιλιππόπουλος", "Φουντζούλας", "Φουσιέκης", "Φουτσιτζής", "Φούσκας", "Φραγκόπουλος", "Φραντζής", "Φρογάκης", "Φυδάνης", "Φωκάς", "Φωκαδελής", "Φωτογλίδης", "Φωτόπουλος", "Χέλιος", "Χαβρεδάκης", "Χαλατσής", "Χαραλάμπους", "Χαραλαμπίδης", "Χαρισιάδης", "Χαρμπίλας", "Χαρπαντίδης", "Χαρτερός", "Χατζαντώνης", "Χατζελλής", "Χατζηβλασίου", "Χατζηγεωργίου", "Χατζηγρηγοράκης", "Χατζηκύρκος", "Χατζησάββας", "Χατζησαββίδης", "Χατζησταυράκης", "Χατζιάρας", "Χατζόπουλος", "Χαχούδης", "Χητός", "Χιτός", "Χονδρολίδης", "Χουδαλάκης", "Χουλιάρας", "Χουντής", "Χουχουλής", "Χριστάκης", "Χριστάρας", "Χριστακόπουλος", "Χριστοδουλάκης", "Χριστόπουλος", "Χρονόπουλος", "Χρυσίδης", "Χρυσικός", "Χρυσοβέργης", "Χρυσουλής", "Ψάλτης", "Ψυλλάκης", "Ψυχιάς", ) last_names_female = ( "Αβραμίδου", "Αβραμπέκη", "Αγγελάκη", "Αγγελίδου", "Αγγελουσοπούλου", "Αγραφιώτη", "Αδαμοπούλου", "Αθανασιάδη", "Αθανασιάδου", "Αλεξανδράκη", "Αλεξανδρίδου", "Αλεξοπούλου", "Αλυσανδράτου", "Ανανιάδου", "Αναστασάκη", "Αναστασίου", "Ανεζάκη", "Αντωνάκη", "Αντωνίου", "Αποστολάκη", "Αποστολίδου", "Αραπάκη", "Αραπίδου", "Αργυράκη", "Αργυρίου", "Αργυροπούλου", "Αρματά", "Αρσενάκη", "Αρχάκη", "Ασλανίδου", "Ασουχίδου", "Ασσαργιωτάκη", "Ατσαλάκη", "Βαΐου", "Βαβάση", "Βαγενά", "Βαζούρα", "Βαϊραμίδου", 
"Βακουφτσή", "Βαλεντή", "Βαλιάκα", "Βαλκάνου", "Βαλσαμίδου", "Βαμβουκάκη", "Βαρελίδου", "Βαρθαλίτη", "Βαρουτίδου", "Βαρσάμου", "Βασιλάκη", "Βασιλείου", "Βασιλειάδη", "Βασιλοπούλου", "Βαφειάδου", "Βερβερίδου", "Βερβίτη", "Βλάσση", "Βλαχοδήμου", "Βλάχου", "Βοσινάκη", "Βουλγαρίδου", "Βουλτσίδου", "Βουτσινά", "Βραδή", "Βρακά", "Βράσκου", "Βυζιηνού", "Γαβριήλ", "Γαβριηλίδη", "Γαλανάκη", "Γαλάνη", "Γαλανοπούλου", "Γεροκώστα", "Γερούκη", "Γεωργακοπούλου", "Γεωργαρά", "Γεωργή", "Γεωργιάδου", "Γεωργίτση", "Γεωργουλάκη", "Γεωργούλα", "Γιακαμόζη", "Γιαννακουδάκη", "Γιανναρά", "Γιαννοπούλου", "Γιατράκου", "Γκάβρου", "Γκάγκα", "Γκαλίου", "Γκιριτζιώνη", "Γκόβα", "Γκόνη", "Γκούβα", "Γκούνη", "Γούλα", "Γούπα", "Γραμμένου", "Γρηγορίου", "Γρηγοριάδου", "Γρηγοροπούλου", "Γρίβα", "Δαγλή", "Δαΐκου", "Δαμαλά", "Δαματοπούλου", "Δαμήλου", "Δανδανίδου", "Δαρδιώτη", "Δαρσακλή", "Δαυίδ", "Δάφνη", "Δεδούκου", "Δελή", "Δερλώπα", "Δημακογιάννη", "Δημητρίου", "Δημητρέλου", "Δημητριάδου", "Δημοπούλου", "Δήμου", "Δημτσούδη", "Διακουμή", "Διαμαντοπούλου", "Διοπούλου", "Δούβαλη", "Δουκίδου", "Δουλάμη", "Δουλγεράκη", "Δουλουφάκη", "Δρακάκη", "Δρακουλή", "Δραμουντάνη", "Δρίβα", "Δρόσου", "Δρυμαλίτου", "Δώρη", "Ελευθερίου", "Εμμανουήλ", "Εμμανουηλίδου", "Ενωτιάδη", "Ευαγγελάκη", "Ευαγγελίδη", "Ζαβιτσάνου", "Ζαννίκου", "Ζαρειφοπούλου", "Ζαρζάνη", "Ζαφείρη", "Ζαχαρίου", "Ζαχαριουδάκη", "Ζεγλίνα", "Ζερβά", "Ζιάρα", "Ζυγούρη", "Ζώη", "Ζωντανού", "Θασίτου", "Θεοδοσίου", "Θεοδοσιάδου", "Θεοδωροπούλου", "Θωμάκου", "Ιωακείμ", "Ιωακειμίδου", "Ιωσηφίδου", "Καζαντζή", "Κάκκα", "Κακοτρίχη", "Καλαθά", "Καλαϊτζοπούλου", "Καλαμάρα", "Καλδή", "Καλέμη", "Καλλιάνταση", "Καλογιαννάκη", "Καλογιάννη", "Καλομοίρη", "Καλούδη", "Καμινάρη", "Καμπούρη", "Κανελή", "Κανελλή", "Κανελλοπούλου", "Κανταρελή", "Καπανταϊδάκη", "Καπνιά", "Καπουρνιώτη", "Καραγιάννη", "Καραγιοβάννη", "Καραγρηγορίου", "Καραθανάση", "Καρακυρίου", "Καρακωνσταντινού", "Καρακώστα", "Καραμάνη", "Καραμανλή", "Καράμπη", "Καραμπίνα", "Καρανάνου", "Καρανασίου", "Καραντάνα", "Καραογλάνη", "Καρατάσου", "Καρβέλη", "Καργάκου", "Καρκαλέτση", "Καρκανάκη", "Καρολίδου", "Καρυδά", "Κασκαούτη", "Κασμιρλή", "Κασσωτάκη", "Κατσανίκου", "Κατσαντώνη", "Κατσαφάδου", "Κατσιγιάννη", "Κατσιλλή", "Κατσιμάλη", "Κατσιμάνη", "Κατσούλα", "Καφαντάρη", "Καφφέ", "Καχριμανίδη", "Καψή", "Κεσίση", "Κεσκίνη", "Κετεσίδου", "Κεφαλή", "Κιορίδου", "Κίτσου", "Κλεινάκη", "Κοκκινίδου", "Κοκκίνου", "Κολαΐτου", "Κόλλια", "Κολτσάκη", "Κονιάρη", "Κονσούλα", "Κοντογεωργάκη", "Κοντογιάννη", "Κοντού", "Κορομπόκη", "Κορωνίδου", "Κοτρώτσου", "Κοτσινά", "Κουζουλά", "Κουθούρη", "Κουκουβά", "Κουκουθάκη", "Κουλαουσάρη", "Κουλουμπού", "Κουλοχέρη", "Κουμάρα", "Κουρλού", "Κουτκιά", "Κουτουζίδου", "Κουτσικούρη", "Κουτσιουμάρη", "Κουτσοβίδου", "Κουτσονίκα", "Κουτσούμπεη", "Κουτσούμπη", "Κουτσουνάκη", "Κουτσουρέλη", "Κουφάκη", "Κοφινάκη", "Κυργιά", "Κυριακοπούλου", "Κυριάκου", "Κυριακούλη", "Κυριαννάκη", "Κυρίτση", "Κυρκούδη", "Κυρούδη", "Κωνσταντινίδου", "Κωνσταντοπούλου", "Κωστοπούλου", "Κωτούλα", "Κωτσιονοπούλου", "Κώτσου", "Λαλαούνη", "Λέκκα", "Λεντζίου", "Λεπίδα", "Λιάκου", "Λιάνου", "Λιναρδάκη", "Λιολιοπούλου", "Λιόντη", "Λιούκα", "Λίτσιου", "Λογκάκη", "Λογοθέτη", "Λουμπούτσκου", "Λυμαξή", "Λυρή", "Λύτρα", "Μαγκανάρη", "Μαγκαφοπούλου", "Μαγούλα", "Μακρή", "Μακροπούλου", "Μάλαμα", "Μαλίμη", "Μαλλή", "Μαμαλά", "Μανέτα", "Μανιάτη", "Μανιτάρου", "Μανοπούλου", "Μανουσοπούλου", "Μαντά", "Μάντου", "Μαράκη", "Μάρα", "Μαργαριτάκη", "Μαργαρίτη", "Μαργιά", "Μαρκαντωνάκη", "Μαρουγιάννη", "Μαρτζούκου", "Μαρτιάδου", "Μασαούτη", "Μασιάλα", "Μασίκα", 
"Μαστραγγελή", "Μαστρογιαννίδου", "Ματεντσίδου", "Ματσούρη", "Μαυρίδου", "Μαυραειδή", "Μαυρίδη", "Μαυροείδη", "Μαυροπούλου", "Μέλανι", "Μενγκ", "Μεξή", "Μηλιάδου", "Μηλιάκη", "Μηνά", "Μητροπούλου", "Μητσοπούλου", "Μήτσου", "Μιλέα", "Μισίδου", "Μιχαηλίδου", "Μιχαλάρου", "Μιχελή", "Μιχελακάκη", "Μίχου", "Μοσχοβάκη", "Μουτζούρη", "Μπακοστεργίου", "Μπακουλή", "Μπαλή", "Μπαλαμπάνη", "Μπαλανίκα", "Μπαλάση", "Μπαλλή", "Μπαλτατζή", "Μπαρκούτα", "Μπατζάνη", "Μπατσάκη", "Μπαφέρα", "Μπαφίτη", "Μπεκάκου", "Μπελέκου", "Μπενέτου", "Μπεσύρη", "Μπίκα", "Μπιμπίκα", "Μπιμπίρη", "Μπλιατσίου", "Μποζίκη", "Μπονέλη", "Μπότζα", "Μποτζιώρη", "Μπούζα", "Μπραέσα", "Μπρούζου", "Μπύρου", "Μυλωνίδου", "Μυτάρη", "Μωραΐτη", "Μωυσίδου", "Νάντσου", "Ναούμ", "Νάσσου", "Νατσουλή", "Νίκα", "Νικολάτου", "Νικολάου", "Νικολοπούλου", "Νικολουδάκη", "Νταγκαλή", "Ντάνου", "Ντανώλα", "Ντζιαβίδα", "Ντόβα", "Ντότση", "Ντριβαλά", "Νώε", "Ξανθοπούλου", "Ξενάκη", "Ξενίδη", "Ξένου", "Ξηρίδου", "Ξηροδήμα", "Ξηροτύρη", "Ξυγκάκου", "Οικονομοπούλου", "Οικονόμου", "Ορφανίδου", "Παλαιολογοπούλου", "Παλαμπουγιούκη", "Παλιεράκη", "Παλιούρα", "Παναγιωτακοπούλου", "Παναγιώτου", "Πανκίδου", "Πανοπούλου", "Πάνου", "Πανταζή", "Πάντζιου", "Παντίσκα", "Πάντου", "Παπαγεωργίου", "Παπαδήμα", "Παπαδημητροπούλου", "Παπαδοπούλου", "Παπαευαγγελίου", "Παπαευαγγέλου", "Παπαθανασίου", "Παπαθεοδοσίου", "Παπαθωμά", "Παπακωνσταντίνου", "Παπακώστα", "Παπανάνου", "Παπανδρέου", "Παπανδρικοπούλου", "Παπανικολάου", "Παπανώτα", "Παπασπηλιωτοπούλου", "Παπασταύρου", "Παπαστεργίου", "Παπατζήκα", "Παπαχρήστου", "Παπουτσοπούλου", "Παππά", "Παράνου", "Παρασκευά", "Πατελλή", "Πατινιωτάκη", "Πατσουρέα", "Παυλάκη", "Παυλή", "Παυλίδου", "Πεμούση", "Περουλάκη", "Πετράκη", "Πετρίδου", "Πετροσιάν", "Πέτρου", "Πέτση", "Πετσιά", "Πέττα", "Πιπερίδη", "Πισκοπάνη", "Πίσπα", "Πλατάκη", "Πολατίδου", "Πολέμη", "Πολίτου", "Πορφυριάδου", "Ποσάντζη", "Ποταμιάνου", "Πουλή", "Πουλιάση", "Πουλίδα", "Πουρνάρα", "Πρέκα", "Προύβα", "Πυροβόλου", "Ραδιοπούλου", "Ραυτοπούλου", "Ρέππα", "Ρόγγα", "Ροδίτου", "Ρόκκα", "Ρούσσου", "Ρωμαίου", "Σαββάκη", "Σακελλαρίου", "Σακκή", "Σαλέμη", "Σαμπάνη", "Σαμτανίδου", "Σάουερ", "Σαπρίκη", "Σαριδάκη", "Σαρρή", "Σγουρένα", "Σδούκου", "Σεβαστού", "Σελινοπούλου", "Σεμπέπου", "Σηκωτίδου", "Σίββα", "Σιδηροπούλου", "Σιμητοπούλου", "Σιμιτζή", "Σιόλου", "Σιούτα", "Σκαβέντζου", "Σκασίλα", "Σκαφτούρου", "Σκοτάδη", "Σκούμπρου", "Σκρεμμύδα", "Σκυλογιάννη", "Σμιτ", "Σοκολάκη", "Σολωμίδου", "Σόφρα", "Σπαθάρη", "Σπανίδου", "Σπανοχριστοδούλου", "Σπασέγκου", "Σπηλιάδου", "Σπυριδάκη", "Σταθά", "Σταματούκου", "Σταμέλου", "Σταμούλη", "Σταυρίδου", "Σταύρου", "Στεφάνου", "Στούμπου", "Στρατογιάννη", "Στρούμπα", "Στυλιανίδου", "Συβιλιά", "Συμεωνίδου", "Συνοδινού", "Συρμού", "Σύψα", "Σφακιανάκη", "Σωτηροπούλου", "Ταμπορρίνο", "Τάντου", "Ταξίδου", "Τάσση", "Τάτση", "Ταυλαρίδου", "Ταφραλή", "Τζάγκα", "Τζαναβάρα", "Τζιγκούρα", "Τζιόβα", "Τζιρατούδη", "Τζιωρτζή", "Τόγια", "Τόλη", "Τομπουλίδου", "Τόμπρη", "Τοπαλή", "Τοσούνη", "Τουλάκη", "Τουλούπη", "Τουρνά", "Τραγούστη", "Τρεντσίου", "Τριανταφύλλου", "Τρίκα", "Τριφτανίδου", "Τρομπούκη", "Τσάκη", "Τσακαλάκου", "Τσακίρη", "Τσαμοπούλου", "Τσαμπαλή", "Τσαμπούρη", "Τσαμτσούρη", "Τσανάκα", "Τσαντίδου", "Τσάτη", "Τσαχάκη", "Τσέτου", "Τσετσέρη", "Τσικνή", "Τσικρίκα", "Τσίμη", "Τσιομπάνη", "Τσιούπρα", "Τσιπλίκωφ", "Τσιωλξ", "Τσουκιά", "Τσουνάκου", "Τσουράκη", "Τσούρα", "Τσώνη", "Τυμβίου", "Υφαντή", "Φαββάτα", "Φαρμάκη", "Φασατάκη", "Φασουλή", "Φασουλίδου", "Φειζατίδου", "Φιδάνη", "Φιλιάγκου", "Φιλίππου", "Φίλκα", "Φλασκή", "Φουρκιώτη", "Φραγκιαδάκη", 
"Φραγκουδάκη", "Φρονιμάκη", "Φυτιλή", "Φωλιά", "Φωτακοπούλου", "Φωτιάδου", "Χαλαντζούκα", "Χαλβατζή", "Χαλκίδου", "Χαμαλίδου", "Χανταμπή", "Χαντζή", "Χαραλαμπίδου", "Χασάπη", "Χατζή", "Χατζηβλασίου", "Χατζηγεωργίου", "Χατζηδάκη", "Χατζημιχαήλ", "Χατζηφώτη", "Χατζηχαρίστου", "Χιωτίδου", "Χολέβα", "Χονδρούδη", "Χοντζιά", "Χορόζη", "Χορτάτου", "Χουρζαμάνη", "Χρήστου", "Χριστογιάννη", "Χριστοδούλου", "Χριστοπούλου", "Χριστοφόρη", "Χρονοπούλου", "Χρυσανθακοπούλου", "Χρυσάφη", "Χρυσικάκη", "Χωριανοπούλου", "Χωρινού", "Ψυρρή", "Ψυχάρη", ) last_names = last_names_male + last_names_female
Provider
python
run-llama__llama_index
llama-index-core/llama_index/core/schema.py
{ "start": 5850, "end": 6351 }
class ____(BaseComponent, DispatcherSpanMixin): """Base class for transform components.""" model_config = ConfigDict(arbitrary_types_allowed=True) @abstractmethod def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> Sequence[BaseNode]: """Transform nodes.""" async def acall( self, nodes: Sequence[BaseNode], **kwargs: Any ) -> Sequence[BaseNode]: """Async transform nodes.""" return self.__call__(nodes, **kwargs)
TransformComponent
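The acall default above makes every synchronous transform usable in async ingestion pipelines for free. A minimal sketch of a concrete subclass, assuming llama_index.core.schema exports BaseNode and TransformComponent as the record's path suggests; WhitespaceCleaner and its trim behavior are illustrative, not from the source:

from typing import Any, Sequence
from llama_index.core.schema import BaseNode, TransformComponent

class WhitespaceCleaner(TransformComponent):
    """Hypothetical transform: trims surrounding whitespace from each node's text."""

    def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> Sequence[BaseNode]:
        for node in nodes:
            node.set_content(node.get_content().strip())
        return nodes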
python
astropy__astropy
astropy/utils/masked/tests/test_function_helpers.py
{ "start": 33558, "end": 34626 }
class ____(MaskedArraySetup): def test_outer(self): result = np.outer(self.ma, self.mb) expected_data = np.outer(self.a.ravel(), self.b.ravel()) expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel()) assert_array_equal(result.unmasked, expected_data) assert_array_equal(result.mask, expected_mask) out = np.zeros_like(result) result2 = np.outer(self.ma, self.mb, out=out) assert result2 is out assert result2 is not result assert_masked_equal(result2, result) out2 = np.zeros_like(result.unmasked) with pytest.raises(TypeError): np.outer(self.ma, self.mb, out=out2) def test_kron(self): result = np.kron(self.ma, self.mb) expected_data = np.kron(self.a, self.b) expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape( result.shape ) assert_array_equal(result.unmasked, expected_data) assert_array_equal(result.mask, expected_mask)
TestOuterLikeFunctions
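Both tests above assert the same propagation rule: an element of the outer (or Kronecker) product is masked whenever either input element is masked. A plain-numpy illustration of the mask construction the tests expect (arrays made up, no astropy required):

import numpy as np

mask_a = np.array([True, False])
mask_b = np.array([False, False, True])
# Output element (i, j) is masked iff mask_a[i] or mask_b[j] holds.
print(np.logical_or.outer(mask_a, mask_b))
# [[ True  True  True]
#  [False False  True]]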
python
python__mypy
mypyc/test-data/fixtures/ir.py
{ "start": 1299, "end": 1640 }
class ____: __class__: type def __new__(cls) -> Self: pass def __init__(self) -> None: pass def __eq__(self, x: object) -> bool: pass def __ne__(self, x: object) -> bool: pass def __str__(self) -> str: pass def __setattr__(self, k: str, v: object) -> None: pass def __delattr__(self, k: str) -> None: pass
object
python
walkccc__LeetCode
solutions/3413. Maximum Coins From K Consecutive Bags/3413.py
{ "start": 0, "end": 903 }
class ____: def maximumCoins(self, coins: list[list[int]], k: int) -> int: return max(self._slide(coins, k), self._slide([[-r, -l, c] for l, r, c in coins], k)) def _slide(self, coins: list[list[int]], k: int) -> int: coins.sort() res = 0 windowSum = 0 j = 0 for li, ri, ci in coins: # Consider the number line [li..li + k). rightBoundary = li + k # [lj, rj] is fully in [li..li + k). while j + 1 < len(coins) and coins[j + 1][0] < rightBoundary: lj, rj, cj = coins[j] windowSum += (rj - lj + 1) * cj j += 1 # [lj, rj] may be partially in [li..li + k). last = 0 if j < len(coins) and coins[j][0] < rightBoundary: lj, rj, cj = coins[j] last = (min(rightBoundary - 1, rj) - lj + 1) * cj res = max(res, windowSum + last) windowSum -= (ri - li + 1) * ci return res
Solution
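The solver runs _slide twice: once on the intervals as given (windows anchored at a left endpoint) and once on the mirrored intervals (windows anchored at a right endpoint), since an optimal window of k consecutive bags must touch one of the two. Usage on the example from the problem statement, where picking bags 3..6 yields the expected 10:

sol = Solution()
print(sol.maximumCoins([[8, 10, 1], [1, 3, 2], [5, 6, 4]], k=4))  # 10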
python
instagram__MonkeyType
monkeytype/typing.py
{ "start": 16079, "end": 16385 }
class ____(TypeRewriter): """Returns an Iterator, if the send_type and return_type of a Generator is None""" def rewrite_Generator(self, typ): args = typ.__args__ if args[1] is NoneType and args[2] is NoneType: return Iterator[args[0]] return typ
RewriteGenerator
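The rewrite only fires when both the send and return slots are NoneType, which is how typing stores Generator[int, None, None]. A quick check, assuming the class is importable from monkeytype.typing as the record's path suggests (rewrite_Generator is called directly here; in practice TypeRewriter.rewrite dispatches to it):

from typing import Generator
from monkeytype.typing import RewriteGenerator

rw = RewriteGenerator()
print(rw.rewrite_Generator(Generator[int, None, None]))  # typing.Iterator[int]
print(rw.rewrite_Generator(Generator[int, str, bool]))   # unchanged: typing.Generator[int, str, bool]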
python
kamyu104__LeetCode-Solutions
Python/divide-an-array-into-subarrays-with-minimum-cost-ii.py
{ "start": 1724, "end": 3851 }
class ____(object): def minimumCost(self, nums, k, dist): """ :type nums: List[int] :type k: int :type dist: int :rtype: int """ def get_top(heap, cnt, total): while heap[0] in cnt: x = heapq.heappop(heap) cnt[x] -= 1 if cnt[x] == 0: del cnt[x] total[0] -= 1 return heap[0] def lazy_delete(heap, cnt, total, x): cnt[x] += 1 total[0] += 1 if total[0] <= len(heap)-total[0]: return new_heap = [] for x in heap: if x not in cnt: new_heap.append(x) continue cnt[x] -= 1 if cnt[x] == 0: del cnt[x] total[0] = 0 heapq.heapify(new_heap) heap[:] = new_heap max_heap, min_heap = [], [] cnt1, cnt2 = collections.Counter(), collections.Counter() total1, total2 = [0], [0] mn, curr = float("inf"), 0 for i in xrange(1, len(nums)): heapq.heappush(max_heap, -nums[i]) curr += nums[i] if (len(max_heap)-total1[0]) > k-1: x = get_top(max_heap, cnt1, total1) curr -= -x heapq.heappush(min_heap, -heapq.heappop(max_heap)) if (len(max_heap)-total1[0])+(len(min_heap)-total2[0]) > 1+dist: x = get_top(min_heap, cnt2, total2) if x <= nums[i-(1+dist)]: lazy_delete(min_heap, cnt2, total2, nums[i-(1+dist)]) else: lazy_delete(max_heap, cnt1, total1, -nums[i-(1+dist)]) heapq.heappop(min_heap) curr -= nums[i-(1+dist)]-x heapq.heappush(max_heap, -x) if len(max_heap)-total1[0] == k-1: mn = min(mn, curr) return nums[0]+mn # Time: O(nlogd) # Space: O(d) from sortedcontainers import SortedList # sliding window, sorted list
Solution2
python
apache__airflow
airflow-core/src/airflow/models/crypto.py
{ "start": 1019, "end": 1365 }
class ____(Protocol): """This class is only used for TypeChecking (for IDEs, mypy, etc).""" is_encrypted: bool def decrypt(self, msg: bytes | str, ttl: int | None = None) -> bytes: """Decrypt with Fernet.""" ... def encrypt(self, msg: bytes) -> bytes: """Encrypt with Fernet.""" ...
FernetProtocol
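Since this is a typing.Protocol, conformance is structural: anything exposing is_encrypted plus matching encrypt/decrypt type-checks, with no inheritance required. A sketch under that assumption (NullFernet here is hypothetical, loosely modeled on Airflow's own no-op fallback; the import path follows the record's file location):

from airflow.models.crypto import FernetProtocol

class NullFernet:
    """Hypothetical stand-in that satisfies the protocol without encrypting."""

    is_encrypted = False

    def decrypt(self, msg: bytes | str, ttl: int | None = None) -> bytes:
        return msg.encode() if isinstance(msg, str) else msg

    def encrypt(self, msg: bytes) -> bytes:
        return msg

def persist(fernet: FernetProtocol, secret: bytes) -> bytes:
    return fernet.encrypt(secret)

persist(NullFernet(), b"token")  # accepted by mypy: structural match, no subclassing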
python
PrefectHQ__prefect
src/prefect/server/services/late_runs.py
{ "start": 1154, "end": 5053 }
class ____(LoopService): """ Finds flow runs that are later than their scheduled start time A flow run is defined as "late" if has not scheduled within a certain amount of time after its scheduled start time. The exact amount is configurable in Prefect REST API Settings. """ @classmethod def service_settings(cls) -> ServicesBaseSetting: return get_current_settings().server.services.late_runs def __init__(self, loop_seconds: float | None = None, **kwargs: Any): super().__init__( loop_seconds=loop_seconds or PREFECT_API_SERVICES_LATE_RUNS_LOOP_SECONDS.value(), **kwargs, ) # mark runs late if they are this far past their expected start time self.mark_late_after: datetime.timedelta = ( PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS.value() ) # query for this many runs to mark as late at once self.batch_size = 400 @db_injector async def run_once(self, db: PrefectDBInterface) -> None: """ Mark flow runs as late by: - Querying for flow runs in a scheduled state that are Scheduled to start in the past - For any runs past the "late" threshold, setting the flow run state to a new `Late` state """ scheduled_to_start_before = now("UTC") - datetime.timedelta( seconds=self.mark_late_after.total_seconds() ) while True: async with db.session_context(begin_transaction=True) as session: query = self._get_select_late_flow_runs_query( scheduled_to_start_before=scheduled_to_start_before, db=db ) result = await session.execute(query) runs = result.all() # mark each run as late for run in runs: await self._mark_flow_run_as_late(session=session, flow_run=run) # if no runs were found, exit the loop if len(runs) < self.batch_size: break self.logger.info("Finished monitoring for late runs.") @inject_db def _get_select_late_flow_runs_query( self, scheduled_to_start_before: datetime.datetime, db: PrefectDBInterface ) -> sa.Select[tuple["UUID", DateTime | None]]: """ Returns a sqlalchemy query for late flow runs. Args: scheduled_to_start_before: the maximum next scheduled start time of scheduled flow runs to consider in the returned query """ query = ( sa.select( db.FlowRun.id, db.FlowRun.next_scheduled_start_time, ) .where( # The next scheduled start time is in the past, including the mark late # after buffer (db.FlowRun.next_scheduled_start_time <= scheduled_to_start_before), db.FlowRun.state_type == states.StateType.SCHEDULED, db.FlowRun.state_name == "Scheduled", ) .limit(self.batch_size) ) return query async def _mark_flow_run_as_late( self, session: AsyncSession, flow_run: sa.Row[tuple["UUID", DateTime | None]], ) -> None: """ Mark a flow run as late. Pass-through method for overrides. """ try: await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=states.Late(scheduled_time=flow_run.next_scheduled_start_time), flow_policy=MarkLateRunsPolicy, # type: ignore ) except ObjectNotFoundError: return # flow run was deleted, ignore it if __name__ == "__main__": asyncio.run(MarkLateRuns(handle_signals=True).start())
MarkLateRuns
python
encode__django-rest-framework
rest_framework/exceptions.py
{ "start": 4085, "end": 4802 }
class ____(APIException): status_code = status.HTTP_400_BAD_REQUEST default_detail = _('Invalid input.') default_code = 'invalid' def __init__(self, detail=None, code=None): if detail is None: detail = self.default_detail if code is None: code = self.default_code # For validation failures, we may collect many errors together, # so the details should always be coerced to a list if not already. if isinstance(detail, tuple): detail = list(detail) elif not isinstance(detail, dict) and not isinstance(detail, list): detail = [detail] self.detail = _get_error_details(detail, code)
ValidationError
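The coercion in __init__ gives callers a uniform shape: bare strings and tuples become lists, while dicts (field name to errors) pass through, so .detail is always iterable the same way. What the two paths produce, as implied by _get_error_details applying the 'invalid' default code:

from rest_framework.exceptions import ValidationError

print(ValidationError("Invalid input.").detail)
# [ErrorDetail(string='Invalid input.', code='invalid')]
print(ValidationError({"email": ["Required."]}).detail)
# {'email': [ErrorDetail(string='Required.', code='invalid')]}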
python
pyca__cryptography
tests/hazmat/primitives/test_sm4.py
{ "start": 698, "end": 1182 }
class ____: test_ecb = generate_encrypt_test( load_nist_vectors, os.path.join("ciphers", "SM4"), ["draft-ribose-cfrg-sm4-10-ecb.txt"], lambda key, **kwargs: algorithms.SM4(binascii.unhexlify(key)), lambda **kwargs: modes.ECB(), ) @pytest.mark.supported( only_if=lambda backend: backend.cipher_supported( algorithms.SM4(b"\x00" * 16), modes.CBC(b"\x00" * 16) ), skip_message="Does not support SM4 CBC", )
TestSM4ModeECB
python
ansible__ansible
test/lib/ansible_test/_internal/cli/argparsing/parsers.py
{ "start": 6849, "end": 7180 }
class ____(enum.Flag): """Acceptable condition(s) for matching user input to available choices.""" CHOICE = enum.auto() """Match any choice.""" ANY = enum.auto() """Match any non-empty string.""" NOTHING = enum.auto() """Match an empty string which is not followed by a boundary match."""
MatchConditions
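As an enum.Flag, the conditions compose with bitwise OR and can be tested for membership, which is how a parser can accept several match modes at once. A standalone demonstration:

allowed = MatchConditions.CHOICE | MatchConditions.ANY
print(MatchConditions.ANY in allowed)      # True
print(MatchConditions.NOTHING in allowed)  # False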
python
apache__airflow
task-sdk/src/airflow/sdk/definitions/connection.py
{ "start": 3329, "end": 13443 }
class ____: """ A connection to an external data source. :param conn_id: The connection ID. :param conn_type: The connection type. :param description: The connection description. :param host: The host. :param login: The login. :param password: The password. :param schema: The schema. :param port: The port number. :param extra: Extra metadata. Non-standard data such as private/SSH keys can be saved here. JSON encoded object. """ conn_id: str conn_type: str description: str | None = None host: str | None = None schema: str | None = None login: str | None = None password: str | None = None port: int | None = None extra: str | None = None EXTRA_KEY = "__extra__" def get_uri(self) -> str: """Generate and return connection in URI format.""" from urllib.parse import parse_qsl if self.conn_type and "_" in self.conn_type: log.warning( "Connection schemes (type: %s) shall not contain '_' according to RFC3986.", self.conn_type, ) if self.conn_type: uri = f"{self.conn_type.lower().replace('_', '-')}://" else: uri = "//" host_to_use: str | None if self.host and "://" in self.host: protocol, host = self.host.split("://", 1) # If the protocol in host matches the connection type, don't add it again if protocol == self.conn_type: host_to_use = self.host protocol_to_add = None else: # Different protocol, add it to the URI host_to_use = host protocol_to_add = protocol else: host_to_use = self.host protocol_to_add = None if protocol_to_add: uri += f"{protocol_to_add}://" authority_block = "" if self.login is not None: authority_block += quote(self.login, safe="") if self.password is not None: authority_block += ":" + quote(self.password, safe="") if authority_block > "": authority_block += "@" uri += authority_block host_block = "" if host_to_use: host_block += quote(host_to_use, safe="") if self.port: if host_block == "" and authority_block == "": host_block += f"@:{self.port}" else: host_block += f":{self.port}" if self.schema: host_block += f"/{quote(self.schema, safe='')}" uri += host_block if self.extra: try: extra_dejson = self.extra_dejson query: str | None = urlencode(extra_dejson) except TypeError: query = None if query and extra_dejson == dict(parse_qsl(query, keep_blank_values=True)): uri += ("?" if self.schema else "/?") + query else: uri += ("?" 
if self.schema else "/?") + urlencode({self.EXTRA_KEY: self.extra}) return uri def get_hook(self, *, hook_params=None): """Return hook based on conn_type.""" from airflow.providers_manager import ProvidersManager from airflow.sdk.module_loading import import_string hook = ProvidersManager().hooks.get(self.conn_type, None) if hook is None: raise AirflowException(f'Unknown hook type "{self.conn_type}"') try: hook_class = import_string(hook.hook_class_name) except ImportError: log.error( "Could not import %s when discovering %s %s", hook.hook_class_name, hook.hook_name, hook.package_name, ) raise if hook_params is None: hook_params = {} return hook_class(**{hook.connection_id_attribute_name: self.conn_id}, **hook_params) @classmethod def _handle_connection_error(cls, e: AirflowRuntimeError, conn_id: str) -> None: """Handle connection retrieval errors.""" if e.error.error == ErrorType.CONNECTION_NOT_FOUND: raise AirflowNotFoundException(f"The conn_id `{conn_id}` isn't defined") from None raise @classmethod def get(cls, conn_id: str) -> Any: from airflow.sdk.execution_time.context import _get_connection try: return _get_connection(conn_id) except AirflowRuntimeError as e: cls._handle_connection_error(e, conn_id) @classmethod async def async_get(cls, conn_id: str) -> Any: from airflow.sdk.execution_time.context import _async_get_connection try: return await _async_get_connection(conn_id) except AirflowRuntimeError as e: cls._handle_connection_error(e, conn_id) @property def extra_dejson(self) -> dict: """Returns the extra property by deserializing json.""" from airflow.sdk.log import mask_secret extra = {} if self.extra: try: extra = json.loads(self.extra) except JSONDecodeError: log.exception("Failed to deserialize extra property `extra`, returning empty dictionary") else: mask_secret(extra) return extra def get_extra_dejson(self) -> dict: """Deserialize extra property to JSON.""" import warnings warnings.warn( "`get_extra_dejson` is deprecated and will be removed in a future release. ", DeprecationWarning, stacklevel=2, ) return self.extra_dejson def to_dict(self, *, prune_empty: bool = False, validate: bool = True) -> dict[str, Any]: """ Convert Connection to json-serializable dictionary. :param prune_empty: Whether or not remove empty values. 
:param validate: Validate dictionary is JSON-serializable :meta private: """ conn: dict[str, Any] = { "conn_id": self.conn_id, "conn_type": self.conn_type, "description": self.description, "host": self.host, "login": self.login, "password": self.password, "schema": self.schema, "port": self.port, } if prune_empty: conn = _prune_dict(val=conn, mode="strict") if (extra := self.extra_dejson) or not prune_empty: conn["extra"] = extra if validate: json.dumps(conn) return conn @classmethod def from_json(cls, value, conn_id=None) -> Connection: kwargs = json.loads(value) extra = kwargs.pop("extra", None) if extra: kwargs["extra"] = extra if isinstance(extra, str) else json.dumps(extra) conn_type = kwargs.pop("conn_type", None) if conn_type: kwargs["conn_type"] = cls._normalize_conn_type(conn_type) port = kwargs.pop("port", None) if port: try: kwargs["port"] = int(port) except ValueError: raise ValueError(f"Expected integer value for `port`, but got {port!r} instead.") return cls(conn_id=conn_id, **kwargs) def as_json(self) -> str: """Convert Connection to JSON-string object.""" conn_repr = self.to_dict(prune_empty=True, validate=False) conn_repr.pop("conn_id", None) return json.dumps(conn_repr) @classmethod def from_uri(cls, uri: str, conn_id: str) -> Connection: """ Create a Connection from a URI string. :param uri: URI string to parse :param conn_id: Connection ID to assign to the connection :return: Connection object """ schemes_count_in_uri = uri.count("://") if schemes_count_in_uri > 2: raise AirflowException(f"Invalid connection string: {uri}.") host_with_protocol = schemes_count_in_uri == 2 uri_parts = urlsplit(uri) conn_type = uri_parts.scheme normalized_conn_type = cls._normalize_conn_type(conn_type) rest_of_the_url = uri.replace(f"{conn_type}://", ("" if host_with_protocol else "//")) if host_with_protocol: uri_splits = rest_of_the_url.split("://", 1) if "@" in uri_splits[0] or ":" in uri_splits[0]: raise AirflowException(f"Invalid connection string: {uri}.") uri_parts = urlsplit(rest_of_the_url) protocol = uri_parts.scheme if host_with_protocol else None host = _parse_netloc_to_hostname(uri_parts) parsed_host = cls._create_host(protocol, host) quoted_schema = uri_parts.path[1:] schema = unquote(quoted_schema) if quoted_schema else quoted_schema login = unquote(uri_parts.username) if uri_parts.username else uri_parts.username password = unquote(uri_parts.password) if uri_parts.password else uri_parts.password port = uri_parts.port extra = None if uri_parts.query: query = dict(parse_qsl(uri_parts.query, keep_blank_values=True)) if cls.EXTRA_KEY in query: extra = query[cls.EXTRA_KEY] else: extra = json.dumps(query) return cls( conn_id=conn_id, conn_type=normalized_conn_type, host=parsed_host, schema=schema, login=login, password=password, port=port, extra=extra, ) @staticmethod def _create_host(protocol, host) -> str | None: """Return the connection host with the protocol.""" if not host: return host if protocol: return f"{protocol}://{host}" return host @staticmethod def _normalize_conn_type(conn_type): if conn_type == "postgresql": conn_type = "postgres" elif "-" in conn_type: conn_type = conn_type.replace("-", "_") return conn_type
Connection
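from_uri and get_uri are built to round-trip: credentials are percent-quoted, the URI path becomes schema, and query parameters become the JSON extra (falling back to the __extra__ key when they are not flat strings). A round-trip sketch with made-up values, assuming the class is importable from the record's module path:

from airflow.sdk.definitions.connection import Connection

conn = Connection.from_uri(
    "postgres://user:p%40ss@db.example.com:5432/sales?sslmode=require",
    conn_id="sales_db",
)
print(conn.login, conn.password, conn.port)  # user p@ss 5432
print(conn.extra)                            # {"sslmode": "require"}
print(conn.get_uri())                        # reproduces the original URI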
python
huggingface__transformers
src/transformers/models/lfm2_moe/modular_lfm2_moe.py
{ "start": 1808, "end": 1870 }
class ____(Lfm2RotaryEmbedding): pass
Lfm2MoeRotaryEmbedding
python
pydantic__pydantic
pydantic-core/tests/validators/test_is_instance.py
{ "start": 2062, "end": 2388 }
class ____(type): def __instancecheck__(self, instance) -> bool: if 'error' in repr(instance): # an error here comes from a problem in the schema, not in the input value, so raise as internal error raise TypeError('intentional error') return 'true' in repr(instance)
HasIsInstanceMeta
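Because __instancecheck__ lives on the metaclass, any class built with it answers isinstance by inspecting the candidate's repr, which is exactly what the pydantic-core test exercises. A standalone illustration (Widget is a made-up class name):

class Widget(metaclass=HasIsInstanceMeta):
    pass

print(isinstance("this is true", Widget))   # True: 'true' appears in the repr
print(isinstance("this is bogus", Widget))  # False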
python
pyca__cryptography
tests/hazmat/primitives/test_hashes.py
{ "start": 2281, "end": 2538 }
class ____: test_sha384 = generate_base_hash_test( hashes.SHA384(), digest_size=48, ) @pytest.mark.supported( only_if=lambda backend: backend.hash_supported(hashes.SHA512()), skip_message="Does not support SHA512", )
TestSHA384
python
sympy__sympy
sympy/physics/quantum/tests/test_state.py
{ "start": 966, "end": 1086 }
class ____(Ket): @classmethod def default_args(self): return ("r", "theta", "phi")
CustomKetMultipleLabels
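default_args is the fallback the quantum State constructor uses when no labels are supplied, so a no-argument instantiation is equivalent to passing the three defaults. A sketch mirroring the test module's own style of check (assuming Ket comes from sympy.physics.quantum.state):

from sympy.physics.quantum.state import Ket

class CustomKetMultipleLabels(Ket):
    @classmethod
    def default_args(self):
        return ("r", "theta", "phi")

assert CustomKetMultipleLabels() == CustomKetMultipleLabels("r", "theta", "phi")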
python
google__pytype
pytype/module_utils_test.py
{ "start": 160, "end": 1907 }
class ____(unittest.TestCase): """Test module utilities.""" def test_get_absolute_name(self): test_cases = [ ("x.y", "a.b", "x.y.a.b"), ("", "a.b", "a.b"), ("x.y", ".a.b", "x.y.a.b"), ("x.y", "..a.b", "x.a.b"), ("x.y", "...a.b", None), ] for prefix, name, expected in test_cases: self.assertEqual(module_utils.get_absolute_name(prefix, name), expected) def test_get_relative_name(self): test_cases = [ ("x.y", "x.y.a.b", "a.b"), ("x.y", "x.a.b", "..a.b"), ("x.y.z", "x.a.b", "...a.b"), ("x.y", "a.b", "a.b"), ("x.y", "y.a.b", "y.a.b"), ("x.y", "..x.y.a.b", "..x.y.a.b"), ("", "a.b", "a.b"), ("x.y", "", ""), ] for prefix, name, expected in test_cases: self.assertEqual(module_utils.get_relative_name(prefix, name), expected) def test_path_to_module_name(self): self.assertIsNone( module_utils.path_to_module_name( file_utils.replace_separator("../foo.py") ) ) self.assertIsNone( module_utils.path_to_module_name( file_utils.replace_separator("x/y/foo.txt") ) ) self.assertEqual( "x.y.z", module_utils.path_to_module_name( file_utils.replace_separator("x/y/z.pyi") ), ) self.assertEqual( "x.y.z", module_utils.path_to_module_name( file_utils.replace_separator("x/y/z.pytd") ), ) self.assertEqual( "x.y.z", module_utils.path_to_module_name( file_utils.replace_separator("x/y/z/__init__.pyi") ), ) # Because TestInferModule expands a lot of paths: expand = file_utils.expand_path
ModuleUtilsTest
python
PrefectHQ__prefect
src/prefect/client/schemas/sorting.py
{ "start": 1925, "end": 2184 }
class ____(AutoEnum): """Defines artifact collection sorting options.""" CREATED_DESC = AutoEnum.auto() UPDATED_DESC = AutoEnum.auto() ID_DESC = AutoEnum.auto() KEY_DESC = AutoEnum.auto() KEY_ASC = AutoEnum.auto()
ArtifactCollectionSort
python
ray-project__ray
python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py
{ "start": 18806, "end": 24856 }
class ____(unittest.TestCase): """Test GpuMetricProvider class.""" def setUp(self): """Set up test fixtures.""" self.provider = GpuMetricProvider() def test_init(self): """Test GpuMetricProvider initialization.""" self.assertIsNone(self.provider._provider) self.assertTrue(self.provider._enable_metric_report) self.assertEqual(len(self.provider._providers), 2) self.assertFalse(self.provider._initialized) @patch.object(NvidiaGpuProvider, "is_available", return_value=True) @patch.object(AmdGpuProvider, "is_available", return_value=False) def test_detect_gpu_provider_nvidia( self, mock_amd_available, mock_nvidia_available ): """Test GPU provider detection when NVIDIA is available.""" provider = self.provider._detect_gpu_provider() self.assertIsInstance(provider, NvidiaGpuProvider) mock_nvidia_available.assert_called_once() @patch.object(NvidiaGpuProvider, "is_available", return_value=False) @patch.object(AmdGpuProvider, "is_available", return_value=True) def test_detect_gpu_provider_amd(self, mock_amd_available, mock_nvidia_available): """Test GPU provider detection when AMD is available.""" provider = self.provider._detect_gpu_provider() self.assertIsInstance(provider, AmdGpuProvider) mock_nvidia_available.assert_called_once() mock_amd_available.assert_called_once() @patch.object(NvidiaGpuProvider, "is_available", return_value=False) @patch.object(AmdGpuProvider, "is_available", return_value=False) def test_detect_gpu_provider_none(self, mock_amd_available, mock_nvidia_available): """Test GPU provider detection when no GPUs are available.""" provider = self.provider._detect_gpu_provider() self.assertIsNone(provider) @patch("subprocess.check_output") def test_should_disable_gpu_check_true(self, mock_subprocess): """Test should_disable_gpu_check returns True for specific conditions.""" mock_subprocess.return_value = "" # Empty result means AMD GPU module not live class MockNVMLError(Exception): pass MockNVMLError.__name__ = "NVMLError_DriverNotLoaded" error = MockNVMLError("NVIDIA driver not loaded") result = self.provider._should_disable_gpu_check(error) self.assertTrue(result) @patch("subprocess.check_output") def test_should_disable_gpu_check_false_wrong_error(self, mock_subprocess): """Test should_disable_gpu_check returns False for wrong error type.""" mock_subprocess.return_value = "" error = Exception("Some other error") result = self.provider._should_disable_gpu_check(error) self.assertFalse(result) @patch("subprocess.check_output") def test_should_disable_gpu_check_false_amd_present(self, mock_subprocess): """Test should_disable_gpu_check returns False when AMD GPU is present.""" mock_subprocess.return_value = "live" # AMD GPU module is live class MockNVMLError(Exception): pass MockNVMLError.__name__ = "NVMLError_DriverNotLoaded" error = MockNVMLError("NVIDIA driver not loaded") result = self.provider._should_disable_gpu_check(error) self.assertFalse(result) def test_get_gpu_usage_disabled(self): """Test get_gpu_usage when GPU usage check is disabled.""" self.provider._enable_metric_report = False result = self.provider.get_gpu_usage() self.assertEqual(result, []) @patch.object(GpuMetricProvider, "_detect_gpu_provider") def test_get_gpu_usage_no_provider(self, mock_detect): """Test get_gpu_usage when no GPU provider is available.""" mock_detect.return_value = None with patch.object( NvidiaGpuProvider, "_initialize", side_effect=Exception("No GPU") ): result = self.provider.get_gpu_usage() self.assertEqual(result, []) self.provider._initialized = False # Reset for clean test 
mock_detect.assert_called_once() @patch.object(GpuMetricProvider, "_detect_gpu_provider") def test_get_gpu_usage_success(self, mock_detect): """Test successful get_gpu_usage.""" mock_provider = Mock() mock_provider.get_gpu_utilization.return_value = [ GpuUtilizationInfo( index=0, name="Test GPU", uuid="test-uuid", utilization_gpu=50, memory_used=1024, memory_total=2048, processes_pids={ 1234: ProcessGPUInfo( pid=1234, gpu_memory_usage=1024, gpu_utilization=None ) }, ) ] mock_detect.return_value = mock_provider result = self.provider.get_gpu_usage() self.assertEqual(len(result), 1) self.assertEqual(result[0]["index"], 0) self.assertEqual(result[0]["name"], "Test GPU") mock_provider.get_gpu_utilization.assert_called_once() def test_get_provider_name_no_provider(self): """Test get_provider_name when no provider is set.""" result = self.provider.get_provider_name() self.assertIsNone(result) def test_get_provider_name_with_provider(self): """Test get_provider_name when provider is set.""" mock_provider = Mock() mock_provider.get_provider_name.return_value = GpuProviderType.NVIDIA self.provider._provider = mock_provider result = self.provider.get_provider_name() self.assertEqual(result, "nvidia") def test_is_metric_report_enabled(self): """Test is_metric_report_enabled.""" self.assertTrue(self.provider.is_metric_report_enabled()) self.provider._enable_metric_report = False self.assertFalse(self.provider.is_metric_report_enabled()) if __name__ == "__main__": unittest.main()
TestGpuMetricProvider
python
python-openxml__python-docx
tests/image/test_png.py
{ "start": 546, "end": 1980 }
class ____: def it_can_construct_from_a_png_stream(self, stream_, _PngParser_, png_parser_, Png__init__): px_width, px_height, horz_dpi, vert_dpi = 42, 24, 36, 63 png_parser_.px_width = px_width png_parser_.px_height = px_height png_parser_.horz_dpi = horz_dpi png_parser_.vert_dpi = vert_dpi png = Png.from_stream(stream_) _PngParser_.parse.assert_called_once_with(stream_) Png__init__.assert_called_once_with(ANY, px_width, px_height, horz_dpi, vert_dpi) assert isinstance(png, Png) def it_knows_its_content_type(self): png = Png(None, None, None, None) assert png.content_type == MIME_TYPE.PNG def it_knows_its_default_ext(self): png = Png(None, None, None, None) assert png.default_ext == "png" # fixtures ------------------------------------------------------- @pytest.fixture def Png__init__(self, request): return initializer_mock(request, Png) @pytest.fixture def _PngParser_(self, request, png_parser_): _PngParser_ = class_mock(request, "docx.image.png._PngParser") _PngParser_.parse.return_value = png_parser_ return _PngParser_ @pytest.fixture def png_parser_(self, request): return instance_mock(request, _PngParser) @pytest.fixture def stream_(self, request): return instance_mock(request, io.BytesIO)
DescribePng
python
PyCQA__pylint
tests/functional/a/alternative/alternative_union_syntax_error.py
{ "start": 2293, "end": 2385 }
class ____(TypedDict): my_var: int | str # [unsupported-binary-operation]
CustomTypedDict3
python
python-poetry__poetry
src/poetry/utils/helpers.py
{ "start": 4514, "end": 12442 }
class ____: def __init__( self, url: str, dest: Path, session: Authenticator | Session | None = None, max_retries: int = 0, ): self._dest = dest self._max_retries = max_retries self._session = session or get_default_authenticator() self._url = url self._response = self._get() @cached_property def accepts_ranges(self) -> bool: return self._response.headers.get("Accept-Ranges") == "bytes" @cached_property def total_size(self) -> int: total_size = 0 if "Content-Length" in self._response.headers: with suppress(ValueError): total_size = int(self._response.headers["Content-Length"]) return total_size def _get(self, start: int = 0) -> Response: headers = {"Accept-Encoding": "Identity"} if start > 0: headers["Range"] = f"bytes={start}-" response = self._session.get( self._url, stream=True, headers=headers, timeout=REQUESTS_TIMEOUT ) try: response.raise_for_status() return response except BaseException: response.close() raise def _iter_content_with_resume(self, chunk_size: int) -> Iterator[bytes]: fetched_size = 0 retries = 0 while True: try: with self._response: for chunk in self._response.iter_content(chunk_size=chunk_size): yield chunk fetched_size += len(chunk) except (ChunkedEncodingError, ConnectionError): if ( retries < self._max_retries and self.accepts_ranges and fetched_size > 0 ): # only retry if server supports byte ranges # and we have fetched at least one chunk # otherwise, we should just fail retries += 1 self._response = self._get(fetched_size) continue raise else: break def download_with_progress(self, chunk_size: int = 1024) -> Iterator[int]: fetched_size = 0 with atomic_open(self._dest) as f: for chunk in self._iter_content_with_resume(chunk_size=chunk_size): if chunk: f.write(chunk) fetched_size += len(chunk) yield fetched_size def get_package_version_display_string( package: Package, root: Path | None = None ) -> str: if package.source_type in ["file", "directory"] and root: assert package.source_url is not None path = Path(os.path.relpath(package.source_url, root)).as_posix() return f"{package.version} {path}" pretty_version: str = package.full_pretty_version return pretty_version def paths_csv(paths: list[Path]) -> str: return ", ".join(f'"{c!s}"' for c in paths) def ensure_path(path: str | Path, is_directory: bool = False) -> Path: if isinstance(path, str): path = Path(path) if path.exists() and path.is_dir() == is_directory: return path raise ValueError( f"Specified path '{path}' is not a valid {'directory' if is_directory else 'file'}." 
) def is_dir_writable(path: Path, create: bool = False) -> bool: try: if not path.exists(): if not create: return False path.mkdir(parents=True, exist_ok=True) with tempfile.TemporaryFile(dir=str(path)): pass except OSError: return False else: return True def pluralize(count: int, word: str = "") -> str: if count == 1: return word return word + "s" def _get_win_folder_from_registry(csidl_name: str) -> str: if sys.platform != "win32": raise RuntimeError("Method can only be called on Windows.") import winreg as _winreg shell_folder_name = { "CSIDL_APPDATA": "AppData", "CSIDL_COMMON_APPDATA": "Common AppData", "CSIDL_LOCAL_APPDATA": "Local AppData", "CSIDL_PROGRAM_FILES": "Program Files", }[csidl_name] key = _winreg.OpenKey( _winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders", ) dir, _type = _winreg.QueryValueEx(key, shell_folder_name) assert isinstance(dir, str) return dir def _get_win_folder_with_ctypes(csidl_name: str) -> str: if sys.platform != "win32": raise RuntimeError("Method can only be called on Windows.") import ctypes csidl_const = { "CSIDL_APPDATA": 26, "CSIDL_COMMON_APPDATA": 35, "CSIDL_LOCAL_APPDATA": 28, "CSIDL_PROGRAM_FILES": 38, }[csidl_name] buf = ctypes.create_unicode_buffer(1024) ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) # Downgrade to short path name if have highbit chars. See # <http://bugs.activestate.com/show_bug.cgi?id=85099>. has_high_char = False for c in buf: if ord(c) > 255: has_high_char = True break if has_high_char: buf2 = ctypes.create_unicode_buffer(1024) if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): buf = buf2 return buf.value def get_win_folder(csidl_name: str) -> Path: if sys.platform == "win32": try: from ctypes import windll # noqa: F401 _get_win_folder = _get_win_folder_with_ctypes except ImportError: _get_win_folder = _get_win_folder_from_registry return Path(_get_win_folder(csidl_name)) raise RuntimeError("Method can only be called on Windows.") def get_real_windows_path(path: Path) -> Path: program_files = get_win_folder("CSIDL_PROGRAM_FILES") local_appdata = get_win_folder("CSIDL_LOCAL_APPDATA") path = Path( str(path).replace( str(program_files / "WindowsApps"), str(local_appdata / "Microsoft/WindowsApps"), ) ) if path.as_posix().startswith(local_appdata.as_posix()): path = path.resolve() return path def get_file_hash(path: Path, hash_name: str = "sha256") -> str: h = hashlib.new(hash_name) with path.open("rb") as fp: for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""): h.update(content) return h.hexdigest() def get_highest_priority_hash_type( hash_types: Collection[str], archive_name: str ) -> str | None: if not hash_types: return None for prioritised_hash_type in prioritised_hash_types: if prioritised_hash_type in hash_types: return prioritised_hash_type logger.debug( f"There are no known hash types for {archive_name} that are prioritised (known" f" hash types: {hash_types!s})" ) for available_hash_type in non_prioritised_available_hash_types: if available_hash_type in hash_types: return available_hash_type return None def extractall(source: Path, dest: Path, zip: bool) -> None: """Extract all members from either a zip or tar archive.""" if zip: with zipfile.ZipFile(source) as archive: archive.extractall(dest) else: # These versions of python shipped with a broken tarfile data_filter, per # https://github.com/python/cpython/issues/107845. 
broken_tarfile_filter = {(3, 9, 17), (3, 10, 12), (3, 11, 4)} with tarfile.open(source) as archive: if ( hasattr(tarfile, "data_filter") and sys.version_info[:3] not in broken_tarfile_filter ): archive.extractall(dest, filter="data") else: archive.extractall(dest)
Downloader
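download_with_progress is a generator: each chunk written yields the cumulative byte count, and the resume path in _iter_content_with_resume only re-requests (with a Range header) when the server advertised Accept-Ranges and at least one chunk already landed. A usage sketch with a made-up URL; in Poetry itself the session comes from the configured Authenticator:

from pathlib import Path

dl = Downloader("https://example.com/pkg-1.0-py3-none-any.whl", Path("pkg.whl"), max_retries=3)
for fetched in dl.download_with_progress(chunk_size=8192):
    if dl.total_size:
        print(f"{fetched}/{dl.total_size} bytes")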
python
allegroai__clearml
clearml/backend_api/services/v2_23/tasks.py
{ "start": 528199, "end": 533787 }
class ____(Request): """ Update task's runtime parameters :param task: ID of the task :type task: str :param name: Task name Unique within the company. :type name: str :param tags: User-defined tags list :type tags: Sequence[str] :param system_tags: System tags list. This field is reserved for system use, please don't use it. :type system_tags: Sequence[str] :param comment: Free text comment :type comment: str :param project: Project ID of the project to which this task is assigned :type project: str :param output__error: Free text error :type output__error: str :param created: Task creation time (UTC) :type created: datetime.datetime """ _service = "tasks" _action = "update" _version = "2.23" _schema = { "definitions": {}, "properties": { "comment": {"description": "Free text comment ", "type": "string"}, "created": { "description": "Task creation time (UTC) ", "format": "date-time", "type": "string", }, "name": { "description": "Task name Unique within the company.", "type": "string", }, "output__error": {"description": "Free text error", "type": "string"}, "project": { "description": "Project ID of the project to which this task is assigned", "type": "string", }, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": "array", }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": "array", }, "task": {"description": "ID of the task", "type": "string"}, }, "required": ["task"], "type": "object", } def __init__( self, task, name=None, tags=None, system_tags=None, comment=None, project=None, output__error=None, created=None, **kwargs ): super(UpdateRequest, self).__init__(**kwargs) self.task = task self.name = name self.tags = tags self.system_tags = system_tags self.comment = comment self.project = project self.output__error = output__error self.created = created @schema_property("task") def task(self): return self._property_task @task.setter def task(self, value): if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("name") def name(self): return self._property_name @name.setter def name(self, value): if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("tags") def tags(self): return self._property_tags @tags.setter def tags(self, value): if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self): return self._property_system_tags @system_tags.setter def system_tags(self, value): if value is None: self._property_system_tags = None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("comment") def comment(self): return self._property_comment @comment.setter def comment(self, value): if value is None: self._property_comment = None return self.assert_isinstance(value, "comment", six.string_types) self._property_comment = value @schema_property("project") def project(self): return self._property_project @project.setter def project(self, value): if value is None: self._property_project = None return self.assert_isinstance(value, 
"project", six.string_types) self._property_project = value @schema_property("output__error") def output__error(self): return self._property_output__error @output__error.setter def output__error(self, value): if value is None: self._property_output__error = None return self.assert_isinstance(value, "output__error", six.string_types) self._property_output__error = value @schema_property("created") def created(self): return self._property_created @created.setter def created(self, value): if value is None: self._property_created = None return self.assert_isinstance(value, "created", six.string_types + (datetime,)) if not isinstance(value, datetime): value = parse_datetime(value) self._property_created = value
UpdateRequest
python
hynek__structlog
src/structlog/processors.py
{ "start": 8390, "end": 10124 }
class ____: """ Render the ``event_dict`` using ``serializer(event_dict, **dumps_kw)``. Args: dumps_kw: Are passed unmodified to *serializer*. If *default* is passed, it will disable support for ``__structlog__``-based serialization. serializer: A :func:`json.dumps`-compatible callable that will be used to format the string. This can be used to use alternative JSON encoders (default: :func:`json.dumps`). .. seealso:: :doc:`performance` for examples. .. versionadded:: 0.2.0 Support for ``__structlog__`` serialization method. .. versionadded:: 15.4.0 *serializer* parameter. .. versionadded:: 18.2.0 Serializer's *default* parameter can be overwritten now. """ def __init__( self, serializer: Callable[..., str | bytes] = json.dumps, **dumps_kw: Any, ) -> None: dumps_kw.setdefault("default", _json_fallback_handler) self._dumps_kw = dumps_kw self._dumps = serializer def __call__( self, logger: WrappedLogger, name: str, event_dict: EventDict ) -> str | bytes: """ The return type of this depends on the return type of self._dumps. """ return self._dumps(event_dict, **self._dumps_kw) def _json_fallback_handler(obj: Any) -> Any: """ Serialize custom datatypes and pass the rest to __structlog__ & repr(). """ # circular imports :( from structlog.threadlocal import _ThreadLocalDictWrapper if isinstance(obj, _ThreadLocalDictWrapper): return obj._dict try: return obj.__structlog__() except AttributeError: return repr(obj)
JSONRenderer
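The renderer is an ordinary processor: given the final event_dict it returns whatever the configured serializer produces, with _json_fallback_handler covering values json.dumps cannot handle. Calling it directly shows the contract (the event dict is made up; extra keyword arguments flow through to json.dumps):

from structlog.processors import JSONRenderer

render = JSONRenderer(sort_keys=True)
print(render(None, "info", {"event": "user_login", "attempt": 2}))
# {"attempt": 2, "event": "user_login"}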
python
PyCQA__pylint
tests/functional/a/arguments.py
{ "start": 2500, "end": 2769 }
class ____: """ Regression """ if sys.version_info > (3,): def __new__(cls): """ empty """ return object.__new__(cls) else: def __new__(cls): """ empty """ return object.__new__(cls) Text()
Text
python
keras-team__keras
keras/src/legacy/layers.py
{ "start": 1923, "end": 4673 }
class ____(Layer): """DEPRECATED.""" def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs): super().__init__(**kwargs) self.seed_generator = backend.random.SeedGenerator(seed) self.factor = factor if isinstance(factor, (tuple, list)): self.height_lower = factor[0] self.height_upper = factor[1] else: self.height_lower = -factor self.height_upper = factor if self.height_upper < self.height_lower: raise ValueError( "`factor` argument cannot have an upper bound lesser than the " f"lower bound. Received: factor={factor}" ) if self.height_lower < -1.0 or self.height_upper < -1.0: raise ValueError( "`factor` argument must have values larger than -1. " f"Received: factor={factor}" ) self.interpolation = interpolation self.seed = seed def call(self, inputs, training=True): inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype) def random_height_inputs(inputs): """Inputs height-adjusted with random ops.""" inputs_shape = tf.shape(inputs) img_hd = tf.cast(inputs_shape[-3], tf.float32) img_wd = inputs_shape[-2] height_factor = backend.random.uniform( shape=[], minval=(1.0 + self.height_lower), maxval=(1.0 + self.height_upper), seed=self.seed_generator, ) adjusted_height = tf.cast(height_factor * img_hd, tf.int32) adjusted_size = tf.stack([adjusted_height, img_wd]) output = tf.image.resize( images=inputs, size=adjusted_size, method=self.interpolation, ) # tf.resize will output float32 regardless of input type. output = tf.cast(output, self.compute_dtype) output_shape = inputs.shape.as_list() output_shape[-3] = None output.set_shape(output_shape) return output if training: return random_height_inputs(inputs) else: return inputs def compute_output_shape(self, input_shape): input_shape = list(input_shape) input_shape[-3] = None return tuple(input_shape) def get_config(self): config = { "factor": self.factor, "interpolation": self.interpolation, "seed": self.seed, } base_config = super().get_config() return {**base_config, **config} @keras_export("keras._legacy.layers.RandomWidth")
RandomHeight
python
django__django
django/db/models/expressions.py
{ "start": 64351, "end": 68314 }
class ____(Expression): template = "%(expression)s %(ordering)s" conditional = False constraint_validation_compatible = False allows_composite_expressions = True def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): if nulls_first and nulls_last: raise ValueError("nulls_first and nulls_last are mutually exclusive") if nulls_first is False or nulls_last is False: raise ValueError("nulls_first and nulls_last values must be True or None.") self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, "resolve_expression"): raise ValueError("expression must be an expression type") self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending ) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): if isinstance(self.expression, ColPairs): sql_parts = [] params = [] for col in self.expression.get_cols(): copy = self.copy() copy.set_source_expressions([col]) sql, col_params = compiler.compile(copy) sql_parts.append(sql) params.extend(col_params) return ", ".join(sql_parts), params template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = "%s NULLS LAST" % template elif self.nulls_first: template = "%s NULLS FIRST" % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NULL, %s" % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NOT NULL, %s" % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { "expression": expression_sql, "ordering": "DESC" if self.descending else "ASC", **extra_context, } params *= template.count("%(expression)s") return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle < 23c doesn't allow ORDER BY EXISTS() or filters unless it's # wrapped in a CASE WHEN. if ( not connection.features.supports_boolean_expr_in_select_clause and connection.ops.conditional_expression_supported_in_where_clause( self.expression ) ): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first: self.nulls_last = True self.nulls_first = None elif self.nulls_last: self.nulls_first = True self.nulls_last = None return self def asc(self): self.descending = False def desc(self): self.descending = True
OrderBy
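OrderBy objects are usually built indirectly through F(...).asc()/.desc(), which is where nulls_first/nulls_last enter; as_sql then either emits a native NULLS FIRST/LAST modifier or falls back to prefixing an IS (NOT) NULL expression on backends without support. A construction sketch ('published_at' is a made-up field name):

from django.db.models import F

expr = F("published_at").desc(nulls_last=True)  # yields an OrderBy expression
print(type(expr).__name__, expr.descending, expr.nulls_last)  # OrderBy True True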
python
streamlit__streamlit
lib/streamlit/runtime/caching/cached_message_replay.py
{ "start": 1968, "end": 2145 }
class ____: message: Block id_of_dg_called_on: str returned_dgs_id: str MsgData: TypeAlias = ElementMsgData | BlockMsgData R = TypeVar("R") @dataclass
BlockMsgData
python
ray-project__ray
rllib/env/vector_env.py
{ "start": 8349, "end": 14448 }
class ____(VectorEnv):
    """Internal wrapper to translate any gym.Envs into a VectorEnv object."""

    def __init__(
        self,
        make_env: Optional[Callable[[int], EnvType]] = None,
        existing_envs: Optional[List[gym.Env]] = None,
        num_envs: int = 1,
        *,
        observation_space: Optional[gym.Space] = None,
        action_space: Optional[gym.Space] = None,
        restart_failed_sub_environments: bool = False,
        # Deprecated. These seem to have never been used.
        env_config=None,
        policy_config=None,
    ):
        """Initializes a _VectorizedGymEnv object.

        Args:
            make_env: Factory that produces a new gym.Env taking the sub-env's
                vector index as only arg. Must be defined if the
                number of `existing_envs` is less than `num_envs`.
            existing_envs: Optional list of already instantiated sub
                environments.
            num_envs: Total number of sub environments in this VectorEnv.
            action_space: The action space. If None, use existing_envs[0]'s
                action space.
            observation_space: The observation space. If None, use
                existing_envs[0]'s observation space.
            restart_failed_sub_environments: If True and any sub-environment (within
                a vectorized env) throws any error during env stepping, we will try to
                restart the faulty sub-environment. This is done without disturbing
                the other (still intact) sub-environments.
        """
        self.envs = existing_envs
        self.make_env = make_env
        self.restart_failed_sub_environments = restart_failed_sub_environments

        # Fill up missing envs (so we have exactly `num_envs` sub-envs in this
        # VectorEnv).
        while len(self.envs) < num_envs:
            self.envs.append(make_env(len(self.envs)))

        super().__init__(
            observation_space=observation_space or self.envs[0].observation_space,
            action_space=action_space or self.envs[0].action_space,
            num_envs=num_envs,
        )

    @override(VectorEnv)
    def vector_reset(
        self, *, seeds: Optional[List[int]] = None, options: Optional[List[dict]] = None
    ) -> Tuple[List[EnvObsType], List[EnvInfoDict]]:
        seeds = seeds or [None] * self.num_envs
        options = options or [None] * self.num_envs
        # Use reset_at(index) to restart and retry until
        # we successfully create a new env.
        resetted_obs = []
        resetted_infos = []
        for i in range(len(self.envs)):
            while True:
                obs, infos = self.reset_at(i, seed=seeds[i], options=options[i])
                if not isinstance(obs, Exception):
                    break
            resetted_obs.append(obs)
            resetted_infos.append(infos)
        return resetted_obs, resetted_infos

    @override(VectorEnv)
    def reset_at(
        self,
        index: Optional[int] = None,
        *,
        seed: Optional[int] = None,
        options: Optional[dict] = None,
    ) -> Tuple[Union[EnvObsType, Exception], Union[EnvInfoDict, Exception]]:
        if index is None:
            index = 0

        try:
            obs_and_infos = self.envs[index].reset(seed=seed, options=options)
        except Exception as e:
            if self.restart_failed_sub_environments:
                logger.exception(e.args[0])
                self.restart_at(index)
                obs_and_infos = e, {}
            else:
                raise e

        return obs_and_infos

    @override(VectorEnv)
    def restart_at(self, index: Optional[int] = None) -> None:
        if index is None:
            index = 0

        # Try closing down the old (possibly faulty) sub-env, but ignore errors.
        try:
            self.envs[index].close()
        except Exception as e:
            if log_once("close_sub_env"):
                logger.warning(
                    "Trying to close old and replaced sub-environment (at vector "
                    f"index={index}), but closing resulted in error:\n{e}"
                )

        env_to_del = self.envs[index]
        self.envs[index] = None
        del env_to_del

        # Re-create the sub-env at the new index.
        logger.warning(f"Trying to restart sub-environment at index {index}.")
        self.envs[index] = self.make_env(index)
        logger.warning(f"Sub-environment at index {index} restarted successfully.")

    @override(VectorEnv)
    def vector_step(
        self, actions: List[EnvActionType]
    ) -> Tuple[
        List[EnvObsType], List[float], List[bool], List[bool], List[EnvInfoDict]
    ]:
        obs_batch, reward_batch, terminated_batch, truncated_batch, info_batch = (
            [],
            [],
            [],
            [],
            [],
        )
        for i in range(self.num_envs):
            try:
                results = self.envs[i].step(actions[i])
            except Exception as e:
                if self.restart_failed_sub_environments:
                    logger.exception(e.args[0])
                    self.restart_at(i)
                    results = e, 0.0, True, True, {}
                else:
                    raise e

            obs, reward, terminated, truncated, info = results

            if not isinstance(info, dict):
                raise ValueError(
                    "Info should be a dict, got {} ({})".format(info, type(info))
                )
            obs_batch.append(obs)
            reward_batch.append(reward)
            terminated_batch.append(terminated)
            truncated_batch.append(truncated)
            info_batch.append(info)
        return obs_batch, reward_batch, terminated_batch, truncated_batch, info_batch

    @override(VectorEnv)
    def get_sub_environments(self) -> List[EnvType]:
        return self.envs

    @override(VectorEnv)
    def try_render_at(self, index: Optional[int] = None):
        if index is None:
            index = 0
        return self.envs[index].render()


@OldAPIStack
_VectorizedGymEnv
python
spyder-ide__spyder
spyder/plugins/console/widgets/main_widget.py
{ "start": 2151, "end": 2243 }
class ____:
    Run = 'run_section'
    Quit = 'quit_section'
ConsoleWidgetOptionsMenuSections
python
mlflow__mlflow
mlflow/genai/labeling/stores.py
{ "start": 7491, "end": 10361 }
class ____: """ Scheme-based registry for labeling store implementations. This class allows the registration of a function or class to provide an implementation for a given scheme of `store_uri` through the `register` methods. Implementations declared though the entrypoints `mlflow.labeling_store` group can be automatically registered through the `register_entrypoints` method. When instantiating a store through the `get_store` method, the scheme of the store URI provided (or inferred from environment) will be used to select which implementation to instantiate, which will be called with same arguments passed to the `get_store` method. """ def __init__(self) -> None: self._registry: dict[str, Callable[..., AbstractLabelingStore]] = {} self.group_name = "mlflow.labeling_store" def register(self, scheme: str, store_builder: Callable[..., AbstractLabelingStore]) -> None: self._registry[scheme] = store_builder def register_entrypoints(self) -> None: """Register labeling stores provided by other packages""" for entrypoint in get_entry_points(self.group_name): try: self.register(entrypoint.name, entrypoint.load()) except (AttributeError, ImportError) as exc: warnings.warn( 'Failure attempting to register labeling store for scheme "{}": {}'.format( entrypoint.name, str(exc) ), stacklevel=2, ) def get_store_builder(self, store_uri: str) -> Callable[..., AbstractLabelingStore]: """Get a store from the registry based on the scheme of store_uri Args: store_uri: The store URI. If None, it will be inferred from the environment. This URI is used to select which labeling store implementation to instantiate and is passed to the constructor of the implementation. Returns: A function that returns an instance of ``mlflow.genai.labeling.stores.AbstractLabelingStore`` that fulfills the store URI requirements. """ scheme = store_uri if store_uri == "databricks" else get_uri_scheme(store_uri) try: store_builder = self._registry[scheme] except KeyError: raise UnsupportedLabelingStoreURIException( unsupported_uri=store_uri, supported_uri_schemes=list(self._registry.keys()) ) return store_builder def get_store(self, tracking_uri: str | None = None) -> AbstractLabelingStore: resolved_store_uri = tracking_utils._resolve_tracking_uri(tracking_uri) builder = self.get_store_builder(resolved_store_uri) return builder(tracking_uri=resolved_store_uri)
LabelingStoreRegistry
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_base.py
{ "start": 2022, "end": 7928 }
class ____:
    ERROR_WHEN_RESOURCE_NOT_FOUND = ClientError({"Error": {"Code": "ValidationException"}}, "op")

    def setup_method(self):
        self.sagemaker = SageMakerBaseOperator(task_id="test_sagemaker_operator", config=CONFIG)
        self.sagemaker.aws_conn_id = "aws_default"

    def test_parse_integer(self):
        self.sagemaker.integer_fields = [
            ["key1"],
            ["key2", "key3"],
            ["key2", "key4"],
            ["key5", "key6"],
        ]
        self.sagemaker.parse_config_integers()
        assert self.sagemaker.config == PARSED_CONFIG

    def test_default_integer_fields(self):
        self.sagemaker.preprocess_config()
        assert self.sagemaker.integer_fields == EXPECTED_INTEGER_FIELDS

    def test_job_exists(self):
        exists = self.sagemaker._check_if_job_exists("the name", lambda _: {})
        assert exists

    def test_job_does_not_exists(self):
        def raiser(_):
            raise self.ERROR_WHEN_RESOURCE_NOT_FOUND

        exists = self.sagemaker._check_if_job_exists("the name", raiser)
        assert not exists

    def test_job_renamed(self):
        describe_mock = MagicMock()
        # scenario: name exists, new proposed name exists as well, second proposal is ok
        describe_mock.side_effect = [None, None, self.ERROR_WHEN_RESOURCE_NOT_FOUND]
        name = self.sagemaker._get_unique_job_name("test", False, describe_mock)
        assert describe_mock.call_count == 3
        assert re.match("test-[0-9]+$", name)

    @patch("airflow.providers.amazon.aws.operators.sagemaker.time.time_ns", return_value=MOCK_UNIX_TIME)
    def test_job_name_length(self, _):
        describe_mock = MagicMock()
        # scenario: the name is longer than 63 characters, so the function needs
        # to truncate it and add a timestamp
        describe_mock.side_effect = [None, None, self.ERROR_WHEN_RESOURCE_NOT_FOUND]
        name = self.sagemaker._get_unique_job_name(
            "ThisNameIsLongerThan64CharactersSoItShouldBeTruncatedWithATimestamp", False, describe_mock
        )
        assert len(name) <= 63

    @patch("airflow.providers.amazon.aws.operators.sagemaker.time.time_ns", return_value=MOCK_UNIX_TIME)
    def test_truncated_job_name(self, _):
        describe_mock = MagicMock()
        describe_mock.side_effect = [None, None, self.ERROR_WHEN_RESOURCE_NOT_FOUND]
        # scenario: the name is longer than 63 characters, so the function needs
        # to truncate it and add a timestamp
        full_name = "ThisNameIsLongerThan64CharactersSoItShouldBeTruncatedWithATimestamp"
        name = self.sagemaker._get_unique_job_name(full_name, False, describe_mock)
        base_name, timestamp = name.split("-")
        assert base_name == full_name[: len(base_name)]
        assert timestamp == str(MOCK_UNIX_TIME)[:10]

    def test_job_not_unique_with_fail(self):
        with pytest.raises(AirflowException):
            self.sagemaker._get_unique_job_name("test", True, lambda _: None)

    def test_check_resource_type_raises_exception_when_resource_type_is_invalid(self):
        with pytest.raises(AirflowException) as context:
            self.sagemaker._check_resource_type("invalid_resource")
        assert str(context.value) == (
            "Argument resource_type accepts only 'model' and 'job'. Provided value: 'invalid_resource'."
        )

    def test_get_unique_name_raises_exception_if_name_exists_when_fail_is_true(self):
        with pytest.raises(AirflowException) as context:
            self.sagemaker._get_unique_name(
                "existing_name",
                fail_if_exists=True,
                describe_func=None,
                check_exists_func=lambda name, describe_func: True,
                resource_type="model",
            )
        assert str(context.value) == "A SageMaker model with name existing_name already exists."

    @patch("airflow.providers.amazon.aws.operators.sagemaker.time.time_ns", return_value=MOCK_UNIX_TIME)
    def test_get_unique_name_avoids_name_collision(self, time_mock):
        new_name = self.sagemaker._get_unique_name(
            "existing_name",
            fail_if_exists=False,
            describe_func=None,
            check_exists_func=MagicMock(side_effect=[True, False]),
            resource_type="model",
        )
        assert new_name == "existing_name-1234567890"

    def test_get_unique_name_checks_only_once_when_resource_does_not_exist(self):
        describe_func = MagicMock(side_effect=ClientError({"Error": {"Code": "ValidationException"}}, "op"))
        new_name = "new_name"
        name = self.sagemaker._get_unique_name(
            new_name,
            fail_if_exists=False,
            describe_func=describe_func,
            check_exists_func=self.sagemaker._check_if_job_exists,
            resource_type="job",
        )
        describe_func.assert_called_once_with(new_name)
        assert name == new_name

    def test_check_if_resource_exists_returns_true_when_it_finds_existing_resource(self):
        exists = self.sagemaker._check_if_resource_exists("job_123", "job", lambda name: None)
        assert exists

    def test_check_if_resource_exists_returns_false_when_validation_exception_is_raised(self):
        describe_func = MagicMock(side_effect=ClientError({"Error": {"Code": "ValidationException"}}, "op"))
        exists = self.sagemaker._check_if_resource_exists("job_123", "job", describe_func)
        assert not exists

    def test_check_if_resource_exists_raises_when_it_is_not_validation_exception(self):
        describe_func = MagicMock(side_effect=ValueError("different exception"))
        with pytest.raises(ValueError, match="different exception") as context:
            self.sagemaker._check_if_resource_exists("job_123", "job", describe_func)
        assert str(context.value) == "different exception"


@pytest.mark.db_test
TestSageMakerBaseOperator
python
pytorch__pytorch
test/torch_np/test_basic.py
{ "start": 11932, "end": 12745 }
class ____(TestCase):
    def test_copyto_basic(self):
        dst = w.empty(4)
        src = w.arange(4)
        w.copyto(dst, src)
        assert (dst == src).all()

    def test_copytobcast(self):
        dst = w.empty((4, 2))
        src = w.arange(4)

        # cannot broadcast => error out
        with assert_raises(RuntimeError):
            w.copyto(dst, src)

        # broadcast src against dst
        dst = w.empty((2, 4))
        w.copyto(dst, src)
        assert (dst == src).all()

    def test_copyto_typecast(self):
        dst = w.empty(4, dtype=int)
        src = w.arange(4, dtype=float)
        with assert_raises(TypeError):
            w.copyto(dst, src, casting="no")

        # force the type cast
        w.copyto(dst, src, casting="unsafe")
        assert (dst == src).all()
TestCopyTo
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_bar08.py
{ "start": 315, "end": 1491 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("chart_bar08.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        # Turn off default URL format for testing.
        workbook.default_url_format = None

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "bar"})

        chart.axis_ids = [40522880, 40524416]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        worksheet.write("A7", "http://www.perl.com/")

        chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
        chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
        chart.add_series({"values": "=Sheet1!$C$1:$C$5"})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
dagster-io__dagster
python_modules/dagster/dagster/_core/errors.py
{ "start": 8771, "end": 11363 }
class ____(DagsterError):
    """Indicates that previous step outputs required for an execution step to proceed are not
    available.
    """

    def __init__(self, *args, **kwargs):
        self.step_key = check.str_param(kwargs.pop("step_key"), "step_key")
        self.output_name = check.str_param(kwargs.pop("output_name"), "output_name")
        super().__init__(*args, **kwargs)


@contextmanager
def raise_execution_interrupts() -> Iterator[None]:
    with raise_interrupts_as(DagsterExecutionInterruptedError):
        yield


@contextmanager
def user_code_error_boundary(
    error_cls: type["DagsterUserCodeExecutionError"],
    msg_fn: Callable[[], str],
    log_manager: Optional["DagsterLogManager"] = None,
    **kwargs: object,
) -> Iterator[None]:
    """Wraps the execution of user-space code in an error boundary. This places a uniform
    policy around any user code invoked by the framework. This ensures that all user
    errors are wrapped in an exception derived from DagsterUserCodeExecutionError,
    and that the original stack trace of the user error is preserved, so that it
    can be reported without confusing framework code in the stack trace, if a
    tool author wishes to do so.

    Examples:
    .. code-block:: python

        with user_code_error_boundary(
            # Pass a class that inherits from DagsterUserCodeExecutionError
            DagsterExecutionStepExecutionError,
            # Pass a function that produces a message
            "Error occurred during step execution"
        ):
            call_user_provided_function()
    """
    check.callable_param(msg_fn, "msg_fn")
    check.class_param(error_cls, "error_cls", superclass=DagsterUserCodeExecutionError)
    from dagster._utils.error import redact_user_stacktrace_if_enabled

    with redact_user_stacktrace_if_enabled(), raise_execution_interrupts():
        if log_manager:
            log_manager.begin_python_log_capture()
        try:
            yield
        except DagsterError as de:
            # The system has thrown an error that is part of the user-framework contract
            raise de
        except Exception as e:
            # An exception has been thrown by user code and computation should cease
            # with the error reported further up the stack
            new_error = error_cls(
                msg_fn(), user_exception=e, original_exc_info=sys.exc_info(), **kwargs
            )
            raise new_error from e
        finally:
            if log_manager:
                log_manager.end_python_log_capture()
DagsterStepOutputNotFoundError
python
django__django
django/forms/widgets.py
{ "start": 1366, "end": 1437 }
class ____(RuntimeWarning):
    pass


@html_safe
MediaOrderConflictWarning
python
spack__spack
lib/spack/spack/test/database.py
{ "start": 20735, "end": 47562 }
class ____: """Provide a function which can execute in a separate process that removes a spec from the database. """ def __call__(self): # check that other process can read DB _check_db_sanity(spack.store.STORE.db) with spack.store.STORE.db.write_transaction(): _mock_remove("mpileaks ^zmpi") def test_030_db_sanity_from_another_process(mutable_database): spack_process = spack.subprocess_context.SpackTestProcess(ReadModify()) p = spack_process.create() p.start() p.join() # ensure child process change is visible in parent process with mutable_database.read_transaction(): assert len(mutable_database.query("mpileaks ^zmpi")) == 0 def test_040_ref_counts(database): """Ensure that we got ref counts right when we read the DB.""" database._check_ref_counts() def test_041_ref_counts_deprecate(mutable_database): """Ensure that we have appropriate ref counts after deprecating""" mpich = mutable_database.query_one("mpich") zmpi = mutable_database.query_one("zmpi") mutable_database.deprecate(mpich, zmpi) mutable_database._check_ref_counts() def test_050_basic_query(database): """Ensure querying database is consistent with what is installed.""" # query everything total_specs = len(spack.store.STORE.db.query()) assert total_specs == 20 # query specs with multiple configurations mpileaks_specs = database.query("mpileaks") callpath_specs = database.query("callpath") mpi_specs = database.query("mpi") assert len(mpileaks_specs) == 3 assert len(callpath_specs) == 3 assert len(mpi_specs) == 3 # query specs with single configurations dyninst_specs = database.query("dyninst") libdwarf_specs = database.query("libdwarf") libelf_specs = database.query("libelf") assert len(dyninst_specs) == 1 assert len(libdwarf_specs) == 1 assert len(libelf_specs) == 1 # Query by dependency assert len(database.query("mpileaks ^mpich")) == 1 assert len(database.query("mpileaks ^mpich2")) == 1 assert len(database.query("mpileaks ^zmpi")) == 1 # Query by date assert len(database.query(start_date=datetime.datetime.min)) == total_specs assert len(database.query(start_date=datetime.datetime.max)) == 0 assert len(database.query(end_date=datetime.datetime.min)) == 0 assert len(database.query(end_date=datetime.datetime.max)) == total_specs def test_060_remove_and_add_root_package(mutable_database): _check_remove_and_add_package(mutable_database, "mpileaks ^mpich") def test_070_remove_and_add_dependency_package(mutable_database): _check_remove_and_add_package(mutable_database, "dyninst") def test_080_root_ref_counts(mutable_database): rec = mutable_database.get_record("mpileaks ^mpich") # Remove a top-level spec from the DB mutable_database.remove("mpileaks ^mpich") # record no longer in DB assert mutable_database.query("mpileaks ^mpich", installed=InstallRecordStatus.ANY) == [] # record's deps have updated ref_counts assert mutable_database.get_record("callpath ^mpich").ref_count == 0 assert mutable_database.get_record("mpich").ref_count == 1 # Put the spec back mutable_database.add(rec.spec) # record is present again assert len(mutable_database.query("mpileaks ^mpich", installed=InstallRecordStatus.ANY)) == 1 # dependencies have ref counts updated assert mutable_database.get_record("callpath ^mpich").ref_count == 1 assert mutable_database.get_record("mpich").ref_count == 2 def test_090_non_root_ref_counts(mutable_database): mutable_database.get_record("mpileaks ^mpich") mutable_database.get_record("callpath ^mpich") # "force remove" a non-root spec from the DB mutable_database.remove("callpath ^mpich") # record still in DB but marked 
uninstalled assert mutable_database.query("callpath ^mpich", installed=True) == [] assert len(mutable_database.query("callpath ^mpich", installed=InstallRecordStatus.ANY)) == 1 # record and its deps have same ref_counts assert ( mutable_database.get_record("callpath ^mpich", installed=InstallRecordStatus.ANY).ref_count == 1 ) assert mutable_database.get_record("mpich").ref_count == 2 # remove only dependent of uninstalled callpath record mutable_database.remove("mpileaks ^mpich") # record and parent are completely gone. assert mutable_database.query("mpileaks ^mpich", installed=InstallRecordStatus.ANY) == [] assert mutable_database.query("callpath ^mpich", installed=InstallRecordStatus.ANY) == [] # mpich ref count updated properly. mpich_rec = mutable_database.get_record("mpich") assert mpich_rec.ref_count == 0 def test_100_no_write_with_exception_on_remove(database): def fail_while_writing(): with database.write_transaction(): _mock_remove("mpileaks ^zmpi") raise Exception() with database.read_transaction(): assert len(database.query("mpileaks ^zmpi", installed=InstallRecordStatus.ANY)) == 1 with pytest.raises(Exception): fail_while_writing() # reload DB and make sure zmpi is still there. with database.read_transaction(): assert len(database.query("mpileaks ^zmpi", installed=InstallRecordStatus.ANY)) == 1 def test_110_no_write_with_exception_on_install(database): def fail_while_writing(): with database.write_transaction(): _mock_install("cmake") raise Exception() with database.read_transaction(): assert database.query("cmake", installed=InstallRecordStatus.ANY) == [] with pytest.raises(Exception): fail_while_writing() # reload DB and make sure cmake was not written. with database.read_transaction(): assert database.query("cmake", installed=InstallRecordStatus.ANY) == [] def test_115_reindex_with_packages_not_in_repo(mutable_database, repo_builder: RepoBuilder): # Dont add any package definitions to this repository, the idea is that # packages should not have to be defined in the repository once they # are installed with spack.repo.use_repositories(repo_builder.root): spack.store.STORE.reindex() _check_db_sanity(mutable_database) def test_external_entries_in_db(mutable_database): rec = mutable_database.get_record("mpileaks ^zmpi") assert rec.spec.external_path is None assert not rec.spec.external_modules rec = mutable_database.get_record("externaltool") assert rec.spec.external_path == os.path.sep + os.path.join("path", "to", "external_tool") assert not rec.spec.external_modules assert rec.explicit is False PackageInstaller([rec.spec.package], fake=True, explicit=True).install() rec = mutable_database.get_record("externaltool") assert rec.spec.external_path == os.path.sep + os.path.join("path", "to", "external_tool") assert not rec.spec.external_modules assert rec.explicit is True @pytest.mark.regression("8036") def test_regression_issue_8036(mutable_database, usr_folder_exists): # The test ensures that the external package prefix is treated as # existing. Even when the package prefix exists, the package should # not be considered installed until it is added to the database by # the installer with install(). 
s = spack.concretize.concretize_one("externaltool@0.9") assert not s.installed # Now install the external package and check again the `installed` property PackageInstaller([s.package], fake=True, explicit=True).install() assert s.installed @pytest.mark.regression("11118") def test_old_external_entries_prefix(mutable_database): with open(spack.store.STORE.db._index_path, "r", encoding="utf-8") as f: db_obj = json.loads(f.read()) spack.vendor.jsonschema.validate(db_obj, schema) s = spack.concretize.concretize_one("externaltool") db_obj["database"]["installs"][s.dag_hash()]["path"] = "None" with open(spack.store.STORE.db._index_path, "w", encoding="utf-8") as f: f.write(json.dumps(db_obj)) if _use_uuid: with open(spack.store.STORE.db._verifier_path, "w", encoding="utf-8") as f: f.write(str(uuid.uuid4())) record = spack.store.STORE.db.get_record(s) assert record.path is None assert record.spec._prefix is None assert record.spec.prefix == record.spec.external_path def test_uninstall_by_spec(mutable_database): with mutable_database.write_transaction(): for spec in mutable_database.query(): if spec.installed: spack.package_base.PackageBase.uninstall_by_spec(spec, force=True) else: mutable_database.remove(spec) assert len(mutable_database.query()) == 0 def test_query_unused_specs(mutable_database): # This spec installs a fake cmake as a build only dependency s = spack.concretize.concretize_one("simple-inheritance") PackageInstaller([s.package], fake=True, explicit=True).install() si = s.dag_hash() ml_mpich = spack.store.STORE.db.query_one("mpileaks ^mpich").dag_hash() ml_mpich2 = spack.store.STORE.db.query_one("mpileaks ^mpich2").dag_hash() ml_zmpi = spack.store.STORE.db.query_one("mpileaks ^zmpi").dag_hash() externaltest = spack.store.STORE.db.query_one("externaltest").dag_hash() trivial_smoke_test = spack.store.STORE.db.query_one("trivial-smoke-test").dag_hash() def check_unused(roots, deptype, expected): unused = spack.store.STORE.db.unused_specs(root_hashes=roots, deptype=deptype) assert set(u.name for u in unused) == set(expected) default_dt = dt.LINK | dt.RUN check_unused(None, default_dt, ["cmake", "gcc", "compiler-wrapper"]) check_unused( [si, ml_mpich, ml_mpich2, ml_zmpi, externaltest], default_dt, ["trivial-smoke-test", "cmake", "gcc", "compiler-wrapper"], ) check_unused( [si, ml_mpich, ml_mpich2, ml_zmpi, externaltest], dt.LINK | dt.RUN | dt.BUILD, ["trivial-smoke-test"], ) check_unused( [si, ml_mpich, ml_mpich2, externaltest, trivial_smoke_test], dt.LINK | dt.RUN | dt.BUILD, ["mpileaks", "callpath", "zmpi", "fake"], ) check_unused( [si, ml_mpich, ml_mpich2, ml_zmpi], default_dt, [ "trivial-smoke-test", "cmake", "externaltest", "externaltool", "externalvirtual", "gcc", "compiler-wrapper", ], ) @pytest.mark.regression("10019") def test_query_spec_with_conditional_dependency(mutable_database): # The issue is triggered by having dependencies that are # conditional on a Boolean variant s = spack.concretize.concretize_one("hdf5~mpi") PackageInstaller([s.package], fake=True, explicit=True).install() results = spack.store.STORE.db.query_local("hdf5 ^mpich") assert not results @pytest.mark.regression("10019") def test_query_spec_with_non_conditional_virtual_dependency(database): # Ensure the same issue doesn't come up for virtual # dependency that are not conditional on variants results = spack.store.STORE.db.query_local("mpileaks ^mpich") assert len(results) == 1 def test_query_virtual_spec(database): """Make sure we can query for virtuals in the DB""" results = 
spack.store.STORE.db.query_local("mpi") assert len(results) == 3 names = [s.name for s in results] assert all(name in names for name in ["mpich", "mpich2", "zmpi"]) def test_failed_spec_path_error(mutable_database): """Ensure spec not concrete check is covered.""" s = spack.spec.Spec("pkg-a") with pytest.raises(AssertionError, match="concrete spec required"): spack.store.STORE.failure_tracker.mark(s) @pytest.mark.db def test_clear_failure_keep(mutable_database, monkeypatch, capfd): """Add test coverage for clear_failure operation when to be retained.""" def _is(self, spec): return True # Pretend the spec has been failure locked monkeypatch.setattr(spack.database.FailureTracker, "lock_taken", _is) s = spack.concretize.concretize_one("pkg-a") spack.store.STORE.failure_tracker.clear(s) out = capfd.readouterr()[0] assert "Retaining failure marking" in out @pytest.mark.db def test_clear_failure_forced(mutable_database, monkeypatch, capfd): """Add test coverage for clear_failure operation when force.""" def _is(self, spec): return True # Pretend the spec has been failure locked monkeypatch.setattr(spack.database.FailureTracker, "lock_taken", _is) # Ensure raise OSError when try to remove the non-existent marking monkeypatch.setattr(spack.database.FailureTracker, "persistent_mark", _is) s = spack.concretize.concretize_one("pkg-a") spack.store.STORE.failure_tracker.clear(s, force=True) out = capfd.readouterr()[1] assert "Removing failure marking despite lock" in out assert "Unable to remove failure marking" in out @pytest.mark.db def test_mark_failed(mutable_database, monkeypatch, tmp_path: pathlib.Path, capsys): """Add coverage to mark_failed.""" def _raise_exc(lock): raise lk.LockTimeoutError("write", "/mock-lock", 1.234, 10) with fs.working_dir(str(tmp_path)): s = spack.concretize.concretize_one("pkg-a") # Ensure attempt to acquire write lock on the mark raises the exception monkeypatch.setattr(lk.Lock, "acquire_write", _raise_exc) spack.store.STORE.failure_tracker.mark(s) out = str(capsys.readouterr()[1]) assert "Unable to mark pkg-a as failed" in out spack.store.STORE.failure_tracker.clear_all() @pytest.mark.db def test_prefix_failed(mutable_database, monkeypatch): """Add coverage to failed operation.""" s = spack.concretize.concretize_one("pkg-a") # Confirm the spec is not already marked as failed assert not spack.store.STORE.failure_tracker.has_failed(s) # Check that a failure entry is sufficient spack.store.STORE.failure_tracker.mark(s) assert spack.store.STORE.failure_tracker.has_failed(s) # Remove the entry and check again spack.store.STORE.failure_tracker.clear(s) assert not spack.store.STORE.failure_tracker.has_failed(s) # Now pretend that the prefix failure is locked monkeypatch.setattr(spack.database.FailureTracker, "lock_taken", lambda self, spec: True) assert spack.store.STORE.failure_tracker.has_failed(s) def test_prefix_write_lock_error(mutable_database, monkeypatch): """Cover the prefix write lock exception.""" def _raise(db, spec): raise lk.LockError("Mock lock error") s = spack.concretize.concretize_one("pkg-a") # Ensure subsequent lock operations fail monkeypatch.setattr(lk.Lock, "acquire_write", _raise) with pytest.raises(Exception): with spack.store.STORE.prefix_locker.write_lock(s): assert False @pytest.mark.regression("26600") def test_database_works_with_empty_dir(tmp_path: pathlib.Path): # Create the lockfile and failures directory otherwise # we'll get a permission error on Database creation db_dir = tmp_path / ".spack-db" db_dir.mkdir() (db_dir / 
spack.database._LOCK_FILE).touch() (db_dir / "failures").mkdir() tmp_path.chmod(mode=0o555) db = spack.database.Database(str(tmp_path)) with db.read_transaction(): db.query() # Check that reading an empty directory didn't create a new index.json assert not os.path.exists(db._index_path) @pytest.mark.parametrize( "query_arg,exc_type,msg_str", [ (["callpath"], spack.store.MatchError, "matches multiple packages"), (["tensorflow"], spack.store.MatchError, "does not match any"), ], ) def test_store_find_failures(database, query_arg, exc_type, msg_str): with pytest.raises(exc_type) as exc_info: spack.store.find(query_arg, multiple=False) assert msg_str in str(exc_info.value) def test_store_find_accept_string(database): result = spack.store.find("callpath", multiple=True) assert len(result) == 3 def test_reindex_removed_prefix_is_not_installed(mutable_database, mock_store, capfd): """When a prefix of a dependency is removed and the database is reindexed, the spec should still be added through the dependent, but should be listed as not installed.""" # Remove libelf from the filesystem prefix = mutable_database.query_one("libelf").prefix assert prefix.startswith(str(mock_store)) shutil.rmtree(prefix) # Reindex should pick up libelf as a dependency of libdwarf spack.store.STORE.reindex() # Reindexing should warn about libelf not found on the filesystem assert re.search( "libelf@0.8.13.+ was marked installed in the database " "but was not found on the file system", capfd.readouterr().err, ) # And we should still have libelf in the database, but not installed. assert not mutable_database.query_one("libelf", installed=True) assert mutable_database.query_one("libelf", installed=False) def test_reindex_when_all_prefixes_are_removed(mutable_database, mock_store): # Remove all non-external installations from the filesystem for spec in spack.store.STORE.db.query_local(): if not spec.external: assert spec.prefix.startswith(str(mock_store)) shutil.rmtree(spec.prefix) # Make sure we have some explicitly installed specs num = len(mutable_database.query_local(installed=True, explicit=True)) assert num > 0 # Reindex uses the current index to repopulate itself spack.store.STORE.reindex() # Make sure all explicit specs are still there, but are now uninstalled. specs = mutable_database.query_local(installed=False, explicit=True) assert len(specs) == num # And make sure they can be removed from the database (covers the case where # `ref_count == 0 and not installed`, which hits some obscure branches. 
for s in specs: mutable_database.remove(s) assert len(mutable_database.query_local(installed=False, explicit=True)) == 0 @pytest.mark.parametrize( "spec_str,parent_name,expected_nparents", [("dyninst", "callpath", 3), ("libelf", "dyninst", 1), ("libelf", "libdwarf", 1)], ) @pytest.mark.regression("11983") def test_check_parents(spec_str, parent_name, expected_nparents, database): """Check that a spec returns the correct number of parents.""" s = database.query_one(spec_str) parents = s.dependents(name=parent_name) assert len(parents) == expected_nparents edges = s.edges_from_dependents(name=parent_name) assert len(edges) == expected_nparents def test_db_all_hashes(database): # ensure we get the right number of hashes without a read transaction hashes = database.all_hashes() assert len(hashes) == 20 # and make sure the hashes match with database.read_transaction(): assert set(s.dag_hash() for s in database.query()) == set(hashes) def test_consistency_of_dependents_upon_remove(mutable_database): # Check the initial state s = mutable_database.query_one("dyninst") parents = s.dependents(name="callpath") assert len(parents) == 3 # Remove a dependent (and all its dependents) mutable_database.remove("mpileaks ^callpath ^mpich2") mutable_database.remove("callpath ^mpich2") # Check the final state s = mutable_database.query_one("dyninst") parents = s.dependents(name="callpath") assert len(parents) == 2 @pytest.mark.regression("30187") def test_query_installed_when_package_unknown(database, repo_builder: RepoBuilder): """Test that we can query the installation status of a spec when we don't know its package.py """ with spack.repo.use_repositories(repo_builder.root): specs = database.query("mpileaks") for s in specs: # Assert that we can query the installation methods even though we # don't have the package.py available assert s.installed assert not s.installed_upstream with pytest.raises(spack.repo.UnknownNamespaceError): s.package def test_error_message_when_using_too_new_db(database, monkeypatch): """Sometimes the database format needs to be bumped. When that happens, we have forward incompatibilities that need to be reported in a clear way to the user, in case we moved back to an older version of Spack. This test ensures that the error message for a too new database version stays comprehensible across refactoring of the database code. 
""" monkeypatch.setattr(spack.database, "_DB_VERSION", vn.Version("0")) with pytest.raises( spack.database.InvalidDatabaseVersionError, match="you need a newer Spack version" ): spack.database.Database(database.root)._read() @pytest.mark.parametrize( "lock_cfg", [spack.database.NO_LOCK, spack.database.NO_TIMEOUT, spack.database.DEFAULT_LOCK_CFG, None], ) def test_database_construction_doesnt_use_globals( tmp_path: pathlib.Path, config, nullify_globals, lock_cfg ): lock_cfg = lock_cfg or spack.database.lock_configuration(config) db = spack.database.Database(str(tmp_path), lock_cfg=lock_cfg) with db.write_transaction(): pass # ensure the DB is written assert os.path.exists(db.database_directory) def test_database_read_works_with_trailing_data( tmp_path: pathlib.Path, default_mock_concretization ): # Populate a database root = str(tmp_path) db = spack.database.Database(root, layout=None) spec = default_mock_concretization("pkg-a") db.add(spec) specs_in_db = db.query_local() assert spec in specs_in_db # Append anything to the end of the database file with open(db._index_path, "a", encoding="utf-8") as f: f.write(json.dumps({"hello": "world"})) # Read the database and check that it ignores the trailing data assert spack.database.Database(root).query_local() == specs_in_db def test_database_errors_with_just_a_version_key(mutable_database): next_version = f"{spack.database._DB_VERSION}.next" with open(mutable_database._index_path, "w", encoding="utf-8") as f: f.write(json.dumps({"database": {"version": next_version}})) with pytest.raises(spack.database.InvalidDatabaseVersionError): spack.database.Database(mutable_database.root).query_local() def test_reindex_with_upstreams(tmp_path: pathlib.Path, monkeypatch, mock_packages, config): # Reindexing should not put install records of upstream entries into the local database. Here # we install `mpileaks` locally with dependencies in the upstream. And we even install # `mpileaks` with the same hash in the upstream. After reindexing, `mpileaks` should still be # in the local db, and `callpath` should not. mpileaks = spack.concretize.concretize_one("mpileaks") callpath = mpileaks.dependencies("callpath")[0] upstream_store = spack.store.create( spack.config.create_from( spack.config.InternalConfigScope( "cfg", {"config": {"install_tree": {"root": str(tmp_path / "upstream")}}} ) ) ) monkeypatch.setattr(spack.store, "STORE", upstream_store) PackageInstaller([callpath.package], fake=True, explicit=True).install() local_store = spack.store.create( spack.config.create_from( spack.config.InternalConfigScope( "cfg", { "config": {"install_tree": {"root": str(tmp_path / "local")}}, "upstreams": {"my-upstream": {"install_tree": str(tmp_path / "upstream")}}, }, ) ) ) monkeypatch.setattr(spack.store, "STORE", local_store) PackageInstaller([mpileaks.package], fake=True, explicit=True).install() # Sanity check that callpath is from upstream. assert not local_store.db.query_local("callpath") assert local_store.db.query("callpath") # Install mpileaks also upstream with the same hash to ensure that determining upstreamness # checks local installs before upstream databases, even when the local database is being # reindexed. monkeypatch.setattr(spack.store, "STORE", upstream_store) PackageInstaller([mpileaks.package], fake=True, explicit=True).install() # Delete the local database shutil.rmtree(local_store.db.database_directory) # Create a new instance s.t. 
we don't have cached specs in memory reindexed_local_store = spack.store.create( spack.config.create_from( spack.config.InternalConfigScope( "cfg", { "config": {"install_tree": {"root": str(tmp_path / "local")}}, "upstreams": {"my-upstream": {"install_tree": str(tmp_path / "upstream")}}, }, ) ) ) reindexed_local_store.db.reindex() assert not reindexed_local_store.db.query_local("callpath") assert reindexed_local_store.db.query("callpath") == [callpath] assert reindexed_local_store.db.query_local("mpileaks") == [mpileaks] @pytest.mark.regression("47101") def test_query_with_predicate_fn(database): all_specs = database.query() # Name starts with a string specs = database.query(predicate_fn=lambda x: x.spec.name.startswith("mpil")) assert specs and all(x.name.startswith("mpil") for x in specs) assert len(specs) < len(all_specs) # Recipe is currently known/unknown specs = database.query(predicate_fn=lambda x: spack.repo.PATH.exists(x.spec.name)) assert specs == all_specs specs = database.query(predicate_fn=lambda x: not spack.repo.PATH.exists(x.spec.name)) assert not specs @pytest.mark.regression("49964") def test_querying_reindexed_database_specfilev5(tmp_path: pathlib.Path): """Tests that we can query a reindexed database from before compilers as dependencies, and get appropriate results for %<compiler> and similar selections. """ test_path = pathlib.Path(spack.paths.test_path) zipfile = test_path / "data" / "database" / "index.json.v7_v8.json.gz" with gzip.open(str(zipfile), "rt", encoding="utf-8") as f: data = json.load(f) index_json = tmp_path / spack.database._DB_DIRNAME / spack.database.INDEX_JSON_FILE index_json.parent.mkdir(parents=True) index_json.write_text(json.dumps(data)) db = spack.database.Database(str(tmp_path)) specs = db.query("%gcc") assert len(specs) == 8 assert len([x for x in specs if x.external]) == 2 assert len([x for x in specs if x.original_spec_format() < 5]) == 8
ReadModify
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 298219, "end": 298747 }
class ____(ExprNode):
    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        ExprNode.__init__(self, pos, name=py_name, cname=cname,
                          type=func_type, utility_code=utility_code)

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        if self.utility_code:
            code.globalstate.use_utility_code(self.utility_code)

    def calculate_result_code(self):
        return self.cname
PythonCapiFunctionNode
python
prabhupant__python-ds
data_structures/heap/kth_largest_element_in_stream.py
{ "start": 837, "end": 1794 }
class ____:
    def __init__(self, k):
        self.heap = []
        self.stream = []
        self.k = k
        self.curr_min = None
        heapify(self.heap)

    def insert(self, x):
        self.stream.append(x)
        if len(self.heap) < self.k:
            # when the heap is empty or size is less than K
            heappush(self.heap, x)
            self.curr_min = self.heap[0]
        else:
            if x > self.curr_min:
                heappop(self.heap)  # remove the curr min element
                heappush(self.heap, x)  # insert x
                self.curr_min = self.heap[0]

    def find_kth_max(self):
        if len(self.heap) == self.k:
            print(f'{self.k}th max number - {self.heap[0]}')


k = 3
x = Stream(k)

num = input()
while num != 'q':
    if num == 's':
        print(f'Stream - {x.stream} | K - {k}')
    else:
        num = int(num)
        x.insert(num)
        x.find_kth_max()
    num = input()
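# Illustrative note (not part of the original file): the min-heap holds the
# k largest values seen so far, so its root (`self.heap[0]`) is always the
# k-th largest element of the stream; each insert costs O(log k).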
Stream
python
pytorch__pytorch
test/torch_np/test_basic.py
{ "start": 4640, "end": 5977 }
class ____(TestCase): """Smoke test of functions (array_like, shape_like) -> array_like""" def setUp(self): self.shape = (2, 3) self.shape_arg_name = { w.reshape: "newshape", } # reshape expects `newshape` @parametrize("func", arr_shape_funcs) def test_andshape_tensor(self, func): t = torch.Tensor([[1, 2, 3], [4, 5, 6]]) shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape} ta = func(t, **shape_dict) assert isinstance(ta, w.ndarray) assert ta.shape == self.shape @parametrize("func", arr_shape_funcs) def test_andshape_list(self, func): t = [[1, 2, 3], [4, 5, 6]] shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape} ta = func(t, **shape_dict) assert isinstance(ta, w.ndarray) assert ta.shape == self.shape @parametrize("func", arr_shape_funcs) def test_andshape_array(self, func): t = w.asarray([[1, 2, 3], [4, 5, 6]]) shape_dict = {self.shape_arg_name.get(func, "shape"): self.shape} ta = func(t, **shape_dict) assert isinstance(ta, w.ndarray) assert ta.shape == self.shape one_arg_scalar_funcs = [(w.size, _np.size), (w.shape, _np.shape), (w.ndim, _np.ndim)] @instantiate_parametrized_tests
TestOneArrAndShape
python
tensorflow__tensorflow
tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py
{ "start": 79748, "end": 86269 }
class ____(test_util.TensorFlowTestCase):

    @test_util.run_deprecated_v1
    def testBroadcastToBasic(self):
        for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
            with self.session(use_gpu=True):
                x = np.array([1, 2, 3], dtype=dtype)
                v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
                v_np = np.broadcast_to(x, [3, 3])
                self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToString(self):
        with self.session(use_gpu=True):
            x = np.array([b"1", b"2", b"3"])
            v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
            v_np = np.broadcast_to(x, [3, 3])
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToBool(self):
        with self.session(use_gpu=True):
            x = np.array([True, False, True], dtype=np.bool)
            v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
            v_np = np.broadcast_to(x, [3, 3])
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToShape(self):
        for input_dim in range(1, 6):
            for output_dim in range(input_dim, 6):
                with self.cached_session(use_gpu=True):
                    input_shape = [2] * input_dim
                    output_shape = [2] * output_dim
                    x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
                    v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
                    v_np = np.broadcast_to(x, output_shape)
                    self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToShapeInnerDim(self):
        input_shape = [2, 1, 3]
        output_shape = [2, 5, 3]
        with self.cached_session(use_gpu=True):
            x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
            v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
            v_np = np.broadcast_to(x, output_shape)
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToShapeLargerDim(self):
        input_shape = [2, 1, 3, 2, 2, 2]
        output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
        with self.cached_session(use_gpu=True):
            x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
            v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
            v_np = np.broadcast_to(x, output_shape)
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToShapeLargerDim2(self):
        input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
        output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
        with self.cached_session(use_gpu=True):
            x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
            v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
            v_np = np.broadcast_to(x, output_shape)
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToScalar(self):
        with self.session(use_gpu=True):
            x = np.array(1, dtype=np.int32)
            v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
            v_np = np.broadcast_to(x, [3, 3])
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastScalarToNonScalar(self):
        with self.session(use_gpu=True):
            x = np.array(1.0, dtype=np.float)
            v_tf = array_ops.broadcast_to(
                constant_op.constant(1.0), [2, 3, 4, 1, 1, 1]
            )
            v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
            self.assertAllEqual(v_tf.eval(), v_np)

    @test_util.run_deprecated_v1
    def testBroadcastToShapeTypeAndInference(self):
        for dtype in [dtypes.int32, dtypes.int64]:
            with self.cached_session(use_gpu=True):
                x = np.array([1, 2, 3])
                v_tf = array_ops.broadcast_to(
                    constant_op.constant(x), constant_op.constant([3, 3], dtype=dtype)
                )
                shape = v_tf.get_shape().as_list()
                v_np = np.broadcast_to(x, [3, 3])
                self.assertAllEqual(v_tf.eval(), v_np)
                # check shape inference when shape input is constant
                self.assertAllEqual(shape, v_np.shape)

    def testBroadcastToBadOutputShape(self):
        with context.eager_mode():
            with self.assertRaisesRegex(
                errors.InvalidArgumentError, "Unable to broadcast tensor of shape"
            ):
                self.evaluate(
                    array_ops.broadcast_to(
                        constant_op.constant([0, 1]), constant_op.constant([2, 1])
                    )
                )

    @test_util.run_deprecated_v1
    def testGradientForScalar(self):
        x = constant_op.constant(1, dtype=dtypes.float32)
        v = array_ops.broadcast_to(x, [2, 4, 3])
        out = 2 * v
        with self.cached_session():
            err = gradient_checker.compute_gradient_error(
                x, x.get_shape(), out, out.get_shape()
            )
        self.assertLess(err, 1e-4)

    @test_util.run_deprecated_v1
    def testGradientWithSameRank(self):
        x = constant_op.constant(
            np.reshape(np.arange(6), (2, 1, 3)), dtype=dtypes.float32
        )
        v = array_ops.broadcast_to(x, [2, 5, 3])
        out = 2 * v
        with self.cached_session():
            err = gradient_checker.compute_gradient_error(
                x, x.get_shape(), out, out.get_shape()
            )
        self.assertLess(err, 1e-4)

    @test_util.run_deprecated_v1
    def testGradientWithIncreasingRank(self):
        x = constant_op.constant([[1], [2]], dtype=dtypes.float32)
        v = array_ops.broadcast_to(x, [5, 2, 3])
        out = 2 * v
        with self.cached_session():
            err = gradient_checker.compute_gradient_error(
                x, x.get_shape(), out, out.get_shape()
            )
        self.assertLess(err, 1e-4)

    @test_util.run_deprecated_v1
    def testGradientWithBroadcastAllDimensions(self):
        x = constant_op.constant([1], dtype=dtypes.float32)
        v = array_ops.broadcast_to(x, [5, 2, 3])
        out = 2 * v
        with self.cached_session():
            err = gradient_checker.compute_gradient_error(
                x, x.get_shape(), out, out.get_shape()
            )
        self.assertLess(err, 1e-4)

    @test_util.run_deprecated_v1
    def testGradientWithLargeDim(self):
        input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
        output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
        x = constant_op.constant(
            np.array(np.random.randn(*input_shape), dtype=np.float32)
        )
        v = array_ops.broadcast_to(x, output_shape)
        out = 2 * v
        with self.cached_session():
            err = gradient_checker.compute_gradient_error(
                x, x.get_shape(), out, out.get_shape()
            )
        self.assertLess(err, 1e-4)
BroadcastToTest
python
fluentpython__example-code-2e
24-class-metaprog/tinyenums/nanoenum_demo.py
{ "start": 163, "end": 221 }
class ____(NanoEnum):
    cocoa
    coconut
    vanilla
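# Illustrative note (not part of the original file): bare names in the class
# body only work because the NanoEnum metaclass from this tinyenums demo
# auto-registers unknown names looked up during class creation; under a
# plain class statement, `cocoa` would raise NameError.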
Flavor
python
scikit-learn__scikit-learn
asv_benchmarks/benchmarks/decomposition.py
{ "start": 270, "end": 811 }
class ____(Transformer, Estimator, Benchmark):
    """
    Benchmarks for PCA.
    """

    param_names = ["svd_solver"]
    params = (["full", "arpack", "randomized"],)

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        return _mnist_dataset()

    def make_estimator(self, params):
        (svd_solver,) = params

        estimator = PCA(n_components=32, svd_solver=svd_solver, random_state=0)

        return estimator

    def make_scorers(self):
        make_pca_scorers(self)
PCABenchmark
python
django-import-export__django-import-export
tests/core/admin.py
{ "start": 3211, "end": 4801 }
class ____(ExportActionModelAdmin, ImportExportModelAdmin):
    """Example usage of custom import / export forms"""

    resource_classes = [EBookResource]
    import_form_class = CustomImportForm
    confirm_form_class = CustomConfirmImportForm
    export_form_class = CustomExportForm

    def get_confirm_form_initial(self, request, import_form):
        initial = super().get_confirm_form_initial(request, import_form)

        # Pass on the `author` value from the import form to
        # the confirm form (if provided)
        if import_form:
            initial["author"] = import_form.cleaned_data["author"].id
        return initial

    def get_import_resource_kwargs(self, request, **kwargs):
        # update resource kwargs so that the Resource is passed the authenticated user
        # This is included as an example of how dynamic values
        # can be passed to resources
        if "form" not in kwargs:
            # test for #1789
            raise ValueError("'form' param was expected in kwargs")
        kwargs = super().get_resource_kwargs(request, **kwargs)
        kwargs.update({"user": request.user})
        return kwargs

    def get_export_resource_kwargs(self, request, **kwargs):
        # this is overridden to demonstrate that custom form fields can be used
        # to override the export query.
        # The dict returned here will be passed as kwargs to EBookResource
        export_form = kwargs.get("export_form")
        if export_form:
            kwargs.update(author_id=export_form.cleaned_data["author"].id)
        return kwargs
CustomBookAdmin
python
python-markdown__markdown
markdown/inlinepatterns.py
{ "start": 36481, "end": 36776 }
class ____(ImageReferenceInlineProcessor):
    """ Short form of image reference: `![ref]`. """

    def evalId(self, data: str, index: int, text: str) -> tuple[str, int, bool]:
        """Evaluate the id of `[ref]`. """
        return text.lower(), index, True
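# Illustrative note (not part of the original file): for the short form
# `![ref]`, the bracketed text itself (lowercased) is reused as the
# reference id, e.g. `![Logo]` resolves against the `[logo]: ...` definition.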
ShortImageReferenceInlineProcessor
python
huggingface__transformers
src/transformers/models/blt/modular_blt.py
{ "start": 11537, "end": 11691 }
class ____(MllamaTextSelfAttention):
    def __init__(self, config: BltConfig, layer_idx: int):
        super().__init__(config, layer_idx)
BltSelfAttention
python
realpython__materials
solid-principles-python/printers_isp.py
{ "start": 987, "end": 1067 }
class ____(ABC):
    @abstractmethod
    def fax(self, document):
        pass
Fax
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/runs.py
{ "start": 2936, "end": 3139 }
class ____(graphene.Union):
    class Meta:
        types = launch_pipeline_run_result_types + pipeline_execution_error_types
        name = "LaunchRunReexecutionResult"
GrapheneLaunchRunReexecutionResult
python
wandb__wandb
wandb/automations/_filters/operators.py
{ "start": 5367, "end": 5550 }
class ____(BaseOp):
    val: Scalar = Field(alias="$gte")

    @override
    def __invert__(self) -> Lt:
        """Implements `~Gte(a) -> Lt(a)`."""
        return Lt(val=self.val)
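# Illustrative note (not part of the original file): inverting a bound
# comparison flips it to its complement instead of wrapping it in a NOT,
# e.g. `~Gte(val=5)` yields `Lt(val=5)`, keeping the serialized filter
# (`{"$gte": 5}` vs `{"$lt": 5}`) flat.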
Gte
python
vyperlang__vyper
tests/evm_backends/base_env.py
{ "start": 752, "end": 863 }
class ____:
    is_success: bool
    logs: list[LogEntry]
    gas_refunded: int
    gas_used: int
ExecutionResult
python
huggingface__transformers
src/transformers/models/glm4v_moe/modular_glm4v_moe.py
{ "start": 11217, "end": 13740 }
class ____(Glm4vConfig): r""" This is the configuration class to store the configuration of a [`Glm4vMoeModel`]. It is used to instantiate a GLM-4.5V model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of GLM-4.5V [zai-org/GLM-4.5V](https://huggingface.co/zai-org/GLM-4.5V). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeTextConfig`): The config object or dictionary of the text backbone. vision_config (`Union[PreTrainedConfig, dict]`, *optional*, defaults to `Glm4vMoeVisionConfig`): The config object or dictionary of the vision backbone. image_token_id (`int`, *optional*, defaults to 151363): The image token index to encode the image prompt. video_token_id (`int`, *optional*, defaults to 151364): The video token index to encode the image prompt. image_start_token_id (`int`, *optional*, defaults to 151339): The image start token index to encode the start of image. image_end_token_id (`int`, *optional*, defaults to 151340): The image end token index to encode the end of image. video_start_token_id (`int`, *optional*, defaults to 151341): The video start token index to encode the start of video. video_end_token_id (`int`, *optional*, defaults to 151342): The video end token index to encode the end of video. ```python >>> from transformers import Glm4vMoeForConditionalGeneration, Glm4vMoeConfig >>> # Initializing a GLM-4.5V style configuration >>> configuration = Glm4vMoeConfig() >>> # Initializing a model from the GLM-4.5V style configuration >>> model = Glm4vMoeForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" def __init__( self, text_config=None, vision_config=None, image_token_id=151363, video_token_id=151364, image_start_token_id=151339, image_end_token_id=151340, video_start_token_id=151341, video_end_token_id=151342, **kwargs, ): super().__init__()
Glm4vMoeConfig
python
wntrblm__nox
nox/sessions.py
{ "start": 4763, "end": 33687 }
class ____: """The Session object is passed into each user-defined session function. This is your primary means for installing package and running commands in your Nox session. """ __slots__ = ("_runner",) def __init__(self, runner: SessionRunner) -> None: self._runner = runner @property def __dict__(self) -> dict[str, SessionRunner]: # type: ignore[override] """Attribute dictionary for object inspection. This is needed because ``__slots__`` turns off ``__dict__`` by default. Unlike a typical object, modifying the result of this dictionary won't allow modification of the instance. """ return {"_runner": self._runner} @property def name(self) -> str: """The name of this session.""" return self._runner.friendly_name @property def env(self) -> dict[str, str | None]: """A dictionary of environment variables to pass into all commands.""" return self.virtualenv.env @property def env_dir(self) -> pathlib.Path: """The path to the environment of this session.""" return pathlib.Path(self._runner.envdir) @property def posargs(self) -> list[str]: """Any extra arguments from the ``nox`` commandline or :class:`Session.notify`.""" return self._runner.posargs @property def virtualenv(self) -> ProcessEnv: """The virtualenv that all commands are run in.""" venv = self._runner.venv if venv is None: msg = "A virtualenv has not been created for this session" raise ValueError(msg) return venv @property def venv_backend(self) -> str: """The venv_backend selected.""" venv = self._runner.venv if venv is None: return "none" return venv.venv_backend @property def python(self) -> str | Sequence[str] | bool | None: """The python version passed into ``@nox.session``.""" return self._runner.func.python @property def bin_paths(self) -> list[str] | None: """The bin directories for the virtualenv.""" return self.virtualenv.bin_paths @property def bin(self) -> str: """The first bin directory for the virtualenv.""" paths = self.bin_paths if paths is None: msg = "The environment does not have a bin directory." raise ValueError(msg) return paths[0] def create_tmp(self) -> str: """Create, and return, a temporary directory.""" tmpdir = os.path.join(self._runner.envdir, "tmp") os.makedirs(tmpdir, exist_ok=True) self.env["TMPDIR"] = os.path.abspath(tmpdir) return tmpdir @property def cache_dir(self) -> pathlib.Path: """Create and return a 'shared cache' directory to be used across sessions.""" path = pathlib.Path(self._runner.global_config.envdir).joinpath(".cache") path.mkdir(exist_ok=True) return path @property def interactive(self) -> bool: """Returns True if Nox is being run in an interactive session or False otherwise.""" return not self._runner.global_config.non_interactive and sys.stdin.isatty() def install_and_run_script( self, script: str | os.PathLike[str], *args: str | os.PathLike[str], env: Mapping[str, str | None] | None = None, include_outer_env: bool = True, silent: bool = False, success_codes: Iterable[int] | None = None, log: bool = True, stdout: int | IO[str] | None = None, stderr: int | IO[str] | None = subprocess.STDOUT, interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT, terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT, ) -> Any | None: """ Install dependencies and run a Python script. 
""" deps = (nox.project.load_toml(script) or {}).get("dependencies", []) self.install(*deps) return self.run( "python", script, *args, env=env, include_outer_env=include_outer_env, silent=silent, success_codes=success_codes, external=None, log=log, stdout=stdout, stderr=stderr, interrupt_timeout=interrupt_timeout, terminate_timeout=terminate_timeout, ) @property def invoked_from(self) -> str: """The directory that Nox was originally invoked from. Since you can use the ``--noxfile / -f`` command-line argument to run a Noxfile in a location different from your shell's current working directory, Nox automatically changes the working directory to the Noxfile's directory before running any sessions. This gives you the original working directory that Nox was invoked form. """ return self._runner.global_config.invoked_from # type: ignore[no-any-return] def chdir(self, dir: str | os.PathLike[str]) -> _WorkingDirContext: """Change the current working directory. Can be used as a context manager to automatically restore the working directory:: with session.chdir("somewhere/deep/in/monorepo"): # Runs in "/somewhere/deep/in/monorepo" session.run("pytest") # Runs in original working directory session.run("flake8") """ self.log(f"cd {dir}") return _WorkingDirContext(dir) cd = chdir """An alias for :meth:`chdir`.""" def _run_func(self, func: Callable[..., Any], args: Iterable[Any]) -> Any: """Legacy support for running a function through :func`run`.""" self.log(f"{func}(args={args!r})") try: return func(*args) except Exception as e: logger.exception(f"Function {func!r} raised {e!r}.") raise nox.command.CommandFailed() from e def run( self, *args: str | os.PathLike[str], env: Mapping[str, str | None] | None = None, include_outer_env: bool = True, silent: bool = False, success_codes: Iterable[int] | None = None, log: bool = True, external: ExternalType | None = None, stdout: int | IO[str] | None = None, stderr: int | IO[str] | None = subprocess.STDOUT, interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT, terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT, ) -> Any | None: """Run a command. Commands must be specified as a list of strings, for example:: session.run('pytest', '-k', 'fast', 'tests/') session.run('flake8', '--import-order-style=google') You **can not** just pass everything as one string. For example, this **will not work**:: session.run('pytest -k fast tests/') You can set environment variables for the command using ``env``:: session.run( 'bash', '-c', 'echo $SOME_ENV', env={'SOME_ENV': 'Hello'}) You can extend the shutdown timeout to allow long-running cleanup tasks to complete before being terminated. For example, if you wanted to allow ``pytest`` extra time to clean up large projects in the case that Nox receives an interrupt signal from your build system and needs to terminate its child processes:: session.run( 'pytest', '-k', 'long_cleanup', interrupt_timeout=10.0, terminate_timeout=2.0) You can also tell Nox to treat non-zero exit codes as success using ``success_codes``. For example, if you wanted to treat the ``pytest`` "tests discovered, but none selected" error as success:: session.run( 'pytest', '-k', 'not slow', success_codes=[0, 5]) On Windows, builtin commands like ``del`` cannot be directly invoked, but you can use ``cmd /c`` to invoke them:: session.run('cmd', '/c', 'del', 'docs/modules.rst') If ``session.run`` fails, it will stop the session and will not run the next steps. Basically, this will raise a Python exception. 
        With that in mind, you can use a ``try...finally`` block for cleanup
        steps that will run even if the other steps fail::

            try:
                session.run("coverage", "run", "-m", "pytest")
            finally:
                # Display coverage report even when tests fail.
                session.run("coverage", "report")

        If you pass ``silent=True``, you can capture the output of a command
        that would otherwise be shown to the user. For example, to get the
        current Git commit ID::

            out = session.run(
                "git", "rev-parse", "--short", "HEAD",
                external=True, silent=True
            )

            print("Current Git commit is", out.strip())

        :param env: A dictionary of environment variables to expose to the
            command. By default, all environment variables are passed. You
            can block an environment variable from the outer environment by
            setting it to None.
        :type env: dict or None
        :param include_outer_env: Boolean parameter that determines if the
            environment variables from the nox invocation environment should
            be passed to the command. ``True`` by default.
        :type include_outer_env: bool
        :param bool silent: Silence command output, unless the command fails.
            If ``True``, returns the command output (unless the command
            fails). ``False`` by default.
        :param success_codes: A list of return codes that are considered
            successful. By default, only ``0`` is considered success.
        :type success_codes: list, tuple, or None
        :param external: If False (the default) then programs not in the
            virtualenv path will cause a warning. If True, no warning will be
            emitted. These warnings can be turned into errors using
            ``--error-on-external-run``. This has no effect for sessions that
            do not have a virtualenv.
        :type external: bool
        :param interrupt_timeout: The timeout (in seconds) that Nox should
            wait after it and its children receive an interrupt signal before
            sending a terminate signal to its children. Set to ``None`` to
            never send a terminate signal. Default: ``0.3``
        :type interrupt_timeout: float or None
        :param terminate_timeout: The timeout (in seconds) that Nox should
            wait after it sends a terminate signal to its children before
            sending a kill signal to them. Set to ``None`` to never send a
            kill signal. Default: ``0.2``
        :type terminate_timeout: float or None
        :param stdout: Redirect standard output of the command into a file.
            Can't be combined with *silent*.
        :type stdout: file or file descriptor
        :param stderr: Redirect standard error of the command into a file.
            Can't be combined with *silent*.
        :type stderr: file or file descriptor
        """
        if not args:
            msg = "At least one argument required to run()."
            raise ValueError(msg)
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            msg = "First argument to `session.run` is a list. Did you mean to use `session.run(*args)`?"
raise ValueError(msg) if self._runner.global_config.install_only: logger.info(f"Skipping {args[0]} run, as --install-only is set.") return None return self._run( *args, env=env, include_outer_env=include_outer_env, silent=silent, success_codes=success_codes, log=log, external=external, stdout=stdout, stderr=stderr, interrupt_timeout=interrupt_timeout, terminate_timeout=terminate_timeout, ) def run_install( self, *args: str | os.PathLike[str], env: Mapping[str, str | None] | None = None, include_outer_env: bool = True, silent: bool = False, success_codes: Iterable[int] | None = None, log: bool = True, external: ExternalType | None = None, stdout: int | IO[str] | None = None, stderr: int | IO[str] | None = subprocess.STDOUT, interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT, terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT, ) -> Any | None: """Run a command in the install step. This is a variant of :meth:`run` that runs even in the presence of ``--install-only``. This method returns early if ``--no-install`` is specified and the virtualenv is being reused. (In nox 2023.04.22 and earlier, this was called ``run_always``, and that continues to be available as an alias.) Here are some cases where this method is useful: - You need to install packages using a command other than ``pip install`` or ``conda install``. - You need to run a command as a prerequisite of package installation, such as building a package or compiling a binary extension. :param env: A dictionary of environment variables to expose to the command. By default, all environment variables are passed. :type env: dict or None :param include_outer_env: Boolean parameter that determines if the environment variables from the nox invocation environment should be passed to the command. ``True`` by default. :type include_outer_env: bool :param bool silent: Silence command output, unless the command fails. ``False`` by default. :param success_codes: A list of return codes that are considered successful. By default, only ``0`` is considered success. :type success_codes: list, tuple, or None :param external: If False (the default) then programs not in the virtualenv path will cause a warning. If True, no warning will be emitted. These warnings can be turned into errors using ``--error-on-external-run``. This has no effect for sessions that do not have a virtualenv. :type external: bool :param interrupt_timeout: The timeout (in seconds) that Nox should wait after it and its children receive an interrupt signal before sending a terminate signal to its children. Set to ``None`` to never send a terminate signal. Default: ``0.3`` :type interrupt_timeout: float or None :param terminate_timeout: The timeout (in seconds) that Nox should wait after it sends a terminate signal to its children before sending a kill signal to them. Set to ``None`` to never send a kill signal. 
            Default: ``0.2``
        :type terminate_timeout: float or None
        """
        if (
            self._runner.global_config.no_install
            and self._runner.venv is not None
            and self._runner.venv._reused
        ):
            return None

        if not args:
            msg = "At least one argument required to run_install()"
            raise ValueError(msg)

        return self._run(
            *args,
            env=env,
            include_outer_env=include_outer_env,
            silent=silent,
            success_codes=success_codes,
            log=log,
            external=external,
            stdout=stdout,
            stderr=stderr,
            interrupt_timeout=interrupt_timeout,
            terminate_timeout=terminate_timeout,
        )

    def run_always(
        self,
        *args: str | os.PathLike[str],
        env: Mapping[str, str | None] | None = None,
        include_outer_env: bool = True,
        silent: bool = False,
        success_codes: Iterable[int] | None = None,
        log: bool = True,
        external: ExternalType | None = None,
        stdout: int | IO[str] | None = None,
        stderr: int | IO[str] | None = subprocess.STDOUT,
        interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT,
        terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT,
    ) -> Any | None:
        """This is an alias to ``run_install``, which better describes the
        use case.

        :meta private:
        """
        return self.run_install(
            *args,
            env=env,
            include_outer_env=include_outer_env,
            silent=silent,
            success_codes=success_codes,
            log=log,
            external=external,
            stdout=stdout,
            stderr=stderr,
            interrupt_timeout=interrupt_timeout,
            terminate_timeout=terminate_timeout,
        )

    def _run(
        self,
        *args: str | os.PathLike[str],
        env: Mapping[str, str | None] | None = None,
        include_outer_env: bool,
        silent: bool,
        success_codes: Iterable[int] | None,
        log: bool,
        external: ExternalType | None,
        stdout: int | IO[str] | None,
        stderr: int | IO[str] | None,
        interrupt_timeout: float | None,
        terminate_timeout: float | None,
    ) -> Any:
        """Like run(), except that it runs even if --install-only is provided."""
        # Legacy support - run a given function.
        if callable(args[0]):
            return self._run_func(args[0], args[1:])  # type: ignore[unreachable]

        # Using `"uv"` or `"uvx"` when `uv` is the backend is guaranteed to
        # work, even if it was co-installed with nox.
        if self.virtualenv.venv_backend == "uv" and nox.virtualenv.UV != "uv":
            if (
                args[0] == "uv"
                and shutil.which("uv", path=self.bin) is None  # Session uv takes priority
            ):
                args = (nox.virtualenv.UV, *args[1:])
            elif args[0] == "uvx" and shutil.which("uvx", path=self.bin) is None:
                args = (f"{nox.virtualenv.UV}x", *args[1:])

        # Combine the env argument with our virtualenv's env vars.
        env = self.virtualenv._get_env(env or {}, include_outer_env=include_outer_env)

        # If --error-on-external-run is specified, error on external programs.
        if self._runner.global_config.error_on_external_run and external is None:
            external = "error"

        # Allow all external programs when running outside a sandbox.
        if (
            not self.virtualenv.is_sandboxed
            or args[0] in self.virtualenv.allowed_globals
        ):
            external = True

        if external is None:
            external = False

        # Run a shell command.
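        # ``paths=self.bin_paths`` below lets the command runner resolve the
        # executable against the environment's bin directories first, while
        # ``external`` controls how commands found outside those paths are
        # treated (warn, error, or allow).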
        return nox.command.run(
            args,
            env=env,
            paths=self.bin_paths,
            silent=silent,
            success_codes=success_codes,
            log=log,
            external=external,
            stdout=stdout,
            stderr=stderr,
            interrupt_timeout=interrupt_timeout,
            terminate_timeout=terminate_timeout,
        )

    def conda_install(
        self,
        *args: str,
        auto_offline: bool = True,
        channel: str | Sequence[str] = "",
        env: Mapping[str, str] | None = None,
        include_outer_env: bool = True,
        silent: bool | None = None,
        success_codes: Iterable[int] | None = None,
        log: bool = True,
        stdout: int | IO[str] | None = None,
        stderr: int | IO[str] | None = subprocess.STDOUT,
        interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT,
        terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT,
    ) -> None:
        """Install invokes `conda install`_ to install packages inside of the
        session's environment.

        To install packages directly::

            session.conda_install('pandas')
            session.conda_install('numpy', 'scipy')
            session.conda_install('dask==2.1.0', channel='conda-forge')

        To install packages from a ``requirements.txt`` file::

            session.conda_install('--file', 'requirements.txt')
            session.conda_install('--file', 'requirements-dev.txt')

        By default this method will detect when an internet connection is not
        available and will add the `--offline` flag automatically in that
        case. To disable this behaviour, set `auto_offline=False`.

        To install the current package without clobbering conda-installed
        dependencies::

            session.install('.', '--no-deps')
            # Install in editable mode.
            session.install('-e', '.', '--no-deps')

        You can specify a conda channel using `channel=`; a falsey value will
        not change the current channels. You can specify a list of channels
        if needed. It is highly recommended to specify this; micromamba does
        not set default channels, and default channels vary for conda. Note
        that "defaults" is also not permissively licensed like "conda-forge"
        is.

        Additional keyword args are the same as for :meth:`run`.

        .. _conda install:
        """
        venv = self._runner.venv

        prefix_args: tuple[str, ...] = ()
        if isinstance(venv, CondaEnv):
            prefix_args = ("--prefix", venv.location)
        elif not isinstance(venv, PassthroughEnv):
            msg = (
                "A session without a conda environment can not install dependencies"
                " from conda."
            )
            raise TypeError(msg)

        if not args:
            msg = "At least one argument required to install()."
            raise ValueError(msg)

        if self._runner.global_config.no_install and (
            isinstance(venv, PassthroughEnv) or venv._reused
        ):
            return

        # Escape args that need it (conda-specific; pip install does not need this)
        if sys.platform.startswith("win32"):
            args = _dblquote_pkg_install_args(args)

        if silent is None:
            silent = not self._runner.global_config.verbose

        extraopts: list[str] = []
        if auto_offline and venv.is_offline():
            logger.warning(
                "Automatically setting the `--offline` flag as conda repo seems"
                " unreachable."
) extraopts.append("--offline") if channel: if isinstance(channel, str): extraopts.append(f"--channel={channel}") else: extraopts += [f"--channel={c}" for c in channel] self._run( venv.conda_cmd, "install", "--yes", *extraopts, *prefix_args, *args, env=env, include_outer_env=include_outer_env, silent=silent, success_codes=success_codes, log=log, external="error", stdout=stdout, stderr=stderr, interrupt_timeout=interrupt_timeout, terminate_timeout=terminate_timeout, ) def install( self, *args: str, env: Mapping[str, str | None] | None = None, include_outer_env: bool = True, silent: bool | None = None, success_codes: Iterable[int] | None = None, log: bool = True, external: ExternalType | None = None, # noqa: ARG002 stdout: int | IO[str] | None = None, stderr: int | IO[str] | None = subprocess.STDOUT, interrupt_timeout: float | None = DEFAULT_INTERRUPT_TIMEOUT, terminate_timeout: float | None = DEFAULT_TERMINATE_TIMEOUT, ) -> None: """Install invokes `pip`_ to install packages inside of the session's virtualenv. To install packages directly:: session.install('pytest') session.install('requests', 'mock') session.install('requests[security]==2.9.1') To install packages from a ``requirements.txt`` file:: session.install('-r', 'requirements.txt') session.install('-r', 'requirements-dev.txt') To install the current package:: session.install('.') # Install in editable mode. session.install('-e', '.') Additional keyword args are the same as for :meth:`run`. .. warning:: Running ``session.install`` without a virtual environment is no longer supported. If you still want to do that, please use ``session.run("pip", "install", ...)`` instead. .. warning:: The ``uv`` backend does not reinstall, even for local packages, so you need to include ``--reinstall-package <pkg-name>`` (uv-only) if reusing the environment. .. _pip: https://pip.readthedocs.org """ venv = self._runner.venv if not isinstance( venv, (CondaEnv, VirtualEnv, PassthroughEnv) ): # pragma: no cover msg = f"A session without a virtualenv (got {venv!r}) can not install dependencies." raise TypeError(msg) if isinstance(venv, PassthroughEnv): if self._runner.global_config.no_install: return msg = ( f"Session {self.name} does not have a virtual environment, so use of" " session.install() is no longer allowed since it would modify the" " global Python environment. If you're really sure that is what you" ' want to do, use session.run("pip", "install", ...) instead.' ) raise ValueError(msg) if not args: msg = "At least one argument required to install()." raise ValueError(msg) if self._runner.global_config.no_install and venv._reused: return if silent is None: silent = not self._runner.global_config.verbose if isinstance(venv, VirtualEnv) and venv.venv_backend == "uv": cmd = ["uv", "pip", "install"] else: cmd = ["python", "-m", "pip", "install"] self._run( *cmd, *args, env=env, include_outer_env=include_outer_env, external="error", silent=silent, success_codes=success_codes, log=log, stdout=stdout, stderr=stderr, interrupt_timeout=interrupt_timeout, terminate_timeout=terminate_timeout, ) def notify( self, target: str | SessionRunner, posargs: Iterable[str] | None = None, ) -> None: """Place the given session at the end of the queue. This method is idempotent; multiple notifications to the same session have no effect. 
A common use case is to notify a code coverage analysis session from a test session:: @nox.session def test(session): session.run("pytest") session.notify("coverage") @nox.session def coverage(session): session.run("coverage") Now if you run `nox -s test`, the coverage session will run afterwards. Args: target (Union[str, Callable]): The session to be notified. This may be specified as the appropriate string (same as used for ``nox -s``) or using the function object. posargs (Optional[Iterable[str]]): If given, sets the positional arguments *only* for the queued session. Otherwise, the standard globally available positional arguments will be used instead. """ if posargs is not None: posargs = list(posargs) self._runner.manifest.notify(target, posargs) def log(self, *args: Any, **kwargs: Any) -> None: """Outputs a log during the session.""" logger.info(*args, **kwargs) def warn(self, *args: Any, **kwargs: Any) -> None: """Outputs a warning during the session.""" logger.warning(*args, **kwargs) def debug(self, *args: Any, **kwargs: Any) -> None: """Outputs a debug-level message during the session.""" logger.debug(*args, **kwargs) def error(self, *args: Any) -> NoReturn: """Immediately aborts the session and optionally logs an error.""" raise _SessionQuit(*args) def skip(self, *args: Any) -> NoReturn: """Immediately skips the session and optionally logs a warning.""" raise _SessionSkip(*args)
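# A minimal usage sketch (hypothetical Noxfile, not part of the class above):
# it exercises install(), run(), posargs and notify(), assuming this Session
# object is injected by the @nox.session decorator as usual.
import nox

@nox.session
def tests(session):
    session.install("pytest", "coverage")
    session.run("coverage", "run", "-m", "pytest", *session.posargs)
    session.notify("report")  # queue the report session after this one

@nox.session
def report(session):
    session.run("coverage", "report")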
Session
python
spyder-ide__spyder
spyder/plugins/updatemanager/container.py
{ "start": 816, "end": 4107 }
class ____(PluginMainContainer):

    def __init__(self, name, plugin, parent=None):
        super().__init__(name, plugin, parent)

        self.install_on_close = False

    # ---- PluginMainContainer API
    # -------------------------------------------------------------------------
    def setup(self):
        self.dialog_manager = DialogManager()
        self.update_manager = UpdateManagerWidget(parent=self)
        self.update_manager_status = UpdateManagerStatus(parent=self)

        # Actions
        self.check_update_action = self.create_action(
            UpdateManagerActions.SpyderCheckUpdateAction,
            _("Check for updates"),
            triggered=self.start_check_update
        )

        # Signals
        self.update_manager.sig_set_status.connect(self.set_status)
        self.update_manager.sig_disable_actions.connect(
            self._set_actions_state
        )
        self.update_manager.sig_block_status_signals.connect(
            self.update_manager_status.blockSignals)
        self.update_manager.sig_download_progress.connect(
            self.update_manager_status.set_download_progress)
        self.update_manager.sig_exception_occurred.connect(
            self.sig_exception_occurred
        )
        self.update_manager.sig_install_on_close.connect(
            self.set_install_on_close)
        self.update_manager.sig_quit_requested.connect(self.sig_quit_requested)

        self.update_manager_status.sig_check_update.connect(
            self.start_check_update)
        self.update_manager_status.sig_start_update.connect(self.start_update)
        self.update_manager_status.sig_show_progress_dialog.connect(
            self.update_manager.show_progress_dialog)

    def update_actions(self):
        pass

    def on_close(self):
        """To call from Spyder when the plugin is closed."""
        self.update_manager.cleanup_threads()

        # Run installer after Spyder is closed
        if self.install_on_close:
            self.update_manager.start_install()

        self.dialog_manager.close_all()

    # ---- Public API
    # -------------------------------------------------------------------------
    def set_status(self, status, latest_version=None):
        """Set Update Manager status."""
        self.update_manager_status.set_value(status)

    @Slot()
    def start_check_update(self, startup=False):
        """Check for Spyder updates."""
        self.update_manager.start_check_update(startup=startup)

    @Slot()
    def start_update(self):
        """Start the update process."""
        self.update_manager.start_update()

    def set_install_on_close(self, install_on_close):
        """Set whether to start the installer on close."""
        self.install_on_close = install_on_close

    # ---- Private API
    # -------------------------------------------------------------------------
    @Slot(bool)
    def _set_actions_state(self, is_disabled):
        self.check_update_action.setDisabled(is_disabled)

        # Change text to give better feedback to users about why the action
        # is disabled.
        if is_disabled:
            self.check_update_action.setText(_("Checking for updates..."))
        else:
            self.check_update_action.setText(_("Check for updates"))
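# A minimal driving sketch (hypothetical; in practice Spyder's plugin
# framework constructs the container and calls these methods itself):
def _demo_update_flow(container):
    container.setup()                           # widgets, actions, signal wiring
    container.start_check_update(startup=True)  # look for a new Spyder version
    container.set_install_on_close(True)        # defer installer to shutdown
    container.on_close()                        # cleans up and starts installer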
UpdateManagerContainer
python
astropy__astropy
astropy/utils/decorators.py
{ "start": 35117, "end": 45419 }
class ____(classmethod):
    """
    This is a method decorator that allows both an instance method and a
    `classmethod` to share the same name.

    When using `sharedmethod` on a method defined in a class's body, it may
    be called on an instance or on a class. In the former case it behaves
    like a normal instance method (a reference to the instance is
    automatically passed as the first ``self`` argument of the method)::

        >>> class Example:
        ...     @sharedmethod
        ...     def identify(self, *args):
        ...         print('self was', self)
        ...         print('additional args were', args)
        ...
        >>> ex = Example()
        >>> ex.identify(1, 2)
        self was <astropy.utils.decorators.Example object at 0x...>
        additional args were (1, 2)

    In the latter case, when the `sharedmethod` is called directly from a
    class, it behaves like a `classmethod`::

        >>> Example.identify(3, 4)
        self was <class 'astropy.utils.decorators.Example'>
        additional args were (3, 4)

    This also supports a more advanced usage, where the `classmethod`
    implementation can be written separately. If the class's *metaclass* has
    a method of the same name as the `sharedmethod`, the version on the
    metaclass is delegated to::

        >>> class ExampleMeta(type):
        ...     def identify(self):
        ...         print('this implements the {0}.identify '
        ...               'classmethod'.format(self.__name__))
        ...
        >>> class Example(metaclass=ExampleMeta):
        ...     @sharedmethod
        ...     def identify(self):
        ...         print('this implements the instancemethod')
        ...
        >>> Example().identify()
        this implements the instancemethod
        >>> Example.identify()
        this implements the Example.identify classmethod
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            mcls = type(objtype)
            clsmeth = getattr(mcls, self.__func__.__name__, None)
            if callable(clsmeth):
                func = clsmeth
            else:
                func = self.__func__
            return self._make_method(func, objtype)
        else:
            return self._make_method(self.__func__, obj)

    @staticmethod
    def _make_method(func, instance):
        return types.MethodType(func, instance)


def format_doc(docstring, *args, **kwargs):
    """
    Replaces the docstring of the decorated object and then formats it.

    The formatting works like :meth:`str.format` and if the decorated object
    already has a docstring this docstring can be included in the new
    documentation if you use the ``{__doc__}`` placeholder.

    Its primary use is for reusing a *long* docstring in multiple functions
    when it is the same or only slightly different between them.

    Parameters
    ----------
    docstring : str or object or None
        The docstring that will replace the docstring of the decorated
        object. If it is an object like a function or class it will take the
        docstring of this object. If it is a string it will use the string
        itself. One special case is ``None``: then it will use the decorated
        function's docstring and format it.

    args :
        passed to :meth:`str.format`.

    kwargs :
        passed to :meth:`str.format`. If the function has a (not empty)
        docstring the original docstring is added to the kwargs with the
        keyword ``'__doc__'``.

    Raises
    ------
    ValueError
        If the ``docstring`` (or interpreted docstring if it was ``None``
        or not a string) is empty.

    IndexError, KeyError
        If a placeholder in the (interpreted) ``docstring`` was not filled.
        See :meth:`str.format` for more information.

    Notes
    -----
    Using this decorator allows, for example, Sphinx to parse the correct
    docstring.

    Examples
    --------

    Replacing the current docstring is very easy::

        >>> from astropy.utils.decorators import format_doc
        >>> @format_doc('''Perform num1 + num2''')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform num1 + num2

    Sometimes instead of replacing you only want to add to it::

        >>> doc = '''
        ...       {__doc__}
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...       '''
        >>> @format_doc(doc)
        ... def add(num1, num2):
        ...     '''Perform addition.'''
        ...     return num1+num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number

    In case one might want to format it further::

        >>> doc = '''
        ...       Perform {0}.
        ...       Parameters
        ...       ----------
        ...       num1, num2 : Numbers
        ...       Returns
        ...       -------
        ...       result: Number
        ...           result of num1 {op} num2
        ...       {__doc__}
        ...       '''
        >>> @format_doc(doc, 'addition', op='+')
        ... def add(num1, num2):
        ...     return num1+num2
        ...
        >>> @format_doc(doc, 'subtraction', op='-')
        ... def subtract(num1, num2):
        ...     '''Notes: This one has additional notes.'''
        ...     return num1-num2
        ...
        >>> help(add)  # doctest: +SKIP
        Help on function add in module __main__:
        <BLANKLINE>
        add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
        >>> help(subtract)  # doctest: +SKIP
        Help on function subtract in module __main__:
        <BLANKLINE>
        subtract(num1, num2)
            Perform subtraction.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 - num2
            Notes : This one has additional notes.

    These methods can be combined; even taking the docstring from another
    object is possible. You just have to specify the object as the
    ``docstring`` attribute::

        >>> @format_doc(add)
        ... def another_add(num1, num2):
        ...     return num1 + num2
        ...
        >>> help(another_add)  # doctest: +SKIP
        Help on function another_add in module __main__:
        <BLANKLINE>
        another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2

    But be aware that this decorator *only* formats the given docstring, not
    the strings passed as ``args`` or ``kwargs`` (not even the original
    docstring)::

        >>> @format_doc(doc, 'addition', op='+')
        ... def yet_another_add(num1, num2):
        ...     '''This one is good for {0}.'''
        ...     return num1 + num2
        ...
        >>> help(yet_another_add)  # doctest: +SKIP
        Help on function yet_another_add in module __main__:
        <BLANKLINE>
        yet_another_add(num1, num2)
            Perform addition.
            Parameters
            ----------
            num1, num2 : Numbers
            Returns
            -------
            result : Number
                result of num1 + num2
            This one is good for {0}.

    To work around it you could specify the docstring to be ``None``::

        >>> @format_doc(None, 'addition')
        ... def last_add_i_swear(num1, num2):
        ...     '''This one is good for {0}.'''
        ...     return num1 + num2
        ...
        >>> help(last_add_i_swear)  # doctest: +SKIP
        Help on function last_add_i_swear in module __main__:
        <BLANKLINE>
        last_add_i_swear(num1, num2)
            This one is good for addition.

    Using ``None`` as the docstring allows you to use the decorator twice on
    an object: first to parse the new docstring and then to parse the
    original docstring or the ``args`` and ``kwargs``.
    """
    if sys.flags.optimize >= 2:
        # docstrings are dropped at runtime, so let's return a noop decorator
        return lambda func: func

    def set_docstring(obj):
        if docstring is None:
            # None means: use the object's __doc__
            doc = obj.__doc__
            # Delete documentation in this case so we don't end up with
            # awkwardly self-inserted docs.
            obj.__doc__ = None
        elif isinstance(docstring, str):
            # String: use the string that was given
            doc = docstring
        else:
            # Something else: use the __doc__ of this object
            doc = docstring.__doc__

        if not doc:
            # In case the docstring is empty it's probably not what was wanted.
            raise ValueError(
                "docstring must be a string or contain a "
                "docstring that is not empty."
            )

        # Dedent both the original and the new docstring to ensure consistent
        # leading whitespace, because from Python 3.13 the bytecode compiler
        # strips leading whitespace from docstrings. If the text in ``doc``
        # has any leading whitespace, this can lead to reST/Sphinx errors.
        if sys.version_info[:2] >= (3, 13):
            doc = textwrap.dedent(doc).lstrip("\n")

        # If the original has a not-empty docstring append it to the format
        # kwargs.
        kwargs["__doc__"] = obj.__doc__ or ""
        obj.__doc__ = doc.format(*args, **kwargs)
        return obj

    return set_docstring
sharedmethod