Dataset columns:
- language: string, 1 distinct value
- repo: string, 346 distinct values
- path: string, lengths 6 to 201
- class_span: dict
- source: string, lengths 21 to 2.38M
- target: string, lengths 1 to 96
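Each record below holds a Python class body whose name has been masked with `____` (the `source` column), together with the original class name (the `target` column) and the character span of that class in its file (`class_span`). A minimal sketch of how one record could be consumed, assuming the rows are available as plain dictionaries; the loading mechanism itself is not specified here:

```python
# Illustrative only: assumes each record is a dict keyed by the columns listed above.
record = {
    "language": "python",
    "repo": "doocs__leetcode",
    "path": "solution/0100-0199/0122.Best Time to Buy and Sell Stock II/Solution2.py",
    "class_span": {"start": 0, "end": 345},
    "source": "class ____: ...",  # full masked class body in the real record
    "target": "Solution",
}

# Restoring the masked name reconstructs the original class header.
restored = record["source"].replace("____", record["target"], 1)
print(restored)  # class Solution: ...
```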
language: python
repo: mkdocs__mkdocs
path: mkdocs/livereload/__init__.py
class_span: { "start": 12888, "end": 13530 }
source:
class ____(wsgiref.simple_server.WSGIRequestHandler):
    def log_request(self, code="-", size="-"):
        level = logging.DEBUG if str(code) == "200" else logging.WARNING
        log.log(level, f'"{self.requestline}" code {code}')

    def log_message(self, format, *args):
        log.debug(format, *args)


def _timestamp() -> int:
    return round(time.monotonic() * 1000)


def _try_relativize_path(path: str) -> str:
    """Make the path relative to current directory if it's under that directory."""
    p = pathlib.Path(path)
    try:
        p = p.relative_to(os.getcwd())
    except ValueError:
        pass
    return str(p)

target: _Handler
language: python
repo: bokeh__bokeh
path: src/bokeh/sphinxext/_internal/bokeh_sampledata_xref.py
class_span: { "start": 1958, "end": 2149 }
source:
class ____(nodes.General, nodes.Element):
    def __init__(self, *args, **kwargs):
        self.subfolder = kwargs.pop("subfolder", None)
        super().__init__(*args, **kwargs)

target: gallery_xrefs
language: python
repo: getsentry__sentry
path: src/sentry/api/serializers/models/dashboard.py
class_span: { "start": 21664, "end": 21842 }
source:
class ____(TypedDict, total=False):
    environment: list[str]
    period: str
    utc: str
    expired: bool
    start: datetime
    end: datetime

target: DashboardDetailsResponseOptional
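Because the class is declared with `total=False`, every key is optional. A small hedged illustration of what that allows; the dictionary values below are made up:

```python
from datetime import datetime, timezone
from typing import TypedDict


class DashboardDetailsResponseOptional(TypedDict, total=False):
    environment: list[str]
    period: str
    utc: str
    expired: bool
    start: datetime
    end: datetime


# With total=False, a partial dict type-checks just as well as a complete one.
partial: DashboardDetailsResponseOptional = {"period": "14d"}
full: DashboardDetailsResponseOptional = {
    "environment": ["production"],
    "period": "14d",
    "utc": "true",
    "expired": False,
    "start": datetime(2024, 1, 1, tzinfo=timezone.utc),
    "end": datetime(2024, 1, 15, tzinfo=timezone.utc),
}
print(partial, full["period"])
```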
language: python
repo: Unity-Technologies__ml-agents
path: ml-agents-envs/mlagents_envs/side_channel/stats_side_channel.py
class_span: { "start": 274, "end": 736 }
source:
class ____(Enum):
    # Values within the summary period are averaged before reporting.
    AVERAGE = 0

    # Only the most recent value is reported.
    MOST_RECENT = 1

    # Values within the summary period are summed up before reporting.
    SUM = 2

    # All values within a summary period are reported as a histogram.
    HISTOGRAM = 3


StatList = List[Tuple[float, StatsAggregationMethod]]
EnvironmentStats = Mapping[str, StatList]

target: StatsAggregationMethod
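The two aliases at the end describe how per-environment stats are shaped. A self-contained sketch of a value of that shape; the enum is restated from the record, while the stat names and numbers are invented for illustration:

```python
from enum import Enum
from typing import List, Mapping, Tuple


class StatsAggregationMethod(Enum):  # restated from the record above
    AVERAGE = 0
    MOST_RECENT = 1
    SUM = 2
    HISTOGRAM = 3


StatList = List[Tuple[float, StatsAggregationMethod]]
EnvironmentStats = Mapping[str, StatList]

# Invented stat names and values, purely to illustrate the shape.
stats: EnvironmentStats = {
    "Environment/Cumulative Reward": [
        (1.5, StatsAggregationMethod.AVERAGE),
        (2.0, StatsAggregationMethod.AVERAGE),
    ],
    "Environment/Lesson Number": [(3.0, StatsAggregationMethod.MOST_RECENT)],
}
print(sum(v for v, _ in stats["Environment/Cumulative Reward"]) / 2)  # 1.75
```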
language: python
repo: python__mypy
path: mypyc/transform/lower.py
class_span: { "start": 915, "end": 1344 }
source:
class ____(IRTransform):
    def visit_primitive_op(self, op: PrimitiveOp) -> Value | None:
        # The lowering implementation functions of various primitive ops are stored
        # in a registry, which is populated using function decorators. The name
        # of op (such as "int_eq") is used as the key.
        lower_fn = lowering_registry[op.desc.name]
        return lower_fn(self.builder, op.args, op.line)

target: LoweringVisitor
language: python
repo: crytic__slither
path: slither/detectors/functions/unimplemented.py
class_span: { "start": 864, "end": 4452 }
source:
class ____(AbstractDetector): """ Unimplemented functions detector """ ARGUMENT = "unimplemented-functions" HELP = "Unimplemented functions" IMPACT = DetectorClassification.INFORMATIONAL CONFIDENCE = DetectorClassification.HIGH WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#unimplemented-functions" WIKI_TITLE = "Unimplemented functions" WIKI_DESCRIPTION = "Detect functions that are not implemented on derived-most contracts." # region wiki_exploit_scenario WIKI_EXPLOIT_SCENARIO = """ ```solidity interface BaseInterface { function f1() external returns(uint); function f2() external returns(uint); } interface BaseInterface2 { function f3() external returns(uint); } contract DerivedContract is BaseInterface, BaseInterface2 { function f1() external returns(uint){ return 42; } } ``` `DerivedContract` does not implement `BaseInterface.f2` or `BaseInterface2.f3`. As a result, the contract will not properly compile. All unimplemented functions must be implemented on a contract that is meant to be used.""" # endregion wiki_exploit_scenario WIKI_RECOMMENDATION = "Implement all unimplemented functions in any contract you intend to use directly (not simply inherit from)." @staticmethod def _match_state_variable(contract: Contract, f: FunctionContract) -> bool: return any(s.full_name == f.full_name for s in contract.state_variables) def _detect_unimplemented_function(self, contract: Contract) -> Set[Function]: """ Detects any function definitions which are not implemented in the given contract. :param contract: The contract to search unimplemented functions for. :return: A list of functions which are not implemented. """ # If it's simply a contract signature, we have no functions. if contract.is_signature_only(): return set() # Populate our unimplemented functions set with any functions not implemented in this contract, excluding the # fallback function and constructor. unimplemented = set() for f in contract.all_functions_called: if not isinstance(f, Function): continue if ( not f.is_implemented and not f.is_constructor and not f.is_fallback and not f.is_constructor_variables ): if self.compilation_unit.solc_version not in older_solc_versions: # Since 0.5.1, Solidity allows creating state variable matching a function signature if not self._match_state_variable(contract, f): unimplemented.add(f) else: unimplemented.add(f) return unimplemented def _detect(self) -> List[Output]: """Detect unimplemented functions Recursively visit the calls Returns: list: {'vuln', 'filename,'contract','func'} """ results = [] for contract in self.compilation_unit.contracts_derived: functions = self._detect_unimplemented_function(contract) if functions: info: DETECTOR_INFO = [contract, " does not implement functions:\n"] for function in sorted(functions, key=lambda x: x.full_name): info += ["\t- ", function, "\n"] res = self.generate_result(info) results.append(res) return results
target: UnimplementedFunctionDetection
language: python
repo: doocs__leetcode
path: solution/0100-0199/0122.Best Time to Buy and Sell Stock II/Solution2.py
class_span: { "start": 0, "end": 345 }
source:
class ____:
    def maxProfit(self, prices: List[int]) -> int:
        n = len(prices)
        f = [[0] * 2 for _ in range(n)]
        f[0][0] = -prices[0]
        for i in range(1, n):
            f[i][0] = max(f[i - 1][0], f[i - 1][1] - prices[i])
            f[i][1] = max(f[i - 1][1], f[i - 1][0] + prices[i])
        return f[n - 1][1]

target: Solution
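In this record `f[i][0]` is the best profit on day `i` while holding a share and `f[i][1]` while holding none. A quick, self-contained check of the recurrence; the class is restated from the record and the price list is just a standard example:

```python
from typing import List


class Solution:  # masked name restored from the record above
    def maxProfit(self, prices: List[int]) -> int:
        n = len(prices)
        f = [[0] * 2 for _ in range(n)]
        f[0][0] = -prices[0]
        for i in range(1, n):
            f[i][0] = max(f[i - 1][0], f[i - 1][1] - prices[i])
            f[i][1] = max(f[i - 1][1], f[i - 1][0] + prices[i])
        return f[n - 1][1]


# Buy at 1, sell at 5, buy at 3, sell at 6: total profit 7.
print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))  # 7
```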
language: python
repo: mlflow__mlflow
path: mlflow/genai/judges/tools/get_trace_info.py
class_span: { "start": 568, "end": 1952 }
source:
class ____(JudgeTool):
    """
    Tool for retrieving high-level metadata about a trace.

    This provides trace metadata like ID, timing, state, and location
    without the detailed span data.
    """

    @property
    def name(self) -> str:
        return ToolNames.GET_TRACE_INFO

    def get_definition(self) -> ToolDefinition:
        return ToolDefinition(
            function=FunctionToolDefinition(
                name=ToolNames.GET_TRACE_INFO,
                description=(
                    "Retrieve high-level metadata about the trace including ID, timing, state, "
                    "location, and request/response previews. This provides an overview of the "
                    "trace without detailed span data. Use this to understand the overall trace "
                    "context, execution duration, and whether the trace completed successfully."
                ),
                parameters=ToolParamsSchema(
                    type="object",
                    properties={},
                    required=[],
                ),
            ),
            type="function",
        )

    def invoke(self, trace: Trace) -> TraceInfo:
        """
        Get metadata about the trace.

        Args:
            trace: The MLflow trace object to analyze

        Returns:
            TraceInfo object
        """
        return trace.info

target: GetTraceInfoTool
language: python
repo: coleifer__peewee
path: tests/keys.py
class_span: { "start": 294, "end": 423 }
source:
class ____(TestModel):
    title = CharField()
    package = ForeignKeyField(Package, Package.barcode, backref='items')

target: PackageItem
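The foreign key targets `Package.barcode` rather than the primary key. A hedged, self-contained sketch of how that plays out, assuming peewee is installed; the `Package` model is not part of this record, so its single unique `barcode` field is an assumption:

```python
from peewee import CharField, ForeignKeyField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Package(Model):
    # Assumed shape: the referenced column must be unique to serve as a FK target.
    barcode = CharField(unique=True)

    class Meta:
        database = db


class PackageItem(Model):  # masked name restored from the record above
    title = CharField()
    # References Package.barcode instead of Package's primary key.
    package = ForeignKeyField(Package, field=Package.barcode, backref="items")

    class Meta:
        database = db


db.create_tables([Package, PackageItem])
pkg = Package.create(barcode="101")
PackageItem.create(title="widget", package=pkg)
print([item.title for item in pkg.items])  # ['widget']
```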
language: python
repo: urllib3__urllib3
path: test/with_dummyserver/test_socketlevel.py
class_span: { "start": 79040, "end": 80117 }
source:
class ____(SocketDummyServerTestCase):
    def test_chunked_head_response_does_not_hang(self) -> None:
        self.start_response_handler(
            b"HTTP/1.1 200 OK\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"Content-type: text/plain\r\n"
            b"\r\n"
        )
        with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
            r = pool.request("HEAD", "/", timeout=LONG_TIMEOUT, preload_content=False)

            # stream will use the read_chunked method here.
            assert [] == list(r.stream())

    def test_empty_head_response_does_not_hang(self) -> None:
        self.start_response_handler(
            b"HTTP/1.1 200 OK\r\n"
            b"Content-Length: 256\r\n"
            b"Content-type: text/plain\r\n"
            b"\r\n"
        )
        with HTTPConnectionPool(self.host, self.port, retries=False) as pool:
            r = pool.request("HEAD", "/", timeout=LONG_TIMEOUT, preload_content=False)

            # stream will use the read method here.
            assert [] == list(r.stream())

target: TestHEAD
language: python
repo: joke2k__faker
path: faker/providers/credit_card/ru_RU/__init__.py
class_span: { "start": 210, "end": 3174 }
source:
class ____(CreditCardProvider): """Implement credit card provider for ``ru_RU`` locale. For all methods that take ``card_type`` as an argument, a random card type will be used if the supplied value is ``None``. The list of valid card types includes ``'amex'``, ``'maestro'``, ``'mastercard'``, ``'mir'``, ``'unionpay'``, and ``'visa'``. Sources: - https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN) """ prefix_visa = ["4"] prefix_mastercard = [ "51", "52", "53", "54", "55", "222%", "223", "224", "225", "226", "227", "228", "229", "23", "24", "25", "26", "270", "271", "2720", ] prefix_mir = ["2200", "2201", "2202", "2203", "2204"] prefix_maestro = [ "50", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", ] prefix_amex = ["34", "37"] prefix_unionpay = ["62", "81"] credit_card_types = OrderedDict( ( ("visa", CreditCard("Visa", prefix_visa, security_code="CVV2")), ( "mastercard", CreditCard("Mastercard", prefix_mastercard, security_code="CVC2"), ), ("mir", CreditCard("МИР", prefix_mir)), ("maestro", CreditCard("Maestro", prefix_maestro, security_code="CVV2")), ( "amex", CreditCard( "American Express", prefix_amex, 15, security_code="CID", security_code_length=4, ), ), ("unionpay", CreditCard("Union Pay", prefix_unionpay)), ) ) def credit_card_full(self, card_type: Optional[CardType] = None) -> str: """Generate a set of credit card details.""" card = self._credit_card_type(card_type) tpl = "{provider}\n{owner}\n{number} {expire_date}\n{security}: {security_nb}\n{issuer}" tpl = tpl.format( provider=card.name, owner=translit( self.generator.parse( self.random_element( [ "{{first_name_male}} {{last_name_male}}", "{{first_name_female}} {{last_name_female}}", ] ) ) ), number=self.credit_card_number(card), expire_date=self.credit_card_expire(), security=card.security_code, security_nb=self.credit_card_security_code(card), issuer=self.generator.parse("{{bank}}"), ) return self.generator.parse(tpl)
target: Provider
language: python
repo: simonw__datasette
path: datasette/views/special.py
class_span: { "start": 27054, "end": 33170 }
source:
class ____(BaseView): name = "api_explorer" has_json_alternate = False async def example_links(self, request): databases = [] for name, db in self.ds.databases.items(): database_visible, _ = await self.ds.check_visibility( request.actor, action="view-database", resource=DatabaseResource(database=name), ) if not database_visible: continue tables = [] table_names = await db.table_names() for table in table_names: visible, _ = await self.ds.check_visibility( request.actor, action="view-table", resource=TableResource(database=name, table=table), ) if not visible: continue table_links = [] tables.append({"name": table, "links": table_links}) table_links.append( { "label": "Get rows for {}".format(table), "method": "GET", "path": self.ds.urls.table(name, table, format="json"), } ) # If not mutable don't show any write APIs if not db.is_mutable: continue if await self.ds.allowed( action="insert-row", resource=TableResource(database=name, table=table), actor=request.actor, ): pks = await db.primary_keys(table) table_links.extend( [ { "path": self.ds.urls.table(name, table) + "/-/insert", "method": "POST", "label": "Insert rows into {}".format(table), "json": { "rows": [ { column: None for column in await db.table_columns(table) if column not in pks } ] }, }, { "path": self.ds.urls.table(name, table) + "/-/upsert", "method": "POST", "label": "Upsert rows into {}".format(table), "json": { "rows": [ { column: None for column in await db.table_columns(table) if column not in pks } ] }, }, ] ) if await self.ds.allowed( action="drop-table", resource=TableResource(database=name, table=table), actor=request.actor, ): table_links.append( { "path": self.ds.urls.table(name, table) + "/-/drop", "label": "Drop table {}".format(table), "json": {"confirm": False}, "method": "POST", } ) database_links = [] if ( await self.ds.allowed( action="create-table", resource=DatabaseResource(database=name), actor=request.actor, ) and db.is_mutable ): database_links.append( { "path": self.ds.urls.database(name) + "/-/create", "label": "Create table in {}".format(name), "json": { "table": "new_table", "columns": [ {"name": "id", "type": "integer"}, {"name": "name", "type": "text"}, ], "pk": "id", }, "method": "POST", } ) if database_links or tables: databases.append( { "name": name, "links": database_links, "tables": tables, } ) # Sort so that mutable databases are first databases.sort(key=lambda d: not self.ds.databases[d["name"]].is_mutable) return databases async def get(self, request): visible, private = await self.ds.check_visibility( request.actor, action="view-instance", ) if not visible: raise Forbidden("You do not have permission to view this instance") def api_path(link): return "/-/api#{}".format( urllib.parse.urlencode( { key: json.dumps(value, indent=2) if key == "json" else value for key, value in link.items() if key in ("path", "method", "json") } ) ) return await self.render( ["api_explorer.html"], request, { "example_links": await self.example_links(request), "api_path": api_path, "private": private, }, )
target: ApiExplorerView
language: python
repo: huggingface__transformers
path: tests/utils/test_cache_utils.py
class_span: { "start": 25314, "end": 37562 }
source:
class ____(unittest.TestCase): """Cache tests that rely on `torch.export()` and model loading""" @pytest.mark.torch_export_test def test_dynamic_cache_exportability(self): model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model = model.eval() tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") prompt = "What is the best way to debug python script?" inputs = tokenizer(prompt, return_tensors="pt") attention_mask = inputs.attention_mask input_ids = inputs.input_ids ep = export_with_dynamic_cache(model, input_ids, attention_mask) res = ep.module()( input_ids=input_ids, attention_mask=attention_mask, past_key_values=DynamicCache(config=model.config), use_cache=True, ) self.assertTrue(len(res.past_key_values) == model.config.num_hidden_layers) self.assertEqual(2 * model.config.num_hidden_layers + 1, len(ep.graph_signature.output_specs)) self.assertEqual( 3, len( [ x for x in ep.graph_signature.input_specs if x.kind == torch.export.graph_signature.InputKind.USER_INPUT ] ), ) past_key_values_eager = DynamicCache(config=model.config) res_eager = model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values_eager, use_cache=True, ) self.assertTrue(torch.allclose(res.logits, res_eager.logits, atol=1e-5)) for l1, l2 in zip(res.past_key_values.layers, res_eager.past_key_values.layers): self.assertTrue(torch.allclose(l1.keys, l2.keys, atol=1e-5)) self.assertTrue(torch.allclose(l1.values, l2.values, atol=1e-5)) @pytest.mark.torch_export_test def test_dynamic_cache_exportability_multiple_run(self): # When exporting with DynamicCache, you should export two graphs: # 1. A graph without cache # 2. A graph with cache # In the future, we will make improvements to export API to export two graphs # more seamlessly. model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") model = model.eval() tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM") prompt = "What is the best way to debug python script?" 
inputs = tokenizer(prompt, return_tensors="pt") attention_mask = inputs.attention_mask input_ids = inputs.input_ids ep = export_with_dynamic_cache(model, input_ids, attention_mask) res = ep.module()( input_ids=input_ids, attention_mask=attention_mask, past_key_values=DynamicCache(config=model.config), use_cache=True, ) self.assertTrue(len(res.past_key_values) == model.config.num_hidden_layers) self.assertEqual(2 * model.config.num_hidden_layers + 1, len(ep.graph_signature.output_specs)) self.assertEqual( 3, len( [ x for x in ep.graph_signature.input_specs if x.kind == torch.export.graph_signature.InputKind.USER_INPUT ] ), ) res_eager = model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=DynamicCache(config=model.config), use_cache=True, ) past_key_values_eager = res_eager.past_key_values past_key_values = res.past_key_values shapes = torch.export.ShapesCollection() dyn = torch.export.Dim.DYNAMIC(max=512) for ix in range(len(past_key_values)): shapes[past_key_values.layers[ix].keys] = (None, None, dyn, None) shapes[past_key_values.layers[ix].values] = (None, None, dyn, None) ep_second = torch.export.export( model, (), { "input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": True, }, strict=False, dynamic_shapes=shapes, ) res_export = ep_second.module()( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, use_cache=True, ) # It should work with variable len res_export_2 = ep_second.module()( input_ids=input_ids, attention_mask=attention_mask, past_key_values=res_export.past_key_values, use_cache=True, ) res_eager = model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values_eager, use_cache=True, ) res_eager_2 = model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=res_eager.past_key_values, use_cache=True, ) for l1, l2 in zip(res_export_2.past_key_values.layers, res_eager_2.past_key_values.layers): self.assertTrue(torch.allclose(l1.keys, l2.keys, atol=1e-5)) self.assertTrue(torch.allclose(l1.values, l2.values, atol=1e-5)) @unittest.skip("Runs on my machine locally, passed, no idea why it does not online") @pytest.mark.torch_export_test def test_static_cache_exportability(self): """ Tests that static cache works with `torch.export()` """ if not is_torch_greater_or_equal("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") set_seed(0) device = torch_device dtype = "bfloat16" cache_implementation = "static" attn_implementation = "sdpa" # Export and ExecuTorch only works for SdpaAttention batch_size = 1 max_cache_len = 1234 model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM" model = AutoModelForCausalLM.from_pretrained( model_id, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_cache_len, cache_config={ "batch_size": batch_size, "max_cache_len": max_cache_len, "device": device, }, ), ) # Check if cache config is passed through correctly self.assertEqual(model.generation_config.use_cache, True) self.assertEqual(model.generation_config.cache_implementation, cache_implementation) self.assertEqual(model.generation_config.max_length, max_cache_len) self.assertTrue(model.generation_config.cache_config is not None) self.assertEqual(model.generation_config.cache_config.get("batch_size"), batch_size) self.assertEqual(model.generation_config.cache_config.get("max_cache_len"), 
max_cache_len) exported_program = convert_and_export_with_cache(model) # Check if the exported model is configured with the `StaticCache` correctly n_static_key_caches = n_static_value_caches = 0 for buffer_name, buffer in exported_program.named_buffers(): if buffer_name.startswith("key_cache"): self.assertTrue(buffer.shape[0] == batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_static_key_caches = n_static_key_caches + 1 if buffer_name.startswith("value_cache"): self.assertTrue(buffer.shape[0] == batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_static_value_caches = n_static_value_caches + 1 self.assertEqual(n_static_key_caches, model.config.num_hidden_layers) self.assertEqual(n_static_value_caches, model.config.num_hidden_layers) # Export with dynamic shapes input_ids = torch.zeros((1, 3), dtype=torch.long, device=device) cache_position = torch.tensor([0, 1, 2], dtype=torch.long, device=device) dynamic_shapes = {"input_ids": {1: torch.export.Dim.DYNAMIC}, "cache_position": {0: torch.export.Dim.DYNAMIC}} strict = version.parse(torch.__version__) != version.parse("2.7.0") exported_program = convert_and_export_with_cache( model, example_input_ids=input_ids, example_cache_position=cache_position, dynamic_shapes=dynamic_shapes, strict=strict, ) from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=input_ids, cache_position=cache_position, dynamic_shapes=dynamic_shapes, strict=strict, ) @pytest.mark.torch_export_test def test_hybrid_cache_exportability(self): """ Tests that static cache works with `torch.export()` """ if not is_torch_greater_or_equal("2.6"): self.skipTest(reason="This test requires torch >= 2.6 to run.") from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM set_seed(0) model_id = "hf-internal-testing/tiny-random-Gemma3ForCausalLM" model = AutoModelForCausalLM.from_pretrained(model_id) model.eval() self.assertEqual(model.config.use_cache, True) # Export + hybrid StaticCache model.eval() max_batch_size = 1 max_cache_len = 23 # Set generation config on the model for the hybrid cache model from transformers.generation.configuration_utils import GenerationConfig model.generation_config = GenerationConfig( use_cache=True, cache_implementation="static", max_length=max_cache_len, cache_config={ "batch_size": max_batch_size, "max_cache_len": max_cache_len, "device": model.device, }, ) exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) n_g_key_caches = n_g_value_caches = 0 for buffer_name, buffer in exported_program.named_buffers(): if buffer_name.startswith("key_cache"): self.assertTrue(buffer.shape[0] == max_batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_g_key_caches = n_g_key_caches + 1 if buffer_name.startswith("value_cache"): self.assertTrue(buffer.shape[0] == max_batch_size) self.assertTrue(buffer.shape[2] == max_cache_len) n_g_value_caches = n_g_value_caches + 1 self.assertEqual(n_g_key_caches, model.config.num_hidden_layers) self.assertEqual(n_g_value_caches, model.config.num_hidden_layers) # Export with dynamic shapes using Dim.AUTO input_ids = torch.zeros((1, 3), dtype=torch.long) cache_position = torch.tensor([0, 1, 2], dtype=torch.long) 
dynamic_shapes = {"input_ids": {1: torch.export.Dim.DYNAMIC}, "cache_position": {0: torch.export.Dim.DYNAMIC}} strict = version.parse(torch.__version__) < version.parse("2.7.0") exported_program = exportable_module.export( input_ids=input_ids, cache_position=cache_position, dynamic_shapes=dynamic_shapes, strict=strict, )
target: CacheExportIntegrationTest
language: python
repo: scikit-learn__scikit-learn
path: sklearn/feature_extraction/text.py
class_span: { "start": 17918, "end": 31900 }
source:
class ____( TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None ): r"""Convert a collection of text documents to a matrix of token occurrences. It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory. - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters. - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. For an efficiency comparison of the different feature extractors, see :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. For an example of document clustering and comparison with :class:`~sklearn.feature_extraction.text.TfidfVectorizer`, see :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : {'filename', 'file', 'content'}, default='content' - If `'filename'`, the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. - If `'file'`, the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. - If `'content'`, the input is expected to be a sequence of items that can be of type string or byte. encoding : str, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode'} or callable, default=None Remove accents and perform other character normalization during the preprocessing step. 'ascii' is a fast method that only works on characters that have a direct ASCII mapping. 'unicode' is a slightly slower method that works on any character. None (default) means no character normalization is performed. Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`. lowercase : bool, default=True Convert all characters to lowercase before tokenizing. preprocessor : callable, default=None Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. Only applies if ``analyzer`` is not callable. 
tokenizer : callable, default=None Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. stop_words : {'english'}, list, default=None If 'english', a built-in stop word list for English is used. There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`). If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b" Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). If there is a capturing group in token_pattern then the captured group content, not the entire match, becomes the token. At most one capturing group is permitted. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. Only applies if ``analyzer`` is not callable. analyzer : {'word', 'char', 'char_wb'} or callable, default='word' Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. .. versionchanged:: 0.21 Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data is first read from the file and then passed to the given callable analyzer. n_features : int, default=(2 ** 20) The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. binary : bool, default=False If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. norm : {'l1', 'l2'}, default='l2' Norm used to normalize term vectors. None for no normalization. alternate_sign : bool, default=True When True, an alternating sign is added to the features as to approximately conserve the inner product in the hashed space even for small n_features. This approach is similar to sparse random projection. .. versionadded:: 0.19 dtype : type, default=np.float64 Type of the matrix returned by fit_transform() or transform(). See Also -------- CountVectorizer : Convert a collection of text documents to a matrix of token counts. TfidfVectorizer : Convert a collection of raw documents to a matrix of TF-IDF features. Notes ----- This estimator is :term:`stateless` and does not need to be fitted. However, we recommend to call :meth:`fit_transform` instead of :meth:`transform`, as parameter validation is only performed in :meth:`fit`. Examples -------- >>> from sklearn.feature_extraction.text import HashingVectorizer >>> corpus = [ ... 'This is the first document.', ... 'This document is the second document.', ... 'And this is the third one.', ... 'Is this the first document?', ... 
] >>> vectorizer = HashingVectorizer(n_features=2**4) >>> X = vectorizer.fit_transform(corpus) >>> print(X.shape) (4, 16) """ _parameter_constraints: dict = { "input": [StrOptions({"filename", "file", "content"})], "encoding": [str], "decode_error": [StrOptions({"strict", "ignore", "replace"})], "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable], "lowercase": ["boolean"], "preprocessor": [callable, None], "tokenizer": [callable, None], "stop_words": [StrOptions({"english"}), list, None], "token_pattern": [str, None], "ngram_range": [tuple], "analyzer": [StrOptions({"word", "char", "char_wb"}), callable], "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")], "binary": ["boolean"], "norm": [StrOptions({"l1", "l2"}), None], "alternate_sign": ["boolean"], "dtype": "no_validation", # delegate to numpy } def __init__( self, *, input="content", encoding="utf-8", decode_error="strict", strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer="word", n_features=(2**20), binary=False, norm="l2", alternate_sign=True, dtype=np.float64, ): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.alternate_sign = alternate_sign self.dtype = dtype @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y=None): """Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object HashingVectorizer instance. """ return self @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : ndarray of shape [n_samples, n_features] Training data. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object HashingVectorizer instance. """ # triggers a parameter validation if isinstance(X, str): raise ValueError( "Iterable over raw text documents expected, string object received." ) self._warn_for_unused_params() self._validate_ngram_range() self._get_hasher().fit(X, y=y) return self def transform(self, X): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix. """ if isinstance(X, str): raise ValueError( "Iterable over raw text documents expected, string object received." 
) self._validate_ngram_range() analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X def fit_transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : any Ignored. This parameter exists only for compatibility with sklearn.pipeline.Pipeline. Returns ------- X : sparse matrix of shape (n_samples, n_features) Document-term matrix. """ return self.fit(X, y).transform(X) def _get_hasher(self): return FeatureHasher( n_features=self.n_features, input_type="string", dtype=self.dtype, alternate_sign=self.alternate_sign, ) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.string = True tags.input_tags.two_d_array = False tags.requires_fit = False return tags def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.issparse(X) and X.format == "csr": return np.bincount(X.indices, minlength=X.shape[1]) else: return np.diff(X.indptr)
target: HashingVectorizer
language: python
repo: kamyu104__LeetCode-Solutions
path: Python/check-for-contradictions-in-equations.py
class_span: { "start": 1513, "end": 1967 }
source:
class ____(object):
    def checkContradictions(self, equations, values):
        """
        :type equations: List[List[str]]
        :type values: List[float]
        :rtype: bool
        """
        EPS = 1e-5
        uf = UnionFind()
        return any(not uf.union_set(a, b, k) and abs(uf.query_set(a, b)-k) >= EPS
                   for (a, b), k in itertools.izip(equations, values))


# Time: O(e + q)
# Space: O(n)
import collections
import itertools


# dfs

target: Solution
language: python
repo: getsentry__sentry
path: src/sentry/integrations/source_code_management/repository.py
class_span: { "start": 11275, "end": 11804 }
source:
class ____(ABC):
    base_url: str

    @abstractmethod
    def check_file(self, repo: Repository, path: str, version: str | None) -> object | None:
        """Check if the file exists. Currently used for stacktrace linking and CODEOWNERS."""
        raise NotImplementedError

    @abstractmethod
    def get_file(
        self, repo: Repository, path: str, ref: str | None, codeowners: bool = False
    ) -> str:
        """Get the file contents. Currently used for CODEOWNERS."""
        raise NotImplementedError

target: RepositoryClient
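A hedged sketch of a concrete subclass of the ABC above. The in-memory fake, its name, and its storage scheme are invented for illustration, and the `Repository` parameter is left untyped; only the abstract interface comes from the record:

```python
from abc import ABC, abstractmethod


class RepositoryClient(ABC):  # stand-in mirroring the record above, typing omitted
    base_url: str

    @abstractmethod
    def check_file(self, repo, path, version):
        raise NotImplementedError

    @abstractmethod
    def get_file(self, repo, path, ref, codeowners=False):
        raise NotImplementedError


class InMemoryRepositoryClient(RepositoryClient):
    """Invented for illustration: serves files from a dict instead of an API."""

    base_url = "https://example.invalid"

    def __init__(self, files):
        self.files = files  # maps path -> file contents

    def check_file(self, repo, path, version):
        # A truthy object when the path exists, None otherwise.
        return path if path in self.files else None

    def get_file(self, repo, path, ref, codeowners=False):
        return self.files[path]


client = InMemoryRepositoryClient({".github/CODEOWNERS": "* @owners"})
print(client.check_file(None, ".github/CODEOWNERS", None))    # .github/CODEOWNERS
print(client.get_file(None, ".github/CODEOWNERS", ref=None))  # * @owners
```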
language: python
repo: run-llama__llama_index
path: llama-index-utils/llama-index-utils-qianfan/llama_index/utils/qianfan/apis.py
class_span: { "start": 716, "end": 925 }
source:
class ____(BaseModel):
    """
    All model service items.
    """

    common: List[ServiceItem]
    """built-in model service"""

    custom: List[ServiceItem]
    """custom model service"""

target: ServiceListResult
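A hedged sketch of instantiating the model above, assuming pydantic is installed. `ServiceItem` is not shown in this record, so the stand-in below with a single `name` field is an assumption, as are the service names:

```python
from typing import List

from pydantic import BaseModel


class ServiceItem(BaseModel):
    # Assumed shape; the real ServiceItem fields are not part of this record.
    name: str


class ServiceListResult(BaseModel):  # masked name restored from the record above
    """All model service items."""

    common: List[ServiceItem]
    custom: List[ServiceItem]


result = ServiceListResult(
    common=[ServiceItem(name="builtin-chat-service")],
    custom=[ServiceItem(name="my-finetuned-service")],
)
print(result)
```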
language: python
repo: kamyu104__LeetCode-Solutions
path: Python/validate-stack-sequences.py
class_span: { "start": 29, "end": 437 }
source:
class ____(object):
    def validateStackSequences(self, pushed, popped):
        """
        :type pushed: List[int]
        :type popped: List[int]
        :rtype: bool
        """
        i = 0
        s = []
        for v in pushed:
            s.append(v)
            while s and i < len(popped) and s[-1] == popped[i]:
                s.pop()
                i += 1
        return i == len(popped)

target: Solution
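The method greedily pushes each value and pops whenever the stack top matches the next expected pop. A self-contained check against a pair of standard examples; the class is restated from the record:

```python
class Solution(object):  # masked name restored from the record above
    def validateStackSequences(self, pushed, popped):
        i = 0
        s = []
        for v in pushed:
            s.append(v)
            while s and i < len(popped) and s[-1] == popped[i]:
                s.pop()
                i += 1
        return i == len(popped)


print(Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 5, 3, 2, 1]))  # True
print(Solution().validateStackSequences([1, 2, 3, 4, 5], [4, 3, 5, 1, 2]))  # False
```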
language: python
repo: ray-project__ray
path: python/ray/serve/tests/unit/test_proxy.py
class_span: { "start": 26906, "end": 32807 }
source:
class ____: """Test ProxyRouter.match_route_pattern functionality.""" @pytest.fixture def mock_get_handle(self): def _get_handle(endpoint: DeploymentID, info: EndpointInfo): return MockDeploymentHandle(deployment_name=endpoint.name) return _get_handle def test_match_route_pattern_no_patterns(self, mock_get_handle): """Test that match_route_pattern returns route_prefix when no patterns exist.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=None ) } ) scope = {"type": "http", "path": "/api/users/123", "method": "GET"} result = router.match_route_pattern("/api", scope) assert result == "/api" def test_match_route_pattern_with_patterns(self, mock_get_handle): """Test that match_route_pattern matches specific route patterns.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=[ RoutePattern(methods=None, path="/api/"), RoutePattern(methods=None, path="/api/users/{user_id}"), RoutePattern(methods=None, path="/api/items/{item_id}/details"), ], ) } ) # Test matching parameterized route scope = {"type": "http", "path": "/api/users/123", "method": "GET"} result = router.match_route_pattern("/api", scope) assert result == "/api/users/{user_id}" # Test matching nested parameterized route scope = {"type": "http", "path": "/api/items/abc/details", "method": "GET"} result = router.match_route_pattern("/api", scope) assert result == "/api/items/{item_id}/details" # Test matching root scope = {"type": "http", "path": "/api/", "method": "GET"} result = router.match_route_pattern("/api", scope) assert result == "/api/" def test_match_route_pattern_caching(self, mock_get_handle): """Test that mock Starlette apps are cached for performance.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=[ RoutePattern(methods=None, path="/api/users/{user_id}") ], ) } ) scope = {"type": "http", "path": "/api/users/123", "method": "GET"} # First call should create and cache the mock app assert "/api" not in router._route_pattern_apps result1 = router.match_route_pattern("/api", scope) assert result1 == "/api/users/{user_id}" assert "/api" in router._route_pattern_apps # Second call should use cached app cached_app = router._route_pattern_apps["/api"] result2 = router.match_route_pattern("/api", scope) assert result2 == "/api/users/{user_id}" assert router._route_pattern_apps["/api"] is cached_app def test_match_route_pattern_cache_invalidation(self, mock_get_handle): """Test that cache is cleared when routes are updated.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=[ RoutePattern(methods=None, path="/api/users/{user_id}") ], ) } ) scope = {"type": "http", "path": "/api/users/123", "method": "GET"} router.match_route_pattern("/api", scope) assert "/api" in router._route_pattern_apps # Update routes should clear cache router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=[ RoutePattern(methods=None, path="/api/items/{item_id}") ], ) } ) assert len(router._route_pattern_apps) == 0 def test_match_route_pattern_empty_patterns(self, mock_get_handle): """Test that empty pattern list returns route_prefix.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", 
route_patterns=[] ) } ) scope = {"type": "http", "path": "/api/users/123", "method": "GET"} result = router.match_route_pattern("/api", scope) assert result == "/api" def test_match_route_pattern_no_match_fallback(self, mock_get_handle): """Test that unmatched requests fall back to route_prefix.""" router = ProxyRouter(mock_get_handle) router.update_routes( { DeploymentID("api", "default"): EndpointInfo( route="/api", route_patterns=[ RoutePattern(methods=None, path="/api/users/{user_id}") ], ) } ) # Request to path not in patterns scope = {"type": "http", "path": "/api/admin/settings", "method": "GET"} result = router.match_route_pattern("/api", scope) # Should fall back to prefix since no pattern matches assert result == "/api" if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__]))
target: TestProxyRouterMatchRoutePattern
language: python
repo: HypothesisWorks__hypothesis
path: hypothesis-python/tests/typing_extensions/test_backported_types.py
class_span: { "start": 6800, "end": 7770 }
source:
class ____(Book):
    genre: Required[str]
    rating: NotRequired[str]


@pytest.mark.parametrize(
    "check,condition",
    [
        pytest.param(
            assert_all_examples,
            lambda novel: "author" in novel,
            id="author-is-required",
        ),
        pytest.param(
            assert_all_examples, lambda novel: "genre" in novel, id="genre-is-required"
        ),
        pytest.param(
            find_any, lambda novel: "pages" in novel, id="pages-may-be-present"
        ),
        pytest.param(
            find_any, lambda novel: "pages" not in novel, id="pages-may-be-absent"
        ),
        pytest.param(
            find_any, lambda novel: "rating" in novel, id="rating-may-be-present"
        ),
        pytest.param(
            find_any, lambda novel: "rating" not in novel, id="rating-may-be-absent"
        ),
    ],
)
def test_required_and_not_required_keys(check, condition):
    check(from_type(Novel), condition)

target: Novel
language: python
repo: getsentry__sentry
path: tests/sentry/rules/filters/test_issue_occurrences.py
class_span: { "start": 205, "end": 1371 }
source:
class ____(RuleTestCase):
    rule_cls = IssueOccurrencesFilter

    def setUp(self) -> None:
        super().setUp()
        self.event.group.times_seen_pending = 0

    def test_compares_correctly(self) -> None:
        event = self.get_event()
        value = 10
        data = {"value": str(value)}

        rule = self.get_rule(data=data)

        event.group.times_seen = 11
        self.assertPasses(rule, event)

        event.group.times_seen = 10
        self.assertPasses(rule, event)

        event.group.times_seen = 8
        self.assertDoesNotPass(rule, event)

    def test_uses_pending(self) -> None:
        event = self.get_event()
        value = 10
        data = {"value": str(value)}

        rule = self.get_rule(data=data)

        event.group.times_seen = 8
        self.assertDoesNotPass(rule, event)

        event.group.times_seen_pending = 3
        self.assertPasses(rule, event)

    def test_fails_on_bad_data(self) -> None:
        event = self.get_event()
        data = {"value": "bad data"}

        rule = self.get_rule(data=data)

        event.group.times_seen = 10
        self.assertDoesNotPass(rule, event)

target: IssueOccurrencesTest
language: python
repo: catalyst-team__catalyst
path: tests/catalyst/callbacks/test_profiler.py
class_span: { "start": 1105, "end": 1475 }
source:
class ____(nn.Module):
    """Docs."""

    def __init__(self, in_features, out_features):
        """Docs."""
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.layers = nn.Linear(in_features, out_features)

    def forward(self, batch):
        """Docs."""
        return self.layers(batch)

target: DummyModel
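A brief usage sketch of the module above, assuming PyTorch is installed; the batch and feature sizes are arbitrary:

```python
import torch
from torch import nn


class DummyModel(nn.Module):  # masked name restored from the record above
    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.layers = nn.Linear(in_features, out_features)

    def forward(self, batch):
        return self.layers(batch)


model = DummyModel(in_features=4, out_features=2)
batch = torch.rand(8, 4)   # 8 samples with 4 features each
print(model(batch).shape)  # torch.Size([8, 2])
```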
language: python
repo: python-poetry__poetry
path: src/poetry/console/commands/update.py
class_span: { "start": 352, "end": 1788 }
source:
class ____(InstallerCommand):
    name = "update"
    description = (
        "Update the dependencies as according to the <comment>pyproject.toml</> file."
    )

    arguments: ClassVar[list[Argument]] = [
        argument("packages", "The packages to update", optional=True, multiple=True)
    ]
    options: ClassVar[list[Option]] = [
        *InstallerCommand._group_dependency_options(),
        option(
            "sync",
            None,
            "Synchronize the environment with the locked packages and the specified"
            " groups.",
        ),
        option(
            "dry-run",
            None,
            "Output the operations but do not execute anything "
            "(implicitly enables --verbose).",
        ),
        option("lock", None, "Do not perform operations (only update the lockfile)."),
    ]

    loggers: ClassVar[list[str]] = ["poetry.repositories.pypi_repository"]

    def handle(self) -> int:
        packages = self.argument("packages")

        if packages:
            self.installer.whitelist(dict.fromkeys(packages, "*"))

        self.installer.only_groups(self.activated_groups)
        self.installer.dry_run(self.option("dry-run"))
        self.installer.requires_synchronization(self.option("sync"))
        self.installer.execute_operations(not self.option("lock"))

        # Force update
        self.installer.update(True)

        return self.installer.run()

target: UpdateCommand
language: python
repo: redis__redis-py
path: tests/test_pubsub.py
class_span: { "start": 29434, "end": 29656 }
source:
class ____:
    def test_channel_subscribe(self, r):
        r = redis.Redis(host="localhost", port=6390)
        p = r.pubsub()
        with pytest.raises(ConnectionError):
            p.subscribe("foo")

target: TestPubSubRedisDown
language: python
repo: getsentry__sentry
path: tests/sentry/core/endpoints/test_organization_details.py
class_span: { "start": 28589, "end": 64782 }
source:
class ____(OrganizationDetailsTestBase): method = "put" def test_simple(self) -> None: self.get_success_response(self.organization.slug, name="hello world", slug="foobar") org = Organization.objects.get(id=self.organization.id) assert org.name == "hello world" assert org.slug == "foobar" def test_include_feature_flag_query_param(self) -> None: response = self.get_success_response( self.organization.slug, qs_params={"include_feature_flags": 1} ) assert "features" in response.data response = self.get_success_response(self.organization.slug) assert "features" not in response.data def test_dupe_slug(self) -> None: org = self.create_organization(owner=self.user, slug="duplicate") self.get_error_response(self.organization.slug, slug=org.slug, status_code=400) def test_short_slug(self) -> None: self.get_error_response(self.organization.slug, slug="a", status_code=400) def test_reserved_slug(self) -> None: illegal_slug = list(RESERVED_ORGANIZATION_SLUGS)[0] self.get_error_response(self.organization.slug, slug=illegal_slug, status_code=400) def test_valid_slugs(self) -> None: valid_slugs = ["santry", "downtown-canada", "1234-foo"] for slug in valid_slugs: self.organization.refresh_from_db() self.get_success_response(self.organization.slug, slug=slug) def test_invalid_slugs(self) -> None: self.get_error_response(self.organization.slug, slug=" i have whitespace ", status_code=400) self.get_error_response(self.organization.slug, slug="foo-bar ", status_code=400) self.get_error_response(self.organization.slug, slug="bird-company!", status_code=400) self.get_error_response(self.organization.slug, slug="downtown_canada", status_code=400) self.get_error_response(self.organization.slug, slug="canada-", status_code=400) self.get_error_response(self.organization.slug, slug="-canada", status_code=400) self.get_error_response(self.organization.slug, slug="----", status_code=400) self.get_error_response(self.organization.slug, slug="1234", status_code=400) self.get_error_response(self.organization.slug, slug="I-contain-UPPERCASE", status_code=400) def test_upload_avatar(self) -> None: data = { "avatarType": "upload", "avatar": b64encode(self.load_fixture("avatar.jpg")), } self.get_success_response(self.organization.slug, **data) avatar = OrganizationAvatar.objects.get(organization=self.organization) assert avatar.get_avatar_type_display() == "upload" assert avatar.file_id @responses.activate @patch( "sentry.integrations.github.client.GitHubBaseClient.get_repos", return_value=[{"name": "cool-repo", "full_name": "testgit/cool-repo"}], ) @with_feature(["organizations:codecov-integration", "organizations:dynamic-sampling-custom"]) def test_various_options(self, mock_get_repositories: MagicMock) -> None: self.organization.update_option("sentry:sampling_mode", DynamicSamplingMode.PROJECT.value) initial = self.organization.get_audit_log_data() with assume_test_silo_mode_of(AuditLogEntry): AuditLogEntry.objects.filter(organization_id=self.organization.id).delete() self.create_integration( organization=self.organization, provider="github", external_id="extid" ) responses.add( responses.GET, "https://api.codecov.io/api/v2/github/testgit", status=200, ) data = { "openMembership": False, "isEarlyAdopter": True, "codecovAccess": True, "allowSuperuserAccess": False, "allowMemberInvite": False, "hideAiFeatures": True, "githubNudgeInvite": True, "githubPRBot": True, "gitlabPRBot": True, "allowSharedIssues": False, "enhancedPrivacy": True, "dataScrubber": True, "dataScrubberDefaults": True, "sensitiveFields": ["password"], 
"eventsMemberAdmin": False, "alertsMemberWrite": False, "safeFields": ["email"], "storeCrashReports": 10, "scrubIPAddresses": True, "scrapeJavaScript": False, "defaultRole": "owner", "require2FA": True, "allowJoinRequests": False, "issueAlertsThreadFlag": False, "metricAlertsThreadFlag": False, "targetSampleRate": 0.1, "samplingMode": "organization", "rollbackEnabled": True, "streamlineOnly": None, } # needed to set require2FA interface = TotpInterface() with assume_test_silo_mode_of(Authenticator): interface.enroll(self.user) assert self.user.has_2fa() with outbox_runner(): self.get_success_response(self.organization.slug, **data) org = Organization.objects.get(id=self.organization.id) assert initial != org.get_audit_log_data() assert org.flags.early_adopter assert org.flags.codecov_access assert org.flags.prevent_superuser_access assert org.flags.disable_member_invite assert not org.flags.allow_joinleave assert org.flags.disable_shared_issues assert org.flags.enhanced_privacy assert org.flags.require_2fa assert org.default_role == "owner" options = {o.key: o.value for o in OrganizationOption.objects.filter(organization=org)} assert options.get("sentry:require_scrub_defaults") assert options.get("sentry:require_scrub_data") assert options.get("sentry:require_scrub_ip_address") assert options.get("sentry:sensitive_fields") == ["password"] assert options.get("sentry:safe_fields") == ["email"] assert options.get("sentry:store_crash_reports") == 10 assert options.get("sentry:scrape_javascript") is False assert options.get("sentry:join_requests") is False assert options.get("sentry:events_member_admin") is False assert options.get("sentry:target_sample_rate") == 0.1 assert options.get("sentry:sampling_mode") == "organization" assert options.get("sentry:rollback_enabled") is True assert options.get("sentry:hide_ai_features") is True # log created with assume_test_silo_mode_of(AuditLogEntry): log = AuditLogEntry.objects.get(organization_id=org.id) assert audit_log.get(log.event).api_name == "org.edit" # org fields & flags assert "to {}".format(data["defaultRole"]) in log.data["default_role"] assert "to {}".format(data["openMembership"]) in log.data["allow_joinleave"] assert "to {}".format(data["isEarlyAdopter"]) in log.data["early_adopter"] assert "to {}".format(data["codecovAccess"]) in log.data["codecov_access"] assert ( "to {}".format(not data["allowSuperuserAccess"]) in log.data["prevent_superuser_access"] ) assert "to {}".format(not data["allowMemberInvite"]) in log.data["disable_member_invite"] assert "to {}".format(data["enhancedPrivacy"]) in log.data["enhanced_privacy"] assert "to {}".format(not data["allowSharedIssues"]) in log.data["disable_shared_issues"] assert "to {}".format(data["require2FA"]) in log.data["require_2fa"] # org options assert "to {}".format(data["dataScrubber"]) in log.data["dataScrubber"] assert "to {}".format(data["dataScrubberDefaults"]) in log.data["dataScrubberDefaults"] assert "to {}".format(data["sensitiveFields"]) in log.data["sensitiveFields"] assert "to {}".format(data["safeFields"]) in log.data["safeFields"] assert "to {}".format(data["storeCrashReports"]) in log.data["storeCrashReports"] assert "to {}".format(data["scrubIPAddresses"]) in log.data["scrubIPAddresses"] assert "to {}".format(data["scrapeJavaScript"]) in log.data["scrapeJavaScript"] assert "to {}".format(data["allowJoinRequests"]) in log.data["allowJoinRequests"] assert "to {}".format(data["eventsMemberAdmin"]) in log.data["eventsMemberAdmin"] assert "to {}".format(data["alertsMemberWrite"]) 
in log.data["alertsMemberWrite"] assert "to {}".format(data["hideAiFeatures"]) in log.data["hideAiFeatures"] assert "to {}".format(data["githubPRBot"]) in log.data["githubPRBot"] assert "to {}".format(data["githubNudgeInvite"]) in log.data["githubNudgeInvite"] assert "to {}".format(data["gitlabPRBot"]) in log.data["gitlabPRBot"] assert "to {}".format(data["issueAlertsThreadFlag"]) in log.data["issueAlertsThreadFlag"] assert "to {}".format(data["metricAlertsThreadFlag"]) in log.data["metricAlertsThreadFlag"] assert "to Default Mode" in log.data["samplingMode"] @responses.activate @patch( "sentry.integrations.github.client.GitHubBaseClient.get_repos", return_value=[{"name": "abc", "full_name": "testgit/abc"}], ) @with_feature("organizations:codecov-integration") def test_setting_codecov_without_integration_forbidden( self, mock_get_repositories: MagicMock ) -> None: responses.add( responses.GET, "https://api.codecov.io/api/v2/github/testgit", status=404, ) data = {"codecovAccess": True} self.get_error_response(self.organization.slug, status_code=400, **data) def test_setting_codecov_without_paid_plan_forbidden(self) -> None: data = {"codecovAccess": True} self.get_error_response(self.organization.slug, status_code=403, **data) def test_setting_duplicate_trusted_keys(self) -> None: """ Test that you cannot set duplicated keys Try to put the same key twice and check we get an error """ with assume_test_silo_mode_of(AuditLogEntry): AuditLogEntry.objects.filter(organization_id=self.organization.id).delete() trusted_relays = [ { "publicKey": _VALID_RELAY_KEYS[0], "name": "name1", "description": "description1", }, { "publicKey": _VALID_RELAY_KEYS[1], "name": "name2", "description": "description2", }, { "publicKey": _VALID_RELAY_KEYS[0], "name": "name1 2", "description": "description1 2", }, ] data = {"trustedRelays": trusted_relays} response = self.get_error_response(self.organization.slug, status_code=400, **data) response_data = response.data.get("trustedRelays") assert response_data is not None resp_str = orjson.dumps(response_data).decode() # check that we have the duplicate key specified somewhere in the error message assert resp_str.find(_VALID_RELAY_KEYS[0]) >= 0 def test_creating_trusted_relays(self) -> None: with assume_test_silo_mode_of(AuditLogEntry): AuditLogEntry.objects.filter(organization_id=self.organization.id).delete() trusted_relays = [ { "publicKey": _VALID_RELAY_KEYS[0], "name": "name1", "description": "description1", }, { "publicKey": _VALID_RELAY_KEYS[1], "name": "name2", "description": "description2", }, ] data = {"trustedRelays": trusted_relays} with outbox_runner(): start_time = timezone.now() response = self.get_success_response(self.organization.slug, **data) end_time = timezone.now() response_data = response.data.get("trustedRelays") actual = get_trusted_relay_value(self.organization) assert len(actual) == len(trusted_relays) assert len(response_data) == len(trusted_relays) for i in range(len(actual)): assert actual[i]["public_key"] == trusted_relays[i]["publicKey"] assert actual[i]["name"] == trusted_relays[i]["name"] assert actual[i]["description"] == trusted_relays[i]["description"] assert response_data[i]["publicKey"] == trusted_relays[i]["publicKey"] assert response_data[i]["name"] == trusted_relays[i]["name"] assert response_data[i]["description"] == trusted_relays[i]["description"] # check that last_modified is in the correct range last_modified = datetime.fromisoformat(actual[i]["last_modified"]) assert start_time < last_modified < end_time assert 
response_data[i]["lastModified"] == actual[i]["last_modified"] # check that created is in the correct range created = datetime.fromisoformat(actual[i]["created"]) assert start_time < created < end_time assert response_data[i]["created"] == actual[i]["created"] with assume_test_silo_mode_of(AuditLogEntry): log = AuditLogEntry.objects.get(organization_id=self.organization.id) trusted_relay_log = log.data["trustedRelays"] assert trusted_relay_log is not None # check that we log a new trusted-relays entry assert trusted_relay_log.startswith("to ") # check that we have the public keys somewhere in the log message assert trusted_relays[0]["publicKey"] in trusted_relay_log assert trusted_relays[1]["publicKey"] in trusted_relay_log def test_modifying_trusted_relays(self) -> None: with assume_test_silo_mode_of(AuditLogEntry): AuditLogEntry.objects.filter(organization_id=self.organization.id).delete() initial_trusted_relays = [ { "publicKey": _VALID_RELAY_KEYS[0], "name": "name1", "description": "description1", }, { "publicKey": _VALID_RELAY_KEYS[1], "name": "name2", "description": "description2", }, { "publicKey": _VALID_RELAY_KEYS[2], "name": "name3", "description": "description3", }, ] modified_trusted_relays = [ # key1 was removed # key2 is not modified { "publicKey": _VALID_RELAY_KEYS[1], "name": "name2", "description": "description2", }, # key3 modified name & desc { "publicKey": _VALID_RELAY_KEYS[2], "name": "name3 modified", "description": "description3 modified", }, # key4 is new { "publicKey": _VALID_RELAY_KEYS[3], "name": "name4", "description": "description4", }, ] initial_settings = {"trustedRelays": initial_trusted_relays} changed_settings = {"trustedRelays": modified_trusted_relays} with outbox_runner(): start_time = timezone.now() self.get_success_response(self.organization.slug, **initial_settings) after_initial = timezone.now() self.get_success_response(self.organization.slug, **changed_settings) after_final = timezone.now() actual = get_trusted_relay_value(self.organization) assert len(actual) == len(modified_trusted_relays) for i in range(len(actual)): assert actual[i]["public_key"] == modified_trusted_relays[i]["publicKey"] assert actual[i]["name"] == modified_trusted_relays[i]["name"] assert actual[i]["description"] == modified_trusted_relays[i]["description"] last_modified = datetime.fromisoformat(actual[i]["last_modified"]) created = datetime.fromisoformat(actual[i]["created"]) key = modified_trusted_relays[i]["publicKey"] if key == _VALID_RELAY_KEYS[1]: # key2 should have not been modified assert start_time < created < after_initial assert start_time < last_modified < after_initial elif key == _VALID_RELAY_KEYS[2]: # key3 should have been updated assert start_time < created < after_initial assert after_initial < last_modified < after_final elif key == _VALID_RELAY_KEYS[3]: # key4 is new assert after_initial < created < after_final assert after_initial < last_modified < after_final # we should have 2 log messages from the two calls with assume_test_silo_mode_of(AuditLogEntry): (first_log, second_log) = AuditLogEntry.objects.filter( organization_id=self.organization.id ) log_str_1 = first_log.data["trustedRelays"] log_str_2 = second_log.data["trustedRelays"] assert log_str_1 is not None assert log_str_2 is not None if log_str_1.startswith("to "): modif_log = log_str_2 else: modif_log = log_str_1 assert modif_log.startswith("from ") # check that we have the new public keys somewhere in the modify operation log message for i in range(len(modified_trusted_relays)): assert 
modified_trusted_relays[i]["publicKey"] in modif_log def test_deleting_trusted_relays(self) -> None: with assume_test_silo_mode_of(AuditLogEntry): AuditLogEntry.objects.filter(organization_id=self.organization.id).delete() initial_trusted_relays = [ { "publicKey": _VALID_RELAY_KEYS[0], "name": "name1", "description": "description1", }, ] initial_settings = {"trustedRelays": initial_trusted_relays} changed_settings: dict[str, Any] = {"trustedRelays": []} self.get_success_response(self.organization.slug, **initial_settings) response = self.get_success_response(self.organization.slug, **changed_settings) response_data = response.data.get("trustedRelays") actual = get_trusted_relay_value(self.organization) assert len(actual) == 0 assert len(response_data) == 0 def test_safe_fields_as_string_regression(self) -> None: data = {"safeFields": "email"} self.get_error_response(self.organization.slug, status_code=400, **data) org = Organization.objects.get(id=self.organization.id) options = {o.key: o.value for o in OrganizationOption.objects.filter(organization=org)} assert not options.get("sentry:safe_fields") def test_manager_cannot_set_default_role(self) -> None: org = self.create_organization(owner=self.user) user = self.create_user("baz@example.com") self.create_member(organization=org, user=user, role="manager") self.login_as(user=user) self.get_success_response(org.slug, **{"defaultRole": "owner"}) org = Organization.objects.get(id=org.id) assert org.default_role == "member" def test_empty_string_in_array_safe_fields(self) -> None: self.get_error_response(self.organization.slug, status_code=400, **{"safeFields": [""]}) org = Organization.objects.get(id=self.organization.id) options = {o.key: o.value for o in OrganizationOption.objects.filter(organization=org)} assert not options.get("sentry:safe_fields") def test_empty_string_in_array_sensitive_fields(self) -> None: OrganizationOption.objects.set_value( self.organization, "sentry:sensitive_fields", ["foobar"] ) self.get_error_response( self.organization.slug, status_code=400, **{"sensitiveFields": [""]} ) org = Organization.objects.get(id=self.organization.id) options = {o.key: o.value for o in OrganizationOption.objects.filter(organization=org)} assert options.get("sentry:sensitive_fields") == ["foobar"] def test_empty_sensitive_fields(self) -> None: OrganizationOption.objects.set_value( self.organization, "sentry:sensitive_fields", ["foobar"] ) self.get_success_response(self.organization.slug, **{"sensitiveFields": []}) org = Organization.objects.get(id=self.organization.id) options = {o.key: o.value for o in OrganizationOption.objects.filter(organization=org)} assert not options.get("sentry:sensitive_fields") def test_cancel_delete(self) -> None: org = self.create_organization(owner=self.user, status=OrganizationStatus.PENDING_DELETION) RegionScheduledDeletion.schedule(org, days=1) self.get_success_response(org.slug, **{"cancelDeletion": True}) org = Organization.objects.get(id=org.id) assert org.status == OrganizationStatus.ACTIVE assert not RegionScheduledDeletion.objects.filter( model_name="Organization", object_id=org.id ).exists() def test_relay_pii_config(self) -> None: value = '{"applications": {"freeform": []}}' response = self.get_success_response(self.organization.slug, **{"relayPiiConfig": value}) assert self.organization.get_option("sentry:relay_pii_config") == value assert response.data["relayPiiConfig"] == value def test_store_crash_reports_exceeded(self) -> None: # Uses a hard-coded number of MAX + 1 for regression testing. 
# # DO NOT INCREASE this number without checking the logic in event # manager's ``get_stored_crashreports`` function. Increasing this number # causes more load on postgres during ingestion. data = {"storeCrashReports": 101} resp = self.get_error_response(self.organization.slug, status_code=400, **data) assert self.organization.get_option("sentry:store_crash_reports") is None assert b"storeCrashReports" in resp.content def test_update_name_with_mapping_and_slug_reservation(self) -> None: response = self.get_success_response(self.organization.slug, name="SaNtRy") organization_id = response.data["id"] org = Organization.objects.get(id=organization_id) assert org.name == "SaNtRy" with assume_test_silo_mode_of(OrganizationMapping): assert OrganizationMapping.objects.filter( organization_id=organization_id, name="SaNtRy" ).exists() def test_update_slug(self) -> None: with outbox_runner(): pass with assume_test_silo_mode_of(OrganizationMapping, OrganizationSlugReservation): organization_mapping = OrganizationMapping.objects.get( organization_id=self.organization.id, ) org_slug_res = OrganizationSlugReservation.objects.get( organization_id=self.organization.id, slug=self.organization.slug ) assert organization_mapping.slug == self.organization.slug desired_slug = "new-santry" self.get_success_response(self.organization.slug, slug=desired_slug) self.organization.refresh_from_db() assert self.organization.slug == desired_slug organization_mapping.refresh_from_db() assert organization_mapping.slug == desired_slug org_slug_res.refresh_from_db() assert org_slug_res.slug == desired_slug def test_org_mapping_already_taken(self) -> None: self.create_organization(slug="taken") self.get_error_response(self.organization.slug, slug="taken", status_code=400) def test_target_sample_rate_feature(self) -> None: with self.feature("organizations:dynamic-sampling-custom"): data = {"targetSampleRate": 0.1} self.get_success_response(self.organization.slug, **data) with self.feature({"organizations:dynamic-sampling-custom": False}): data = {"targetSampleRate": 0.1} self.get_error_response(self.organization.slug, status_code=400, **data) def test_ingest_through_trusted_relays_only_option(self) -> None: # by default option is not set assert self.organization.get_option("sentry:ingest-through-trusted-relays-only") is None with self.feature("organizations:ingest-through-trusted-relays-only"): data = {"ingestThroughTrustedRelaysOnly": "enabled"} self.get_success_response(self.organization.slug, **data) assert ( self.organization.get_option("sentry:ingest-through-trusted-relays-only") == "enabled" ) with self.feature("organizations:ingest-through-trusted-relays-only"): data = {"ingestThroughTrustedRelaysOnly": "invalid"} self.get_error_response(self.organization.slug, status_code=400, **data) with self.feature({"organizations:ingest-through-trusted-relays-only": False}): data = {"ingestThroughTrustedRelaysOnly": "enabled"} self.get_error_response(self.organization.slug, status_code=400, **data) @with_feature("organizations:ingest-through-trusted-relays-only") def test_get_ingest_through_trusted_relays_only_option(self) -> None: response = self.get_success_response(self.organization.slug) assert response.data["ingestThroughTrustedRelaysOnly"] == "disabled" def test_get_ingest_through_trusted_relays_only_option_without_feature(self) -> None: response = self.get_success_response(self.organization.slug) assert "ingestThroughTrustedRelaysOnly" not in response.data @with_feature("organizations:dynamic-sampling-custom") def 
test_target_sample_rate_range(self) -> None: # low, within and high data = {"targetSampleRate": 0.0} self.get_success_response(self.organization.slug, **data) data = {"targetSampleRate": 0.1} self.get_success_response(self.organization.slug, **data) data = {"targetSampleRate": 1.0} self.get_success_response(self.organization.slug, **data) # below range data = {"targetSampleRate": -0.1} self.get_error_response(self.organization.slug, status_code=400, **data) # above range data = {"targetSampleRate": 1.1} self.get_error_response(self.organization.slug, status_code=400, **data) def test_sampling_mode_feature(self) -> None: with self.feature("organizations:dynamic-sampling-custom"): data = {"samplingMode": "project"} self.get_success_response(self.organization.slug, **data) with self.feature({"organizations:dynamic-sampling-custom": False}): data = {"samplingMode": "project"} self.get_error_response(self.organization.slug, status_code=400, **data) @with_feature("organizations:dynamic-sampling-custom") def test_sampling_mode_values(self) -> None: # project data = {"samplingMode": "project"} self.get_success_response(self.organization.slug, **data) # organization data = {"samplingMode": "organization"} self.get_success_response(self.organization.slug, **data) # invalid data = {"samplingMode": "invalid"} self.get_error_response(self.organization.slug, status_code=400, **data) def test_default_autofix_automation_tuning(self) -> None: data = {"defaultAutofixAutomationTuning": "high"} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:default_autofix_automation_tuning") == "high" def test_default_seer_scanner_automation(self) -> None: data = {"defaultSeerScannerAutomation": True} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:default_seer_scanner_automation") is True def test_enabled_console_platforms_present_in_response(self) -> None: response = self.get_success_response(self.organization.slug) assert "enabledConsolePlatforms" in response.data assert response.data["enabledConsolePlatforms"] == [] def test_enabled_console_platforms_no_staff_member(self) -> None: data = {"enabledConsolePlatforms": ["playstation", "xbox"]} response = self.get_error_response(self.organization.slug, status_code=400, **data) assert response.data["enabledConsolePlatforms"] == [ "Only staff members can toggle console platforms." 
] def test_enabled_console_platforms_multiple_platforms_parameter(self) -> None: staff_user = self.create_user(is_staff=True) self.create_member(organization=self.organization, user=staff_user, role="owner") self.login_as(user=staff_user, staff=True) data = {"enabledConsolePlatforms": ["playstation", "xbox"]} self.get_success_response(self.organization.slug, **data) enabled_platforms = self.organization.get_option("sentry:enabled_console_platforms") assert len(enabled_platforms) == 2 and set(enabled_platforms) == {"playstation", "xbox"} with outbox_runner(): pass with assume_test_silo_mode_of(AuditLogEntry): audit_entry = AuditLogEntry.objects.get( event=audit_log.get_event_id("ORG_CONSOLE_PLATFORM_EDIT") ) audit_log_event = audit_log.get(audit_entry.event) assert audit_log_event.render(audit_entry) == "Enabled platforms: PlayStation, Xbox" # Verify console platforms are NOT in the main ORG_EDIT audit log # (Since this test only changes console platforms, there should be no ORG_EDIT log) org_edit_logs = AuditLogEntry.objects.filter( organization_id=self.organization.id, event=audit_log.get_event_id("ORG_EDIT") ) assert org_edit_logs.count() == 0 def test_enabled_console_platforms_empty_platforms_parameter(self) -> None: staff_user = self.create_user(is_staff=True) self.create_member(organization=self.organization, user=staff_user, role="owner") self.login_as(user=staff_user, staff=True) self.organization.update_option( "sentry:enabled_console_platforms", ["playstation", "nintendo-switch"] ) data: dict[str, list[str]] = {"enabledConsolePlatforms": []} self.get_success_response(self.organization.slug, **data) enabled_platforms = self.organization.get_option("sentry:enabled_console_platforms") assert enabled_platforms == [] with outbox_runner(): pass with assume_test_silo_mode_of(AuditLogEntry): audit_entry = AuditLogEntry.objects.get( event=audit_log.get_event_id("ORG_CONSOLE_PLATFORM_EDIT") ) audit_log_event = audit_log.get(audit_entry.event) assert ( audit_log_event.render(audit_entry) == "Disabled platforms: Nintendo Switch, PlayStation" ) def test_enabled_console_platforms_duplicate_platform_parameter(self) -> None: staff_user = self.create_user(is_staff=True) self.create_member(organization=self.organization, user=staff_user, role="owner") self.login_as(user=staff_user, staff=True) data = {"enabledConsolePlatforms": ["playstation", "playstation"]} self.get_success_response(self.organization.slug, **data) enabled_platforms = self.organization.get_option("sentry:enabled_console_platforms") assert enabled_platforms == ["playstation"] with outbox_runner(): pass with assume_test_silo_mode_of(AuditLogEntry): audit_entry = AuditLogEntry.objects.get( event=audit_log.get_event_id("ORG_CONSOLE_PLATFORM_EDIT") ) audit_log_event = audit_log.get(audit_entry.event) assert audit_log_event.render(audit_entry) == "Enabled platforms: PlayStation" def test_enabled_and_disabled_console_platforms(self) -> None: staff_user = self.create_user(is_staff=True) self.create_member(organization=self.organization, user=staff_user, role="owner") self.login_as(user=staff_user, staff=True) self.organization.update_option("sentry:enabled_console_platforms", ["nintendo-switch"]) data = {"enabledConsolePlatforms": ["playstation", "xbox"]} self.get_success_response(self.organization.slug, **data) enabled_platforms = self.organization.get_option("sentry:enabled_console_platforms") assert set(enabled_platforms) == {"playstation", "xbox"} with outbox_runner(): pass with assume_test_silo_mode_of(AuditLogEntry): audit_entry = 
AuditLogEntry.objects.get( event=audit_log.get_event_id("ORG_CONSOLE_PLATFORM_EDIT") ) audit_log_event = audit_log.get(audit_entry.event) assert ( audit_log_event.render(audit_entry) == "Enabled platforms: PlayStation, Xbox; Disabled platforms: Nintendo Switch" ) def test_enable_pr_review_test_generation_default_false(self) -> None: response = self.get_success_response(self.organization.slug) assert response.data["enablePrReviewTestGeneration"] is False def test_enable_pr_review_test_generation_can_be_disabled(self) -> None: data = {"enablePrReviewTestGeneration": False} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_pr_review_test_generation") is False def test_enable_pr_review_test_generation_can_be_enabled(self) -> None: # First disable it self.organization.update_option("sentry:enable_pr_review_test_generation", False) data = {"enablePrReviewTestGeneration": True} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_pr_review_test_generation") is True def test_enable_seer_enhanced_alerts_default_true(self) -> None: response = self.get_success_response(self.organization.slug) assert response.data["enableSeerEnhancedAlerts"] is True def test_enable_seer_enhanced_alerts_can_be_disabled(self) -> None: data = {"enableSeerEnhancedAlerts": False} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_seer_enhanced_alerts") is False def test_enable_seer_enhanced_alerts_can_be_enabled(self) -> None: # First disable it self.organization.update_option("sentry:enable_seer_enhanced_alerts", False) data = {"enableSeerEnhancedAlerts": True} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_seer_enhanced_alerts") is True def test_enable_seer_coding_default_true(self) -> None: response = self.get_success_response(self.organization.slug) assert response.data["enableSeerCoding"] is True def test_enable_seer_coding_can_be_disabled(self) -> None: data = {"enableSeerCoding": False} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_seer_coding") is False def test_enable_seer_coding_can_be_enabled(self) -> None: # First disable it self.organization.update_option("sentry:enable_seer_coding", False) data = {"enableSeerCoding": True} self.get_success_response(self.organization.slug, **data) assert self.organization.get_option("sentry:enable_seer_coding") is True
OrganizationUpdateTest
python
huggingface__transformers
src/transformers/models/vit_msn/modeling_vit_msn.py
{ "start": 14724, "end": 15266 }
class ____(nn.Module):
    def __init__(self, config: ViTMSNConfig):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
ViTMSNEncoder
python
ray-project__ray
python/ray/data/_internal/execution/progress_manager.py
{ "start": 1220, "end": 2768 }
class ____(str, Enum):
    NONE = "NONE"  # no-op
    GLOBAL_ONLY = "GLOBAL_ONLY"  # global progress
    ALL = "ALL"  # show everything

    def show_op(self) -> bool:
        return self == self.ALL

    def is_enabled(self) -> bool:
        return self != self.NONE

    @classmethod
    def get_mode(cls) -> "_ManagerMode":
        from ray.data.context import DataContext

        ctx = DataContext.get_current()
        if not ctx.enable_progress_bars:
            if log_once("ray_data_progress_manager_disabled"):
                logger.warning(
                    "Progress bars disabled. To enable, set "
                    "`ray.data.DataContext.get_current()."
                    "enable_progress_bars = True`."
                )
            return cls.NONE
        elif rich is None:
            global needs_rich_warning
            if needs_rich_warning:
                print(
                    "[dataset]: Run `pip install rich` to enable "
                    "execution progress reporting."
                )
                needs_rich_warning = False
            return cls.NONE
        elif not ctx.enable_operator_progress_bars:
            if log_once("ray_data_progress_manager_global"):
                logger.warning(
                    "Progress bars for operators disabled. To enable, "
                    "set `ray.data.DataContext.get_current()."
                    "enable_operator_progress_bars = True`."
                )
            return cls.GLOBAL_ONLY
        else:
            return cls.ALL
_ManagerMode
python
pytorch__pytorch
test/inductor/test_ordered_set.py
{ "start": 34794, "end": 35340 }
class ____(TestCase):
    def test_constructor(self):
        inner = frozenset([1])
        outer = OrderedSet([inner])
        element = outer.pop()
        self.assertEqual(type(element), frozenset)
        outer.add(inner)  # Rebuild OrderedSet of sets with .add method
        outer.remove(inner)
        self.assertEqual(outer, OrderedSet())  # Verify that remove worked
        outer.discard(inner)  # Absence of KeyError indicates working fine


# ==============================================================================
TestSetOfSets
python
automl__auto-sklearn
autosklearn/ensemble_building/manager.py
{ "start": 799, "end": 15177 }
class ____(IncorporateRunResultCallback): def __init__( self, backend: Backend, dataset_name: str, task: int, metrics: Sequence[Scorer], time_left_for_ensembles: float = np.inf, max_iterations: int | None = None, pynisher_context: str = "fork", ensemble_class: Type[AbstractEnsemble] = EnsembleSelection, ensemble_kwargs: Dict[str, Any] | None = None, ensemble_nbest: int | float = 50, max_models_on_disc: int | float | None = None, seed: int = 1, precision: int = 32, memory_limit: int | None = None, read_at_most: int | None = None, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, start_time: float | None = None, ): """SMAC callback to handle ensemble building Parameters ---------- backend: Backend backend to write and read files dataset_name: str name of dataset task: int Type of ML task metrics: Sequence[Scorer] Metrics to optimize the ensemble for time_left_for_ensemble: float = np.inf How much time is left for the task in seconds. Job should finish within this allocated time max_iterations: int | None = None maximal number of iterations to run this script. None indicates no limit on iterations. pynisher_context: "spawn" | "fork" | "forkserver" = "fork" The multiprocessing context for pynisher. ensemble_class : Type[AbstractEnsemble] (default=EnsembleSelection) Class implementing the post-hoc ensemble algorithm. Set to ``None`` to disable ensemble building or use ``SingleBest`` to obtain only use the single best model instead of an ensemble. ensemble_kwargs : Dict, optional Keyword arguments that are passed to the ensemble class upon initialization. ensemble_nbest: int | float = 50 If int: consider only the n best prediction If float: consider only this fraction of the best models max_models_on_disc: int | float | None = None Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max number of models to keep. If float, it will be interpreted as the max megabytes allowed of disc space. That is, if the number of ensemble candidates require more disc space than this float value, the worst models will be deleted to keep within this budget. Models and predictions of the worst-performing models will be deleted then. If None, the feature is disabled. It defines an upper bound on the models that can be used in the ensemble. seed: int = 1 Seed used for the inidividual runs precision: 16 | 32 | 64 | 128 = 32 Precision of floats to read the predictions memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. read_at_most: int | None = None read at most n new prediction files in each iteration. If `None`, will read the predictions and calculate losses for all runs that require it. logger_port: int = DEFAULT_TCP_LOGGING_PORT Port that receives logging records start_time: float | None = None DISABLED: Just using time.time() to set it The time when this job was started, to account for any latency in job allocation. 
""" self.time_left_for_ensembles = time_left_for_ensembles self.backend = backend self.dataset_name = dataset_name self.task = task self.metrics = metrics self.ensemble_class = ensemble_class self.ensemble_kwargs = ensemble_kwargs self.ensemble_nbest = ensemble_nbest self.max_models_on_disc = max_models_on_disc self.seed = seed self.precision = precision self.max_iterations = max_iterations self.read_at_most = read_at_most self.memory_limit = memory_limit self.random_state = check_random_state(random_state) self.logger_port = logger_port self.pynisher_context = pynisher_context # Store something similar to SMAC's runhistory self.history: list[dict[str, Any]] = [] # We only submit new ensembles when there is not an active ensemble job self.futures: list[dask.distributed.Future] = [] # The last criteria is the number of iterations self.iteration = 0 # Keep track of when we started to know when we need to finish! self.start_time = time.time() def __call__( self, smbo: "SMBO", run_info: RunInfo, result: RunValue, time_left: float, ) -> None: """ Returns ------- List[Tuple[int, float, float, float]]: A list with the performance history of this ensemble, of the form [(pandas_timestamp, train_performance, val_performance, test_performance)] """ if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop: return self.build_ensemble(smbo.tae_runner.client) def build_ensemble( self, dask_client: dask.distributed.Client, ) -> None: """Build the ensemble Parameters ---------- dask_client: dask.distributed.Client The dask client to use """ # The second criteria is elapsed time elapsed_time = time.time() - self.start_time logger = get_named_client_logger( name="EnsembleBuilder", port=self.logger_port, ) # First test for termination conditions if self.time_left_for_ensembles < elapsed_time: logger.info( "Terminate ensemble building as not time is left (run for {}s)".format( elapsed_time ), ) return if self.max_iterations is not None and self.max_iterations <= self.iteration: logger.info( "Terminate ensemble building because of max iterations:" f" {self.max_iterations} of {self.iteration}" ) return if len(self.futures) != 0: if self.futures[0].done(): result = self.futures.pop().result() if result: ensemble_history, self.ensemble_nbest = result logger.debug( f"iteration={self.iteration} @ elapsed_time={elapsed_time}" f" has history={ensemble_history}" ) self.history.extend(ensemble_history) # Only submit new jobs if the previous ensemble job finished if len(self.futures) == 0: # Add the result of the run # On the next while iteration, no references to # ensemble builder object, so it should be garbage collected to # save memory while waiting for resources # Also, notice how ensemble nbest is returned, so we don't waste # iterations testing if the deterministic predictions size can # be fitted in memory try: # Submit a Dask job from this job, to properly # see it in the dask diagnostic dashboard # Notice that the forked ensemble_builder_process will # wait for the below function to be done self.futures.append( dask_client.submit( EnsembleBuilderManager.fit_and_return_ensemble, backend=self.backend, dataset_name=self.dataset_name, task_type=self.task, metrics=self.metrics, ensemble_class=self.ensemble_class, ensemble_kwargs=self.ensemble_kwargs, ensemble_nbest=self.ensemble_nbest, max_models_on_disc=self.max_models_on_disc, seed=self.seed, precision=self.precision, memory_limit=self.memory_limit, read_at_most=self.read_at_most, random_state=self.random_state, end_at=self.start_time + 
self.time_left_for_ensembles, iteration=self.iteration, pynisher_context=self.pynisher_context, logger_port=self.logger_port, ) ) logger.info( "{}/{} Started Ensemble builder job at {} for iteration {}.".format( # Log the client to make sure we # remain connected to the scheduler self.futures[0], dask_client, time.strftime("%Y.%m.%d-%H.%M.%S"), self.iteration, ), ) self.iteration += 1 except Exception as e: exception_traceback = traceback.format_exc() error_message = repr(e) logger.critical(exception_traceback) logger.critical(error_message) @staticmethod def fit_and_return_ensemble( iteration: int, end_at: float, backend: Backend, dataset_name: str, task_type: int, metrics: Sequence[Scorer], pynisher_context: str, ensemble_class: Type[AbstractEnsemble] = EnsembleSelection, ensemble_kwargs: Dict[str, Any] | None = None, ensemble_nbest: int | float = 50, max_models_on_disc: int | float | None = None, seed: int = 1, precision: int = 32, memory_limit: int | None = None, read_at_most: int | None = None, logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT, random_state: int | np.random.RandomState | None = None, ) -> tuple[list[dict[str, Any]], int | float]: """ A short function to fit and create an ensemble. It is just a wrapper to easily send a request to dask to create an ensemble and clean the memory when finished Parameters ---------- iteration: int The current iteration end_at: float At what time the job must finish. Needs to be the endtime and not the time left because we do not know when dask schedules the job. backend: Backend Backend to write and read files dataset_name: str name of dataset task_type: int type of ML task metrics: Sequence[Scorer] Metrics to optimize the ensemble for. pynisher_context: "fork" | "spawn" | "forkserver" = "fork" Context to use for multiprocessing, can be either fork, spawn or forkserver. ensemble_class : Type[AbstractEnsemble] (default=EnsembleSelection) Class implementing the post-hoc ensemble algorithm. Set to ``None`` to disable ensemble building or use ``SingleBest`` to obtain only use the single best model instead of an ensemble. ensemble_kwargs : Dict, optional Keyword arguments that are passed to the ensemble class upon initialization. ensemble_nbest: int | float = 50 If int: consider only the n best prediction If float: consider only this fraction of the best models max_models_on_disc: int | float | None = 100 Defines the maximum number of models that are kept in the disc. If int, it must be greater or equal than 1, and dictates the max number of models to keep. If float, it will be interpreted as the max megabytes allowed of disc space. That is, if the number of ensemble candidates require more disc space than this float value, the worst models will be deleted to keep within this budget. Models and predictions of the worst-performing models will be deleted then. If None, the feature is disabled. seed: int = 1 Seed used for training the models in the backend precision: 16 | 32 | 64 | 128 = 32 Precision of floats to read the predictions memory_limit: int | None = None Memory limit in mb. If ``None``, no memory limit is enforced. read_at_most: int | None = None read at most n new prediction files in each iteration. If `None`, will read the predictions and calculate losses for all runs that require it. logger_port: int = DEFAULT_TCP_LOGGING_PORT The port where the logging server is listening to. random_state: int | RandomState | None = None A random state used for the ensemble selection process. 
Returns ------- (ensemble_history: list[dict[str, Any]], nbest: int | float) The ensemble history and the nbest chosen members """ random_state = check_random_state(random_state) result = EnsembleBuilder( backend=backend, dataset_name=dataset_name, task_type=task_type, metrics=metrics, ensemble_class=ensemble_class, ensemble_kwargs=ensemble_kwargs, ensemble_nbest=ensemble_nbest, max_models_on_disc=max_models_on_disc, seed=seed, precision=precision, memory_limit=memory_limit, read_at_most=read_at_most, random_state=random_state.randint(10000000), logger_port=logger_port, ).run( end_at=end_at, iteration=iteration, pynisher_context=pynisher_context, ) return result
EnsembleBuilderManager
python
huggingface__transformers
src/transformers/models/qwen2_moe/modeling_qwen2_moe.py
{ "start": 3357, "end": 6363 }
class ____(nn.Module): inv_freq: torch.Tensor # fix linting for `register_buffer` def __init__(self, config: Qwen2MoeConfig, device=None): super().__init__() self.max_seq_len_cached = config.max_position_embeddings self.original_max_seq_len = config.max_position_embeddings self.config = config self.rope_type = self.config.rope_parameters["rope_type"] rope_init_fn: Callable = self.compute_default_rope_parameters if self.rope_type != "default": rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type] inv_freq, self.attention_scaling = rope_init_fn(self.config, device) self.register_buffer("inv_freq", inv_freq, persistent=False) self.original_inv_freq = inv_freq @staticmethod def compute_default_rope_parameters( config: Optional[Qwen2MoeConfig] = None, device: Optional["torch.device"] = None, seq_len: Optional[int] = None, ) -> tuple["torch.Tensor", float]: """ Computes the inverse frequencies according to the original RoPE implementation Args: config ([`~transformers.PreTrainedConfig`]): The model configuration. device (`torch.device`): The device to use for initialization of the inverse frequencies. seq_len (`int`, *optional*): The current sequence length. Unused for this type of RoPE. Returns: Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE). """ base = config.rope_parameters["rope_theta"] dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads attention_factor = 1.0 # Unused in this type of RoPE # Compute the inverse frequencies inv_freq = 1.0 / ( base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim) ) return inv_freq, attention_factor @torch.no_grad() @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope) def forward(self, x, position_ids): inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device) position_ids_expanded = position_ids[:, None, :].float() device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
Qwen2MoeRotaryEmbedding
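For reference, a minimal standalone sketch of the inverse-frequency formula that `compute_default_rope_parameters` applies in the record above; the `head_dim` and `rope_theta` values here are illustrative assumptions, not values taken from any Qwen2MoE checkpoint.

import torch

head_dim = 64          # assumed example head dimension
rope_theta = 10000.0   # assumed example base

# inv_freq[i] = 1 / base ** (2i / dim), one frequency per pair of channels
inv_freq = 1.0 / (
    rope_theta ** (torch.arange(0, head_dim, 2, dtype=torch.int64).float() / head_dim)
)
print(inv_freq.shape)  # torch.Size([32])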
python
ansible__ansible
test/lib/ansible_test/_internal/provider/layout/__init__.py
{ "start": 2379, "end": 5140 }
class ____(Layout): """Information about the current Ansible content being tested.""" def __init__( self, root: str, paths: list[str], plugin_paths: dict[str, str], collection: t.Optional[CollectionDetail], test_path: str, results_path: str, sanity_path: str, sanity_messages: t.Optional[LayoutMessages], integration_path: str, integration_targets_path: str, integration_vars_path: str, integration_messages: t.Optional[LayoutMessages], unit_path: str, unit_module_path: str, unit_module_utils_path: str, unit_messages: t.Optional[LayoutMessages], unsupported: bool | list[str] = False, ) -> None: super().__init__(root, paths) self.plugin_paths = plugin_paths self.collection = collection self.test_path = test_path self.results_path = results_path self.sanity_path = sanity_path self.sanity_messages = sanity_messages self.integration_path = integration_path self.integration_targets_path = integration_targets_path self.integration_vars_path = integration_vars_path self.integration_messages = integration_messages self.unit_path = unit_path self.unit_module_path = unit_module_path self.unit_module_utils_path = unit_module_utils_path self.unit_messages = unit_messages self.unsupported = unsupported self.is_ansible = root == ANSIBLE_SOURCE_ROOT @property def prefix(self) -> str: """Return the collection prefix or an empty string if not a collection.""" if self.collection: return self.collection.prefix return '' @property def module_path(self) -> t.Optional[str]: """Return the path where modules are found, if any.""" return self.plugin_paths.get('modules') @property def module_utils_path(self) -> t.Optional[str]: """Return the path where module_utils are found, if any.""" return self.plugin_paths.get('module_utils') @property def module_utils_powershell_path(self) -> t.Optional[str]: """Return the path where powershell module_utils are found, if any.""" if self.is_ansible: return os.path.join(self.plugin_paths['module_utils'], 'powershell') return self.plugin_paths.get('module_utils') @property def module_utils_csharp_path(self) -> t.Optional[str]: """Return the path where csharp module_utils are found, if any.""" if self.is_ansible: return os.path.join(self.plugin_paths['module_utils'], 'csharp') return self.plugin_paths.get('module_utils')
ContentLayout
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 2169, "end": 2276 }
class ____(SQLRole):
    """mixin indicating a role that results in strings"""

    __slots__ = ()
StringRole
python
apache__airflow
providers/celery/src/airflow/providers/celery/executors/celery_executor_utils.py
{ "start": 9333, "end": 11692 }
class ____: """ Wrapper class used to propagate exceptions to parent processes from subprocesses. :param exception: The exception to wrap :param exception_traceback: The stacktrace to wrap """ def __init__(self, exception: BaseException, exception_traceback: str): self.exception = exception self.traceback = exception_traceback def send_task_to_executor( task_tuple: TaskInstanceInCelery, ) -> tuple[TaskInstanceKey, CommandType, AsyncResult | ExceptionWithTraceback]: """Send task to executor.""" key, args, queue, task_to_run = task_tuple if AIRFLOW_V_3_0_PLUS: if TYPE_CHECKING: assert isinstance(args, workloads.BaseWorkload) args = (args.model_dump_json(),) else: args = [args] # type: ignore[list-item] try: with timeout(seconds=OPERATION_TIMEOUT): result = task_to_run.apply_async(args=args, queue=queue) except (Exception, AirflowTaskTimeout) as e: exception_traceback = f"Celery Task ID: {key}\n{traceback.format_exc()}" result = ExceptionWithTraceback(e, exception_traceback) # The type is right for the version, but the type cannot be defined correctly for Airflow 2 and 3 # concurrently; return key, args, result def fetch_celery_task_state(async_result: AsyncResult) -> tuple[str, str | ExceptionWithTraceback, Any]: """ Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool. :param async_result: a tuple of the Celery task key and the async Celery object used to fetch the task's state :return: a tuple of the Celery task key and the Celery state and the celery info of the task """ try: with timeout(seconds=OPERATION_TIMEOUT): # Accessing state property of celery task will make actual network request # to get the current state of the task info = async_result.info if hasattr(async_result, "info") else None return async_result.task_id, async_result.state, info except Exception as e: exception_traceback = f"Celery Task ID: {async_result}\n{traceback.format_exc()}" return async_result.task_id, ExceptionWithTraceback(e, exception_traceback), None
ExceptionWithTraceback
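A small self-contained sketch of the wrap-and-propagate pattern the record above relies on: capture `traceback.format_exc()` at the failure site and return it alongside the exception instead of raising across the process boundary. `flaky_operation` and `run_safely` are hypothetical names used only for illustration, not Airflow APIs.

import traceback


class ExceptionWithTraceback:
    """Carries an exception plus its formatted traceback back to the parent."""

    def __init__(self, exception: BaseException, exception_traceback: str):
        self.exception = exception
        self.traceback = exception_traceback


def flaky_operation() -> int:
    raise RuntimeError("boom")


def run_safely():
    try:
        return flaky_operation()
    except Exception as e:
        return ExceptionWithTraceback(e, traceback.format_exc())


result = run_safely()
if isinstance(result, ExceptionWithTraceback):
    print(result.traceback)  # full formatted traceback, safe to log in the parent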
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 15089, "end": 15791 }
class ____(Sky2PixProjection, Zenithal):
    r"""
    Slant orthographic projection - sky to pixel.

    Corresponds to the ``SIN`` projection in FITS WCS.

    See `Zenithal` for a definition of the full transformation.

    The following transformation applies when :math:`\xi` and
    :math:`\eta` are both zero.

    .. math::
        R_\theta = \frac{180^{\circ}}{\pi}\cos \theta

    But more specifically are:

    .. math::
        x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
        y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
    """

    xi = _ParameterDS(default=0.0)
    eta = _ParameterDS(default=0.0)
Sky2Pix_SlantOrthographic
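A quick numeric check of the formulas quoted in the docstring above, evaluated with the default `xi = eta = 0` (so the projection reduces to `R_theta = 180/pi * cos(theta)`); the chosen `phi` and `theta` are arbitrary example inputs.

import math

phi = math.radians(30.0)    # native longitude, example value
theta = math.radians(60.0)  # native latitude, example value
xi = eta = 0.0

scale = 180.0 / math.pi
x = scale * (math.cos(theta) * math.sin(phi) + xi * (1 - math.sin(theta)))
y = scale * (math.cos(theta) * math.cos(phi) + eta * (1 - math.sin(theta)))
r_theta = math.hypot(x, y)

print(round(x, 4), round(y, 4), round(r_theta, 4))
# r_theta equals (180/pi) * cos(theta) when xi and eta are zero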
python
tensorflow__tensorflow
tensorflow/python/tools/api/generator2/generator/generator.py
{ "start": 3961, "end": 4643 }
class ____(Exception):
  """Exception for when two docstrings are registered to a single module."""


def _get_import_path(
    file: str, file_prefixes_to_strip: Sequence[str], module_prefix: str
) -> str:
  module_import_path = file
  for prefix in file_prefixes_to_strip:
    module_import_path = module_import_path.removeprefix(prefix)
  module_import_path = module_import_path.removesuffix('.py')
  module_import_path = module_import_path.removesuffix('__init__')
  module_import_path = module_import_path.strip('/')
  module_import_path = module_import_path.replace('/', '.')
  return module_prefix + module_import_path


@dataclasses.dataclass(frozen=True)
DocExportedTwiceError
python
Lightning-AI__lightning
tests/tests_pytorch/test_cli.py
{ "start": 24092, "end": 26153 }
class ____(BoringDataModule): def __init__(self, batch_size: int = 8): super().__init__() self.batch_size = batch_size self.num_classes = 5 # only available after instantiation def test_lightning_cli_link_arguments(cleandir): class MyLightningCLI(LightningCLI): def add_arguments_to_parser(self, parser): parser.link_arguments("data.batch_size", "model.batch_size") parser.link_arguments("data.num_classes", "model.num_classes", apply_on="instantiate") cli_args = ["--data.batch_size=12", "--trainer.max_epochs=1"] with mock.patch("sys.argv", ["any.py"] + cli_args): cli = MyLightningCLI(BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, run=False) assert cli.model.batch_size == 12 assert cli.model.num_classes == 5 cli.trainer.fit(cli.model) hparams_path = Path(cli.trainer.log_dir) / "hparams.yaml" assert hparams_path.is_file() hparams = yaml.safe_load(hparams_path.read_text()) hparams.pop("_instantiator") assert hparams == {"batch_size": 12, "num_classes": 5} class MyLightningCLI2(LightningCLI): def add_arguments_to_parser(self, parser): parser.link_arguments("data.batch_size", "model.init_args.batch_size") parser.link_arguments("data.num_classes", "model.init_args.num_classes", apply_on="instantiate") cli_args[0] = "--model=tests_pytorch.test_cli.BoringModelRequiredClasses" with mock.patch("sys.argv", ["any.py"] + cli_args): cli = MyLightningCLI2( BoringModelRequiredClasses, BoringDataModuleBatchSizeAndClasses, subclass_mode_model=True, run=False ) assert cli.model.batch_size == 8 assert cli.model.num_classes == 5 cli.trainer.fit(cli.model) hparams_path = Path(cli.trainer.log_dir) / "hparams.yaml" assert hparams_path.is_file() hparams = yaml.safe_load(hparams_path.read_text()) hparams.pop("_instantiator") assert hparams == {"batch_size": 8, "num_classes": 5}
BoringDataModuleBatchSizeAndClasses
python
django-extensions__django-extensions
tests/testapp/models.py
{ "start": 4063, "end": 4239 }
class ____(PostWithUniqFieldCompat):
    new_field = models.CharField(max_length=10)

    class Meta:
        app_label = "django_extensions"
InheritedFromPostWithUniqFieldCompat
python
lepture__authlib
authlib/jose/rfc7517/jwk.py
{ "start": 122, "end": 2029 }
class ____: JWK_KEY_CLS = {} @classmethod def generate_key(cls, kty, crv_or_size, options=None, is_private=False): """Generate a Key with the given key type, curve name or bit size. :param kty: string of ``oct``, ``RSA``, ``EC``, ``OKP`` :param crv_or_size: curve name or bit size :param options: a dict of other options for Key :param is_private: create a private key or public key :return: Key instance """ key_cls = cls.JWK_KEY_CLS[kty] return key_cls.generate_key(crv_or_size, options, is_private) @classmethod def import_key(cls, raw, options=None): """Import a Key from bytes, string, PEM or dict. :return: Key instance """ kty = None if options is not None: kty = options.get("kty") if kty is None and isinstance(raw, dict): kty = raw.get("kty") if kty is None: raw_key = load_pem_key(raw) for _kty in cls.JWK_KEY_CLS: key_cls = cls.JWK_KEY_CLS[_kty] if key_cls.validate_raw_key(raw_key): return key_cls.import_key(raw_key, options) key_cls = cls.JWK_KEY_CLS[kty] return key_cls.import_key(raw, options) @classmethod def import_key_set(cls, raw): """Import KeySet from string, dict or a list of keys. :return: KeySet instance """ raw = _transform_raw_key(raw) if isinstance(raw, dict) and "keys" in raw: keys = raw.get("keys") return KeySet([cls.import_key(k) for k in keys]) raise ValueError("Invalid key set format") def _transform_raw_key(raw): if isinstance(raw, str) and raw.startswith("{") and raw.endswith("}"): return json_loads(raw) elif isinstance(raw, (tuple, list)): return {"keys": raw} return raw
JsonWebKey
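A hedged usage sketch built only from the classmethod signatures shown in the record above; it assumes the class is importable as `authlib.jose.JsonWebKey` and that an `oct` key dict with a base64url `k` member is accepted by `import_key`.

from authlib.jose import JsonWebKey  # assumed import path for the class above

# kty plus bit size, per generate_key's docstring
rsa_key = JsonWebKey.generate_key("RSA", 2048, is_private=True)

# kty plus curve name
ec_key = JsonWebKey.generate_key("EC", "P-256", is_private=True)

# import_key dispatches on the "kty" member when given a dict
oct_key = JsonWebKey.import_key({"kty": "oct", "k": "c2VjcmV0LWtleS1ieXRlcw"})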
python
pandas-dev__pandas
asv_bench/benchmarks/groupby.py
{ "start": 16435, "end": 17340 }
class ____:
    """
    Benchmarks specifically targeting our cython aggregation algorithms
    (using a big enough dataframe with simple key, so a large part of the
    time is actually spent in the grouped aggregation).
    """

    param_names = ["dtype", "method"]
    params = [
        ["float64"],
        [
            "sum",
            "prod",
            "min",
            "max",
            "idxmin",
            "idxmax",
            "mean",
            "median",
            "var",
            "first",
            "last",
            "any",
            "all",
        ],
    ]

    def setup(self, dtype, method):
        N = 1_000_000
        df = DataFrame(np.random.randn(N, 10), columns=list("abcdefghij"))
        df["key"] = np.random.randint(0, 100, size=N)
        self.df = df

    def time_frame_agg(self, dtype, method):
        self.df.groupby("key").agg(method)
GroupByCythonAgg
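A tiny interactive version of the operation the benchmark above times, shrunk so it runs instantly; the column names and sizes are arbitrary example choices.

import numpy as np
from pandas import DataFrame

df = DataFrame(np.random.randn(1_000, 3), columns=list("abc"))
df["key"] = np.random.randint(0, 10, size=1_000)

print(df.groupby("key").agg("mean").head())   # one aggregated row per key
print(df.groupby("key").agg("sum").shape)     # (number of unique keys, 3)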
python
kubernetes-client__python
kubernetes/client/models/v1alpha1_named_rule_with_operations.py
{ "start": 383, "end": 10860 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_groups': 'list[str]', 'api_versions': 'list[str]', 'operations': 'list[str]', 'resource_names': 'list[str]', 'resources': 'list[str]', 'scope': 'str' } attribute_map = { 'api_groups': 'apiGroups', 'api_versions': 'apiVersions', 'operations': 'operations', 'resource_names': 'resourceNames', 'resources': 'resources', 'scope': 'scope' } def __init__(self, api_groups=None, api_versions=None, operations=None, resource_names=None, resources=None, scope=None, local_vars_configuration=None): # noqa: E501 """V1alpha1NamedRuleWithOperations - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_groups = None self._api_versions = None self._operations = None self._resource_names = None self._resources = None self._scope = None self.discriminator = None if api_groups is not None: self.api_groups = api_groups if api_versions is not None: self.api_versions = api_versions if operations is not None: self.operations = operations if resource_names is not None: self.resource_names = resource_names if resources is not None: self.resources = resources if scope is not None: self.scope = scope @property def api_groups(self): """Gets the api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501 APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :return: The api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._api_groups @api_groups.setter def api_groups(self, api_groups): """Sets the api_groups of this V1alpha1NamedRuleWithOperations. APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param api_groups: The api_groups of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: list[str] """ self._api_groups = api_groups @property def api_versions(self): """Gets the api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501 APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :return: The api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._api_versions @api_versions.setter def api_versions(self, api_versions): """Sets the api_versions of this V1alpha1NamedRuleWithOperations. APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param api_versions: The api_versions of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: list[str] """ self._api_versions = api_versions @property def operations(self): """Gets the operations of this V1alpha1NamedRuleWithOperations. # noqa: E501 Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. 
Required. # noqa: E501 :return: The operations of this V1alpha1NamedRuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._operations @operations.setter def operations(self, operations): """Sets the operations of this V1alpha1NamedRuleWithOperations. Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required. # noqa: E501 :param operations: The operations of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: list[str] """ self._operations = operations @property def resource_names(self): """Gets the resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501 ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501 :return: The resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._resource_names @resource_names.setter def resource_names(self, resource_names): """Sets the resource_names of this V1alpha1NamedRuleWithOperations. ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501 :param resource_names: The resource_names of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: list[str] """ self._resource_names = resource_names @property def resources(self): """Gets the resources of this V1alpha1NamedRuleWithOperations. # noqa: E501 Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501 :return: The resources of this V1alpha1NamedRuleWithOperations. # noqa: E501 :rtype: list[str] """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this V1alpha1NamedRuleWithOperations. Resources is a list of resources this rule applies to. For example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources. If wildcard is present, the validation rule will ensure resources do not overlap with each other. Depending on the enclosing object, subresources might not be allowed. Required. # noqa: E501 :param resources: The resources of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: list[str] """ self._resources = resources @property def scope(self): """Gets the scope of this V1alpha1NamedRuleWithOperations. # noqa: E501 scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501 :return: The scope of this V1alpha1NamedRuleWithOperations. 
# noqa: E501 :rtype: str """ return self._scope @scope.setter def scope(self, scope): """Sets the scope of this V1alpha1NamedRuleWithOperations. scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\". # noqa: E501 :param scope: The scope of this V1alpha1NamedRuleWithOperations. # noqa: E501 :type: str """ self._scope = scope def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1alpha1NamedRuleWithOperations): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1alpha1NamedRuleWithOperations): return True return self.to_dict() != other.to_dict()
V1alpha1NamedRuleWithOperations
python
optuna__optuna
optuna/storages/journal/_file.py
{ "start": 12279, "end": 12589 }
class ____(JournalFileOpenLock):
    pass


@deprecated_class(
    deprecated_version="4.0.0",
    removed_version="6.0.0",
    name="The import path :class:`~optuna.storages.JournalFileSymlinkLock`",
    text="Use :class:`~optuna.storages.journal.JournalFileSymlinkLock` instead.",
)
DeprecatedJournalFileOpenLock
python
huggingface__transformers
src/transformers/models/starcoder2/modeling_starcoder2.py
{ "start": 22072, "end": 22363 }
class ____(GenericForTokenClassification, Starcoder2PreTrainedModel):
    pass


__all__ = [
    "Starcoder2ForCausalLM",
    "Starcoder2Model",
    "Starcoder2PreTrainedModel",
    "Starcoder2ForSequenceClassification",
    "Starcoder2ForTokenClassification",
]
Starcoder2ForTokenClassification
python
django__django
django/contrib/auth/forms.py
{ "start": 9497, "end": 10492 }
class ____(forms.ModelForm):
    password = ReadOnlyPasswordHashField(
        label=_("Password"),
        help_text=_(
            "Raw passwords are not stored, so there is no way to see "
            "the user’s password."
        ),
    )

    class Meta:
        model = User
        fields = "__all__"
        field_classes = {"username": UsernameField}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        password = self.fields.get("password")
        if password:
            if self.instance and not self.instance.has_usable_password():
                password.help_text = _(
                    "Enable password-based authentication for this user by setting a "
                    "password."
                )
        user_permissions = self.fields.get("user_permissions")
        if user_permissions:
            user_permissions.queryset = user_permissions.queryset.select_related(
                "content_type"
            )
UserChangeForm
python
google__pytype
pytype/tools/traces/source_test.py
{ "start": 267, "end": 608 }
class ____(unittest.TestCase):

  def test_instantiate(self):
    with self.assertRaises(TypeError):
      source.AbstractTrace(None, None, None)
    self.assertIsInstance(_FakeTrace(None, None, None), _FakeTrace)

  def test_repr(self):
    trace = _FakeTrace("LOAD_NAME", "x", (["t"],))
    print(repr(trace))  # smoke test
AbstractTraceTest
python
facebook__pyre-check
tools/upgrade/commands/tests/fixme_test.py
{ "start": 429, "end": 1561 }
class ____(unittest.TestCase):
    def test_run(self) -> None:
        arguments = MagicMock()
        arguments.error_source = ErrorSource.STDIN
        mock_errors = MagicMock()
        arguments.fixme_threshold = None
        with patch.object(
            errors.Errors, "from_stdin", return_value=mock_errors
        ) as errors_from_stdin, patch.object(
            ErrorSuppressingCommand, "_apply_suppressions"
        ) as apply_suppressions:
            Fixme.from_arguments(arguments, repository).run()
            errors_from_stdin.assert_called_once()
            apply_suppressions.assert_called_once_with(mock_errors)

        arguments.error_source = ErrorSource.GENERATE
        arguments.lint = False
        with patch.object(
            Fixme, "_generate_errors", return_value=mock_errors
        ) as generate_errors, patch.object(
            ErrorSuppressingCommand, "_apply_suppressions"
        ) as apply_suppressions:
            Fixme.from_arguments(arguments, repository).run()
            generate_errors.assert_called_once()
            apply_suppressions.assert_called_once_with(mock_errors)
FixmeTest
python
bokeh__bokeh
src/bokeh/util/compiler.py
{ "start": 4195, "end": 4519 }
class ____(Inline):
    ''' An implementation for a Bokeh custom model in TypeScript

    Example:

        .. code-block:: python

            class MyExt(Model):

                __implementation__ = TypeScript("""
                <TypeScript code>
                """)

    '''

    @property
    def lang(self) -> str:
        return "typescript"
TypeScript
python
google__python-fire
fire/docstrings_fuzz_test.py
{ "start": 817, "end": 1100 }
class ____(testutils.BaseTestCase):

  @settings(max_examples=1000, deadline=1000)
  @given(st.text(min_size=1))
  @example('This is a one-line docstring.')
  def test_fuzz_parse(self, value):
    docstrings.parse(value)


if __name__ == '__main__':
  testutils.main()
DocstringsFuzzTest
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_multiarray.py
{ "start": 30793, "end": 43617 }
class ____(TestCase): """ Test the np.array constructor """ def test_from_attribute(self): class x: def __array__(self, dtype=None): pass assert_raises(ValueError, np.array, x()) def test_from_string(self): types = np.typecodes["AllInteger"] + np.typecodes["Float"] nstr = ["123", "123"] result = np.array([123, 123], dtype=int) for type in types: msg = f"String conversion for {type}" assert_equal(np.array(nstr, dtype=type), result, err_msg=msg) def test_void(self): arr = np.array([], dtype="V") assert arr.dtype == "V8" # current default # Same length scalars (those that go to the same void) work: arr = np.array([b"1234", b"1234"], dtype="V") assert arr.dtype == "V4" # Promoting different lengths will fail (pre 1.20 this worked) # by going via S5 and casting to V5. with pytest.raises(TypeError): np.array([b"1234", b"12345"], dtype="V") with pytest.raises(TypeError): np.array([b"12345", b"1234"], dtype="V") # Check the same for the casting path: arr = np.array([b"1234", b"1234"], dtype="O").astype("V") assert arr.dtype == "V4" with pytest.raises(TypeError): np.array([b"1234", b"12345"], dtype="O").astype("V") @parametrize( # "idx", [pytest.param(Ellipsis, id="arr"), pytest.param((), id="scalar")] "idx", [subtest(Ellipsis, name="arr"), subtest((), name="scalar")], ) def test_structured_void_promotion(self, idx): arr = np.array( [np.array(1, dtype="i,i")[idx], np.array(2, dtype="i,i")[idx]], dtype="V" ) assert_array_equal(arr, np.array([(1, 1), (2, 2)], dtype="i,i")) # The following fails to promote the two dtypes, resulting in an error with pytest.raises(TypeError): np.array( [np.array(1, dtype="i,i")[idx], np.array(2, dtype="i,i,i")[idx]], dtype="V", ) def test_too_big_error(self): # 45341 is the smallest integer greater than sqrt(2**31 - 1). # 3037000500 is the smallest integer greater than sqrt(2**63 - 1). # We want to make sure that the square byte array with those dimensions # is too big on 32 or 64 bit systems respectively. if np.iinfo("intp").max == 2**31 - 1: shape = (46341, 46341) elif np.iinfo("intp").max == 2**63 - 1: shape = (3037000500, 3037000500) else: return assert_raises(ValueError, np.empty, shape, dtype=np.int8) assert_raises(ValueError, np.zeros, shape, dtype=np.int8) assert_raises(ValueError, np.ones, shape, dtype=np.int8) @skipif( np.dtype(np.intp).itemsize != 8, reason="malloc may not fail on 32 bit systems" ) def test_malloc_fails(self): # This test is guaranteed to fail due to a too large allocation with assert_raises(np.core._exceptions._ArrayMemoryError): np.empty(np.iinfo(np.intp).max, dtype=np.uint8) def test_zeros(self): types = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] for dt in types: d = np.zeros((13,), dtype=dt) assert_equal(np.count_nonzero(d), 0) # true for ieee floats assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype="(2,4)i4") assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype="4i4") assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype="(2,4)i4, (2,4)i4") assert_equal(np.count_nonzero(d), 0) @slow def test_zeros_big(self): # test big array as they might be allocated different by the system types = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] for dt in types: d = np.zeros((30 * 1024**2,), dtype=dt) assert_(not d.any()) # This test can fail on 32-bit systems due to insufficient # contiguous memory. Deallocating the previous array increases the # chance of success. 
del d def test_zeros_obj(self): # test initialization from PyLong(0) d = np.zeros((13,), dtype=object) assert_array_equal(d, [0] * 13) assert_equal(np.count_nonzero(d), 0) def test_zeros_obj_obj(self): d = np.zeros(10, dtype=[("k", object, 2)]) assert_array_equal(d["k"], 0) def test_zeros_like_like_zeros(self): # test zeros_like returns the same as zeros for c in np.typecodes["All"]: if c == "V": continue d = np.zeros((3, 3), dtype=c) assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) # explicitly check some special cases d = np.zeros((3, 3), dtype="S5") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype="U5") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype="<i4") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype=">i4") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype="<M8[s]") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype=">M8[s]") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) d = np.zeros((3, 3), dtype="f4,f4") assert_array_equal(np.zeros_like(d), d) assert_equal(np.zeros_like(d).dtype, d.dtype) def test_empty_unicode(self): # don't throw decode errors on garbage memory for i in range(5, 100, 5): d = np.empty(i, dtype="U") str(d) def test_sequence_non_homogeneous(self): assert_equal(np.array([4, 2**80]).dtype, object) assert_equal(np.array([4, 2**80, 4]).dtype, object) assert_equal(np.array([2**80, 4]).dtype, object) assert_equal(np.array([2**80] * 3).dtype, object) assert_equal(np.array([[1, 1], [1j, 1j]]).dtype, complex) assert_equal(np.array([[1j, 1j], [1, 1]]).dtype, complex) assert_equal(np.array([[1, 1, 1], [1, 1j, 1.0], [1, 1, 1]]).dtype, complex) def test_non_sequence_sequence(self): """Should not segfault. Class Fail breaks the sequence protocol for new style classes, i.e., those derived from object. Class Map is a mapping type indicated by raising a ValueError. At some point we may raise a warning instead of an error in the Fail case. """ class Fail: def __len__(self): return 1 def __getitem__(self, index): raise ValueError class Map: def __len__(self): return 1 def __getitem__(self, index): raise KeyError a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) def test_no_len_object_type(self): # gh-5100, want object array from iterable object without len() class Point2: def __init__(self) -> None: pass def __getitem__(self, ind): if ind in [0, 1]: return ind else: raise IndexError d = np.array([Point2(), Point2(), Point2()]) assert_equal(d.dtype, np.dtype(object)) def test_false_len_sequence(self): # gh-7264, segfault for this example class C: def __getitem__(self, i): raise IndexError def __len__(self): return 42 a = np.array(C()) # segfault? assert_equal(len(a), 0) def test_false_len_iterable(self): # Special case where a bad __getitem__ makes us fall back on __iter__: class C: def __getitem__(self, x): raise Exception # noqa: TRY002 def __iter__(self): return iter(()) def __len__(self): return 2 a = np.empty(2) with assert_raises(ValueError): a[:] = C() # Segfault! 
assert_equal(np.array(C()), list(C())) def test_failed_len_sequence(self): # gh-7393 class A: def __init__(self, data): self._data = data def __getitem__(self, item): return type(self)(self._data[item]) def __len__(self): return len(self._data) # len(d) should give 3, but len(d[0]) will fail d = A([1, 2, 3]) assert_equal(len(np.array(d)), 3) def test_array_too_big(self): # Test that array creation succeeds for arrays addressable by intp # on the byte level and fails for too large arrays. buf = np.zeros(100) max_bytes = np.iinfo(np.intp).max for dtype in ["intp", "S20", "b"]: dtype = np.dtype(dtype) itemsize = dtype.itemsize np.ndarray( buffer=buf, strides=(0,), shape=(max_bytes // itemsize,), dtype=dtype ) assert_raises( ValueError, np.ndarray, buffer=buf, strides=(0,), shape=(max_bytes // itemsize + 1,), dtype=dtype, ) def _ragged_creation(self, seq): # without dtype=object, the ragged object raises with pytest.raises(ValueError, match=".*detected shape was"): np.array(seq) return np.array(seq, dtype=object) def test_ragged_ndim_object(self): # Lists of mismatching depths are treated as object arrays a = self._ragged_creation([[1], 2, 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = self._ragged_creation([1, [2], 3]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = self._ragged_creation([1, 2, [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) def test_ragged_shape_object(self): # The ragged dimension of a list is turned into an object array a = self._ragged_creation([[1, 1], [2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = self._ragged_creation([[1], [2, 2], [3]]) assert_equal(a.shape, (3,)) assert_equal(a.dtype, object) a = self._ragged_creation([[1], [2], [3, 3]]) assert a.shape == (3,) assert a.dtype == object def test_array_of_ragged_array(self): outer = np.array([None, None]) outer[0] = outer[1] = np.array([1, 2, 3]) assert np.array(outer).shape == (2,) assert np.array([outer]).shape == (1, 2) outer_ragged = np.array([None, None]) outer_ragged[0] = np.array([1, 2, 3]) outer_ragged[1] = np.array([1, 2, 3, 4]) # should both of these emit deprecation warnings? assert np.array(outer_ragged).shape == (2,) assert np.array([outer_ragged]).shape == ( 1, 2, ) def test_deep_nonragged_object(self): # None of these should raise, even though they are missing dtype=object np.array([[[Decimal(1)]]]) np.array([1, Decimal(1)]) np.array([[1], [Decimal(1)]]) @parametrize("dtype", [object, "O,O", "O,(3)O", "(2,3)O"]) @parametrize( "function", [ np.ndarray, np.empty, lambda shape, dtype: np.empty_like(np.empty(shape, dtype=dtype)), ], ) def test_object_initialized_to_None(self, function, dtype): # NumPy has support for object fields to be NULL (meaning None) # but generally, we should always fill with the proper None, and # downstream may rely on that. (For fully initialized arrays!) arr = function(3, dtype=dtype) # We expect a fill value of None, which is not NULL: expected = np.array(None).tobytes() expected = expected * (arr.nbytes // len(expected)) assert arr.tobytes() == expected
TestCreation
python
PyCQA__pycodestyle
testing/data/E30not.py
{ "start": 1489, "end": 2452 }
class ____(object): pass if __name__ == '__main__': foo() #: Okay classification_errors = None #: Okay defined_properly = True #: Okay defaults = {} defaults.update({}) #: Okay def foo(x): classification = x definitely = not classification #: E704:3:1 E704:4:1 # This emits the (ignored-by-default) E704, but here we're testing # for no E30x being emitted. def bar(): pass def baz(): pass def main(): pass #: E704:4:5 E704:5:5 def foo(): # This emits the (ignored-by-default) E704, but here we're testing # for no E30x being emitted. def bar(): pass def baz(): pass #: E704:8:1 E704:10:1 from typing import overload from typing import Union # This emits the (ignored-by-default) E704, but here we're testing # for no E30x being emitted. @overload def f(x: int) -> int: ... @overload def f(x: str) -> str: ... def f(x: Union[int, str]) -> Union[int, str]: return x #: E704:8:5 E704:10:5 from typing import Protocol
Bar
python
weaviate__weaviate-python-client
weaviate/collections/classes/config_vector_index.py
{ "start": 2978, "end": 3167 }
class ____(_VectorIndexConfigCreate):
    skip: bool = True

    @staticmethod
    def vector_index_type() -> VectorIndexType:
        return VectorIndexType.HNSW
_VectorIndexConfigSkipCreate
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
{ "start": 130092, "end": 130884 }
class ____:
    @mock.patch(VERTEX_AI_PATH.format("ray.RayHook"))
    def test_execute(self, mock_hook):
        op = GetRayClusterOperator(
            task_id=TASK_ID,
            cluster_id=TEST_CLUSTER_ID,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
            location=GCP_LOCATION,
            project_id=GCP_PROJECT,
        )
        op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
        mock_hook.return_value.get_ray_cluster.assert_called_once_with(
            location=GCP_LOCATION,
            project_id=GCP_PROJECT,
            cluster_id=TEST_CLUSTER_ID,
        )
TestVertexAIGetRayClusterOperator
python
dagster-io__dagster
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/run_event.py
{ "start": 743, "end": 1076 }
class ____(BaseModel):
    """Single run event model."""

    run_id: str
    message: str
    timestamp: str  # ISO 8601 timestamp
    level: RunEventLevel
    step_key: Optional[str] = None
    event_type: Optional[str] = None
    error: Optional[DgApiErrorInfo] = None

    class Config:
        from_attributes = True
DgApiRunEvent
python
django__django
django/db/models/functions/datetime.py
{ "start": 4323, "end": 4378 }
class ____(Extract):
    lookup_name = "year"
ExtractYear
python
cython__cython
tests/run/ext_auto_richcmp.py
{ "start": 7108, "end": 7951 }
class ____(X):
    """
    >>> a = ClassLtGtInherited(1)
    >>> b = ClassLtGtInherited(2)
    >>> c = ClassLtGtInherited(1)
    >>> a < b
    True
    >>> b > a
    True
    >>> b < a
    False
    >>> a > b
    False
    >>> a < c
    False
    >>> c > a
    False
    >>> c < a
    False
    >>> a > c
    False
    >>> b < c
    False
    >>> c > b
    False
    >>> c < b
    True
    >>> b > c
    True
    >>> sorted([a, b, c])
    [<1>, <1>, <2>]
    >>> sorted([b, a, c])
    [<1>, <1>, <2>]
    """
    def __gt__(self, other):
        assert 1 <= self.x <= 2
        assert isinstance(self, ClassLtGtInherited), type(self)
        if isinstance(other, X):
            return self.x > x_of(other)
        elif isinstance(other, int):
            return self.x > other
        return NotImplemented


@cython.cclass
ClassLtGtInherited
python
encode__django-rest-framework
rest_framework/throttling.py
{ "start": 5757, "end": 6370 }
class ____(SimpleRateThrottle):
    """
    Limits the rate of API calls that may be made by a given user.

    The user id will be used as a unique cache key if the user is
    authenticated.  For anonymous requests, the IP address of the request will
    be used.
    """
    scope = 'user'

    def get_cache_key(self, request, view):
        if request.user and request.user.is_authenticated:
            ident = request.user.pk
        else:
            ident = self.get_ident(request)

        return self.cache_format % {
            'scope': self.scope,
            'ident': ident
        }
UserRateThrottle
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/memberAccess1.py
{ "start": 1930, "end": 2448 }
class ____(Generic[_T, _P, _R]):
    def __init__(self, func: Callable[Concatenate[_T, _P], Awaitable[_R]]) -> None:
        self.func = func

    @overload
    def __get__(self, obj: None, objtype: type[_T]) -> "Decorator[_T, _P, _R]": ...

    @overload
    def __get__(
        self, obj: _T, objtype: type[_T] | None
    ) -> Callable[_P, Awaitable[_R]]: ...

    def __get__(
        self, obj: _T | None, objtype: type[_T] | None = None
    ) -> "Decorator[_T, _P, _R] | Callable[_P, Awaitable[_R]]": ...
Decorator
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_data_bar06.py
{ "start": 345, "end": 7437 }
class ____(unittest.TestCase): """ Test assembling a complete Worksheet file. """ def test_assemble_xml_file(self): """Test writing a worksheet with conditional formatting.""" self.maxDiff = None fh = StringIO() worksheet = Worksheet() worksheet._set_filehandle(fh) worksheet.select() worksheet.index = 0 worksheet.conditional_format( "A1", { "type": "data_bar", "bar_negative_color_same": True, }, ) worksheet.conditional_format( "A2:B2", { "type": "data_bar", "bar_color": "#63C384", "bar_negative_border_color": "#92D050", }, ) worksheet.conditional_format( "A3:C3", { "type": "data_bar", "bar_color": "#FF555A", "bar_negative_border_color_same": True, }, ) worksheet._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac"> <dimension ref="A1"/> <sheetViews> <sheetView tabSelected="1" workbookViewId="0"/> </sheetViews> <sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/> <sheetData/> <conditionalFormatting sqref="A1"> <cfRule type="dataBar" priority="1"> <dataBar> <cfvo type="min"/> <cfvo type="max"/> <color rgb="FF638EC6"/> </dataBar> <extLst> <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}"> <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id> </ext> </extLst> </cfRule> </conditionalFormatting> <conditionalFormatting sqref="A2:B2"> <cfRule type="dataBar" priority="2"> <dataBar> <cfvo type="min"/> <cfvo type="max"/> <color rgb="FF63C384"/> </dataBar> <extLst> <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}"> <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000002}</x14:id> </ext> </extLst> </cfRule> </conditionalFormatting> <conditionalFormatting sqref="A3:C3"> <cfRule type="dataBar" priority="3"> <dataBar> <cfvo type="min"/> <cfvo type="max"/> <color rgb="FFFF555A"/> </dataBar> <extLst> <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}"> <x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000003}</x14:id> </ext> </extLst> </cfRule> </conditionalFormatting> <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/> <extLst> <ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}"> <x14:conditionalFormattings> <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main"> <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}"> <x14:dataBar minLength="0" maxLength="100" border="1" negativeBarColorSameAsPositive="1" negativeBarBorderColorSameAsPositive="0"> <x14:cfvo type="autoMin"/> <x14:cfvo type="autoMax"/> <x14:borderColor rgb="FF638EC6"/> <x14:negativeBorderColor rgb="FFFF0000"/> <x14:axisColor rgb="FF000000"/> </x14:dataBar> </x14:cfRule> <xm:sqref>A1</xm:sqref> </x14:conditionalFormatting> <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main"> <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000002}"> <x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0"> <x14:cfvo type="autoMin"/> <x14:cfvo 
type="autoMax"/> <x14:borderColor rgb="FF63C384"/> <x14:negativeFillColor rgb="FFFF0000"/> <x14:negativeBorderColor rgb="FF92D050"/> <x14:axisColor rgb="FF000000"/> </x14:dataBar> </x14:cfRule> <xm:sqref>A2:B2</xm:sqref> </x14:conditionalFormatting> <x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main"> <x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000003}"> <x14:dataBar minLength="0" maxLength="100" border="1"> <x14:cfvo type="autoMin"/> <x14:cfvo type="autoMax"/> <x14:borderColor rgb="FFFF555A"/> <x14:negativeFillColor rgb="FFFF0000"/> <x14:axisColor rgb="FF000000"/> </x14:dataBar> </x14:cfRule> <xm:sqref>A3:C3</xm:sqref> </x14:conditionalFormatting> </x14:conditionalFormattings> </ext> </extLst> </worksheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleWorksheet
python
huggingface__transformers
src/transformers/models/evolla/modular_evolla.py
{ "start": 5816, "end": 5876 }
class ____(EsmIntermediate):
    pass
EvollaSaProtIntermediate
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/abstractClass3.py
{ "start": 387, "end": 499 }
class ____(MixinA, MixinB, MixinC):
    pass


# This should not generate an error
trainer = Trainer_1a()
Trainer_1a
python
skorch-dev__skorch
skorch/tests/callbacks/test_training.py
{ "start": 39935, "end": 48286 }
class ____: @pytest.fixture(params=['torch', 'safetensors']) def use_safetensors(self, request): return request.param == 'safetensors' @pytest.fixture def trainendcheckpoint_cls(self): from skorch.callbacks import TrainEndCheckpoint return TrainEndCheckpoint @pytest.fixture def save_params_mock(self): with patch('skorch.NeuralNet.save_params') as mock: yield mock @pytest.fixture def net_cls(self): """very simple network that trains for 10 epochs""" from skorch import NeuralNetRegressor from skorch.toy import make_regressor module_cls = make_regressor( input_units=1, num_hidden=0, output_units=1, ) return partial( NeuralNetRegressor, module=module_cls, max_epochs=10, batch_size=10) @pytest.fixture(scope='module') def data(self): # have 10 examples so we can do a nice CV split X = np.zeros((10, 1), dtype='float32') y = np.zeros((10, 1), dtype='float32') return X, y def test_init_with_wrong_kwarg_name_raises(self, trainendcheckpoint_cls): trainendcheckpoint_cls(f_foobar='foobar.pt').initialize() # works msg = ("TrainEndCheckpoint got an unexpected argument 'foobar', " "did you mean 'f_foobar'?") with pytest.raises(TypeError, match=msg): trainendcheckpoint_cls(foobar='foobar.pt').initialize() def test_init_with_f_params_and_f_module_raises(self, trainendcheckpoint_cls): msg = "Checkpoint called with both f_params and f_module, please choose one" with pytest.raises(TypeError, match=msg): trainendcheckpoint_cls( f_module='weights.pt', f_params='params.pt').initialize() def test_init_with_f_optimizer_and_safetensors_raises(self, trainendcheckpoint_cls): msg = ( "Cannot save optimizer state when using safetensors, " "please set f_optimizer=None or don't use safetensors." ) with pytest.raises(ValueError, match=msg): trainendcheckpoint_cls( f_optimizer='optimizer.safetensors', use_safetensors=True ) def test_saves_at_end( self, save_params_mock, net_cls, trainendcheckpoint_cls, data, use_safetensors, ): sink = Mock() kwargs = dict( sink=sink, dirname='exp1', fn_prefix='train_end_', use_safetensors=use_safetensors, ) if use_safetensors: # safetensors cannot safe optimizers kwargs['f_optimizer'] = None net = net_cls(callbacks=[trainendcheckpoint_cls(**kwargs)]) net.fit(*data) if use_safetensors: # safetensors cannot safe optimizers assert save_params_mock.call_count == 3 else: assert save_params_mock.call_count == 4 assert sink.call_args == call("Final checkpoint triggered") kwargs = {'use_safetensors': use_safetensors} calls_expected = [ # params is turned into module call(f_module='exp1/train_end_params.pt', **kwargs), call(f_criterion='exp1/train_end_criterion.pt', **kwargs), call(f_history='exp1/train_end_history.json', **kwargs), ] if not use_safetensors: calls_expected.append( call(f_optimizer='exp1/train_end_optimizer.pt', **kwargs) ) save_params_mock.assert_has_calls( calls_expected, any_order=True, ) def test_saves_at_end_with_custom_formatting( self, save_params_mock, net_cls, trainendcheckpoint_cls, data, use_safetensors, ): sink = Mock() kwargs = dict( sink=sink, dirname='exp1', f_params='model_{last_epoch[epoch]}.pt', f_optimizer='optimizer_{last_epoch[epoch]}.pt', f_criterion='criterion_{last_epoch[epoch]}.pt', fn_prefix='train_end_', use_safetensors=use_safetensors, ) if use_safetensors: # safetensors cannot safe optimizers kwargs['f_optimizer'] = None net = net_cls(callbacks=[trainendcheckpoint_cls(**kwargs)]) net.fit(*data) if use_safetensors: # safetensors cannot safe optimizers assert save_params_mock.call_count == 3 else: assert save_params_mock.call_count == 4 assert 
sink.call_args == call("Final checkpoint triggered") kwargs = {'use_safetensors': use_safetensors} calls_expected = [ # params is turned into module call(f_module='exp1/train_end_model_10.pt', **kwargs), call(f_criterion='exp1/train_end_criterion_10.pt', **kwargs), call(f_history='exp1/train_end_history.json', **kwargs), ] if not use_safetensors: calls_expected.append( call(f_optimizer='exp1/train_end_optimizer_10.pt', **kwargs), ) save_params_mock.assert_has_calls( calls_expected, any_order=True, ) def test_cloneable(self, trainendcheckpoint_cls): # reproduces bug #459 cp = trainendcheckpoint_cls() clone(cp) # does not raise def test_train_end_with_load_init(self, trainendcheckpoint_cls, net_cls, data): # test for https://github.com/skorch-dev/skorch/issues/528 # Check that the initial state is indeed loaded from the checkpoint. from skorch.callbacks import LoadInitState from sklearn.metrics import mean_squared_error X, y = data cp = trainendcheckpoint_cls() net = net_cls(callbacks=[cp], max_epochs=3, lr=0.1).initialize() score_before = mean_squared_error(y, net.predict(X)) net.partial_fit(X, y) score_after = mean_squared_error(y, net.predict(X)) # make sure the net learned at all assert score_after < score_before net_new = net_cls(callbacks=[LoadInitState(cp)], max_epochs=0) net_new.fit(X, y) score_loaded = mean_squared_error(y, net_new.predict(X)) # the same score as after the end of training of the initial # net should be obtained assert np.isclose(score_loaded, score_after) def test_save_custom_module( self, save_params_mock, module_cls, trainendcheckpoint_cls, data, use_safetensors, ): # checkpointing custom modules works from skorch import NeuralNetRegressor class MyNet(NeuralNetRegressor): """Net with custom module""" def __init__(self, *args, mymodule=module_cls, **kwargs): self.mymodule = mymodule super().__init__(*args, **kwargs) def initialize_module(self, *args, **kwargs): super().initialize_module(*args, **kwargs) params = self.get_params_for('mymodule') self.mymodule_ = self.mymodule(**params) return self cp = trainendcheckpoint_cls( f_params=None, f_optimizer=None, f_criterion=None, f_history=None, f_mymodule='mymodule.pt', use_safetensors=use_safetensors, ) net = MyNet(module_cls, callbacks=[cp]) net.fit(*data) kwargs = {'use_safetensors': use_safetensors} assert save_params_mock.call_count == 1 save_params_mock.assert_has_calls( [call(f_mymodule='train_end_mymodule.pt', **kwargs)] ) def test_pickle_uninitialized_callback(self, trainendcheckpoint_cls): # isuue 773 cp = trainendcheckpoint_cls() # does not raise s = pickle.dumps(cp) pickle.loads(s) def test_pickle_initialized_callback(self, trainendcheckpoint_cls): # issue 773 cp = trainendcheckpoint_cls().initialize() # does not raise s = pickle.dumps(cp) pickle.loads(s)
TestTrainEndCheckpoint
python
miyuchina__mistletoe
test/test_block_token.py
{ "start": 161, "end": 893 }
class ____(unittest.TestCase):
    def setUp(self):
        self.addCleanup(lambda: span_token._token_types.__setitem__(-1, span_token.RawText))
        patcher = patch('mistletoe.span_token.RawText')
        self.mock = patcher.start()
        span_token._token_types[-1] = self.mock
        self.addCleanup(patcher.stop)

    def _test_match(self, token_cls, lines, arg, **kwargs):
        token = next(iter(block_token.tokenize(lines)))
        self.assertIsInstance(token, token_cls)
        self._test_token(token, arg, **kwargs)

    def _test_token(self, token, arg, **kwargs):
        for attr, value in kwargs.items():
            self.assertEqual(getattr(token, attr), value)
        self.mock.assert_any_call(arg)
TestToken
python
apache__airflow
providers/alibaba/tests/unit/alibaba/cloud/operators/test_oss.py
{ "start": 1340, "end": 1868 }
class ____: @mock.patch("airflow.providers.alibaba.cloud.operators.oss.OSSHook") def test_execute(self, mock_hook): operator = OSSCreateBucketOperator( task_id=MOCK_TASK_ID, region=MOCK_REGION, bucket_name=MOCK_BUCKET, oss_conn_id=MOCK_OSS_CONN_ID ) operator.execute(None) mock_hook.assert_called_once_with(oss_conn_id=MOCK_OSS_CONN_ID, region=MOCK_REGION) mock_hook.return_value.create_bucket.assert_called_once_with(bucket_name=MOCK_BUCKET)
TestOSSCreateBucketOperator
python
pallets__quart
src/quart/logging.py
{ "start": 641, "end": 2197 }
class ____(QueueHandler):
    """Custom QueueHandler that skips record preparation.

    There is no need to prepare records that go into a local, in-process
    queue, we can skip that process and minimise the cost of logging further.
    """

    def prepare(self, record: LogRecord) -> LogRecord:
        return record


def _setup_logging_queue(*handlers: Handler) -> QueueHandler:
    """Create a new LocalQueueHandler and start an associated QueueListener."""
    queue: Queue = Queue()
    queue_handler = LocalQueueHandler(queue)

    serving_listener = QueueListener(queue, *handlers, respect_handler_level=True)
    serving_listener.start()

    return queue_handler


def has_level_handler(logger: Logger) -> bool:
    """Check if the logger already has a handler"""
    level = logger.getEffectiveLevel()

    current = logger
    while current:
        if any(handler.level <= level for handler in current.handlers):
            return True

        if not current.propagate:
            break

        current = current.parent

    return False


def create_logger(app: Quart) -> Logger:
    """Create a logger for the app based on the app settings.

    This creates a logger named quart.app that has a log level based
    on the app configuration.
    """
    logger = getLogger(app.name)

    if app.debug and logger.level == NOTSET:
        logger.setLevel(DEBUG)

    if not has_level_handler(logger):
        queue_handler = _setup_logging_queue(default_handler)
        logger.addHandler(queue_handler)

    return logger
LocalQueueHandler
python
ray-project__ray
release/nightly_tests/multimodal_inference_benchmarks/large_image_embedding/daft_main.py
{ "start": 1421, "end": 2692 }
class ____:
    def __init__(self):
        self._device = "cuda" if torch.cuda.is_available() else "cpu"
        self._model = ViTForImageClassification.from_pretrained(
            "google/vit-base-patch16-224"
        ).to(self._device)

    def __call__(self, image_column) -> np.ndarray:
        image_ndarray = np.array(image_column.to_pylist())
        with torch.inference_mode():
            next_tensor = torch.from_numpy(image_ndarray).to(
                dtype=torch.float32, device=self._device, non_blocking=True
            )
            output = self._model(next_tensor).logits
        return output.cpu().detach().numpy()


start_time = time.time()

df = daft.read_parquet(INPUT_PREFIX)
df = df.with_column("image", df["image"].apply(decode, return_dtype=DataType.binary()))
df = df.with_column("image", df["image"].image.decode(mode=daft.ImageMode.RGB))
df = df.with_column("height", df["image"].image_height())
df = df.with_column("width", df["image"].image.width())
df = df.with_column(
    "image",
    df["image"].apply(preprocess, return_dtype=DataType.tensor(DataType.float32())),
)
df = df.with_column("embeddings", Infer(df["image"]))
df = df.select("embeddings")
df.write_parquet(OUTPUT_PREFIX)

print("Runtime", time.time() - start_time)
Infer
python
sanic-org__sanic
tests/typing/samples/app_fully_custom.py
{ "start": 58, "end": 97 }
class ____(Config):
    pass
CustomConfig
python
readthedocs__readthedocs.org
readthedocs/embed/v3/views.py
{ "start": 918, "end": 1353 }
class ____(IsAuthorizedToViewVersion):
    """
    Checks if the user from the request has permissions to get content from the version.

    If the URL is from an external site, we return ``True``,
    since we don't have a project to check for.
    """

    def has_permission(self, request, view):
        if view.external:
            return True

        return super().has_permission(request, view)
IsAuthorizedToGetContenFromVersion
python
great-expectations__great_expectations
great_expectations/datasource/fluent/data_asset/path/spark/parquet_asset.py
{ "start": 540, "end": 1800 }
class ____(_SparkGenericFilePathAssetMixin):
    # The options below are available as of spark v3.4.0
    # See https://spark.apache.org/docs/latest/sql-data-sources-parquet.html for more info.
    merge_schema: Optional[Union[bool, str]] = Field(None, alias="mergeSchema")
    datetime_rebase_mode: Optional[Literal["EXCEPTION", "CORRECTED", "LEGACY"]] = Field(
        None, alias="datetimeRebaseMode"
    )
    int_96_rebase_mode: Optional[Literal["EXCEPTION", "CORRECTED", "LEGACY"]] = Field(
        None, alias="int96RebaseMode"
    )

    class Config:
        extra = "forbid"
        allow_population_by_field_name = True

    @classmethod
    @override
    def _get_reader_method(cls) -> str:
        return "parquet"

    @override
    def _get_reader_options_include(self) -> set[str]:
        """These options are available as of spark v3.4.0

        See https://spark.apache.org/docs/latest/sql-data-sources-parquet.html for more info.
        """
        return (
            super()
            ._get_reader_options_include()
            .union(
                {
                    "datetime_rebase_mode",
                    "int_96_rebase_mode",
                    "merge_schema",
                }
            )
        )
ParquetAssetBase
python
apache__airflow
providers/google/src/airflow/providers/google/suite/sensors/drive.py
{ "start": 1163, "end": 3334 }
class ____(BaseSensorOperator):
    """
    Checks for the existence of a file in Google Cloud Storage.

    :param folder_id: The Google drive folder where the file is.
    :param file_name: The name of the file to check in Google Drive
    :param drive_id: Optional. The id of the shared Google Drive in which the file resides.
    :param gcp_conn_id: The connection ID to use when connecting to Google Cloud Storage.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields: Sequence[str] = (
        "folder_id",
        "file_name",
        "drive_id",
        "impersonation_chain",
    )
    ui_color = "#f0eee4"

    def __init__(
        self,
        *,
        folder_id: str,
        file_name: str,
        drive_id: str | None = None,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.folder_id = folder_id
        self.file_name = file_name
        self.drive_id = drive_id
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def poke(self, context: Context) -> bool:
        self.log.info("Sensor is checking for the file %s in the folder %s", self.file_name, self.folder_id)
        hook = GoogleDriveHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )
        return hook.exists(folder_id=self.folder_id, file_name=self.file_name, drive_id=self.drive_id)
GoogleDriveFileExistenceSensor
python
python-openxml__python-docx
src/docx/oxml/section.py
{ "start": 15307, "end": 20395 }
class ____: """Generates the block-item XML elements in a section. A block-item element is a `CT_P` (paragraph) or a `CT_Tbl` (table). """ _compiled_blocks_xpath: etree.XPath | None = None _compiled_count_xpath: etree.XPath | None = None def __init__(self, sectPr: CT_SectPr): self._sectPr = sectPr @classmethod def iter_sect_block_elements(cls, sectPr: CT_SectPr) -> Iterator[BlockElement]: """Generate each CT_P or CT_Tbl element within extents governed by `sectPr`.""" return cls(sectPr)._iter_sect_block_elements() def _iter_sect_block_elements(self) -> Iterator[BlockElement]: """Generate each CT_P or CT_Tbl element in section.""" # -- General strategy is to get all block (<w;p> and <w:tbl>) elements from # -- start of doc to and including this section, then compute the count of those # -- elements that came from prior sections and skip that many to leave only the # -- ones in this section. It's possible to express this "between here and # -- there" (end of prior section and end of this one) concept in XPath, but it # -- would be harder to follow because there are special cases (e.g. no prior # -- section) and the boundary expressions are fairly hairy. I also believe it # -- would be computationally more expensive than doing it this straighforward # -- albeit (theoretically) slightly wasteful way. sectPr, sectPrs = self._sectPr, self._sectPrs sectPr_idx = sectPrs.index(sectPr) # -- count block items belonging to prior sections -- n_blks_to_skip = ( 0 if sectPr_idx == 0 else self._count_of_blocks_in_and_above_section(sectPrs[sectPr_idx - 1]) ) # -- and skip those in set of all blks from doc start to end of this section -- for element in self._blocks_in_and_above_section(sectPr)[n_blks_to_skip:]: yield element def _blocks_in_and_above_section(self, sectPr: CT_SectPr) -> Sequence[BlockElement]: """All ps and tbls in section defined by `sectPr` and all prior sections.""" if self._compiled_blocks_xpath is None: self._compiled_blocks_xpath = etree.XPath( self._blocks_in_and_above_section_xpath, namespaces=nsmap, regexp=False, ) xpath = self._compiled_blocks_xpath # -- XPath callable results are Any (basically), so need a cast. -- return cast(Sequence[BlockElement], xpath(sectPr)) @lazyproperty def _blocks_in_and_above_section_xpath(self) -> str: """XPath expr for ps and tbls in context of a sectPr and all prior sectPrs.""" # -- "p_sect" is a section with sectPr located at w:p/w:pPr/w:sectPr. # -- "body_sect" is a section with sectPr located at w:body/w:sectPr. The last # -- section in the document is a "body_sect". All others are of the "p_sect" # -- variety. "term" means "terminal", like the last p or tbl in the section. # -- "pred" means "predecessor", like a preceding p or tbl in the section. # -- the terminal block in a p-based sect is the p the sectPr appears in -- p_sect_term_block = "./parent::w:pPr/parent::w:p" # -- the terminus of a body-based sect is the sectPr itself (not a block) -- body_sect_term = "self::w:sectPr[parent::w:body]" # -- all the ps and tbls preceding (but not including) the context node -- pred_ps_and_tbls = "preceding-sibling::*[self::w:p | self::w:tbl]" # -- p_sect_term_block and body_sect_term(inus) are mutually exclusive. So the # -- result is either the union of nodes found by the first two selectors or the # -- nodes found by the last selector, never both. 
return ( # -- include the p containing a sectPr -- f"{p_sect_term_block}" # -- along with all the blocks that precede it -- f" | {p_sect_term_block}/{pred_ps_and_tbls}" # -- or all the preceding blocks if sectPr is body-based (last sectPr) -- f" | {body_sect_term}/{pred_ps_and_tbls}" ) def _count_of_blocks_in_and_above_section(self, sectPr: CT_SectPr) -> int: """All ps and tbls in section defined by `sectPr` and all prior sections.""" if self._compiled_count_xpath is None: self._compiled_count_xpath = etree.XPath( f"count({self._blocks_in_and_above_section_xpath})", namespaces=nsmap, regexp=False, ) xpath = self._compiled_count_xpath # -- numeric XPath results are always float, so need an int() conversion -- return int(cast(float, xpath(sectPr))) @lazyproperty def _sectPrs(self) -> Sequence[CT_SectPr]: """All w:sectPr elements in document, in document-order.""" return self._sectPr.xpath( "/w:document/w:body/w:p/w:pPr/w:sectPr | /w:document/w:body/w:sectPr", )
_SectBlockElementIterator
python
getsentry__sentry
src/sentry/flags/endpoints/logs.py
{ "start": 997, "end": 1254 }
class ____(TypedDict):
    id: int
    action: str
    createdAt: datetime
    createdBy: str | None
    createdByType: str | None
    flag: str
    provider: str | None
    tags: dict[str, Any]


@register(FlagAuditLogModel)
FlagAuditLogModelSerializerResponse
python
oauthlib__oauthlib
examples/device_code_flow.py
{ "start": 2923, "end": 7364 }
class ____: @staticmethod def create_device_authorization_response(request): server = DeviceApplicationServer(interval=5, verification_uri="https://example.com/device") return server.create_device_authorization_response(request) def post(self, request): headers, data, status = self.create_device_authorization_response(request) device_response = ... # Create an instance of examples.device_flow.Device` using `request` and `data`that encapsulates # https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 & # https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 return device_response """ 2. Client presents the information to the user (There's a section on non visual capable devices as well https://datatracker.ietf.org/doc/html/rfc8628#section-5.7) +-------------------------------------------------+ | | | Scan the QR code or, using +------------+ | | a browser on another device, |[_].. . [_]| | | visit: | . .. . .| | | https://example.com/device | . . . ....| | | |. . . . | | | And enter the code: |[_]. ... . | | | WDJB-MJHT +------------+ | | | +-------------------------------------------------+ """ # The implementation for step 2 is up to the owner of device. """" 3 (The browser flow). User goes to https://example.com/device where they're presented with a form to fill in the user code. Implement that endpoint on your provider and follow the logic in the rfc. Making use of the errors in `oauthlib.oauth2.rfc8628.errors` raise AccessDenied/AuthorizationPendingError/ExpiredTokenError where appropriate making use of `examples.device_flow.Device` to get and update current state of the device during the session If the user isn't logged in(after inputting the user-code), they should be redirected to the provider's /login endpoint and redirected back to an /approve-deny endpoint(The name and implementation of /approve-deny is up to the owner of the provider, this is just an example). They should then see an "approve" or "deny" button to authorize the device. Again, using `examples.device_flow.Device` to update the status appropriately during the session. """ # /device and /approve-deny is up to the owner of the provider to implement. Again, make sure to # keep referring to the rfc when implementing. """ 4 (The polling flow) https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 Right after step 2, the device polls the /token endpoint every "interval" amount of seconds to check if user has approved or denied the request. When grant type is `urn:ietf:params:oauth:grant-type:device_code`, `oauthlib.oauth2.rfc8628.grant_types.device_code.DeviceCodeGrant` will be the handler that handles token generation. """ # This is purely for illustrative purposes # to demonstrate rate limiting on the token endpoint for the device flow. # It is up to as the provider to decide how you want # to rate limit the device during polling. def rate_limit(func, rate="1/5s"): def wrapper(): # some logic to ensure client device is rate limited by a minimum # of 1 request every 5 seconds during device polling # https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 # use device_code to retrieve device device = Device # get the time in seconds since the device polled the /token endpoint now = datetime.datetime.now(tz=datetime.UTC) diff = now - timedelta(device.last_checked) total_seconds_since_last_device_poll = diff.total_seconds() device.last_checked = now # for illustrative purposes. 1/5s means 1 request every 5 seconds. 
# so if `total_seconds_since_last_device_poll` is 4 seconds, this will # raise an error if total_seconds_since_last_device_poll < rate: raise device_flow_errors.SlowDownError() result = func() return result return wrapper
DeviceAuthorizationEndpoint
python
django__django
tests/custom_pk/tests.py
{ "start": 4724, "end": 7425 }
class ____(TestCase):
    def test_custom_pk_create(self):
        """
        New objects can be created both with pk and the custom name
        """
        Employee.objects.create(employee_code=1234, first_name="Foo", last_name="Bar")
        Employee.objects.create(pk=1235, first_name="Foo", last_name="Baz")
        Business.objects.create(name="Bears")
        Business.objects.create(pk="Tears")

    def test_unicode_pk(self):
        # Primary key may be Unicode string.
        Business.objects.create(name="jaźń")

    def test_unique_pk(self):
        # The primary key must also be unique, so trying to create a new object
        # with the same primary key will fail.
        Employee.objects.create(
            employee_code=123, first_name="Frank", last_name="Jones"
        )
        with self.assertRaises(IntegrityError):
            with transaction.atomic():
                Employee.objects.create(
                    employee_code=123, first_name="Fred", last_name="Jones"
                )

    def test_zero_non_autoincrement_pk(self):
        Employee.objects.create(employee_code=0, first_name="Frank", last_name="Jones")
        employee = Employee.objects.get(pk=0)
        self.assertEqual(employee.employee_code, 0)

    def test_custom_field_pk(self):
        # Regression for #10785 -- Custom fields can be used for primary keys.
        new_bar = Bar.objects.create()
        new_foo = Foo.objects.create(bar=new_bar)

        f = Foo.objects.get(bar=new_bar.pk)
        self.assertEqual(f, new_foo)
        self.assertEqual(f.bar, new_bar)

        f = Foo.objects.get(bar=new_bar)
        self.assertEqual(f, new_foo)
        self.assertEqual(f.bar, new_bar)

    # SQLite lets objects be saved with an empty primary key, even though an
    # integer is expected. So we can't check for an error being raised in that
    # case for SQLite. Remove it from the suite for this next bit.
    @skipIfDBFeature("supports_unspecified_pk")
    def test_required_pk(self):
        # The primary key must be specified, so an error is raised if you
        # try to create an object without it.
        with self.assertRaises(IntegrityError):
            with transaction.atomic():
                Employee.objects.create(first_name="Tom", last_name="Smith")

    def test_auto_field_subclass_create(self):
        obj = CustomAutoFieldModel.objects.create()
        self.assertIsInstance(obj.id, MyWrapper)

    @skipUnlessDBFeature("can_return_rows_from_bulk_insert")
    def test_auto_field_subclass_bulk_create(self):
        obj = CustomAutoFieldModel()
        CustomAutoFieldModel.objects.bulk_create([obj])
        self.assertIsInstance(obj.id, MyWrapper)
CustomPKTests
python
astropy__astropy
astropy/table/tests/test_table.py
{ "start": 10439, "end": 12161 }
class ____:
    def test_simple(self, table_types):
        cols = [
            table_types.Column(name="a", data=[1, 2, 3]),
            table_types.Column(name="b", data=[4, 5, 6], dtype=np.float32),
        ]
        t = table_types.Table(cols)
        assert np.all(t["a"].data == np.array([1, 2, 3]))
        assert np.all(t["b"].data == np.array([4, 5, 6], dtype=np.float32))
        assert type(t["b"][1]) is np.float32

    def test_from_np_array(self, table_types):
        cols = [
            table_types.Column(
                name="a", data=np.array([1, 2, 3], dtype=np.int64), dtype=np.float64
            ),
            table_types.Column(name="b", data=np.array([4, 5, 6], dtype=np.float32)),
        ]
        t = table_types.Table(cols)
        assert np.all(t["a"] == np.array([1, 2, 3], dtype=np.float64))
        assert np.all(t["b"] == np.array([4, 5, 6], dtype=np.float32))
        assert type(t["a"][1]) is np.float64
        assert type(t["b"][1]) is np.float32

    def test_size_mismatch(self, table_types):
        cols = [
            table_types.Column(name="a", data=[1, 2, 3]),
            table_types.Column(name="b", data=[4, 5, 6, 7]),
        ]
        with pytest.raises(ValueError):
            table_types.Table(cols)

    def test_name_none(self, table_types):
        """Column with name=None can init a table whether or not names are supplied"""
        c = table_types.Column(data=[1, 2], name="c")
        d = table_types.Column(data=[3, 4])
        t = table_types.Table([c, d], names=(None, "d"))
        assert t.colnames == ["c", "d"]

        t = table_types.Table([c, d])
        assert t.colnames == ["c", "col1"]


@pytest.mark.usefixtures("table_types")
TestNewFromColumns
python
ansible__ansible
test/lib/ansible_test/_internal/host_configs.py
{ "start": 2941, "end": 4264 }
class ____(metaclass=abc.ABCMeta):
    """Configuration for Python."""

    version: t.Optional[str] = None
    path: t.Optional[str] = None

    @property
    def tuple(self) -> tuple[int, ...]:
        """Return the Python version as a tuple."""
        return str_to_version(self.version)

    @property
    def major_version(self) -> int:
        """Return the Python major version."""
        return self.tuple[0]

    def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
        """Apply default settings."""
        if self.version in (None, 'default'):
            self.version = defaults.get_default_python(context.controller)

        if self.path:
            if self.path.endswith('/'):
                self.path = os.path.join(self.path, f'python{self.version}')

            # FUTURE: If the host is origin, the python path could be validated here.
        else:
            self.path = defaults.get_python_path(self.version)

    @property
    @abc.abstractmethod
    def is_managed(self) -> bool:
        """
        True if this Python is a managed instance, otherwise False.

        Managed instances are used exclusively by ansible-test and can safely have
        requirements installed without explicit permission from the user.
        """


@dataclasses.dataclass
PythonConfig
python
allegroai__clearml
clearml/backend_api/services/v2_13/events.py
{ "start": 54379, "end": 55626 }
class ____(Response):
    """
    Response of events.get_debug_image_sample endpoint.
    """

    _service = "events"
    _action = "get_debug_image_sample"
    _version = "2.13"
    _schema = {
        "$ref": "#/definitions/debug_image_sample_reposnse",
        "definitions": {
            "debug_image_sample_reposnse": {
                "properties": {
                    "event": {
                        "description": "Debugimageevent",
                        "type": ["object", "null"],
                    },
                    "max_iteration": {
                        "description": "maximalvaliditerationforthevariant",
                        "type": ["integer", "null"],
                    },
                    "min_iteration": {
                        "description": "minimalvaliditerationforthevariant",
                        "type": ["integer", "null"],
                    },
                    "scroll_id": {
                        "description": "ScrollIDtopasstothenextcallstoget_debug_image_sampleornext_debug_image_sample",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
    }
GetDebugImageSampleResponse
python
pytorch__pytorch
test/distributed/checkpoint/test_checkpoint.py
{ "start": 2298, "end": 4693 }
class ____(ShardedTensorTestBase):
    @property
    def world_size(self) -> int:
        return 2

    @with_comms(init_rpc=False)
    @skip_if_lt_x_gpu(2)
    @requires_accelerator_dist_backend()
    def test_tensor_metadata_with_missing_rank_spec(self) -> None:
        spec = ChunkShardingSpec(
            dim=0,
            placements=[
                f"rank:1/{device_type}:1",
            ],
        )

        st = sharded_tensor.zeros(spec, 4, 4, dtype=torch.float64)
        md = _create_default_local_metadata({"st": st})

        st_md = md.state_dict_metadata["st"]
        self.assertEqual(1, len(st_md.chunks))

    @with_comms(init_rpc=False)
    @skip_if_lt_x_gpu(2)
    @requires_accelerator_dist_backend()
    def test_default_metadata(self) -> None:
        device = f"{device_type}:{dist.get_rank()}"
        spec = ChunkShardingSpec(
            dim=0,
            placements=[
                f"rank:0/{device_type}:0",
                f"rank:1/{device_type}:1",
            ],
        )

        state_dict = {
            "sharded": sharded_tensor.rand(
                spec,
                (
                    10,
                    10,
                ),
            ),
            "replicated": torch.rand(4, device=device),
            "bytes": [1, 2, 3, 4],
        }

        metadata = _create_default_local_metadata(state_dict)
        self.assertTrue("bytes" in metadata.state_dict_metadata)
        self.assertIsInstance(
            metadata.state_dict_metadata["bytes"], BytesStorageMetadata
        )

        self.assertTrue("replicated" in metadata.state_dict_metadata)
        self.assertIsInstance(
            metadata.state_dict_metadata["replicated"], TensorStorageMetadata
        )
        md = metadata.state_dict_metadata["replicated"]
        self.assertEqual(md.size, state_dict["replicated"].size())
        self.assertEqual(md.properties.dtype, torch.float32)
        self.assertEqual(1, len(md.chunks))

        self.assertTrue("sharded" in metadata.state_dict_metadata)
        self.assertIsInstance(
            metadata.state_dict_metadata["sharded"], TensorStorageMetadata
        )
        md = metadata.state_dict_metadata["sharded"]
        self.assertEqual(md.properties.dtype, torch.float32)
        self.assertEqual(md.size, state_dict["sharded"].size())
        self.assertEqual(2, len(md.chunks))
TestDistributedCheckpointing
python
sphinx-doc__sphinx
sphinx/theming.py
{ "start": 4889, "end": 15394 }
class ____: """A factory class for HTML Themes.""" def __init__( self, *, confdir: Path, app: Sphinx, config: Config, registry: SphinxComponentRegistry, ) -> None: self._app = app self._confdir = confdir self._themes = registry.html_themes self._entry_point_themes: dict[str, Callable[[], None]] = {} self._load_builtin_themes() if html_theme_path := getattr(config, 'html_theme_path', None): self._load_additional_themes(html_theme_path) self._load_entry_point_themes() def _load_builtin_themes(self) -> None: """Load built-in themes.""" themes = self._find_themes(package_dir / 'themes') for name, theme in themes.items(): self._themes[name] = _StrPath(theme) def _load_additional_themes(self, theme_paths: list[str]) -> None: """Load additional themes placed at specified directories.""" for theme_path in theme_paths: abs_theme_path = (self._confdir / theme_path).resolve() themes = self._find_themes(abs_theme_path) for name, theme in themes.items(): self._themes[name] = _StrPath(theme) def _load_entry_point_themes(self) -> None: """Try to load a theme with the specified name. This uses the ``sphinx.html_themes`` entry point from package metadata. """ for entry_point in entry_points(group='sphinx.html_themes'): if entry_point.name in self._themes: continue # don't overwrite loaded themes def _load_theme_closure( # bind variables in the function definition app: Sphinx = self._app, theme_module: str = entry_point.module, ) -> None: app.setup_extension(theme_module) _config_post_init(app, app.config) self._entry_point_themes[entry_point.name] = _load_theme_closure @staticmethod def _find_themes(theme_path: Path) -> dict[str, Path]: """Search themes from specified directory.""" themes: dict[str, Path] = {} if not theme_path.is_dir(): return themes for pathname in theme_path.iterdir(): entry = pathname.name if pathname.is_file() and pathname.suffix.lower() == '.zip': if _is_archived_theme(pathname): themes[pathname.stem] = pathname else: logger.warning( __( 'file %r on theme path is not a valid ' 'zipfile or contains no theme' ), entry, ) else: toml_path = pathname / _THEME_TOML conf_path = pathname / _THEME_CONF if toml_path.is_file() or conf_path.is_file(): themes[entry] = pathname return themes def create(self, name: str) -> Theme: """Create an instance of theme.""" if name in self._entry_point_themes: # Load a deferred theme from an entry point entry_point_loader = self._entry_point_themes[name] entry_point_loader() if name not in self._themes: raise ThemeError(__('no theme named %r found (missing theme.toml?)') % name) themes, theme_dirs, tmp_dirs = _load_theme_with_ancestors( name, self._themes, self._entry_point_themes, ) return Theme(name, configs=themes, paths=theme_dirs, tmp_dirs=tmp_dirs) def _is_archived_theme(filename: Path, /) -> bool: """Check whether the specified file is an archived theme file or not.""" try: with ZipFile(filename) as f: namelist = frozenset(f.namelist()) return _THEME_TOML in namelist or _THEME_CONF in namelist except Exception: return False def _load_theme_with_ancestors( name: str, theme_paths: dict[str, _StrPath], entry_point_themes: dict[str, Callable[[], None]], /, ) -> tuple[dict[str, _ConfigFile], list[Path], list[Path]]: themes: dict[str, _ConfigFile] = {} theme_dirs: list[Path] = [] tmp_dirs: list[Path] = [] # having 10+ theme ancestors is ludicrous for _ in range(10): inherit, theme_dir, tmp_dir, config = _load_theme(name, theme_paths[name]) theme_dirs.append(theme_dir) if tmp_dir is not None: tmp_dirs.append(tmp_dir) themes[name] = config if inherit == 
'none': break if inherit in themes: msg = __('The %r theme has circular inheritance') % name raise ThemeError(msg) if inherit in entry_point_themes and inherit not in theme_paths: # Load a deferred theme from an entry point entry_point_loader = entry_point_themes[inherit] entry_point_loader() if inherit not in theme_paths: msg = __( 'The %r theme inherits from %r, which is not a loaded theme. ' 'Loaded themes are: %s' ) % (name, inherit, ', '.join(sorted(theme_paths))) raise ThemeError(msg) name = inherit else: msg = __('The %r theme has too many ancestors') % name raise ThemeError(msg) return themes, theme_dirs, tmp_dirs def _load_theme( name: str, theme_path: Path, / ) -> tuple[str, Path, Path | None, _ConfigFile]: if theme_path.is_dir(): # already a directory, do nothing tmp_dir = None theme_dir = theme_path else: # extract the theme to a temp directory tmp_dir = Path(tempfile.mkdtemp('sxt')) theme_dir = tmp_dir / name _extract_zip(theme_path, theme_dir) if (toml_path := theme_dir / _THEME_TOML).is_file(): _cfg_table = _load_theme_toml(toml_path) inherit = _validate_theme_toml(_cfg_table, name) config = _convert_theme_toml(_cfg_table) elif (conf_path := theme_dir / _THEME_CONF).is_file(): _cfg_parser = _load_theme_conf(conf_path) inherit = _validate_theme_conf(_cfg_parser, name) config = _convert_theme_conf(_cfg_parser) else: raise ThemeError(__('no theme configuration file found in %r') % theme_dir) return inherit, theme_dir, tmp_dir, config def _extract_zip(filename: Path, target_dir: Path, /) -> None: """Extract zip file to target directory.""" ensuredir(target_dir) with ZipFile(filename) as archive: for name in archive.namelist(): if name.endswith('/'): continue entry = target_dir / name ensuredir(entry.parent) entry.write_bytes(archive.read(name)) def _load_theme_toml(config_file_path: Path, /) -> _ThemeToml: c = tomllib.loads(config_file_path.read_text(encoding='utf-8')) return {s: c[s] for s in ('theme', 'options') if s in c} # type: ignore[return-value] def _validate_theme_toml(cfg: _ThemeToml, name: str) -> str: if 'theme' not in cfg: msg = __('theme %r doesn\'t have the "theme" table') % name raise ThemeError(msg) theme = cfg['theme'] if not isinstance(theme, dict): msg = __('The %r theme "[theme]" table is not a table') % name raise ThemeError(msg) inherit = theme.get('inherit', '') if not inherit: msg = __('The %r theme must define the "theme.inherit" setting') % name raise ThemeError(msg) if 'options' in cfg: if not isinstance(cfg['options'], dict): msg = __('The %r theme "[options]" table is not a table') % name raise ThemeError(msg) return inherit def _convert_theme_toml(cfg: _ThemeToml, /) -> _ConfigFile: theme = cfg['theme'] if 'stylesheets' in theme: stylesheets: tuple[str, ...] | None = tuple(theme['stylesheets']) else: stylesheets = None if 'sidebars' in theme: sidebar_templates: tuple[str, ...] | None = tuple(theme['sidebars']) else: sidebar_templates = None pygments_table = theme.get('pygments_style', {}) if isinstance(pygments_table, str): hint = f'pygments_style = {{ default = "{pygments_table}" }}' msg = ( __('The "theme.pygments_style" setting must be a table. 
Hint: "%s"') % hint ) raise ThemeError(msg) pygments_style_default: str | None = pygments_table.get('default') pygments_style_dark: str | None = pygments_table.get('dark') return _ConfigFile( stylesheets=stylesheets, sidebar_templates=sidebar_templates, pygments_style_default=pygments_style_default, pygments_style_dark=pygments_style_dark, options=cfg.get('options', {}), ) def _load_theme_conf(config_file_path: Path, /) -> configparser.RawConfigParser: c = configparser.RawConfigParser() c.read(config_file_path, encoding='utf-8') return c def _validate_theme_conf(cfg: configparser.RawConfigParser, name: str) -> str: if not cfg.has_section('theme'): raise ThemeError(__('theme %r doesn\'t have the "theme" table') % name) if inherit := cfg.get('theme', 'inherit', fallback=None): return inherit msg = __('The %r theme must define the "theme.inherit" setting') % name raise ThemeError(msg) def _convert_theme_conf(cfg: configparser.RawConfigParser, /) -> _ConfigFile: if stylesheet := cfg.get('theme', 'stylesheet', fallback=''): stylesheets: tuple[str, ...] | None = tuple( map(str.strip, stylesheet.split(',')) ) else: stylesheets = None if sidebar := cfg.get('theme', 'sidebars', fallback=''): sidebar_templates: tuple[str, ...] | None = tuple( map(str.strip, sidebar.split(',')) ) else: sidebar_templates = None pygments_style_default: str | None = cfg.get( 'theme', 'pygments_style', fallback=None ) pygments_style_dark: str | None = cfg.get( 'theme', 'pygments_dark_style', fallback=None ) options = dict(cfg.items('options')) if cfg.has_section('options') else {} return _ConfigFile( stylesheets=stylesheets, sidebar_templates=sidebar_templates, pygments_style_default=pygments_style_default, pygments_style_dark=pygments_style_dark, options=options, )
HTMLThemeFactory
python
plotly__plotly.py
plotly/graph_objs/funnel/marker/colorbar/title/_font.py
{ "start": 233, "end": 9944 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "funnel.marker.colorbar.title" _path_str = "funnel.marker.colorbar.title.font" _valid_props = { "color", "family", "lineposition", "shadow", "size", "style", "textcase", "variant", "weight", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') Returns ------- Any """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] Returns ------- Any """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def variant(self): """ Sets the variant of the font. 
The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] Returns ------- Any """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, lineposition=None, shadow=None, size=None, style=None, textcase=None, variant=None, weight=None, **kwargs, ): """ Construct a new Font object Sets this color bar's title font. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.funnel.marker. colorbar.title.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. 
Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.funnel.marker.colorbar.title.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.funnel.marker.colorbar.title.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("lineposition", arg, lineposition) self._set_property("shadow", arg, shadow) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("textcase", arg, textcase) self._set_property("variant", arg, variant) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
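As a quick usage sketch, the nested path above ("funnel.marker.colorbar.title.font") is normally reached through the plotly.graph_objects constructors rather than by instantiating this class directly; the funnel data and font values below are invented for illustration.

# Illustrative sketch; the funnel data and font values are invented.
import plotly.graph_objects as go

fig = go.Figure(
    go.Funnel(
        y=["Visits", "Signups", "Purchases"],
        x=[100, 60, 20],
        marker=dict(
            colorbar=dict(
                title=dict(
                    text="Stage",
                    # Keys below correspond to the Font properties defined above.
                    font=dict(family="Arial, sans-serif", size=14, color="#444"),
                )
            )
        ),
    )
)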
python
pallets__jinja
src/jinja2/nodes.py
{ "start": 13013, "end": 13722 }
class ____(Stmt):
    """A node that represents the from import tag. It's important to not
    pass unsafe names to the name attribute. The compiler translates the
    attribute lookups directly into getattr calls and does *not* use the
    subscript callback of the interface. As exported variables may not
    start with double underscores (which the parser asserts) this is not a
    problem for regular Jinja code, but if this node is used in an extension
    extra care must be taken.

    The list of names may contain tuples if aliases are wanted.
    """

    fields = ("template", "names", "with_context")
    template: "Expr"
    names: list[str | tuple[str, str]]
    with_context: bool
FromImport
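For illustration, a node of this kind can also be built by hand, for example from a Jinja extension; the template name and alias below are invented.

# Hedged sketch: constructing the node directly; names are invented.
from jinja2 import nodes

node = nodes.FromImport(
    nodes.Const("macros/forms.html"),        # template expression
    ["input_field", ("textarea", "ta")],     # a plain name and a (name, alias) tuple
    False,                                   # with_context
    lineno=1,
)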
python
tensorflow__tensorflow
tensorflow/python/util/decorator_utils_test.py
{ "start": 4083, "end": 5567 }
class ____(test.TestCase):

  def testCachedClassProperty(self):
    log = []  # log all calls to `MyClass.value`.

    class MyClass(object):

      @decorator_utils.cached_classproperty
      def value(cls):  # pylint: disable=no-self-argument
        log.append(cls)
        return cls.__name__

    class MySubclass(MyClass):
      pass

    # Property is computed first time it is accessed.
    self.assertLen(log, 0)
    self.assertEqual(MyClass.value, "MyClass")
    self.assertEqual(log, [MyClass])

    # Cached values are used on subsequent accesses.
    self.assertEqual(MyClass.value, "MyClass")
    self.assertEqual(MyClass.value, "MyClass")
    self.assertEqual(log, [MyClass])

    # The wrapped method is called for each subclass.
    self.assertEqual(MySubclass.value, "MySubclass")
    self.assertEqual(log, [MyClass, MySubclass])
    self.assertEqual(MySubclass.value, "MySubclass")
    self.assertEqual(MySubclass.value, "MySubclass")
    self.assertEqual(log, [MyClass, MySubclass])

    # The property can also be accessed via an instance.
    self.assertEqual(MyClass().value, "MyClass")
    self.assertEqual(MySubclass().value, "MySubclass")
    self.assertEqual(log, [MyClass, MySubclass])

    # Attempts to modify the property via an instance will fail.
    with self.assertRaises(AttributeError):
      MyClass().value = 12
    with self.assertRaises(AttributeError):
      del MyClass().value


if __name__ == "__main__":
  test.main()
CachedClassPropertyTest
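The decorator being exercised is not shown in this file; a minimal descriptor that would satisfy the assertions above might look like the sketch below. This is an assumption for illustration, not TensorFlow's actual decorator_utils implementation.

# Hedged sketch of a per-class caching property descriptor; written to match the
# behaviour the test asserts, not copied from decorator_utils.
class cached_classproperty:

  def __init__(self, func):
    self._func = func
    self._cache = {}  # owner class -> computed value

  def __get__(self, obj, objtype=None):
    owner = objtype if objtype is not None else type(obj)
    if owner not in self._cache:
      self._cache[owner] = self._func(owner)
    return self._cache[owner]

  def __set__(self, obj, value):
    raise AttributeError("cached_classproperty is read-only")

  def __delete__(self, obj):
    raise AttributeError("cached_classproperty is read-only")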
python
pallets__werkzeug
src/werkzeug/routing/map.py
{ "start": 14732, "end": 36516 }
class ____: """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does the URL matching and building based on runtime information. """ def __init__( self, map: Map, server_name: str, script_name: str, subdomain: str | None, url_scheme: str, path_info: str, default_method: str, query_args: t.Mapping[str, t.Any] | str | None = None, ): self.map = map self.server_name = server_name if not script_name.endswith("/"): script_name += "/" self.script_name = script_name self.subdomain = subdomain self.url_scheme = url_scheme self.path_info = path_info self.default_method = default_method self.query_args = query_args self.websocket = self.url_scheme in {"ws", "wss"} def dispatch( self, view_func: t.Callable[[str, t.Mapping[str, t.Any]], WSGIApplication], path_info: str | None = None, method: str | None = None, catch_http_exceptions: bool = False, ) -> WSGIApplication: """Does the complete dispatching process. `view_func` is called with the endpoint and a dict with the values for the view. It should look up the view function, call it, and return a response object or WSGI application. http exceptions are not caught by default so that applications can display nicer error messages by just catching them by hand. If you want to stick with the default error messages you can pass it ``catch_http_exceptions=True`` and it will catch the http exceptions. Here a small example for the dispatch usage:: from werkzeug.wrappers import Request, Response from werkzeug.wsgi import responder from werkzeug.routing import Map, Rule def on_index(request): return Response('Hello from the index') url_map = Map([Rule('/', endpoint='index')]) views = {'index': on_index} @responder def application(environ, start_response): request = Request(environ) urls = url_map.bind_to_environ(environ) return urls.dispatch(lambda e, v: views[e](request, **v), catch_http_exceptions=True) Keep in mind that this method might return exception objects, too, so use :class:`Response.force_type` to get a response object. :param view_func: a function that is called with the endpoint as first argument and the value dict as second. Has to dispatch to the actual view function with this information. (see above) :param path_info: the path info to use for matching. Overrides the path info specified on binding. :param method: the HTTP method used for matching. Overrides the method specified on binding. :param catch_http_exceptions: set to `True` to catch any of the werkzeug :class:`HTTPException`\\s. """ try: try: endpoint, args = self.match(path_info, method) except RequestRedirect as e: return e return view_func(endpoint, args) except HTTPException as e: if catch_http_exceptions: return e raise @t.overload def match( self, path_info: str | None = None, method: str | None = None, return_rule: t.Literal[False] = False, query_args: t.Mapping[str, t.Any] | str | None = None, websocket: bool | None = None, ) -> tuple[t.Any, t.Mapping[str, t.Any]]: ... @t.overload def match( self, path_info: str | None = None, method: str | None = None, return_rule: t.Literal[True] = True, query_args: t.Mapping[str, t.Any] | str | None = None, websocket: bool | None = None, ) -> tuple[Rule, t.Mapping[str, t.Any]]: ... 
def match( self, path_info: str | None = None, method: str | None = None, return_rule: bool = False, query_args: t.Mapping[str, t.Any] | str | None = None, websocket: bool | None = None, ) -> tuple[t.Any | Rule, t.Mapping[str, t.Any]]: """The usage is simple: you just pass the match method the current path info as well as the method (which defaults to `GET`). The following things can then happen: - you receive a `NotFound` exception that indicates that no URL is matching. A `NotFound` exception is also a WSGI application you can call to get a default page not found page (happens to be the same object as `werkzeug.exceptions.NotFound`) - you receive a `MethodNotAllowed` exception that indicates that there is a match for this URL but not for the current request method. This is useful for RESTful applications. - you receive a `RequestRedirect` exception with a `new_url` attribute. This exception is used to notify you about a request Werkzeug requests from your WSGI application. This is for example the case if you request ``/foo`` although the correct URL is ``/foo/`` You can use the `RequestRedirect` instance as response-like object similar to all other subclasses of `HTTPException`. - you receive a ``WebsocketMismatch`` exception if the only match is a WebSocket rule but the bind is an HTTP request, or if the match is an HTTP rule but the bind is a WebSocket request. - you get a tuple in the form ``(endpoint, arguments)`` if there is a match (unless `return_rule` is True, in which case you get a tuple in the form ``(rule, arguments)``) If the path info is not passed to the match method the default path info of the map is used (defaults to the root URL if not defined explicitly). All of the exceptions raised are subclasses of `HTTPException` so they can be used as WSGI responses. They will all render generic error or redirect pages. Here is a small example for matching: >>> m = Map([ ... Rule('/', endpoint='index'), ... Rule('/downloads/', endpoint='downloads/index'), ... Rule('/downloads/<int:id>', endpoint='downloads/show') ... ]) >>> urls = m.bind("example.com", "/") >>> urls.match("/", "GET") ('index', {}) >>> urls.match("/downloads/42") ('downloads/show', {'id': 42}) And here is what happens on redirect and missing URLs: >>> urls.match("/downloads") Traceback (most recent call last): ... RequestRedirect: http://example.com/downloads/ >>> urls.match("/missing") Traceback (most recent call last): ... NotFound: 404 Not Found :param path_info: the path info to use for matching. Overrides the path info specified on binding. :param method: the HTTP method used for matching. Overrides the method specified on binding. :param return_rule: return the rule that matched instead of just the endpoint (defaults to `False`). :param query_args: optional query arguments that are used for automatic redirects as string or dictionary. It's currently not possible to use the query arguments for URL matching. :param websocket: Match WebSocket instead of HTTP requests. A websocket request has a ``ws`` or ``wss`` :attr:`url_scheme`. This overrides that detection. .. versionadded:: 1.0 Added ``websocket``. .. versionchanged:: 0.8 ``query_args`` can be a string. .. versionadded:: 0.7 Added ``query_args``. .. versionadded:: 0.6 Added ``return_rule``. 
""" self.map.update() if path_info is None: path_info = self.path_info if query_args is None: query_args = self.query_args or {} method = (method or self.default_method).upper() if websocket is None: websocket = self.websocket domain_part = self.server_name if not self.map.host_matching and self.subdomain is not None: domain_part = self.subdomain path_part = f"/{path_info.lstrip('/')}" if path_info else "" try: result = self.map._matcher.match(domain_part, path_part, method, websocket) except RequestPath as e: # safe = https://url.spec.whatwg.org/#url-path-segment-string new_path = quote(e.path_info, safe="!$&'()*+,/:;=@") raise RequestRedirect( self.make_redirect_url(new_path, query_args) ) from None except RequestAliasRedirect as e: raise RequestRedirect( self.make_alias_redirect_url( f"{domain_part}|{path_part}", e.endpoint, e.matched_values, method, query_args, ) ) from None except NoMatch as e: if e.have_match_for: raise MethodNotAllowed(valid_methods=list(e.have_match_for)) from None if e.websocket_mismatch: raise WebsocketMismatch() from None raise NotFound() from None else: rule, rv = result if self.map.redirect_defaults: redirect_url = self.get_default_redirect(rule, method, rv, query_args) if redirect_url is not None: raise RequestRedirect(redirect_url) if rule.redirect_to is not None: if isinstance(rule.redirect_to, str): def _handle_match(match: t.Match[str]) -> str: value = rv[match.group(1)] return rule._converters[match.group(1)].to_url(value) redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to) else: redirect_url = rule.redirect_to(self, **rv) if self.subdomain: netloc = f"{self.subdomain}.{self.server_name}" else: netloc = self.server_name raise RequestRedirect( urljoin( f"{self.url_scheme or 'http'}://{netloc}{self.script_name}", redirect_url, ) ) if return_rule: return rule, rv else: return rule.endpoint, rv def test(self, path_info: str | None = None, method: str | None = None) -> bool: """Test if a rule would match. Works like `match` but returns `True` if the URL matches, or `False` if it does not exist. :param path_info: the path info to use for matching. Overrides the path info specified on binding. :param method: the HTTP method used for matching. Overrides the method specified on binding. """ try: self.match(path_info, method) except RequestRedirect: pass except HTTPException: return False return True def allowed_methods(self, path_info: str | None = None) -> t.Iterable[str]: """Returns the valid methods that match for a given path. .. versionadded:: 0.7 """ try: self.match(path_info, method="--") except MethodNotAllowed as e: return e.valid_methods # type: ignore except HTTPException: pass return [] def get_host(self, domain_part: str | None) -> str: """Figures out the full host name for the given domain part. The domain part is a subdomain in case host matching is disabled or a full host name. """ if self.map.host_matching: if domain_part is None: return self.server_name return domain_part if domain_part is None: subdomain = self.subdomain else: subdomain = domain_part if subdomain: return f"{subdomain}.{self.server_name}" else: return self.server_name def get_default_redirect( self, rule: Rule, method: str, values: t.MutableMapping[str, t.Any], query_args: t.Mapping[str, t.Any] | str, ) -> str | None: """A helper that returns the URL to redirect to if it finds one. This is used for default redirecting only. 
:internal: """ assert self.map.redirect_defaults for r in self.map._rules_by_endpoint[rule.endpoint]: # every rule that comes after this one, including ourself # has a lower priority for the defaults. We order the ones # with the highest priority up for building. if r is rule: break if r.provides_defaults_for(rule) and r.suitable_for(values, method): values.update(r.defaults) # type: ignore domain_part, path = r.build(values) # type: ignore return self.make_redirect_url(path, query_args, domain_part=domain_part) return None def encode_query_args(self, query_args: t.Mapping[str, t.Any] | str) -> str: if not isinstance(query_args, str): return _urlencode(query_args) return query_args def make_redirect_url( self, path_info: str, query_args: t.Mapping[str, t.Any] | str | None = None, domain_part: str | None = None, ) -> str: """Creates a redirect URL. :internal: """ if query_args is None: query_args = self.query_args if query_args: query_str = self.encode_query_args(query_args) else: query_str = None scheme = self.url_scheme or "http" host = self.get_host(domain_part) path = "/".join((self.script_name.strip("/"), path_info.lstrip("/"))) return urlunsplit((scheme, host, path, query_str, None)) def make_alias_redirect_url( self, path: str, endpoint: t.Any, values: t.Mapping[str, t.Any], method: str, query_args: t.Mapping[str, t.Any] | str, ) -> str: """Internally called to make an alias redirect URL.""" url = self.build( endpoint, values, method, append_unknown=False, force_external=True ) if query_args: url += f"?{self.encode_query_args(query_args)}" assert url != path, "detected invalid alias setting. No canonical URL found" return url def _partial_build( self, endpoint: t.Any, values: t.Mapping[str, t.Any], method: str | None, append_unknown: bool, ) -> tuple[str, str, bool] | None: """Helper for :meth:`build`. Returns subdomain and path for the rule that accepts this endpoint, values and method. :internal: """ # in case the method is none, try with the default method first if method is None: rv = self._partial_build( endpoint, values, self.default_method, append_unknown ) if rv is not None: return rv # Default method did not match or a specific method is passed. # Check all for first match with matching host. If no matching # host is found, go with first result. first_match = None for rule in self.map._rules_by_endpoint.get(endpoint, ()): if rule.suitable_for(values, method): build_rv = rule.build(values, append_unknown) if build_rv is not None: rv = (build_rv[0], build_rv[1], rule.websocket) if self.map.host_matching: if rv[0] == self.server_name: return rv elif first_match is None: first_match = rv else: return rv return first_match def build( self, endpoint: t.Any, values: t.Mapping[str, t.Any] | None = None, method: str | None = None, force_external: bool = False, append_unknown: bool = True, url_scheme: str | None = None, ) -> str: """Building URLs works pretty much the other way round. Instead of `match` you call `build` and pass it the endpoint and a dict of arguments for the placeholders. The `build` function also accepts an argument called `force_external` which, if you set it to `True` will force external URLs. Per default external URLs (include the server name) will only be used if the target URL is on a different subdomain. >>> m = Map([ ... Rule('/', endpoint='index'), ... Rule('/downloads/', endpoint='downloads/index'), ... Rule('/downloads/<int:id>', endpoint='downloads/show') ... 
]) >>> urls = m.bind("example.com", "/") >>> urls.build("index", {}) '/' >>> urls.build("downloads/show", {'id': 42}) '/downloads/42' >>> urls.build("downloads/show", {'id': 42}, force_external=True) 'http://example.com/downloads/42' Because URLs cannot contain non ASCII data you will always get bytes back. Non ASCII characters are urlencoded with the charset defined on the map instance. Additional values are converted to strings and appended to the URL as URL querystring parameters: >>> urls.build("index", {'q': 'My Searchstring'}) '/?q=My+Searchstring' When processing those additional values, lists are furthermore interpreted as multiple values (as per :py:class:`werkzeug.datastructures.MultiDict`): >>> urls.build("index", {'q': ['a', 'b', 'c']}) '/?q=a&q=b&q=c' Passing a ``MultiDict`` will also add multiple values: >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b')))) '/?p=z&q=a&q=b' If a rule does not exist when building a `BuildError` exception is raised. The build method accepts an argument called `method` which allows you to specify the method you want to have an URL built for if you have different methods for the same endpoint specified. :param endpoint: the endpoint of the URL to build. :param values: the values for the URL to build. Unhandled values are appended to the URL as query parameters. :param method: the HTTP method for the rule if there are different URLs for different methods on the same endpoint. :param force_external: enforce full canonical external URLs. If the URL scheme is not provided, this will generate a protocol-relative URL. :param append_unknown: unknown parameters are appended to the generated URL as query string argument. Disable this if you want the builder to ignore those. :param url_scheme: Scheme to use in place of the bound :attr:`url_scheme`. .. versionchanged:: 2.0 Added the ``url_scheme`` parameter. .. versionadded:: 0.6 Added the ``append_unknown`` parameter. """ self.map.update() if values: if isinstance(values, MultiDict): values = { k: (v[0] if len(v) == 1 else v) for k, v in dict.items(values) if len(v) != 0 } else: # plain dict values = {k: v for k, v in values.items() if v is not None} else: values = {} rv = self._partial_build(endpoint, values, method, append_unknown) if rv is None: raise BuildError(endpoint, values, method, self) domain_part, path, websocket = rv host = self.get_host(domain_part) if url_scheme is None: url_scheme = self.url_scheme # Always build WebSocket routes with the scheme (browsers # require full URLs). If bound to a WebSocket, ensure that HTTP # routes are built with an HTTP scheme. secure = url_scheme in {"https", "wss"} if websocket: force_external = True url_scheme = "wss" if secure else "ws" elif url_scheme: url_scheme = "https" if secure else "http" # shortcut this. if not force_external and ( (self.map.host_matching and host == self.server_name) or (not self.map.host_matching and domain_part == self.subdomain) ): return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}" scheme = f"{url_scheme}:" if url_scheme else "" return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"
MapAdapter
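A compact end-to-end sketch of the bind/match/build cycle the docstrings above describe; the rules, host, and endpoint names are illustrative only.

# Illustrative usage; endpoints and host are invented.
from werkzeug.routing import Map, Rule

url_map = Map([
    Rule("/", endpoint="index"),
    Rule("/user/<int:user_id>", endpoint="user"),
])
adapter = url_map.bind("example.com", "/")   # returns a MapAdapter

assert adapter.match("/user/7") == ("user", {"user_id": 7})
assert adapter.build("user", {"user_id": 7}) == "/user/7"
assert adapter.build("user", {"user_id": 7}, force_external=True) == (
    "http://example.com/user/7"
)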
python
getsentry__sentry
src/sentry/api/endpoints/project_transaction_threshold_override.py
{ "start": 2179, "end": 5880 }
class ____(OrganizationEventsV2EndpointBase): publish_status = { "DELETE": ApiPublishStatus.PRIVATE, "GET": ApiPublishStatus.PRIVATE, "POST": ApiPublishStatus.PRIVATE, } permission_classes = (ProjectTransactionThresholdOverridePermission,) def get_project(self, request: Request, organization): projects = self.get_projects(request, organization) if len(projects) != 1: raise ParseError("Only 1 project per transaction threshold") return projects[0] def get(self, request: Request, organization: Organization) -> Response: if not self.has_feature(organization, request): return self.respond(status=status.HTTP_404_NOT_FOUND) project = self.get_project(request, organization) try: project_threshold = ProjectTransactionThresholdOverride.objects.get( transaction=request.GET.get("transaction"), project_id=project.id, organization_id=organization.id, ) except ProjectTransactionThresholdOverride.DoesNotExist: return Response(status=status.HTTP_404_NOT_FOUND) return Response( serialize( project_threshold, request.user, ), status.HTTP_200_OK, ) def post(self, request: Request, organization) -> Response: if not self.has_feature(organization, request): return self.respond(status=status.HTTP_404_NOT_FOUND) project = self.get_project(request, organization) serializer = ProjectTransactionThresholdOverrideSerializer( data=request.data, context={ "organization": organization, "project": project, }, ) if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) data = serializer.validated_data with transaction.atomic(router.db_for_write(ProjectTransactionThresholdOverride)): ( transaction_threshold, created, ) = ProjectTransactionThresholdOverride.objects.update_or_create( transaction=data["transaction"], project_id=project.id, organization_id=organization.id, defaults={ "threshold": data["threshold"], "metric": data["metric"], "edited_by_id": request.user.id, }, ) return Response( serialize( transaction_threshold, request.user, ), status=status.HTTP_201_CREATED if created else status.HTTP_200_OK, ) def delete(self, request: Request, organization) -> Response: if not self.has_feature(organization, request): return self.respond(status=status.HTTP_404_NOT_FOUND) project = self.get_project(request, organization) transaction = request.data.get("transaction") if not transaction: return Response(status=status.HTTP_400_BAD_REQUEST) try: transaction_threshold = ProjectTransactionThresholdOverride.objects.get( transaction=transaction, project_id=project.id, organization_id=organization.id, ) except ProjectTransactionThresholdOverride.DoesNotExist: return Response(status=status.HTTP_204_NO_CONTENT) transaction_threshold.delete() return Response(status=status.HTTP_204_NO_CONTENT)
ProjectTransactionThresholdOverrideEndpoint
python
numba__llvmlite
llvmlite/ir/types.py
{ "start": 6282, "end": 7676 }
class ____(Type):
    """
    The type for functions.
    """

    def __init__(self, return_type, args, var_arg=False):
        self.return_type = return_type
        self.args = tuple(args)
        self.var_arg = var_arg

    def _to_string(self):
        if self.args:
            strargs = ', '.join([str(a) for a in self.args])
            if self.var_arg:
                return '{0} ({1}, ...)'.format(self.return_type, strargs)
            else:
                return '{0} ({1})'.format(self.return_type, strargs)
        elif self.var_arg:
            return '{0} (...)'.format(self.return_type)
        else:
            return '{0} ()'.format(self.return_type)

    def __eq__(self, other):
        if isinstance(other, FunctionType):
            return (self.return_type == other.return_type and
                    self.args == other.args and
                    self.var_arg == other.var_arg)
        else:
            return False

    def __hash__(self):
        return hash(FunctionType)

    @classmethod
    def from_llvm(cls, typeref, ir_ctx):
        """
        Create from a llvmlite.binding.TypeRef
        """
        params = tuple(x.as_ir(ir_ctx=ir_ctx)
                       for x in typeref.get_function_parameters())
        ret = typeref.get_function_return().as_ir(ir_ctx=ir_ctx)
        is_vararg = typeref.is_function_vararg
        return cls(ret, params, is_vararg)
FunctionType
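A short sketch of how this type is typically used when emitting IR with llvmlite; the module and function names are arbitrary.

# Illustrative sketch; "demo" and "add" are arbitrary names.
from llvmlite import ir

i32 = ir.IntType(32)
fnty = ir.FunctionType(i32, (i32, i32))      # prints as: i32 (i32, i32)
module = ir.Module(name="demo")
func = ir.Function(module, fnty, name="add")

block = func.append_basic_block(name="entry")
builder = ir.IRBuilder(block)
a, b = func.args
builder.ret(builder.add(a, b))

print(module)                                # textual LLVM IR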
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/assets/job/asset_layer.py
{ "start": 790, "end": 987 }
class ____:
    """Data that relates asset-level information to a node in the execution graph."""

    node_handle: NodeHandle
    assets_def: "AssetsDefinition"


@record(checked=False)
AssetLayerData
python
huggingface__transformers
src/transformers/models/mlcd/modular_mlcd.py
{ "start": 7317, "end": 8032 }
class ____(CLIPVisionEmbeddings):
    def __init__(self, config: MLCDVisionConfig):
        super().__init__(config)
        del self.position_embedding

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        # patch_embeds -> shape = [batch, width, grid, grid]
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        return embeddings
MLCDVisionEmbeddings
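Since the forward pass concatenates a single class token with the flattened patch grid, the output sequence length follows directly from the image and patch sizes; the 336/14 figures below are only an example, not values read from MLCDVisionConfig.

# Worked example of the resulting sequence length; the sizes are assumptions.
image_size = 336                    # assumed input resolution
patch_size = 14                     # assumed patch_embedding kernel/stride
grid = image_size // patch_size     # 24 patches per side
num_patches = grid * grid           # 576 patch embeddings
seq_len = 1 + num_patches           # +1 for the expanded class embedding
print(seq_len)                      # 577 -> embeddings.shape == (batch, 577, hidden)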
python
google__pytype
pytype/pytd/codegen/function.py
{ "start": 665, "end": 1354 }
class ____:
  """Internal representation of function parameters."""

  name: str
  type: pytd.Type | None = None
  default: Any = None
  kind: pytd.ParameterKind = pytd.ParameterKind.REGULAR

  def to_pytd(self) -> pytd.Parameter:
    """Return a pytd.Parameter object for a normal argument."""
    if self.default is not None:
      default_type = self.default
      if self.type is None and default_type != pytd.NamedType("NoneType"):
        self.type = default_type
    if self.type is None:
      self.type = pytd.AnythingType()
    optional = self.default is not None
    return pytd.Parameter(self.name, self.type, self.kind, optional, None)


@dataclasses.dataclass(frozen=True)
Param
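A small sketch of how to_pytd behaves for a defaulted parameter, following the logic above; the import path is an assumption about pytype's layout.

# Hedged sketch; assumes `pytd` resolves to the module referenced above.
from pytype.pytd import pytd  # assumed import path

p = Param(name="limit", type=None, default=pytd.NamedType("int"))
param = p.to_pytd()
# The default's type is promoted to the parameter type and optional becomes True:
# pytd.Parameter("limit", NamedType("int"), ParameterKind.REGULAR, True, None)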
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
{ "start": 44330, "end": 45531 }
class ____(sqltypes.VARBINARY, sqltypes.LargeBinary):
    """The MSSQL VARBINARY type.

    This type adds additional features to the core :class:`_types.VARBINARY`
    type, including "deprecate_large_types" mode where either ``VARBINARY(max)``
    or IMAGE is rendered, as well as the SQL Server ``FILESTREAM`` option.

    .. seealso::

        :ref:`mssql_large_type_deprecation`

    """

    __visit_name__ = "VARBINARY"

    def __init__(self, length=None, filestream=False):
        """
        Construct a VARBINARY type.

        :param length: optional, a length for the column for use in DDL
          statements, for those binary types that accept a length, such as
          the MySQL BLOB type.

        :param filestream=False: if True, renders the ``FILESTREAM`` keyword
          in the table definition. In this case ``length`` must be ``None``
          or ``'max'``.

          .. versionadded:: 1.4.31

        """
        self.filestream = filestream
        if self.filestream and length not in (None, "max"):
            raise ValueError(
                "length must be None or 'max' when setting filestream"
            )
        super().__init__(length=length)
VARBINARY
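An illustrative table definition using the FILESTREAM option documented above; the table and column names are invented.

# Illustrative only; table and column names are invented.
from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects.mssql import VARBINARY

metadata = MetaData()
documents = Table(
    "documents",
    metadata,
    Column("id", Integer, primary_key=True),
    # With filestream=True the length must be None or 'max'.
    Column("blob", VARBINARY("max", filestream=True)),
)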
python
tensorflow__tensorflow
third_party/xla/build_tools/ci/build.py
{ "start": 4992, "end": 27776 }
class ____: """Class representing a build of XLA.""" _builds: ClassVar[Dict[BuildType, "Build"]] = {} type_: BuildType repo: str target_patterns: Tuple[str, ...] subcommand: str = "test" configs: Tuple[str, ...] = () build_tag_filters: Tuple[str, ...] = () test_tag_filters: Tuple[str, ...] = () action_env: Dict[str, Any] = dataclasses.field(default_factory=dict) test_env: Dict[str, Any] = dataclasses.field(default_factory=dict) repo_env: Dict[str, Any] = dataclasses.field(default_factory=dict) override_repository: Dict[str, str] = dataclasses.field(default_factory=dict) options: Dict[str, Any] = dataclasses.field(default_factory=dict) startup_options: Dict[str, Any] = dataclasses.field(default_factory=dict) extra_setup_commands: Tuple[List[str], ...] = () def __post_init__(self): # pylint: disable=protected-access assert ( self.type_ not in self.__class__._builds ), "Can't have multiple builds of same BuildType!" assert ( self.repo == "openxla/xla" or self.override_repository ), "Must override repo if repo under test isn't XLA!" self.__class__._builds[self.type_] = self @classmethod def all_builds(cls): return cls._builds def bazel_command( self, subcommand: str = "test", extra_options: Tuple[str, ...] = () ) -> List[str]: """Returns a bazel test command for this build. Args: subcommand: The subcommand to give to bazel. `test` by default. extra_options: Extra options. For now just used to pass in `--nobuild`. Returns: List of command line arguments """ options = _dict_to_cli_options(self.options) startup_options = _dict_to_cli_options(self.startup_options) configs = [f"--config={config}" for config in self.configs] build_tag_filters = ( f"--build_tag_filters={','.join(self.build_tag_filters)}" ) test_tag_filters = f"--test_tag_filters={','.join(self.test_tag_filters)}" action_env = [f"--action_env={k}={v}" for k, v in self.action_env.items()] test_env = [f"--test_env={k}={v}" for k, v in self.test_env.items()] repo_env = [f"--repo_env={k}={v}" for k, v in self.repo_env.items()] override_repository = [ f"--override_repository={k}={v}" for k, v in self.override_repository.items() ] tag_filters = [build_tag_filters, test_tag_filters] all_options = ( tag_filters + configs + action_env + test_env + repo_env + override_repository + options + list(extra_options) ) return [ "bazel", *startup_options, subcommand, *all_options, "--", *self.target_patterns, ] def commands(self) -> List[List[str]]: """Returns list of commands for a build.""" cmds = [] cmds.extend(self.extra_setup_commands) # We really want `bazel fetch` here, but it uses `bazel query` and not # `cquery`, which means that it fails due to config issues that aren't # problems in practice. # TODO(ddunleavy): Remove the condition here. Need to get parallel on the # MacOS VM. 
macos_build = ( self.type_ == BuildType.XLA_MACOS_X86_CPU_KOKORO or self.type_ == BuildType.XLA_MACOS_ARM64_CPU_KOKORO ) windows_build = (self.type_ == BuildType.JAX_WINDOWS_X86_CPU_GITHUB_ACTIONS) if not (macos_build or windows_build): cmds.append( retry( self.bazel_command( subcommand="build", extra_options=("--nobuild",) ) ) ) cmds.append(self.bazel_command(subcommand=self.subcommand)) cmds.append(["bazel", "analyze-profile", "profile.json.gz"]) return cmds def _tag_filters_for_compute_capability( compute_capability: int, ) -> Tuple[str, ...]: """Returns the tag filters for the given compute capability.""" tag_filters = (f"requires-gpu-sm{compute_capability}-only",) for cc in (60, 70, 80, 90, 100): if compute_capability >= cc: tag_filters += (f"requires-gpu-sm{cc}",) else: tag_filters += (f"-requires-gpu-sm{cc}",) tag_filters += (f"-requires-gpu-sm{cc}-only",) tag_filters += ("-requires-gpu-amd",) tag_filters += ("-requires-gpu-intel",) return tag_filters def nvidia_gpu_build_with_compute_capability( *, type_: BuildType, configs: Tuple[str, ...], compute_capability: int, ) -> Build: extra_gpu_tags = _tag_filters_for_compute_capability(compute_capability) return Build( type_=type_, repo="openxla/xla", target_patterns=_XLA_DEFAULT_TARGET_PATTERNS, configs=configs, test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + extra_gpu_tags, build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", "//xla/tsl:ci_build": True, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={"TF_CUDA_COMPUTE_CAPABILITIES": f"{compute_capability/10}"}, extra_setup_commands=(["nvidia-smi"],), ) cpu_x86_tag_filter = ( "-no_oss", "-gpu", "-requires-gpu-nvidia", "-requires-gpu-amd", "-requires-gpu-intel", ) Build( type_=BuildType.XLA_LINUX_X86_CPU_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "nonccl", "rbe_linux_cpu"), target_patterns=_XLA_DEFAULT_TARGET_PATTERNS, build_tag_filters=cpu_x86_tag_filter, test_tag_filters=cpu_x86_tag_filter, options={**_DEFAULT_BAZEL_OPTIONS, "//xla/tsl:ci_build": True}, ) Build( type_=BuildType.XLA_LINUX_X86_CPU_BZLMOD_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "nonccl", "rbe_linux_cpu", "bzlmod"), target_patterns=_XLA_DEFAULT_TARGET_PATTERNS, build_tag_filters=cpu_x86_tag_filter, test_tag_filters=cpu_x86_tag_filter, options={**_DEFAULT_BAZEL_OPTIONS, "//xla/tsl:ci_build": True}, ) cpu_arm_tag_filter = ( "-no_oss", "-gpu", "-requires-gpu-nvidia", "-requires-gpu-amd", "-requires-gpu-intel", "-not_run:arm", ) Build( type_=BuildType.XLA_LINUX_ARM64_CPU_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "rbe_cross_compile_linux_arm64", "nonccl"), target_patterns=_XLA_DEFAULT_TARGET_PATTERNS, options={ **_DEFAULT_BAZEL_OPTIONS, "build_tests_only": True, "//xla/tsl:ci_build": True, }, build_tag_filters=cpu_arm_tag_filter, test_tag_filters=cpu_arm_tag_filter, ) nvidia_gpu_build_with_compute_capability( type_=BuildType.XLA_LINUX_X86_GPU_L4_GITHUB_ACTIONS, configs=("warnings", "rbe_linux_cuda_nvcc"), compute_capability=75, ) oneapi_build_tag_filter = ( "oneapi-only", "requires-gpu-intel", "-requires-gpu-amd", "-requires-gpu-nvidia", "-no_oss", "-cuda-only", "-rocm-only", "-no-oneapi", ) oneapi_test_tag_filter = ( "oneapi-only", # This build of oneAPI backend runs on X86 host without an Intel GPU,so # we are excluding the tests requiring Intel GPU "-requires-gpu-intel", "-requires-gpu-amd", "-requires-gpu-nvidia", "-no_oss", "-cuda-only", 
"-rocm-only", "-no-oneapi", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_ONEAPI_GITHUB_ACTIONS, repo="openxla/xla", configs=( "nonccl", "rbe_linux_cpu", "sycl", "sycl_hermetic", "icpx_clang", ), target_patterns=_XLA_ONEAPI_TARGET_PATTERNS, build_tag_filters=oneapi_build_tag_filter, test_tag_filters=oneapi_test_tag_filter, options={**_DEFAULT_BAZEL_OPTIONS, "//xla/tsl:ci_build": True}, subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_CPU_128_VCPU_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "nonccl", "rbe_linux_cpu"), target_patterns=_XLA_CPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, build_tag_filters=cpu_x86_tag_filter, test_tag_filters=cpu_x86_tag_filter, options={**_DEFAULT_BAZEL_OPTIONS, "//xla/tsl:ci_build": True}, subcommand="build", ) Build( type_=BuildType.XLA_LINUX_ARM64_CPU_48_VCPU_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "rbe_cross_compile_linux_arm64", "nonccl"), target_patterns=_XLA_CPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, options={ **_DEFAULT_BAZEL_OPTIONS, "build_tests_only": False, "//xla/tsl:ci_build": True, }, build_tag_filters=cpu_arm_tag_filter, test_tag_filters=cpu_arm_tag_filter, subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_L4_16_VCPU_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, configs=("warnings", "rbe_linux_cuda_nvcc"), test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=75), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", "//xla/tsl:ci_build": True, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "7.5", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_L4_16_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, configs=("warnings", "rbe_linux_cuda_nvcc"), test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=75), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", "//xla/tsl:ci_build": True, "@local_config_cuda//cuda:include_cuda_libs": False, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "7.5", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_L4_48_VCPU_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "rbe_linux_cuda_nvcc"), target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=75), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", "//xla/tsl:ci_build": True, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "7.5", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_L4_48_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=("warnings", "rbe_linux_cuda_nvcc"), 
target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=75), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", "//xla/tsl:ci_build": True, "@local_config_cuda//cuda:include_cuda_libs": False, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "7.5", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_A4_224_VCPU_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=(), target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=100), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", # Use User Mode and Kernel Mode Drivers pre-installed on the system. "//xla/tsl:ci_build": True, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "10", "HERMETIC_CUDA_VERSION": "12.8.0", "HERMETIC_CUDNN_VERSION": "9.8.0", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) Build( type_=BuildType.XLA_LINUX_X86_GPU_A4_224_VCPU_BENCHMARK_PRESUBMIT_GITHUB_ACTIONS, repo="openxla/xla", configs=(), target_patterns=_XLA_GPU_PRESUBMIT_BENCHMARKS_DEFAULT_TARGET_PATTERNS, test_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ) + _tag_filters_for_compute_capability(compute_capability=100), build_tag_filters=( "-no_oss", "requires-gpu-nvidia", "gpu", "-rocm-only", "-oneapi-only", ), options={ "run_under": "//build_tools/ci:parallel_gpu_execute", # Use User Mode and Kernel Mode Drivers pre-installed on the system. 
"//xla/tsl:ci_build": True, "@local_config_cuda//cuda:include_cuda_libs": False, **_DEFAULT_BAZEL_OPTIONS, }, repo_env={ "TF_CUDA_COMPUTE_CAPABILITIES": "10", "HERMETIC_CUDA_VERSION": "12.8.0", "HERMETIC_CUDNN_VERSION": "9.8.0", }, extra_setup_commands=(["nvidia-smi"],), subcommand="build", ) macos_tag_filter = ( "-no_oss", "-gpu", "-no_mac", "-mac_excluded", "-requires-gpu-nvidia", "-requires-gpu-amd", "-requires-gpu-intel", ) Build( type_=BuildType.XLA_MACOS_X86_CPU_KOKORO, repo="openxla/xla", configs=("nonccl",), target_patterns=( "//xla/...", "-//xla/hlo/experimental/...", "-//xla/python_api/...", "-//xla/python/...", "-//xla/service/gpu/...", ), options={ **_DEFAULT_BAZEL_OPTIONS, "macos_minimum_os": "10.15", "test_tmpdir": "/Volumes/BuildData/bazel_output", "define": "xnn_enable_avxvnniint8=false", "//xla/tsl:ci_build": True, }, build_tag_filters=macos_tag_filter, test_tag_filters=macos_tag_filter, extra_setup_commands=( [ "sudo", "wget", "--no-verbose", "-O", "/usr/local/bin/bazel", "https://github.com/bazelbuild/bazelisk/releases/download/v1.11.0/bazelisk-darwin-amd64", ], ["chmod", "+x", "/usr/local/bin/bazel"], ["bazel", "--version"], # Sanity check due to strange failures ["mkdir", "-p", "/Volumes/BuildData/bazel_output"], ), ) Build( type_=BuildType.XLA_MACOS_ARM64_CPU_KOKORO, repo="openxla/xla", configs=("nonccl",), target_patterns=( "//xla/...", "-//xla/hlo/experimental/...", "-//xla/python_api/...", "-//xla/python/...", "-//xla/service/gpu/...", ), options={ **_DEFAULT_BAZEL_OPTIONS, "macos_minimum_os": "10.15", "test_tmpdir": "/tmpfs/bazel_output", "test_size_filters": "small,medium", "define": "xnn_enable_avxvnniint8=false", "//xla/tsl:ci_build": True, }, build_tag_filters=macos_tag_filter, test_tag_filters=macos_tag_filter, extra_setup_commands=( ["df", "-h"], # Debug "No space left on device" error: b/396611909. 
["bazel", "--version"], # Sanity check due to strange failures ["mkdir", "-p", "/tmpfs/bazel_output"], ), ) Build( type_=BuildType.JAX_LINUX_X86_CPU_GITHUB_ACTIONS, repo="google/jax", configs=("rbe_linux_x86_64",), target_patterns=("//tests:cpu_tests", "//tests:backend_independent_tests"), test_env=dict( JAX_NUM_GENERATED_CASES=25, JAX_SKIP_SLOW_TESTS=1, ), override_repository=dict( xla=f"{_GITHUB_WORKSPACE}/openxla/xla", ), options=_DEFAULT_BAZEL_OPTIONS, repo_env={"HERMETIC_PYTHON_VERSION": "3.12"}, ) Build( type_=BuildType.JAX_WINDOWS_X86_CPU_GITHUB_ACTIONS, repo="google/jax", configs=("rbe_windows_amd64",), target_patterns=("//tests:cpu_tests", "//tests:backend_independent_tests"), test_env=dict( JAX_NUM_GENERATED_CASES=25, JAX_SKIP_SLOW_TESTS=1, ), override_repository=dict( xla=f"{_GITHUB_WORKSPACE}\\openxla\\xla", ), options={**_DEFAULT_BAZEL_OPTIONS, "build_runfile_links": False}, repo_env={"HERMETIC_PYTHON_VERSION": "3.12"}, subcommand="build", startup_options={ "output_base": f"{_GITHUB_WORKSPACE}\\bazel_output_base", }, ) Build( type_=BuildType.JAX_LINUX_X86_GPU_L4_GITHUB_ACTIONS, repo="google/jax", configs=("rbe_linux_x86_64_cuda",), target_patterns=("//tests:gpu_tests", "//tests:backend_independent_tests"), build_tag_filters=("-multiaccelerator",), test_tag_filters=("-multiaccelerator",), test_env=dict( JAX_SKIP_SLOW_TESTS=1, TF_CPP_MIN_LOG_LEVEL=0, JAX_EXCLUDE_TEST_TARGETS="PmapTest.testSizeOverflow", ), override_repository=dict( xla=f"{_GITHUB_WORKSPACE}/openxla/xla", ), options=_DEFAULT_BAZEL_OPTIONS, repo_env={"HERMETIC_PYTHON_VERSION": "3.11"}, extra_setup_commands=(["nvidia-smi"],), ) tensorflow_tag_filters = ( "-no_oss", "-tf_tosa", "-oss_excluded", "-oss_serial", "-tpu", "-benchmark-test", "-v1only", ) tensorflow_cpu_tag_filters = tensorflow_tag_filters + ("-gpu",) tensorflow_gpu_tag_filters = tensorflow_tag_filters + ( "-no_gpu", "-no_gpu_presubmit", "-no_cuda11", "+gpu", ) Build( type_=BuildType.TENSORFLOW_LINUX_X86_CPU_GITHUB_ACTIONS, repo="tensorflow/tensorflow", configs=( "release_cpu_linux", "rbe_linux_cpu", ), target_patterns=( "//tensorflow/compiler/...", "-//tensorflow/compiler/tf2tensorrt/...", "//tensorflow/python/...", "-//tensorflow/python/distribute/...", "-//tensorflow/python/kernel_tests/...", "-//tensorflow/python/data/...", "-//tensorflow/python/compiler/tensorrt/...", ), build_tag_filters=tensorflow_cpu_tag_filters, test_tag_filters=tensorflow_cpu_tag_filters, options=dict( verbose_failures=True, test_output="errors", profile="profile.json.gz", test_lang_filters="cc,py", color="yes", ), override_repository=dict( local_xla=f"{_GITHUB_WORKSPACE}/openxla/xla", ), repo_env={"USE_PYWRAP_RULES": "True"}, extra_setup_commands=( # This is pretty devious - but we have to do some adhoc extra Copybara # work here to get XLA into the shape TF expects. 
b/407638223 # pyformat:disable [ "find", f"{_GITHUB_WORKSPACE}/openxla/xla", "-type", "f", "-exec", "sed", "-i", "s/@local_xla/@local_xla/g", "{}", "+", ], [ "find", f"{_GITHUB_WORKSPACE}/openxla/xla", "-type", "f", "-exec", "sed", "-i", "s/@local_tsl/@local_tsl/g", "{}", "+", ], ), ) Build( type_=BuildType.TENSORFLOW_LINUX_X86_GPU_L4_GITHUB_ACTIONS, repo="tensorflow/tensorflow", configs=( "release_gpu_linux", "rbe_linux_cuda", ), target_patterns=( "//tensorflow/compiler/...", "-//tensorflow/compiler/tf2tensorrt/...", "//tensorflow/python/...", "-//tensorflow/python/distribute/...", "-//tensorflow/python/kernel_tests/...", "-//tensorflow/python/data/...", "-//tensorflow/python/compiler/tensorrt/...", ), build_tag_filters=tensorflow_gpu_tag_filters, test_tag_filters=tensorflow_gpu_tag_filters, override_repository=dict( local_xla=f"{_GITHUB_WORKSPACE}/openxla/xla", ), options=dict( verbose_failures=True, test_output="errors", profile="profile.json.gz", test_lang_filters="cc,py", color="yes", ), repo_env={"USE_PYWRAP_RULES": "True"}, extra_setup_commands=( # This is pretty devious - but we have to do some adhoc extra Copybara # work here to get XLA into the shape TF expects. b/407638223 # pyformat:disable [ "find", f"{_GITHUB_WORKSPACE}/openxla/xla", "-type", "f", "-exec", "sed", "-i", "s/@local_xla/@local_xla/g", "{}", "+", ], [ "find", f"{_GITHUB_WORKSPACE}/openxla/xla", "-type", "f", "-exec", "sed", "-i", "s/@local_tsl/@local_tsl/g", "{}", "+", ], ["nvidia-smi"], ), ) def dump_all_build_commands(): """Used to generate what commands are run for each build.""" # Awkward workaround b/c Build instances are not hashable for build in sorted(Build.all_builds().values(), key=lambda b: str(b.type_)): sys.stdout.write(f"# BEGIN {build.type_}\n") for cmd in build.commands(): sys.stdout.write(" ".join(cmd) + "\n") sys.stdout.write(f"# END {build.type_}\n") def _parse_args(): """Defines flags and parses args.""" parser = argparse.ArgumentParser(allow_abbrev=False) group = parser.add_mutually_exclusive_group(required=True) group.add_argument( "--build", type=BuildType.from_str, choices=list(BuildType), ) group.add_argument( "--dump_commands", action="store_true", ) return parser.parse_args() def main(): logging.basicConfig() logging.getLogger().setLevel(logging.INFO) args = _parse_args() if args.dump_commands: dump_all_build_commands() return else: for cmd in Build.all_builds()[args.build].commands(): sh(cmd) if __name__ == "__main__": main()
Build
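For reference, the _tag_filters_for_compute_capability helper defined above expands to a concrete tuple; tracing the loop for compute_capability=75 gives the value below (derived from the code, not copied from any build log).

# Worked example: _tag_filters_for_compute_capability(75)
expected = (
    "requires-gpu-sm75-only",
    "requires-gpu-sm60",                  # 75 >= 60
    "requires-gpu-sm70",                  # 75 >= 70
    "-requires-gpu-sm80", "-requires-gpu-sm80-only",
    "-requires-gpu-sm90", "-requires-gpu-sm90-only",
    "-requires-gpu-sm100", "-requires-gpu-sm100-only",
    "-requires-gpu-amd",
    "-requires-gpu-intel",
)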
python
ethereum__web3.py
web3/types.py
{ "start": 6530, "end": 6881 }
class ____(SubscriptionResponse):
    result: GethSyncingSubscriptionResult


EthSubscriptionParams = Union[
    BlockTypeSubscriptionResponse,
    TransactionTypeSubscriptionResponse,
    LogsSubscriptionResponse,
    SyncingSubscriptionResponse,
    GethSyncingSubscriptionResponse,
]

RPCId = Optional[Union[int, str]]
GethSyncingSubscriptionResponse
python
apache__airflow
airflow-core/tests/unit/utils/test_retries.py
{ "start": 1090, "end": 3951 }
class ____: def test_retry_db_transaction_with_passing_retries(self): """Test that retries can be passed to decorator""" mock_obj = mock.MagicMock() mock_session = mock.MagicMock() op_error = OperationalError(statement=mock.ANY, params=mock.ANY, orig=mock.ANY) @retry_db_transaction(retries=2) def test_function(session): session.execute("select 1") mock_obj(2) raise op_error with pytest.raises(OperationalError): test_function(session=mock_session) assert mock_obj.call_count == 2 @pytest.mark.db_test @pytest.mark.parametrize("excection_type", [OperationalError, InternalError]) def test_retry_db_transaction_with_default_retries(self, caplog, excection_type: type[DBAPIError]): """Test that by default 3 retries will be carried out""" mock_obj = mock.MagicMock() mock_session = mock.MagicMock() mock_rollback = mock.MagicMock() mock_session.rollback = mock_rollback db_error = excection_type(statement=mock.ANY, params=mock.ANY, orig=mock.ANY) @retry_db_transaction def test_function(session): session.execute("select 1") mock_obj(2) raise db_error caplog.set_level(logging.DEBUG) caplog.clear() with pytest.raises(excection_type): test_function(session=mock_session) for try_no in range(1, 4): assert ( "Running TestRetries.test_retry_db_transaction_with_default_retries.<locals>.test_function " f"with retries. Try {try_no} of 3" in caplog.messages ) assert mock_session.execute.call_count == 3 assert mock_rollback.call_count == 3 mock_rollback.assert_has_calls([mock.call(), mock.call(), mock.call()]) def test_retry_db_transaction_fails_when_used_in_function_without_retry(self): """Test that an error is raised when the decorator is used on a function without session arg""" with pytest.raises(ValueError, match=r"has no `session` argument"): @retry_db_transaction def test_function(): print("hi") raise OperationalError(statement=mock.ANY, params=mock.ANY, orig=mock.ANY) def test_retry_db_transaction_fails_when_session_not_passed(self): """Test that an error is raised when session is not passed to the function""" @retry_db_transaction def test_function(session): session.execute("select 1;") raise OperationalError(statement=mock.ANY, params=mock.ANY, orig=mock.ANY) error_message = rf"session is a required argument for {test_function.__qualname__}" with pytest.raises(TypeError, match=error_message): test_function()
TestRetries
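A hedged sketch of the decorator's intended call pattern outside the tests: it only wraps callables that take a session argument and rolls the session back between attempts. The statement and retry count below are invented, and the import path is assumed from the test module's location.

# Illustrative sketch; the SQL statement and retry count are invented.
from airflow.utils.retries import retry_db_transaction  # assumed import path

@retry_db_transaction(retries=3)
def bump_counter(session):
    # Any OperationalError/InternalError causes a rollback and another attempt.
    session.execute("UPDATE counters SET value = value + 1")

# The session must be passed explicitly, e.g. bump_counter(session=session).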
python
crytic__slither
slither/solc_parsing/variables/top_level_variable.py
{ "start": 468, "end": 1446 }
class ____(VariableDeclarationSolc, CallerContextExpression):
    def __init__(
        self,
        variable: TopLevelVariable,
        variable_data: Dict,
        slither_parser: "SlitherCompilationUnitSolc",
    ) -> None:
        super().__init__(variable, variable_data)
        self._slither_parser = slither_parser

    @property
    def is_compact_ast(self) -> bool:
        return self._slither_parser.is_compact_ast

    @property
    def compilation_unit(self) -> "SlitherCompilationUnit":
        return self._slither_parser.compilation_unit

    def get_key(self) -> str:
        return self._slither_parser.get_key()

    @property
    def slither_parser(self) -> "SlitherCompilationUnitSolc":
        return self._slither_parser

    @property
    def underlying_variable(self) -> TopLevelVariable:
        # Todo: Not sure how to overcome this with mypy
        assert isinstance(self._variable, TopLevelVariable)
        return self._variable
TopLevelVariableSolc
python
pandas-dev__pandas
pandas/tests/frame/methods/test_quantile.py
{ "start": 375, "end": 26936 }
class ____: @pytest.mark.parametrize( "df,expected", [ [ DataFrame( { 0: Series(pd.arrays.SparseArray([1, 2])), 1: Series(pd.arrays.SparseArray([3, 4])), } ), Series([1.5, 3.5], name=0.5), ], [ DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), Series([1.0], name=0.5), ], ], ) def test_quantile_sparse(self, df, expected): # GH#17198 # GH#24600 result = df.quantile() expected = expected.astype("Sparse[float]") tm.assert_series_equal(result, expected) def test_quantile(self, datetime_frame, interp_method, request): interpolation, method = interp_method df = datetime_frame result = df.quantile( 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method ) expected = Series( [np.percentile(df[col], 10) for col in df.columns], index=df.columns, name=0.1, ) if interpolation == "linear": # np.percentile values only comparable to linear interpolation tm.assert_series_equal(result, expected) else: tm.assert_index_equal(result.index, expected.index) assert result.name == expected.name result = df.quantile( 0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method ) expected = Series( [np.percentile(df.loc[date], 90) for date in df.index], index=df.index, name=0.9, ) if interpolation == "linear": # np.percentile values only comparable to linear interpolation tm.assert_series_equal(result, expected) else: tm.assert_index_equal(result.index, expected.index) assert result.name == expected.name def test_empty(self, interp_method): interpolation, method = interp_method q = DataFrame({"x": [], "y": []}).quantile( 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method ) assert np.isnan(q["x"]) and np.isnan(q["y"]) def test_non_numeric_exclusion(self, interp_method, request): interpolation, method = interp_method df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]}) rs = df.quantile( 0.5, numeric_only=True, interpolation=interpolation, method=method ) xp = df.median(numeric_only=True).rename(0.5) if interpolation == "nearest": xp = (xp + 0.5).astype(np.int64) tm.assert_series_equal(rs, xp) def test_axis(self, interp_method): # axis interpolation, method = interp_method df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) result = df.quantile( [0.5, 0.75], axis=1, interpolation=interpolation, method=method ) expected = DataFrame( {1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75] ) if interpolation == "nearest": expected.iloc[0, :] -= 0.5 expected.iloc[1, :] += 0.25 expected = expected.astype(np.int64) tm.assert_frame_equal(result, expected, check_index_type=True) def test_axis_numeric_only_true(self, interp_method): # We may want to break API in the future to change this # so that we exclude non-numeric along the same axis # See GH #7312 interpolation, method = interp_method df = DataFrame([[1, 2, 3], ["a", "b", 4]]) result = df.quantile( 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method ) expected = Series([3.0, 4.0], index=range(2), name=0.5) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) def test_quantile_date_range(self, interp_method): # GH 2460 interpolation, method = interp_method dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific", unit="ns") ser = 
Series(dti) df = DataFrame(ser) result = df.quantile( numeric_only=False, interpolation=interpolation, method=method ) expected = Series( ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" ) tm.assert_series_equal(result, expected) def test_quantile_axis_mixed(self, interp_method): # mixed on axis=1 interpolation, method = interp_method df = DataFrame( { "A": [1, 2, 3], "B": [2.0, 3.0, 4.0], "C": pd.date_range("20130101", periods=3), "D": ["foo", "bar", "baz"], } ) result = df.quantile( 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method ) expected = Series([1.5, 2.5, 3.5], name=0.5) if interpolation == "nearest": expected -= 0.5 tm.assert_series_equal(result, expected) # must raise msg = "'<' not supported between instances of 'Timestamp' and 'float'" with pytest.raises(TypeError, match=msg): df.quantile(0.5, axis=1, numeric_only=False) def test_quantile_axis_parameter(self, interp_method): # GH 9543/9544 interpolation, method = interp_method df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method) expected = Series([2.0, 3.0], index=["A", "B"], name=0.5) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) expected = df.quantile( 0.5, axis="index", interpolation=interpolation, method=method ) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_series_equal(result, expected) result = df.quantile( 0.5, axis="columns", interpolation=interpolation, method=method ) tm.assert_series_equal(result, expected) msg = "No axis named -1 for object type DataFrame" with pytest.raises(ValueError, match=msg): df.quantile(0.1, axis=-1, interpolation=interpolation, method=method) msg = "No axis named column for object type DataFrame" with pytest.raises(ValueError, match=msg): df.quantile(0.1, axis="column") def test_quantile_interpolation(self): # see gh-10174 # interpolation method other than default linear df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) result = df.quantile(0.5, axis=1, interpolation="nearest") expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5) tm.assert_series_equal(result, expected) # cross-check interpolation=nearest results in original dtype exp = np.percentile( np.array([[1, 2, 3], [2, 3, 4]]), 0.5, axis=0, method="nearest", ) expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64") tm.assert_series_equal(result, expected) # float df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3]) result = df.quantile(0.5, axis=1, interpolation="nearest") expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5) tm.assert_series_equal(result, expected) exp = np.percentile( np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]), 0.5, axis=0, method="nearest", ) expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64") tm.assert_series_equal(result, expected) # axis result = df.quantile([0.5, 0.75], axis=1, interpolation="lower") expected = DataFrame( {1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75] ) tm.assert_frame_equal(result, expected) # test degenerate case df = DataFrame({"x": [], "y": []}) q = df.quantile(0.1, axis=0, interpolation="higher") assert np.isnan(q["x"]) 
and np.isnan(q["y"]) # multi df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) result = df.quantile([0.25, 0.5], interpolation="midpoint") # https://github.com/numpy/numpy/issues/7163 expected = DataFrame( [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=["a", "b", "c"], ) tm.assert_frame_equal(result, expected) def test_quantile_interpolation_datetime(self, datetime_frame): # see gh-10174 # interpolation = linear (default case) df = datetime_frame q = df.quantile(0.1, axis=0, numeric_only=True, interpolation="linear") assert q["A"] == np.percentile(df["A"], 10) def test_quantile_interpolation_int(self, int_frame): # see gh-10174 df = int_frame # interpolation = linear (default case) q = df.quantile(0.1) assert q["A"] == np.percentile(df["A"], 10) # test with and without interpolation keyword q1 = df.quantile(0.1, axis=0, interpolation="linear") assert q1["A"] == np.percentile(df["A"], 10) tm.assert_series_equal(q, q1) def test_quantile_multi(self, interp_method): interpolation, method = interp_method df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method) expected = DataFrame( [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], index=[0.25, 0.5], columns=["a", "b", "c"], ) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_frame_equal(result, expected) def test_quantile_multi_axis_1(self, interp_method): interpolation, method = interp_method df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) result = df.quantile( [0.25, 0.5], axis=1, interpolation=interpolation, method=method ) expected = DataFrame( [[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2] ) if interpolation == "nearest": expected = expected.astype(np.int64) tm.assert_frame_equal(result, expected) def test_quantile_multi_empty(self, interp_method): interpolation, method = interp_method result = DataFrame({"x": [], "y": []}).quantile( [0.1, 0.9], axis=0, interpolation=interpolation, method=method ) expected = DataFrame( {"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9] ) tm.assert_frame_equal(result, expected) def test_quantile_datetime(self, unit): dti = pd.to_datetime(["2010", "2011"]).as_unit(unit) df = DataFrame({"a": dti, "b": [0, 5]}) # exclude datetime result = df.quantile(0.5, numeric_only=True) expected = Series([2.5], index=["b"], name=0.5) tm.assert_series_equal(result, expected) # datetime result = df.quantile(0.5, numeric_only=False) expected = Series( [Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5 ) tm.assert_series_equal(result, expected) # datetime w/ multi result = df.quantile([0.5], numeric_only=False) expected = DataFrame( {"a": Timestamp("2010-07-02 12:00:00").as_unit(unit), "b": 2.5}, index=[0.5], ) tm.assert_frame_equal(result, expected) # axis = 1 df["c"] = pd.to_datetime(["2011", "2012"]).as_unit(unit) result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False) expected = Series( [Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")], index=[0, 1], name=0.5, dtype=f"M8[{unit}]", ) tm.assert_series_equal(result, expected) result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False) expected = DataFrame( [[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]], index=[0.5], columns=[0, 1], dtype=f"M8[{unit}]", ) tm.assert_frame_equal(result, expected) # empty when numeric_only=True result = df[["a", "c"]].quantile(0.5, numeric_only=True) expected = 
Series([], index=Index([], dtype="str"), dtype=np.float64, name=0.5) tm.assert_series_equal(result, expected) result = df[["a", "c"]].quantile([0.5], numeric_only=True) expected = DataFrame(index=[0.5], columns=Index([], dtype="str")) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "dtype", [ "datetime64[ns]", "datetime64[ns, US/Pacific]", "timedelta64[ns]", "Period[D]", ], ) def test_quantile_dt64_empty(self, dtype, interp_method): # GH#41544 interpolation, method = interp_method df = DataFrame(columns=["a", "b"], dtype=dtype) res = df.quantile( 0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method ) expected = Series([], index=Index([], dtype="str"), name=0.5, dtype=dtype) tm.assert_series_equal(res, expected) # no columns in result, so no dtype preservation res = df.quantile( [0.5], axis=1, numeric_only=False, interpolation=interpolation, method=method, ) expected = DataFrame(index=[0.5], columns=Index([], dtype="str")) tm.assert_frame_equal(res, expected) @pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]]) def test_quantile_invalid(self, invalid, datetime_frame, interp_method): msg = "percentiles should all be in the interval \\[0, 1\\]" interpolation, method = interp_method with pytest.raises(ValueError, match=msg): datetime_frame.quantile(invalid, interpolation=interpolation, method=method) def test_quantile_box(self, interp_method): interpolation, method = interp_method df = DataFrame( { "A": [ Timestamp("2011-01-01"), Timestamp("2011-01-02"), Timestamp("2011-01-03"), ], "B": [ Timestamp("2011-01-01", tz="US/Eastern"), Timestamp("2011-01-02", tz="US/Eastern"), Timestamp("2011-01-03", tz="US/Eastern"), ], "C": [ pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days"), ], } ) res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = Series( [ Timestamp("2011-01-02"), Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), ], name=0.5, index=["A", "B", "C"], ) tm.assert_series_equal(res, exp) res = df.quantile( [0.5], numeric_only=False, interpolation=interpolation, method=method ) exp = DataFrame( [ [ Timestamp("2011-01-02"), Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), ] ], index=[0.5], columns=["A", "B", "C"], ) tm.assert_frame_equal(res, exp) def test_quantile_box_nat(self): # DatetimeLikeBlock may be consolidated and contain NaT in different loc df = DataFrame( { "A": [ Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02"), Timestamp("2011-01-03"), ], "a": [ Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT, Timestamp("2011-01-03"), ], "B": [ Timestamp("2011-01-01", tz="US/Eastern"), pd.NaT, Timestamp("2011-01-02", tz="US/Eastern"), Timestamp("2011-01-03", tz="US/Eastern"), ], "b": [ Timestamp("2011-01-01", tz="US/Eastern"), Timestamp("2011-01-02", tz="US/Eastern"), pd.NaT, Timestamp("2011-01-03", tz="US/Eastern"), ], "C": [ pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days"), pd.NaT, ], "c": [ pd.NaT, pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days"), ], }, columns=list("AaBbCc"), ) res = df.quantile(0.5, numeric_only=False) exp = Series( [ Timestamp("2011-01-02"), Timestamp("2011-01-02"), Timestamp("2011-01-02", tz="US/Eastern"), Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), pd.Timedelta("2 days"), ], name=0.5, index=list("AaBbCc"), ) tm.assert_series_equal(res, exp) res = df.quantile([0.5], numeric_only=False) exp = DataFrame( [ [ Timestamp("2011-01-02"), 
Timestamp("2011-01-02"), Timestamp("2011-01-02", tz="US/Eastern"), Timestamp("2011-01-02", tz="US/Eastern"), pd.Timedelta("2 days"), pd.Timedelta("2 days"), ] ], index=[0.5], columns=list("AaBbCc"), ) tm.assert_frame_equal(res, exp) def test_quantile_nan(self, interp_method): interpolation, method = interp_method # GH 14357 - float block where some cols have missing values df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)}) df.iloc[-1, 1] = np.nan res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series( [3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5 ) tm.assert_series_equal(res, exp) res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) exp = DataFrame( { "a": [3.0, 4.0], "b": [2.5, 3.25] if interpolation == "linear" else [3.0, 4.0], }, index=[0.5, 0.75], ) tm.assert_frame_equal(res, exp) res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) exp = Series(np.arange(1.0, 6.0), name=0.5) tm.assert_series_equal(res, exp) res = df.quantile( [0.5, 0.75], axis=1, interpolation=interpolation, method=method ) exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75]) if interpolation == "nearest": exp.iloc[1, -1] = np.nan tm.assert_frame_equal(res, exp) # full-nan column df["b"] = np.nan res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([3.0, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75]) tm.assert_frame_equal(res, exp) def test_quantile_nat(self, interp_method, unit): interpolation, method = interp_method # full NaT column df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]}, dtype=f"M8[{unit}]") res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = Series([pd.NaT], index=["a"], name=0.5, dtype=f"M8[{unit}]") tm.assert_series_equal(res, exp) res = df.quantile( [0.5], numeric_only=False, interpolation=interpolation, method=method ) exp = DataFrame({"a": [pd.NaT]}, index=[0.5], dtype=f"M8[{unit}]") tm.assert_frame_equal(res, exp) # mixed non-null / full null column df = DataFrame( { "a": [ Timestamp("2012-01-01"), Timestamp("2012-01-02"), Timestamp("2012-01-03"), ], "b": [pd.NaT, pd.NaT, pd.NaT], }, dtype=f"M8[{unit}]", ) res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = Series( [Timestamp("2012-01-02"), pd.NaT], index=["a", "b"], name=0.5, dtype=f"M8[{unit}]", ) tm.assert_series_equal(res, exp) res = df.quantile( [0.5], numeric_only=False, interpolation=interpolation, method=method ) exp = DataFrame( [[Timestamp("2012-01-02"), pd.NaT]], index=[0.5], columns=["a", "b"], dtype=f"M8[{unit}]", ) tm.assert_frame_equal(res, exp) def test_quantile_empty_no_rows_floats(self, interp_method): interpolation, method = interp_method df = DataFrame(columns=["a", "b"], dtype="float64") res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) res = df.quantile([0.5], interpolation=interpolation, method=method) exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) tm.assert_frame_equal(res, exp) res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) exp = Series([], index=Index([], dtype="str"), dtype="float64", name=0.5) tm.assert_series_equal(res, exp) res = df.quantile([0.5], axis=1, 
interpolation=interpolation, method=method) exp = DataFrame(columns=Index([], dtype="str"), index=[0.5]) tm.assert_frame_equal(res, exp) def test_quantile_empty_no_rows_ints(self, interp_method): interpolation, method = interp_method df = DataFrame(columns=["a", "b"], dtype="int64") res = df.quantile(0.5, interpolation=interpolation, method=method) exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) tm.assert_series_equal(res, exp) def test_quantile_empty_no_rows_dt64(self, interp_method): interpolation, method = interp_method # datetimes df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = Series( [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 ) tm.assert_series_equal(res, exp) # Mixed dt64/dt64tz df["a"] = df["a"].dt.tz_localize("US/Central") res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = exp.astype(object) if interpolation == "nearest": # GH#18463 TODO: would we prefer NaTs here? exp = exp.fillna(np.nan) tm.assert_series_equal(res, exp) # both dt64tz df["b"] = df["b"].dt.tz_localize("US/Central") res = df.quantile( 0.5, numeric_only=False, interpolation=interpolation, method=method ) exp = exp.astype(df["b"].dtype) tm.assert_series_equal(res, exp) def test_quantile_empty_no_columns(self, interp_method): # GH#23925 _get_numeric_data may drop all columns interpolation, method = interp_method df = DataFrame(pd.date_range("1/1/18", periods=5)) df.columns.name = "captain tightpants" result = df.quantile( 0.5, numeric_only=True, interpolation=interpolation, method=method ) expected = Series([], name=0.5, dtype=np.float64) expected.index.name = "captain tightpants" tm.assert_series_equal(result, expected) result = df.quantile( [0.5], numeric_only=True, interpolation=interpolation, method=method ) expected = DataFrame([], index=[0.5]) expected.columns.name = "captain tightpants" tm.assert_frame_equal(result, expected) def test_invalid_method(self): with pytest.raises(ValueError, match="Invalid method: foo"): DataFrame(range(1)).quantile(0.5, method="foo") def test_table_invalid_interpolation(self): with pytest.raises(ValueError, match="Invalid interpolation: foo"): DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo")
TestDataFrameQuantile
python
django__django
tests/check_framework/test_security.py
{ "start": 4779, "end": 5243 }
class ____(SimpleTestCase):
    @override_settings(MIDDLEWARE=[])
    def test_no_csrf_middleware(self):
        """
        Warn if CsrfViewMiddleware isn't in MIDDLEWARE.
        """
        self.assertEqual(csrf.check_csrf_middleware(None), [csrf.W003])

    @override_settings(MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"])
    def test_with_csrf_middleware(self):
        self.assertEqual(csrf.check_csrf_middleware(None), [])
CheckCSRFMiddlewareTest
python
ijl__orjson
test/test_non_str_keys.py
{ "start": 256, "end": 9173 }
class ____: def test_dict_keys_duplicate(self): """ OPT_NON_STR_KEYS serializes duplicate keys """ assert ( orjson.dumps({"1": True, 1: False}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":true,"1":false}' ) def test_dict_keys_int(self): assert ( orjson.dumps({1: True, 2: False}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":true,"2":false}' ) def test_dict_keys_substr(self): assert ( orjson.dumps({SubStr("aaa"): True}, option=orjson.OPT_NON_STR_KEYS) == b'{"aaa":true}' ) def test_dict_keys_substr_passthrough(self): """ OPT_PASSTHROUGH_SUBCLASS does not affect OPT_NON_STR_KEYS """ assert ( orjson.dumps( {SubStr("aaa"): True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_SUBCLASS, ) == b'{"aaa":true}' ) def test_dict_keys_substr_invalid(self): with pytest.raises(orjson.JSONEncodeError): orjson.dumps({SubStr("\ud800"): True}, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_strict(self): """ OPT_NON_STR_KEYS does not respect OPT_STRICT_INTEGER """ assert ( orjson.dumps( {9223372036854775807: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"9223372036854775807":true}' ) def test_dict_keys_int_range_valid_i64(self): """ OPT_NON_STR_KEYS has a i64 range for int, valid """ assert ( orjson.dumps( {9223372036854775807: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"9223372036854775807":true}' ) assert ( orjson.dumps( {-9223372036854775807: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"-9223372036854775807":true}' ) assert ( orjson.dumps( {9223372036854775809: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"9223372036854775809":true}' ) def test_dict_keys_int_range_valid_u64(self): """ OPT_NON_STR_KEYS has a u64 range for int, valid """ assert ( orjson.dumps( {0: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"0":true}' ) assert ( orjson.dumps( {18446744073709551615: True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_STRICT_INTEGER, ) == b'{"18446744073709551615":true}' ) def test_dict_keys_int_range_invalid(self): """ OPT_NON_STR_KEYS has a range of i64::MIN to u64::MAX """ with pytest.raises(orjson.JSONEncodeError): orjson.dumps({-9223372036854775809: True}, option=orjson.OPT_NON_STR_KEYS) with pytest.raises(orjson.JSONEncodeError): orjson.dumps({18446744073709551616: True}, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_float(self): assert ( orjson.dumps({1.1: True, 2.2: False}, option=orjson.OPT_NON_STR_KEYS) == b'{"1.1":true,"2.2":false}' ) def test_dict_keys_inf(self): assert ( orjson.dumps({float("Infinity"): True}, option=orjson.OPT_NON_STR_KEYS) == b'{"null":true}' ) assert ( orjson.dumps({float("-Infinity"): True}, option=orjson.OPT_NON_STR_KEYS) == b'{"null":true}' ) def test_dict_keys_nan(self): assert ( orjson.dumps({float("NaN"): True}, option=orjson.OPT_NON_STR_KEYS) == b'{"null":true}' ) def test_dict_keys_bool(self): assert ( orjson.dumps({True: True, False: False}, option=orjson.OPT_NON_STR_KEYS) == b'{"true":true,"false":false}' ) def test_dict_keys_datetime(self): assert ( orjson.dumps( {datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True}, option=orjson.OPT_NON_STR_KEYS, ) == b'{"2000-01-01T02:03:04.000123":true}' ) def test_dict_keys_datetime_opt(self): assert ( orjson.dumps( {datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_OMIT_MICROSECONDS | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'{"2000-01-01T02:03:04Z":true}' ) def test_dict_keys_datetime_passthrough(self): """ 
OPT_PASSTHROUGH_DATETIME does not affect OPT_NON_STR_KEYS """ assert ( orjson.dumps( {datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True}, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_DATETIME, ) == b'{"2000-01-01T02:03:04.000123":true}' ) def test_dict_keys_uuid(self): """ OPT_NON_STR_KEYS always serializes UUID as keys """ assert ( orjson.dumps( {uuid.UUID("7202d115-7ff3-4c81-a7c1-2a1f067b1ece"): True}, option=orjson.OPT_NON_STR_KEYS, ) == b'{"7202d115-7ff3-4c81-a7c1-2a1f067b1ece":true}' ) def test_dict_keys_date(self): assert ( orjson.dumps( {datetime.date(1970, 1, 1): True}, option=orjson.OPT_NON_STR_KEYS, ) == b'{"1970-01-01":true}' ) def test_dict_keys_time(self): assert ( orjson.dumps( {datetime.time(12, 15, 59, 111): True}, option=orjson.OPT_NON_STR_KEYS, ) == b'{"12:15:59.000111":true}' ) def test_dict_non_str_and_sort_keys(self): assert ( orjson.dumps( { "other": 1, datetime.date(1970, 1, 5): 2, datetime.date(1970, 1, 3): 3, }, option=orjson.OPT_NON_STR_KEYS | orjson.OPT_SORT_KEYS, ) == b'{"1970-01-03":3,"1970-01-05":2,"other":1}' ) @pytest.mark.skipif(pytz is None, reason="pytz optional") def test_dict_keys_time_err(self): """ OPT_NON_STR_KEYS propagates errors in types """ val = datetime.time(12, 15, 59, 111, tzinfo=pytz.timezone("Asia/Shanghai")) with pytest.raises(orjson.JSONEncodeError): orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_str(self): assert ( orjson.dumps({"1": True}, option=orjson.OPT_NON_STR_KEYS) == b'{"1":true}' ) def test_dict_keys_type(self): class Obj: a: str val = Obj() with pytest.raises(orjson.JSONEncodeError): orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS) @pytest.mark.skipif(numpy is None, reason="numpy is not installed") def test_dict_keys_array(self): with pytest.raises(TypeError): _ = {numpy.array([1, 2]): True} # type: ignore def test_dict_keys_dataclass(self): @dataclasses.dataclass class Dataclass: a: str with pytest.raises(TypeError): _ = {Dataclass("a"): True} def test_dict_keys_dataclass_hash(self): @dataclasses.dataclass class Dataclass: a: str def __hash__(self): return 1 obj = {Dataclass("a"): True} with pytest.raises(orjson.JSONEncodeError): orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_list(self): with pytest.raises(TypeError): _ = {[]: True} def test_dict_keys_dict(self): with pytest.raises(TypeError): _ = {{}: True} def test_dict_keys_tuple(self): obj = {(): True} with pytest.raises(orjson.JSONEncodeError): orjson.dumps(obj, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_unknown(self): with pytest.raises(orjson.JSONEncodeError): orjson.dumps({frozenset(): True}, option=orjson.OPT_NON_STR_KEYS) def test_dict_keys_no_str_call(self): class Obj: a: str def __str__(self): return "Obj" val = Obj() with pytest.raises(orjson.JSONEncodeError): orjson.dumps({val: True}, option=orjson.OPT_NON_STR_KEYS)
TestNonStrKeyTests
python
pypa__pip
src/pip/_vendor/packaging/version.py
{ "start": 1012, "end": 1503 }
class ____(NamedTuple):
    epoch: int
    release: tuple[int, ...]
    dev: tuple[str, int] | None
    pre: tuple[str, int] | None
    post: tuple[str, int] | None
    local: LocalType | None


def parse(version: str) -> Version:
    """Parse the given version string.

    >>> parse('1.0.dev1')
    <Version('1.0.dev1')>

    :param version: The version string to parse.

    :raises InvalidVersion: When the version string is not a valid version.
    """
    return Version(version)
_Version
python
django__django
django/core/signing.py
{ "start": 4983, "end": 7869 }
class ____:
    def __init__(
        self, *, key=None, sep=":", salt=None, algorithm=None, fallback_keys=None
    ):
        self.key = key or settings.SECRET_KEY
        self.fallback_keys = (
            fallback_keys
            if fallback_keys is not None
            else settings.SECRET_KEY_FALLBACKS
        )
        self.sep = sep
        self.salt = salt or "%s.%s" % (
            self.__class__.__module__,
            self.__class__.__name__,
        )
        self.algorithm = algorithm or "sha256"
        if _SEP_UNSAFE.match(self.sep):
            raise ValueError(
                "Unsafe Signer separator: %r (cannot be empty or consist of "
                "only A-z0-9-_=)" % sep,
            )

    def signature(self, value, key=None):
        key = key or self.key
        return base64_hmac(self.salt + "signer", value, key, algorithm=self.algorithm)

    def sign(self, value):
        return "%s%s%s" % (value, self.sep, self.signature(value))

    def unsign(self, signed_value):
        if self.sep not in signed_value:
            raise BadSignature('No "%s" found in value' % self.sep)
        value, sig = signed_value.rsplit(self.sep, 1)
        for key in [self.key, *self.fallback_keys]:
            if constant_time_compare(sig, self.signature(value, key)):
                return value
        raise BadSignature('Signature "%s" does not match' % sig)

    def sign_object(self, obj, serializer=JSONSerializer, compress=False):
        """
        Return URL-safe, hmac signed base64 compressed JSON string.

        If compress is True (not the default), check if compressing using zlib
        can save some space. Prepend a '.' to signify compression. This is
        included in the signature, to protect against zip bombs.

        The serializer is expected to return a bytestring.
        """
        data = serializer().dumps(obj)
        # Flag for if it's been compressed or not.
        is_compressed = False

        if compress:
            # Avoid zlib dependency unless compress is being used.
            compressed = zlib.compress(data)
            if len(compressed) < (len(data) - 1):
                data = compressed
                is_compressed = True
        base64d = b64_encode(data).decode()
        if is_compressed:
            base64d = "." + base64d
        return self.sign(base64d)

    def unsign_object(self, signed_obj, serializer=JSONSerializer, **kwargs):
        # Signer.unsign() returns str but base64 and zlib compression operate
        # on bytes.
        base64d = self.unsign(signed_obj, **kwargs).encode()
        decompress = base64d[:1] == b"."
        if decompress:
            # It's compressed; uncompress it first.
            base64d = base64d[1:]
        data = b64_decode(base64d)
        if decompress:
            data = zlib.decompress(data)
        return serializer().loads(data)
Signer
python
great-expectations__great_expectations
tests/data_context/conftest.py
{ "start": 12034, "end": 16743 }
class ____:
    # TODO: GG 08232022 update signature to accept arbitrary content types
    def __init__(
        self,
        json_data: JSONData,
        status_code: int,
        headers: Optional[Dict[str, str]] = None,
        exc_to_raise: Optional[RequestError] = None,
    ) -> None:
        self._json_data = json_data
        self.status_code = status_code
        self.headers = headers or {"content-type": "application/json" if json_data else "text/html"}
        self._exc_to_raise = exc_to_raise

    def json(self):
        if self.headers.get("content-type") == "application/json":
            return self._json_data
        raise json.JSONDecodeError("Uh oh - check content-type", "foobar", 1)

    def raise_for_status(self):
        if self._exc_to_raise:
            raise self._exc_to_raise
        if self.status_code >= 400:
            raise requests.exceptions.HTTPError(f"Mock {self.status_code} HTTPError", response=self)

    def __repr__(self):  # type: ignore[explicit-override] # FIXME
        return f"<Response [{self.status_code}]>"


@pytest.fixture
def mock_response_factory() -> Callable[[JSONData, int, Optional[RequestError]], MockResponse]:
    def _make_mock_response(
        json_data: JSONData,
        status_code: int,
        exc_to_raise: Optional[RequestError] = None,
    ) -> MockResponse:
        return MockResponse(json_data=json_data, status_code=status_code, exc_to_raise=exc_to_raise)

    return _make_mock_response


def basic_fluent_datasource_config() -> dict:
    return {
        "type": "pandas_filesystem",
        "name": "my_fluent_pandas_filesystem_datasource",
        "assets": [
            {
                "name": "my_csv",
                "type": "csv",
            }
        ],
        "base_directory": pathlib.PosixPath("/path/to/trip_data"),
    }


def basic_fluent_datasource() -> Datasource:
    context = gx.get_context(mode="ephemeral")
    datasource = context.data_sources.add_pandas_filesystem(
        name="pandas_filesystem",
        base_directory="/path/to/trip_data",  # type: ignore [arg-type]
    )
    datasource.add_csv_asset(name="my_csv")
    return datasource


@pytest.fixture
def fluent_datasource_config() -> dict:
    return basic_fluent_datasource_config()


@pytest.fixture
def mock_http_unavailable(mock_response_factory: Callable):
    """Mock all request http calls to return a 503 Unavailable response."""

    def mocked_response(*args, **kwargs):
        return MockResponse(
            {"code": 503, "detail": "API is unavailable"},
            503,
        )

    # should have been able to do this by mocking `requests.request` but this didn't work
    with unittest.mock.patch.multiple(
        "requests.Session",
        autospec=True,
        get=unittest.mock.DEFAULT,
        post=unittest.mock.DEFAULT,
        put=unittest.mock.DEFAULT,
        patch=unittest.mock.DEFAULT,
        delete=unittest.mock.DEFAULT,
    ) as mock_requests:
        for name, mock in cast("Dict[str, Mock]", mock_requests).items():
            mock.side_effect = mocked_response
            print(f"Mocking `requests.{name}` with `{mocked_response.__name__}()`")

        yield mock_requests


@pytest.fixture
def checkpoint_config() -> dict:
    checkpoint_config = {
        "name": "oss_test_checkpoint",
        "expectation_suite_name": "oss_test_expectation_suite",
        "validations": [
            {
                "name": None,
                "id": None,
                "expectation_suite_name": "taxi.demo_pass",
                "expectation_suite_id": None,
                "batch_request": None,
            },
            {
                "name": None,
                "id": None,
                "expectation_suite_name": None,
                "expectation_suite_id": None,
                "batch_request": {
                    "datasource_name": "oss_test_datasource",
                    "data_connector_name": "oss_test_data_connector",
                    "data_asset_name": "users",
                },
            },
        ],
        "action_list": [
            {
                "action": {"class_name": "StoreValidationResultAction"},
                "name": "store_validation_result",
            },
        ],
    }
    return checkpoint_config


@pytest.fixture
def mocked_datasource_post_response(
    mock_response_factory: Callable,
    fake_datasource_id: str,
) -> Callable[[], MockResponse]:
    def _mocked_post_response(*args, **kwargs):
        return mock_response_factory(
            {
                "data": {
                    "id": fake_datasource_id,
                }
            },
            201,
        )

    return _mocked_post_response
MockResponse
python
fluentpython__example-code
21-class-metaprog/evaltime.py
{ "start": 527, "end": 992 }
class ____(ClassThree):
    print('<[9]> ClassFour body')

    def method_y(self):
        print('<[10]> ClassFour.method_y')


if __name__ == '__main__':
    print('<[11]> ClassOne tests', 30 * '.')
    one = ClassOne()
    one.method_x()
    print('<[12]> ClassThree tests', 30 * '.')
    three = ClassThree()
    three.method_y()
    print('<[13]> ClassFour tests', 30 * '.')
    four = ClassFour()
    four.method_y()

print('<[14]> evaltime module end')
ClassFour
python
python-openxml__python-docx
tests/parts/test_numbering.py
{ "start": 1768, "end": 2566 }
class ____:
    def it_knows_how_many_numbering_definitions_it_contains(self, len_fixture):
        numbering_definitions, numbering_definition_count = len_fixture
        assert len(numbering_definitions) == numbering_definition_count

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=[0, 1, 2, 3])
    def len_fixture(self, request):
        numbering_definition_count = request.param
        numbering_bldr = a_numbering().with_nsdecls()
        for idx in range(numbering_definition_count):
            numbering_bldr.with_child(a_num())
        numbering_elm = numbering_bldr.element
        numbering_definitions = _NumberingDefinitions(numbering_elm)
        return numbering_definitions, numbering_definition_count
Describe_NumberingDefinitions
python
pola-rs__polars
py-polars/tests/unit/io/test_scan.py
{ "start": 2634, "end": 33717 }
class ____: path: Path df: pl.DataFrame def df_with_chunk_size_limit(df: pl.DataFrame, limit: int) -> pl.DataFrame: return pl.concat( ( df.slice(i * limit, min(limit, df.height - i * limit)) for i in range(ceil(df.height / limit)) ), rechunk=False, ) @pytest.fixture(scope="session") def data_file_single(session_tmp_dir: Path, data_file_extension: str) -> _DataFile: max_rows_per_batch = 727 file_path = (session_tmp_dir / "data").with_suffix(data_file_extension) df = pl.DataFrame( { "sequence": range(10000), } ) assert max_rows_per_batch < df.height _write(df_with_chunk_size_limit(df, max_rows_per_batch), file_path) return _DataFile(path=file_path, df=df) @pytest.fixture(scope="session") def data_file_glob(session_tmp_dir: Path, data_file_extension: str) -> _DataFile: max_rows_per_batch = 200 row_counts = [ 100, 186, 95, 185, 90, 84, 115, 81, 87, 217, 126, 85, 98, 122, 129, 122, 1089, 82, 234, 86, 93, 90, 91, 263, 87, 126, 86, 161, 191, 1368, 403, 192, 102, 98, 115, 81, 111, 305, 92, 534, 431, 150, 90, 128, 152, 118, 127, 124, 229, 368, 81, ] # fmt: skip assert sum(row_counts) == 10000 # Make sure we pad file names with enough zeros to ensure correct # lexicographical ordering. assert len(row_counts) < 100 # Make sure that some of our data frames consist of multiple chunks which # affects the output of certain file formats. assert any(row_count > max_rows_per_batch for row_count in row_counts) df = pl.DataFrame( { "sequence": range(10000), } ) row_offset = 0 for index, row_count in enumerate(row_counts): file_path = (session_tmp_dir / f"data_{index:02}").with_suffix( data_file_extension ) _write( df_with_chunk_size_limit( df.slice(row_offset, row_count), max_rows_per_batch ), file_path, ) row_offset += row_count return _DataFile( path=(session_tmp_dir / "data_*").with_suffix(data_file_extension), df=df ) @pytest.fixture(scope="session", params=["single", "glob"]) def data_file( request: pytest.FixtureRequest, data_file_single: _DataFile, data_file_glob: _DataFile, ) -> _DataFile: if request.param == "single": return data_file_single if request.param == "glob": return data_file_glob raise NotImplementedError() @pytest.mark.write_disk def test_scan( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = _scan(data_file.path, data_file.df.schema).collect() assert_frame_equal(df, data_file.df) @pytest.mark.write_disk def test_scan_with_limit( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = _scan(data_file.path, data_file.df.schema).limit(4483).collect() assert_frame_equal( df, pl.DataFrame( { "sequence": range(4483), } ), ) @pytest.mark.write_disk def test_scan_with_filter( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema) .filter(pl.col("sequence") % 2 == 0) .collect() ) assert_frame_equal( df, pl.DataFrame( { "sequence": (2 * x for x in range(5000)), } ), ) @pytest.mark.write_disk def test_scan_with_filter_and_limit( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema) .filter(pl.col("sequence") % 2 == 0) .limit(4483) .collect() ) assert_frame_equal( df, pl.DataFrame( { "sequence": (2 * x for x in range(4483)), }, ), ) 
@pytest.mark.write_disk def test_scan_with_limit_and_filter( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema) .limit(4483) .filter(pl.col("sequence") % 2 == 0) .collect() ) assert_frame_equal( df, pl.DataFrame( { "sequence": (2 * x for x in range(2242)), }, ), ) @pytest.mark.write_disk def test_scan_with_row_index_and_limit( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema, row_index=_RowIndex()) .limit(4483) .collect() ) assert_frame_equal( df, pl.DataFrame( { "index": range(4483), "sequence": range(4483), }, schema_overrides={"index": pl.UInt32}, ), ) @pytest.mark.write_disk def test_scan_with_row_index_and_filter( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema, row_index=_RowIndex()) .filter(pl.col("sequence") % 2 == 0) .collect() ) assert_frame_equal( df, pl.DataFrame( { "index": (2 * x for x in range(5000)), "sequence": (2 * x for x in range(5000)), }, schema_overrides={"index": pl.UInt32}, ), ) @pytest.mark.write_disk def test_scan_with_row_index_limit_and_filter( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema, row_index=_RowIndex()) .limit(4483) .filter(pl.col("sequence") % 2 == 0) .collect() ) assert_frame_equal( df, pl.DataFrame( { "index": (2 * x for x in range(2242)), "sequence": (2 * x for x in range(2242)), }, schema_overrides={"index": pl.UInt32}, ), ) @pytest.mark.write_disk def test_scan_with_row_index_projected_out( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if data_file.path.suffix == ".csv" and force_async: pytest.skip(reason="async reading of .csv not yet implemented") if force_async: _enable_force_async(monkeypatch) subset = next(iter(data_file.df.schema.keys())) df = ( _scan(data_file.path, data_file.df.schema, row_index=_RowIndex()) .select(subset) .collect() ) assert_frame_equal(df, data_file.df.select(subset)) @pytest.mark.write_disk def test_scan_with_row_index_filter_and_limit( capfd: Any, monkeypatch: pytest.MonkeyPatch, data_file: _DataFile, force_async: bool ) -> None: if data_file.path.suffix == ".csv" and force_async: pytest.skip(reason="async reading of .csv not yet implemented") if force_async: _enable_force_async(monkeypatch) df = ( _scan(data_file.path, data_file.df.schema, row_index=_RowIndex()) .filter(pl.col("sequence") % 2 == 0) .limit(4483) .collect() ) assert_frame_equal( df, pl.DataFrame( { "index": (2 * x for x in range(4483)), "sequence": (2 * x for x in range(4483)), }, schema_overrides={"index": pl.UInt32}, ), ) @pytest.mark.write_disk @pytest.mark.parametrize( ("scan_func", "write_func"), [ (pl.scan_parquet, pl.DataFrame.write_parquet), (pl.scan_ipc, pl.DataFrame.write_ipc), (pl.scan_csv, pl.DataFrame.write_csv), (pl.scan_ndjson, pl.DataFrame.write_ndjson), ], ) @pytest.mark.parametrize( "streaming", [True, False], ) def test_scan_limit_0_does_not_panic( tmp_path: Path, scan_func: Callable[[Any], pl.LazyFrame], write_func: Callable[[pl.DataFrame, Path], None], streaming: bool, ) -> None: tmp_path.mkdir(exist_ok=True) 
path = tmp_path / "data.bin" df = pl.DataFrame({"x": 1}) write_func(df, path) assert_frame_equal( scan_func(path) .head(0) .collect(engine="streaming" if streaming else "in-memory"), df.clear(), ) @pytest.mark.write_disk @pytest.mark.parametrize( ("scan_func", "write_func"), [ (pl.scan_csv, pl.DataFrame.write_csv), (pl.scan_parquet, pl.DataFrame.write_parquet), (pl.scan_ipc, pl.DataFrame.write_ipc), (pl.scan_ndjson, pl.DataFrame.write_ndjson), ], ) @pytest.mark.parametrize( "glob", [True, False], ) def test_scan_directory( tmp_path: Path, scan_func: Callable[..., pl.LazyFrame], write_func: Callable[[pl.DataFrame, Path], None], glob: bool, ) -> None: tmp_path.mkdir(exist_ok=True) dfs: list[pl.DataFrame] = [ pl.DataFrame({"a": [0, 0, 0, 0, 0]}), pl.DataFrame({"a": [1, 1, 1, 1, 1]}), pl.DataFrame({"a": [2, 2, 2, 2, 2]}), ] paths = [ tmp_path / "0.bin", tmp_path / "1.bin", tmp_path / "dir/data.bin", ] for df, path in zip(dfs, paths): path.parent.mkdir(exist_ok=True) write_func(df, path) df = pl.concat(dfs) scan = scan_func if scan_func in [pl.scan_csv, pl.scan_ndjson]: scan = partial(scan, schema=df.schema) if scan_func is pl.scan_parquet: scan = partial(scan, glob=glob) out = scan(tmp_path).collect() assert_frame_equal(out, df) @pytest.mark.write_disk def test_scan_glob_excludes_directories(tmp_path: Path) -> None: for dir in ["dir1", "dir2", "dir3"]: (tmp_path / dir).mkdir() df = pl.DataFrame({"a": [1, 2, 3]}) df.write_parquet(tmp_path / "dir1/data.bin") df.write_parquet(tmp_path / "dir2/data.parquet") df.write_parquet(tmp_path / "data.parquet") assert_frame_equal(pl.scan_parquet(tmp_path / "**/*.bin").collect(), df) assert_frame_equal(pl.scan_parquet(tmp_path / "**/data*.bin").collect(), df) assert_frame_equal( pl.scan_parquet(tmp_path / "**/*").collect(), pl.concat(3 * [df]) ) assert_frame_equal(pl.scan_parquet(tmp_path / "*").collect(), df) @pytest.mark.parametrize("file_name", ["a b", "a %25 b"]) @pytest.mark.write_disk def test_scan_async_whitespace_in_path( tmp_path: Path, monkeypatch: Any, file_name: str ) -> None: monkeypatch.setenv("POLARS_FORCE_ASYNC", "1") tmp_path.mkdir(exist_ok=True) path = tmp_path / f"{file_name}.parquet" df = pl.DataFrame({"x": 1}) df.write_parquet(path) assert_frame_equal(pl.scan_parquet(path).collect(), df) assert_frame_equal(pl.scan_parquet(tmp_path).collect(), df) assert_frame_equal(pl.scan_parquet(tmp_path / "*").collect(), df) assert_frame_equal(pl.scan_parquet(tmp_path / "*.parquet").collect(), df) path.unlink() @pytest.mark.write_disk def test_path_expansion_excludes_empty_files_17362(tmp_path: Path) -> None: tmp_path.mkdir(exist_ok=True) df = pl.DataFrame({"x": 1}) df.write_parquet(tmp_path / "data.parquet") (tmp_path / "empty").touch() assert_frame_equal(pl.scan_parquet(tmp_path).collect(), df) assert_frame_equal(pl.scan_parquet(tmp_path / "*").collect(), df) @pytest.mark.write_disk def test_path_expansion_empty_directory_does_not_panic(tmp_path: Path) -> None: tmp_path.mkdir(exist_ok=True) with pytest.raises(pl.exceptions.ComputeError): pl.scan_parquet(tmp_path).collect() with pytest.raises(pl.exceptions.ComputeError): pl.scan_parquet(tmp_path / "**/*").collect() @pytest.mark.write_disk def test_scan_single_dir_differing_file_extensions_raises_17436(tmp_path: Path) -> None: tmp_path.mkdir(exist_ok=True) df = pl.DataFrame({"x": 1}) df.write_parquet(tmp_path / "data.parquet") df.write_ipc(tmp_path / "data.ipc") with pytest.raises( pl.exceptions.InvalidOperationError, match="different file extensions" ): pl.scan_parquet(tmp_path).collect() for lf in [ 
pl.scan_parquet(tmp_path / "*.parquet"), pl.scan_ipc(tmp_path / "*.ipc"), ]: assert_frame_equal(lf.collect(), df) # Ensure passing a glob doesn't trigger file extension checking with pytest.raises( pl.exceptions.ComputeError, match="parquet: File out of specification: The file must end with PAR1", ): pl.scan_parquet(tmp_path / "*").collect() @pytest.mark.parametrize("format", ["parquet", "csv", "ndjson", "ipc"]) def test_scan_nonexistent_path(format: str) -> None: path_str = f"my-nonexistent-data.{format}" path = Path(path_str) assert not path.exists() scan_function = getattr(pl, f"scan_{format}") # Just calling the scan function should not raise any errors result = scan_function(path) assert isinstance(result, pl.LazyFrame) # Upon collection, it should fail with pytest.raises(FileNotFoundError): result.collect() @pytest.mark.write_disk @pytest.mark.parametrize( ("scan_func", "write_func"), [ (pl.scan_parquet, pl.DataFrame.write_parquet), (pl.scan_ipc, pl.DataFrame.write_ipc), (pl.scan_csv, pl.DataFrame.write_csv), (pl.scan_ndjson, pl.DataFrame.write_ndjson), ], ) @pytest.mark.parametrize( "streaming", [True, False], ) def test_scan_include_file_paths( tmp_path: Path, scan_func: Callable[..., pl.LazyFrame], write_func: Callable[[pl.DataFrame, Path], None], streaming: bool, ) -> None: tmp_path.mkdir(exist_ok=True) dfs: list[pl.DataFrame] = [] for x in ["1", "2"]: path = Path(f"{tmp_path}/{x}.bin").absolute() dfs.append(pl.DataFrame({"x": 10 * [x]}).with_columns(path=pl.lit(str(path)))) write_func(dfs[-1].drop("path"), path) df = pl.concat(dfs) assert df.columns == ["x", "path"] with pytest.raises( pl.exceptions.DuplicateError, match=r'column name for file paths "x" conflicts with column name from file', ): scan_func(tmp_path, include_file_paths="x").collect( engine="streaming" if streaming else "in-memory" ) f = scan_func if scan_func in [pl.scan_csv, pl.scan_ndjson]: f = partial(f, schema=df.drop("path").schema) lf: pl.LazyFrame = f(tmp_path, include_file_paths="path") assert_frame_equal(lf.collect(engine="streaming" if streaming else "in-memory"), df) # Test projecting only the path column q = lf.select("path") assert q.collect_schema() == {"path": pl.String} assert_frame_equal( q.collect(engine="streaming" if streaming else "in-memory"), df.select("path"), ) q = q.select("path").head(3) assert q.collect_schema() == {"path": pl.String} assert_frame_equal( q.collect(engine="streaming" if streaming else "in-memory"), df.select("path").head(3), ) # Test predicates for predicate in [pl.col("path") != pl.col("x"), pl.col("path") != ""]: assert_frame_equal( lf.filter(predicate).collect( engine="streaming" if streaming else "in-memory" ), df, ) # Test codepaths that materialize empty DataFrames assert_frame_equal( lf.head(0).collect(engine="streaming" if streaming else "in-memory"), df.head(0), ) @pytest.mark.write_disk def test_async_path_expansion_bracket_17629(tmp_path: Path) -> None: path = tmp_path / "data.parquet" df = pl.DataFrame({"x": 1}) df.write_parquet(path) assert_frame_equal(pl.scan_parquet(tmp_path / "[d]ata.parquet").collect(), df) @pytest.mark.parametrize( "method", ["parquet", "csv", "ipc", "ndjson"], ) @pytest.mark.may_fail_auto_streaming # unsupported negative slice offset -1 for CSV source def test_scan_in_memory(method: str) -> None: f = io.BytesIO() df = pl.DataFrame( { "a": [1, 2, 3], "b": ["x", "y", "z"], } ) (getattr(df, f"write_{method}"))(f) f.seek(0) result = (getattr(pl, f"scan_{method}"))(f).collect() assert_frame_equal(df, result) f.seek(0) result = (getattr(pl, 
f"scan_{method}"))(f).slice(1, 2).collect() assert_frame_equal(df.slice(1, 2), result) f.seek(0) result = (getattr(pl, f"scan_{method}"))(f).slice(-1, 1).collect() assert_frame_equal(df.slice(-1, 1), result) g = io.BytesIO() (getattr(df, f"write_{method}"))(g) f.seek(0) g.seek(0) result = (getattr(pl, f"scan_{method}"))([f, g]).collect() assert_frame_equal(df.vstack(df), result) f.seek(0) g.seek(0) result = (getattr(pl, f"scan_{method}"))([f, g]).slice(1, 2).collect() assert_frame_equal(df.vstack(df).slice(1, 2), result) f.seek(0) g.seek(0) result = (getattr(pl, f"scan_{method}"))([f, g]).slice(-1, 1).collect() assert_frame_equal(df.vstack(df).slice(-1, 1), result) def test_scan_pyobject_zero_copy_buffer_mutate() -> None: f = io.BytesIO() df = pl.DataFrame({"x": [1, 2, 3, 4, 5]}) df.write_ipc(f) f.seek(0) q = pl.scan_ipc(f) assert_frame_equal(q.collect(), df) f.write(b"AAA") assert_frame_equal(q.collect(), df) @pytest.mark.parametrize( "method", ["csv", "ndjson"], ) def test_scan_stringio(method: str) -> None: f = io.StringIO() df = pl.DataFrame( { "a": [1, 2, 3], "b": ["x", "y", "z"], } ) (getattr(df, f"write_{method}"))(f) f.seek(0) result = (getattr(pl, f"scan_{method}"))(f).collect() assert_frame_equal(df, result) g = io.StringIO() (getattr(df, f"write_{method}"))(g) f.seek(0) g.seek(0) result = (getattr(pl, f"scan_{method}"))([f, g]).collect() assert_frame_equal(df.vstack(df), result) def test_scan_double_collect_row_index_invalidates_cached_ir_18892() -> None: lf = pl.scan_csv(io.BytesIO(b"a\n1\n2\n3")) lf.collect() out = lf.with_row_index().collect() assert_frame_equal( out, pl.DataFrame( {"index": [0, 1, 2], "a": [1, 2, 3]}, schema={"index": pl.get_index_type(), "a": pl.Int64}, ), ) def test_scan_include_file_paths_respects_projection_pushdown() -> None: q = pl.scan_csv(b"a,b,c\na1,b1,c1", include_file_paths="path_name").select( ["a", "b"] ) assert_frame_equal(q.collect(), pl.DataFrame({"a": "a1", "b": "b1"})) def test_streaming_scan_csv_include_file_paths_18257(io_files_path: Path) -> None: lf = pl.scan_csv( io_files_path / "foods1.csv", include_file_paths="path", ).select("category", "path") assert lf.collect(engine="streaming").columns == ["category", "path"] def test_streaming_scan_csv_with_row_index_19172(io_files_path: Path) -> None: lf = ( pl.scan_csv(io_files_path / "foods1.csv", infer_schema=False) .with_row_index() .select("calories", "index") .head(1) ) assert_frame_equal( lf.collect(engine="streaming"), pl.DataFrame( {"calories": "45", "index": 0}, schema={"calories": pl.String, "index": pl.get_index_type()}, ), ) @pytest.mark.write_disk def test_predicate_hive_pruning_with_cast(tmp_path: Path) -> None: tmp_path.mkdir(exist_ok=True) df = pl.DataFrame({"x": 1}) (p := (tmp_path / "date=2024-01-01")).mkdir() df.write_parquet(p / "1") (p := (tmp_path / "date=2024-01-02")).mkdir() # Write an invalid parquet file that will cause errors if polars attempts to # read it. # This works because `scan_parquet()` only looks at the first file during # schema inference. 
(p / "1").write_text("not a parquet file") expect = pl.DataFrame({"x": 1, "date": datetime(2024, 1, 1).date()}) lf = pl.scan_parquet(tmp_path) q = lf.filter(pl.col("date") < datetime(2024, 1, 2).date()) assert_frame_equal(q.collect(), expect) # This filter expr with stprtime is effectively what LazyFrame.sql() # generates q = lf.filter( pl.col("date") < pl.lit("2024-01-02").str.strptime( dtype=pl.Date, format="%Y-%m-%d", ambiguous="latest" ) ) assert_frame_equal(q.collect(), expect) q = lf.sql("select * from self where date < '2024-01-02'") print(q.explain()) assert_frame_equal(q.collect(), expect) def test_predicate_stats_eval_nested_binary() -> None: bufs: list[bytes] = [] for i in range(10): b = io.BytesIO() pl.DataFrame({"x": i}).write_parquet(b) b.seek(0) bufs.append(b.read()) assert_frame_equal( ( pl.scan_parquet(bufs) .filter(pl.col("x") % 2 == 0) .collect(optimizations=pl.QueryOptFlags.none()) ), pl.DataFrame({"x": [0, 2, 4, 6, 8]}), ) assert_frame_equal( ( pl.scan_parquet(bufs) # The literal eval depth limit is 4 - # * crates/polars-expr/src/expressions/mod.rs::PhysicalExpr::evaluate_inline .filter(pl.col("x") == pl.lit("222").str.slice(0, 1).cast(pl.Int64)) .collect() ), pl.DataFrame({"x": [2]}), ) @pytest.mark.slow @pytest.mark.parametrize("streaming", [True, False]) def test_scan_csv_bytesio_memory_usage( streaming: bool, # memory_usage_without_pyarrow: MemoryUsage, ) -> None: # memory_usage = memory_usage_without_pyarrow # Create CSV that is ~6-7 MB in size: f = io.BytesIO() df = pl.DataFrame({"mydata": pl.int_range(0, 1_000_000, eager=True)}) df.write_csv(f) # assert 6_000_000 < f.tell() < 7_000_000 f.seek(0, 0) # A lazy scan shouldn't make a full copy of the data: # starting_memory = memory_usage.get_current() assert ( pl.scan_csv(f) .filter(pl.col("mydata") == 999_999) .collect(engine="streaming" if streaming else "in-memory") .item() == 999_999 ) # assert memory_usage.get_peak() - starting_memory < 1_000_000 @pytest.mark.parametrize( "scan_type", [ (pl.DataFrame.write_parquet, pl.scan_parquet), (pl.DataFrame.write_ipc, pl.scan_ipc), (pl.DataFrame.write_csv, pl.scan_csv), (pl.DataFrame.write_ndjson, pl.scan_ndjson), ], ) def test_only_project_row_index(scan_type: tuple[Any, Any]) -> None: write, scan = scan_type f = io.BytesIO() df = pl.DataFrame([pl.Series("a", [1, 2, 3], pl.UInt32)]) write(df, f) f.seek(0) s = scan(f, row_index_name="row_index", row_index_offset=42) assert_frame_equal( s.select("row_index").collect(), pl.DataFrame({"row_index": [42, 43, 44]}), check_dtypes=False, ) @pytest.mark.parametrize( "scan_type", [ (pl.DataFrame.write_parquet, pl.scan_parquet), (pl.DataFrame.write_ipc, pl.scan_ipc), (pl.DataFrame.write_csv, pl.scan_csv), (pl.DataFrame.write_ndjson, pl.scan_ndjson), ], ) def test_only_project_include_file_paths(scan_type: tuple[Any, Any]) -> None: write, scan = scan_type f = io.BytesIO() df = pl.DataFrame([pl.Series("a", [1, 2, 3], pl.UInt32)]) write(df, f) f.seek(0) s = scan(f, include_file_paths="file_path") # The exact value for in-memory buffers is undefined c = s.select("file_path").collect() assert c.height == 3 assert c.columns == ["file_path"] @pytest.mark.parametrize( "scan_type", [ (pl.DataFrame.write_parquet, pl.scan_parquet), pytest.param( (pl.DataFrame.write_ipc, pl.scan_ipc), marks=pytest.mark.xfail( reason="has no allow_missing_columns parameter. https://github.com/pola-rs/polars/issues/21166" ), ), pytest.param( (pl.DataFrame.write_csv, pl.scan_csv), marks=pytest.mark.xfail( reason="has no allow_missing_columns parameter. 
https://github.com/pola-rs/polars/issues/21166" ), ), pytest.param( (pl.DataFrame.write_ndjson, pl.scan_ndjson), marks=pytest.mark.xfail( reason="has no allow_missing_columns parameter. https://github.com/pola-rs/polars/issues/21166" ), ), ], ) def test_only_project_missing(scan_type: tuple[Any, Any]) -> None: write, scan = scan_type f = io.BytesIO() g = io.BytesIO() write( pl.DataFrame( [pl.Series("a", [], pl.UInt32), pl.Series("missing", [], pl.Int32)] ), f, ) write(pl.DataFrame([pl.Series("a", [1, 2, 3], pl.UInt32)]), g) f.seek(0) g.seek(0) s = scan([f, g], missing_columns="insert") assert_frame_equal( s.select("missing").collect(), pl.DataFrame([pl.Series("missing", [None, None, None], pl.Int32)]), ) @pytest.mark.skipif(sys.platform == "win32", reason="windows paths are a mess") @pytest.mark.write_disk @pytest.mark.parametrize( "scan_type", [ (pl.DataFrame.write_parquet, pl.scan_parquet), (pl.DataFrame.write_ipc, pl.scan_ipc), (pl.DataFrame.write_csv, pl.scan_csv), (pl.DataFrame.write_ndjson, pl.scan_ndjson), ], ) def test_async_read_21945(tmp_path: Path, scan_type: tuple[Any, Any]) -> None: f1 = tmp_path / "f1" f2 = tmp_path / "f2" pl.DataFrame({"value": [1, 2]}).write_parquet(f1) pl.DataFrame({"value": [3]}).write_parquet(f2) df = ( pl.scan_parquet(["file://" + str(f1), str(f2)], include_file_paths="foo") .filter(value=1) .collect() ) assert_frame_equal( df, pl.DataFrame({"value": [1], "foo": ["file://" + f1.as_posix()]}) ) @pytest.mark.write_disk @pytest.mark.parametrize("with_str_contains", [False, True]) def test_hive_pruning_str_contains_21706( tmp_path: Path, capfd: Any, monkeypatch: Any, with_str_contains: bool ) -> None: df = pl.DataFrame( { "pdate": [20250301, 20250301, 20250302, 20250302, 20250303, 20250303], "prod_id": ["A1", "A2", "B1", "B2", "C1", "C2"], "price": [11, 22, 33, 44, 55, 66], } ) df.write_parquet(tmp_path, partition_by=["pdate"]) monkeypatch.setenv("POLARS_VERBOSE", "1") f = pl.col("pdate") == 20250303 if with_str_contains: f = f & pl.col("prod_id").str.contains("1") df = pl.scan_parquet(tmp_path, hive_partitioning=True).filter(f).collect() captured = capfd.readouterr().err assert "allows skipping 2 / 3" in captured assert_frame_equal( df, pl.scan_parquet(tmp_path, hive_partitioning=True).collect().filter(f), ) @pytest.mark.skipif( sys.platform == "win32", reason="path characters not valid on Windows" ) @pytest.mark.parametrize( ("scan", "write"), [ (pl.scan_ipc, pl.DataFrame.write_ipc), (pl.scan_parquet, pl.DataFrame.write_parquet), (pl.scan_csv, pl.DataFrame.write_csv), ], ) @pytest.mark.parametrize("file_name", ["%?", "[", "]"]) def test_scan_no_glob_special_chars_23292( tmp_path: Path, file_name: str, scan: Any, write: Any ) -> None: tmp_path.mkdir(exist_ok=True) path = tmp_path / file_name df = pl.DataFrame({"a": 1}) write(df, path) assert_frame_equal(scan(path, glob=False).collect(), df) assert_frame_equal(scan(f"file://{path}", glob=False).collect(), df) @pytest.mark.write_disk @pytest.mark.parametrize( ("scan_function", "failed_message", "name_in_context"), [ ( pl.scan_parquet, "failed to retrieve first file schema (parquet)", "'parquet scan'", ), (pl.scan_ipc, "failed to retrieve first file schema (ipc)", "'ipc scan'"), (pl.scan_csv, "failed to retrieve file schemas (csv)", "'csv scan'"), ( pl.scan_ndjson, "failed to retrieve first file schema (ndjson)", "'ndjson scan'", ), ], ) def test_scan_empty_paths_friendly_error( tmp_path: Path, scan_function: Any, failed_message: str, name_in_context: str, ) -> None: q = scan_function(tmp_path) with 
pytest.raises(pl.exceptions.ComputeError) as exc: q.collect() exc_str = exc.exconly() assert ( f"ComputeError: {failed_message}: expanded paths were empty " "(path expansion input: 'paths: [Local" ) in exc_str assert "glob: true)." in exc_str assert exc_str.count(tmp_path.name) == 1 assert ( name_in_context in exc_str.split( "This error occurred with the following context stack:", maxsplit=1 )[1] ) if scan_function is pl.scan_parquet: assert ( "Hint: passing a schema can allow this scan to succeed with an empty DataFrame." in exc_str ) # Multiple input paths q = scan_function([tmp_path, tmp_path]) with pytest.raises(pl.exceptions.ComputeError) as exc: q.collect() exc_str = exc.exconly() assert ( f"ComputeError: {failed_message}: expanded paths were empty " "(path expansion input: 'paths: [Local" ) in exc_str assert "glob: true)." in exc_str assert exc_str.count(tmp_path.name) == 2 q = scan_function([]) with pytest.raises(pl.exceptions.ComputeError) as exc: q.collect() exc_str = exc.exconly() # There is no "path expansion resulted in" for this error message as the # original input sources were empty. assert f"ComputeError: {failed_message}: empty input: paths: []" in exc_str if scan_function is pl.scan_parquet: assert ( "Hint: passing a schema can allow this scan to succeed with an empty DataFrame." in exc_str ) # TODO: glob parameter not supported in some scan types cx = ( pytest.raises(pl.exceptions.ComputeError, match="glob: false") if ( scan_function is pl.scan_csv or scan_function is pl.scan_parquet or scan_function is pl.scan_ipc ) else pytest.raises(TypeError, match="unexpected keyword argument 'glob'") # type: ignore[arg-type] ) with cx: scan_function(tmp_path, glob=False).collect()
_DataFile