Column schema (dtype and observed range: value range for int64 columns, string-length range for stringlengths columns, number of distinct classes for stringclasses):

| Column | Dtype | Range |
|---|---|---|
| n_words | int64 | 3 – 1.95k |
| n_ast_errors | int64 | 0 – 2 |
| complexity | int64 | 1 – 151 |
| nloc | int64 | 2 – 546 |
| path | stringlengths | 8 – 125 |
| id | int64 | 280 – 339k |
| commit_message | stringlengths | 3 – 18.1k |
| repo | stringlengths | 3 – 28 |
| ast_levels | int64 | 4 – 28 |
| language | stringclasses | 1 value |
| vocab_size | int64 | 3 – 677 |
| file_name | stringlengths | 5 – 67 |
| code | stringlengths | 101 – 24k |
| commit_id | stringlengths | 40 – 40 |
| ast_errors | stringlengths | 0 – 2.76k |
| token_counts | int64 | 7 – 3.77k |
| url | stringlengths | 31 – 61 |
| n_whitespaces | int64 | 4 – 13.9k |
| random_cut | stringlengths | 21 – 13.9k |
| n_identifiers | int64 | 1 – 157 |
| n_ast_nodes | int64 | 10 – 3.6k |
| fun_name | stringlengths | 3 – 72 |
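The preview rows that follow list one value per line in the same column order as the table above; the `ast_errors` field is simply omitted when it is empty. Below is a minimal sketch of how a dataset with this schema could be loaded and filtered with the Hugging Face `datasets` library; the repository id is a placeholder, since this section does not name the dataset.

```python
from datasets import load_dataset

# NOTE: "user/python-functions" is a hypothetical repository id -- the source
# section does not name the dataset, so substitute the real path here.
ds = load_dataset("user/python-functions", split="train")

# The declared features should match the schema table above
# (int64 metrics plus string fields such as code, path, and commit_message).
print(ds.features)

# Example use of the numeric columns: keep small, low-complexity functions.
simple = ds.filter(lambda ex: ex["complexity"] <= 5 and ex["nloc"] <= 20)

# Inspect one record's key fields.
ex = simple[0]
print(ex["repo"], ex["path"], ex["fun_name"])
print(ex["code"][:200])
```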
63
0
9
25
django/contrib/admin/options.py
203,456
Refs #33476 -- Reformatted code with Black.
django
13
Python
47
options.py
def _create_formsets(self, request, obj, change): "Helper function to generate formsets for add/change_view." formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if change: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = self.get_formset_kwargs(request, obj, inline, prefix) formset = FormSet(**formset_params)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
188
https://github.com/django/django.git
192
def _create_formsets(self, request, obj, change): "Helper function to generate formsets for add/change_view." formsets = [] inline_instances = [] prefixes = {} get_formsets_args = [request] if change: get_formsets_args.append(obj) for FormSet, inline in self.get_formsets_with_inlines(*get_formsets_args): prefix = FormSet.get_default_prefix() prefixes[prefix] = prefixes.get(prefix, 0) + 1 if prefixes[prefix] != 1 or not prefix: prefix = "%s-%s" % (prefix, prefixes[prefix]) formset_params = self.get_
19
184
_create_formsets
22
0
2
21
modin/experimental/core/execution/native/implementations/hdk_on_native/interchange/dataframe_protocol/dataframe.py
154,593
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <Poolliver868@mail.ru> Signed-off-by: Andrey Pavlenko <andrey.a.pavlenko@gmail.com>
modin
18
Python
22
dataframe.py
def _yield_chunks(self, chunk_slices) -> "HdkProtocolDataframe": for i in range(len(chunk_slices) - 1): yield HdkProtocolDataframe( df=self._df.take_2d_labels_or_positional( row_positions=range(chunk_slices[i], chunk_slices[i + 1]) ), nan_as_null=self._nan_as_null, allow_copy=self._allow_copy, )
e5b1888cd932909e49194d58035da34b210b91c4
65
https://github.com/modin-project/modin.git
137
def _yield_chunks(self, chunk_slices) -> "HdkProtocolDataframe": for i in range(len(chunk_slices) - 1): yield HdkProtocolDataframe( df=self._df.take_2d_labels_or_positional(
15
101
_yield_chunks
139
1
7
31
jax/_src/lax/control_flow/conditionals.py
122,668
[jax2tf] An alternative support for shape polymorphism for native serialization. jax2tf already supports many cases of shape polymorphism, e.g., those where the shapes of all intermediates can be expressed as polynomials in the dimension variables in the input. We want to achieve the same same coverage, or more, while using StableHLO as the lowering format, rather than tf.Graph. For native serialization we will support two lowering implementations: * one is using the growing support in JAX for dynamic shapes, of which shape polymorphism is a special case. This implementation is enabled with the --jax_dynamic_shapes flag. At the moment, the JAX dynamic shapes support is still incomplete and over 300 jax2tf shape polymorphism tests fail. * a new one (added) here in which we form a Jaxpr using abstract values that express dimension sizes as dimension polynomials (as for the standard jax2tf). Then we lower the Jaxpr to StableHLO. This implementation is enabled when --jax_dynamic_shapes is off. With this implementation only 50 jax2tf tests fail (to be fixed separately). The key contribution here is to enable lowering a Jaxpr that contains dimension polynomials in some of the intermediate shapes. Many lowering rules already have some partial support for Jaxprs where the shapes contain `Var`s. To the extent possible, we try to write lowering rules that should cover both cases of dynamic shapes: Var or polynomials in shapes. The lowering convention is that at top level we collect the sorted list of dimension variable names in the inputs, and we store it in ModuleContext.dim_vars. All IR functions will take N additional prefix arguments of int32 type containing the values of the dimension variables. This is stored as a list of `ir.Value` in `LoweringContext.dim_var_values`. Note that the Jaxprs are not changed to have extra Vars for the dimension variable values. An alternative implementation could work by transforming the Jaxpr to replace dimension polynomials into Vars. The key code pattern used in the lowering rule is:: if not core.is_constant_shape(shape): # Handles both Var, and polynomials shape = mlir.eval_dynamic_shape(ctx, shape) return mhlo.DynamicXXX(..., shape) else: return mhlo.XXX(..., shape) with `mlir.eval_dynamic_shape` handling both cases:: def eval_dynamic_shape(ctx, shape): if config.jax_dynamic_shapes: # Using Var return ... subst using ctx.axis_size_env ... else: # Using polynomials return ... subst using ctx.module_context.dim_vars and ctx.dim_var_values In order to support the above some lowering functions need to take a LoweringContext parameter, e.g., mlir.broadcast_mhlo. I expect that the changes here will improve the --jax_dynamic_shapes coverage as well.
jax
17
Python
104
conditionals.py
def _cond_lowering(ctx, index, *args, branches, linear): del linear # Unused. joined_effects = core.join_effects(*(branch.effects for branch in branches)) ordered_effects = [eff for eff in joined_effects if eff in core.ordered_effects] num_tokens = len(ordered_effects) tokens_in = ctx.tokens_in.subset(ordered_effects) output_token_types = [mlir.token_type() for _ in ordered_effects] output_types = [ *output_token_types, *map(mlir.aval_to_ir_types, ctx.avals_out)] flat_output_types = util.flatten(output_types) # mhlo.CaseOp takes a single argument 'index' and the corresponding blocks # have no arguments; the computation within the block uses implicit # captures. case_op = mhlo.CaseOp(flat_output_types, index=index, num_branches=len(branches)) name_stack = extend_name_stack(ctx.module_context.name_stack, 'cond') for i, jaxpr in enumerate(branches): branch = case_op.regions[i].blocks.append() with ir.InsertionPoint(branch): sub_ctx = ctx.module_context.replace( name_stack=xla.extend_name_stack(name_stack, f'branch_{i}_fun')) out_vals, tokens_out = mlir.jaxpr_subcomp( sub_ctx, jaxpr.jaxpr, tokens_in, map(mlir.ir_constants, jaxpr.consts), *map(mlir.wrap_singleton_ir_values, args), dim_var_values=ctx.dim_var_values) out_tokens = [tokens_out.get(eff) for eff in ordered_effects] out_vals = [*out_tokens, *out_vals] mhlo.ReturnOp(util.flatten(out_vals)) tokens_and_outputs = util.unflatten(case_op.results, map(len, output_types)) tokens, outputs = util.split_list(tokens_and_outputs, [num_tokens]) ctx.set_tokens_out(mlir.TokenSet(zip(ordered_effects, tokens))) return outputs mlir.register_lowering(cond_p, _cond_lowering) @state.register_discharge_rule(cond_p)
8fb344a724075c2b7ea3ec3d4b9dd3ae1d8a0bd7
@state.register_discharge_rule(cond_p)
312
https://github.com/google/jax.git
279
def _cond_lowering(ctx, index, *args, branches, linear): del linear # Unused. joined_effects = core.join_effects(*(branch.effects for branch in branches)) ordered_effects = [eff for eff in joined_effects if eff in core.ordered_effects] num_tokens = len(ordered_effects) tokens_in = ctx.tokens_in.subset(ordered_effects) output_token_types = [mlir.token_type() for _ in ordered_effects] output_types = [ *output_token_types, *map(mlir.aval_to_ir_types, ctx.avals_out)] flat_output_types = util.flatten(output_types) # mhlo.CaseOp takes a single argument 'index' and the corresponding blocks # have no arguments; the computation within the block uses implicit # captures. case_op = mhlo.CaseOp(flat_output_types, index=index, num_branches=len(branches)) name_stack = extend_name_stack(ctx.module_context.name_stack, 'cond') for i, jaxpr in enumerate(branches): branch = case_op.regions[i].blocks.append() with ir.InsertionPoint(branch): sub_ctx = ctx.module_context.replace( name_stack=xla.extend_name_stack(name_stack, f'branch_{i}_fun')) out_vals, tokens_out = mlir.jaxpr_subcomp( sub_ctx, jaxpr.jaxpr, tokens_in, map(mlir.ir_constants, jaxpr.consts), *map(mlir.wrap_singleton_ir_values, args), dim_var_values=ctx.dim_var_values) out_tokens = [tokens_out.get(eff) for eff in ordered_effects] out_vals = [*out_tokens, *out_vals] mhlo.ReturnOp(util.flatten(out_vals)) tokens_and_outputs = util.unflatten(case_op.results, map(len, output_types)) tokens, outputs = util.split_list(tokens_and_outputs, [num_tokens]) ctx.set_tokens_out(mlir.TokenSet(zip(ordered_effects, tokens))) return outputs mlir.register_lowering(cond_p, _cond_lowering) @state.re
69
506
_cond_lowering
515
0
14
138
openbb_terminal/portfolio/portfolio_model.py
286,539
Incorporate portfolio class into SDK (#3401) * create functions to interact with portfolio * fix some docstrings * view docstrings * make portfolio loading available in sdk * reorder some methods * fix bug * update controller * update website * remove import * change input name * regenerate website * change portfolio arg name * fix metrics bugs * fix report * refactor assets alloc * refactor assets sectors alloc * remove unecessary attributes * refactor allocaasset sector * reorganize class * first refactor alloc * refactor portfolio alloc * black * fix alloc bug * regenerate sdk website * fix alloc bugs * forgot this exception * some refactor on portfolio alloc country region * fix some allocation bugs * add examples * regenerate website Co-authored-by: James Maslek <jmaslek11@gmail.com>
OpenBBTerminal
23
Python
269
portfolio_model.py
def __preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions["Ticker"] = self.__transactions["Ticker"].map( lambda x: x.upper() ) self.__transactions["Type"] = self.__transactions["Type"].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 self.__transactions["Signal"] = self.__transactions["Side"].map( lambda x: 1 if x.lower() in ["deposit", "buy"] else (-1 if x.lower() in ["withdrawal", "sell"] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions["Quantity"] = ( abs(self.__transactions["Quantity"]) * self.__transactions["Signal"] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions["Investment"] = ( self.__transactions["Quantity"] * self.__transactions["Price"] + self.__transactions["Fees"] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == "CRYPTO"] self.__transactions.loc[ (self.__transactions.Type == "CRYPTO"), "Ticker" ] = [ f"{crypto}-{currency}" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions["yf_Ticker"] = self.__transactions["ISIN"].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions["yf_Ticker"] == "") | (self.__transactions["yf_Ticker"].isna()) ]["Ticker"].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker: # Invalid ISIN but valid ticker self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = np.nan else: self.__transactions.loc[ self.__transactions["Ticker"] == item, "yf_Ticker" ] = "" removed_tickers.append(item) # Merge reformated tickers into Ticker self.__transactions["Ticker"] = self.__transactions["yf_Ticker"].fillna( self.__transactions["Ticker"] ) p_bar.n += 1 p_bar.refresh() # 9. Remove unsupported ISINs that came out empty self.__transactions.drop( self.__transactions[self.__transactions["Ticker"] == ""].index, inplace=True, ) p_bar.n += 1 p_bar.refresh() # 10. Create tickers dictionary with structure {'Type': [Ticker]} for ticker_type in set(self.__transactions["Type"]): self.tickers[ticker_type] = list( set( self.__transactions[ self.__transactions["Type"].isin([ticker_type]) ]["Ticker"] ) ) p_bar.n += 1 p_bar.refresh() # 11. Create list with tickers except cash self.tickers_list = list(set(self.__transactions["Ticker"])) p_bar.n += 1 p_bar.refresh() # 12. Save transactions inception date self.inception_date = self.__transactions["Date"][0] p_bar.n += 1 p_bar.refresh() # 13. Populate fields Sector, Industry and Country if ( self.__transactions.loc[ self.__transactions["Type"] == "STOCK", optional_fields, ] .isnull() .values.any() ): # If any fields is empty for stocks (overwrites any info there) self.load_company_data() p_bar.n += 1 p_bar.refresh() # Warn user of removed ISINs if removed_tickers: p_bar.disable = True console.print( f"\n[red]The following tickers are not supported and were removed: {removed_tickers}." f"\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN." f"\nSuffix info on 'Yahoo Finance market coverage':" " https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html" f"\nE.g. IWDA -> IWDA.AS[/red]\n" ) except Exception: console.print("\nCould not preprocess transactions.")
8e9e6bd57f4bc5d57ccedfacccda6342d5881266
843
https://github.com/OpenBB-finance/OpenBBTerminal.git
2,772
def __preprocess_transactions(self): p_bar = tqdm(range(14), desc="Preprocessing transactions") try: # 0. If optional fields not in the transactions add missing optional_fields = [ "Sector", "Industry", "Country", "Region", "Fees", "Premium", "ISIN", ] if not set(optional_fields).issubset(set(self.__transactions.columns)): for field in optional_fields: if field not in self.__transactions.columns: self.__transactions[field] = np.nan p_bar.n += 1 p_bar.refresh() # 1. Convert Date to datetime self.__transactions["Date"] = pd.to_datetime(self.__transactions["Date"]) p_bar.n += 1 p_bar.refresh() # 2. Sort transactions by date self.__transactions = self.__transactions.sort_values(by="Date") p_bar.n += 1 p_bar.refresh() # 3. Capitalize Ticker and Type [of instrument...] self.__transactions["Ticker"] = self.__transactions["Ticker"].map( lambda x: x.upper() ) self.__transactions["Type"] = self.__transactions["Type"].map( lambda x: x.upper() ) p_bar.n += 1 p_bar.refresh() # 4. Translate side: ["deposit", "buy"] -> 1 and ["withdrawal", "sell"] -> -1 self.__transactions["Signal"] = self.__transactions["Side"].map( lambda x: 1 if x.lower() in ["deposit", "buy"] else (-1 if x.lower() in ["withdrawal", "sell"] else 0) ) p_bar.n += 1 p_bar.refresh() # 5. Convert quantity to signed integer self.__transactions["Quantity"] = ( abs(self.__transactions["Quantity"]) * self.__transactions["Signal"] ) p_bar.n += 1 p_bar.refresh() # 6. Determining the investment/divestment value self.__transactions["Investment"] = ( self.__transactions["Quantity"] * self.__transactions["Price"] + self.__transactions["Fees"] ) p_bar.n += 1 p_bar.refresh() # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD) crypto_trades = self.__transactions[self.__transactions.Type == "CRYPTO"] self.__transactions.loc[ (self.__transactions.Type == "CRYPTO"), "Ticker" ] = [ f"{crypto}-{currency}" for crypto, currency in zip( crypto_trades.Ticker, crypto_trades.Currency ) ] p_bar.n += 1 p_bar.refresh() # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided. # If isin not valid ticker is empty self.__transactions["yf_Ticker"] = self.__transactions["ISIN"].apply( lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan ) empty_tickers = list( self.__transactions[ (self.__transactions["yf_Ticker"] == "") | (self.__transactions["yf_Ticker"].isna()) ]["Ticker"].unique() ) # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported removed_tickers = [] for item in empty_tickers: with contextlib.redirect_stdout(None): # Suppress yfinance failed download message if occurs valid_ticker = not ( yf.download( item, start=datetime.datetime.now() + datetime.timedelta(days=-5), progress=False, ).empty ) if valid_ticker:
72
1,454
__preprocess_transactions
39
0
1
9
tests/test_table.py
105,970
Save file name in embed_storage (#5285) * save path in embed storage * fix tests * fix more tests * Apply suggestions from code review Co-authored-by: Polina Kazakova <polina@huggingface.co> Co-authored-by: Polina Kazakova <polina@huggingface.co>
datasets
14
Python
26
test_table.py
def test_embed_array_storage_nested(image_file): array = pa.array([[{"bytes": None, "path": image_file}]], type=pa.list_(Image.pa_type)) embedded_images_array = embed_array_storage(array, [Image()]) assert isinstance(embedded_images_array.to_pylist()[0][0]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0][0]["bytes"], bytes) array = pa.array([{"foo": {"bytes": None, "path": image_file}}], type=pa.struct({"foo": Image.pa_type})) embedded_images_array = embed_array_storage(array, {"foo": Image()}) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["path"], str) assert isinstance(embedded_images_array.to_pylist()[0]["foo"]["bytes"], bytes)
494a3d8356e09af6c69ded33dc7f2e1a7d239ab9
179
https://github.com/huggingface/datasets.git
62
def test_embed_array_storage_nested(image_file): array = pa.ar
15
293
test_embed_array_storage_nested
11
0
2
4
saleor/graphql/tests/fixtures.py
29,006
Drop `AnonymouUser` from the context, and assign None instead (#10575) * Fix error when app deleted product added to draft order; Fixes #10574 * Get rid of AnonymousUser from context * Ger rid of AnonymousUser * Drop anonymous_user fixture * Clean events * Fix test_checkout_complete.py file * Drop changelog entry * Update resolver for me query * Apply code review remarks * Apply changes after rebasing with main branch * Fix review remarks * Update create order from checkout tests * Drop remaining uses of is_anonymous Co-authored-by: IKarbowiak <iga.karbowiak@mirumee.com>
saleor
10
Python
10
fixtures.py
def user(self, user): self._user = user if user: self.token = create_access_token(user)
b8598fa2cf84f8bb473f2066f075ad7a374c3c80
23
https://github.com/saleor/saleor.git
35
def user(self, user): s
5
37
user
41
0
3
33
erpnext/crm/report/prospects_engaged_but_not_converted/prospects_engaged_but_not_converted.py
65,767
style: format code with black
erpnext
15
Python
35
prospects_engaged_but_not_converted.py
def get_data(filters): lead_details = [] lead_filters = get_lead_filters(filters) for lead in frappe.get_all( "Lead", fields=["name", "lead_name", "company_name"], filters=lead_filters ): data = frappe.db.sql( , {"lead": lead.name, "limit": filters.get("no_of_interaction")}, ) for lead_info in data: lead_data = [lead.name, lead.lead_name, lead.company_name] + list(lead_info) lead_details.append(lead_data) return lead_details
494bd9ef78313436f0424b918f200dab8fc7c20b
100
https://github.com/frappe/erpnext.git
27
def get_data(filters): lead_details = [] lead_filters = get_le
20
164
get_data
40
0
1
10
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
26,655
Remove list from subsription payload. Use camel case for attached meta (#9519) * Remove list from payload. Use camel case for attached meta * Fix tests
saleor
13
Python
30
test_create_deliveries_for_subscription.py
def test_invoice_requested(fulfilled_order, subscription_invoice_requested_webhook): webhooks = [subscription_invoice_requested_webhook] event_type = WebhookEventAsyncType.INVOICE_REQUESTED invoice = fulfilled_order.invoices.first() invoice_id = graphene.Node.to_global_id("Invoice", invoice.id) deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks) expected_payload = json.dumps({"invoice": {"id": invoice_id}, "meta": None}) assert deliveries[0].payload.payload == expected_payload assert len(deliveries) == len(webhooks) assert deliveries[0].webhook == webhooks[0]
6f37bd256e1258c8effaceeac7a7cf549592eead
103
https://github.com/saleor/saleor.git
66
def test_invoice_requested(fulfilled_order, subscription_invoice_requested_webhook): webhooks = [subscription_invoice_requested_webhook] event_type = WebhookEventAsyncType.INVOICE_REQUESTED invoice = fulfilled_order.invoices.first() invoice_id = graphene.Node.to_global_id("Invoice", invoice.id) deliveries = create_deliveries_for_subscriptions(event_type, invoice, webhooks) expected_payload = json.dumps({"invoice": {"id": invoice_id}, "meta": None}) assert deliveries[0].payload.payload == expected_payload assert len(deliveries) == len(webhooks) assert deliveries[0].webhook == webhooks[0]
23
164
test_invoice_requested
88
0
1
29
test/test_pipeline.py
257,166
ElasticsearchRetriever to BM25Retriever (#2423) * change class names to bm25 * Update Documentation & Code Style * Update Documentation & Code Style * Update Documentation & Code Style * Add back all_terms_must_match * fix syntax * Update Documentation & Code Style * Update Documentation & Code Style * Creating a wrapper for old ES retriever with deprecated wrapper * Update Documentation & Code Style * New method for deprecating old ESRetriever * New attempt for deprecating the ESRetriever * Reverting to the simplest solution - warning logged * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Sara Zan <sara.zanzottera@deepset.ai>
haystack
16
Python
62
test_pipeline.py
def test_generate_code_imports(): pipeline_config = { "version": "master", "components": [ {"name": "DocumentStore", "type": "ElasticsearchDocumentStore"}, {"name": "retri", "type": "BM25Retriever", "params": {"document_store": "DocumentStore"}}, {"name": "retri2", "type": "TfidfRetriever", "params": {"document_store": "DocumentStore"}}, ], "pipelines": [ { "name": "Query", "nodes": [{"name": "retri", "inputs": ["Query"]}, {"name": "retri2", "inputs": ["Query"]}], } ], } code = generate_code(pipeline_config=pipeline_config, pipeline_variable_name="p", generate_imports=True) assert code == ( "from haystack.document_stores import ElasticsearchDocumentStore\n" "from haystack.nodes import BM25Retriever, TfidfRetriever\n" "from haystack.pipelines import Pipeline\n" "\n" "document_store = ElasticsearchDocumentStore()\n" "retri = BM25Retriever(document_store=document_store)\n" "retri_2 = TfidfRetriever(document_store=document_store)\n" "\n" "p = Pipeline()\n" 'p.add_node(component=retri, name="retri", inputs=["Query"])\n' 'p.add_node(component=retri_2, name="retri2", inputs=["Query"])' )
d49e92e21c2f9658039da7e478e62431f801db32
134
https://github.com/deepset-ai/haystack.git
299
def test_generate_code_imports(): pipeline_config = { "version": "master", "components": [ {"name": "DocumentStore", "type": "ElasticsearchDocumentStore"}, {"name": "retri", "type": "BM25Retriever", "params": {"document_store": "DocumentStore"}}, {"name": "retri2", "type": "TfidfRetriever", "params": {"document_store": "DocumentStore"}}, ], "pipelines": [ { "name": "Query", "nodes": [{"name": "retri", "inputs": ["Query"]}, {"name": "retri2", "inputs": ["Query"]}], } ], } code = generate_code(pipeline_config=pipeline_config, pipeline_variable_name="p", generate_imports=True) assert code == ( "from haystack.document_stores import ElasticsearchDocumentStore\n" "from haystack.nodes import BM25Retriever, TfidfRetriever\n" "from haystack.pipelines import Pipeline\n" "\n" "document_store = ElasticsearchDocumentStore()\n" "retri = BM25Retriever(document_store=document_store)\n" "retr
6
284
test_generate_code_imports
12
1
1
9
src/prefect/orion/database/orm_models.py
54,126
Update Block CRUD
prefect
9
Python
12
orm_models.py
def __table_args__(cls): return ( sa.Index( "uq_block__spec_id_name", "block_spec_id", "name", unique=True, ), ) @declarative_mixin
e5bb8b9a899ed05aee5eac4e3d4ae9e90c69d66f
@declarative_mixin
24
https://github.com/PrefectHQ/prefect.git
106
def __table_args__(cls): return ( sa.Index( "uq_block__spec_id_name", "block_spec_
6
44
__table_args__
8
0
1
3
ludwig/datasets/naval/__init__.py
6,046
[cross-port from tf-legacy] Add support for additional tabular datasets to use to validate AutoML (#1722) * [cross-port from tf-legacy] Add support for additional tabular datasets to use to validate AutoML * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address flake8 issues Co-authored-by: Anne Holler <anne@vmware.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
ludwig
9
Python
8
__init__.py
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False): dataset = Naval(cache_dir=cache_dir) return dataset.load(split=split)
6bf9cfcee8ce605bd70dad8f242830b592c6e5dc
28
https://github.com/ludwig-ai/ludwig.git
13
def load(cache_dir=DEFAULT_CACHE_LOCATION, split=False): dataset = Naval(cache_dir=cache_dir) return dataset.load(split=split)
6
44
load
25
0
3
8
django/db/models/query_utils.py
205,804
Refs #33476 -- Reformatted code with Black.
django
9
Python
20
query_utils.py
def register_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "class_lookups" not in cls.__dict__: cls.class_lookups = {} cls.class_lookups[lookup_name] = lookup cls._clear_cached_lookups() return lookup
9c19aff7c7561e3a82978a272ecdaad40dda5c00
50
https://github.com/django/django.git
81
def register_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name
7
80
register_lookup
112
1
2
14
sklearn/datasets/tests/test_openml.py
259,885
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <thomasjpfan@gmail.com> Co-authored-by: Olivier Grisel <olivier.grisel@gmail.com> Co-authored-by: Adrin Jalali <adrin.jalali@gmail.com>
scikit-learn
13
Python
80
test_openml.py
def test_fetch_openml_requires_pandas_in_future(monkeypatch): params = {"as_frame": False, "parser": "auto"} data_id = 1119 try: check_pandas_support("test_fetch_openml_requires_pandas") except ImportError: _monkey_patch_webbased_functions(monkeypatch, data_id, True) warn_msg = ( "From version 1.4, `parser='auto'` with `as_frame=False` will use pandas" ) with pytest.warns(FutureWarning, match=warn_msg): fetch_openml(data_id=data_id, **params) else: raise SkipTest("This test requires pandas to not be installed.") @pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") # TODO(1.4): remove this filterwarning decorator for `parser` @pytest.mark.filterwarnings("ignore:The default value of `parser` will change") @pytest.mark.parametrize( "params, err_msg", [ ( {"parser": "pandas"}, "Sparse ARFF datasets cannot be loaded with parser='pandas'", ), ( {"as_frame": True}, "Sparse ARFF datasets cannot be loaded with as_frame=True.", ), ( {"parser": "pandas", "as_frame": True}, "Sparse ARFF datasets cannot be loaded with as_frame=True.", ), ], )
a47d569e670fd4102af37c3165c9b1ddf6fd3005
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive") # TODO(1.4): remove this filterwarning decorator for `parser` @pytest.mark.filterwarnings("ignore:The default value of `parser` will change") @pytest.mark.parametrize( "params, err_msg", [ ( {"parser": "pandas"}, "Sparse ARFF datasets cannot be loaded with parser='pandas'", ), ( {"as_frame": True}, "Sparse ARFF datasets cannot be loaded with as_frame=True.", ), ( {"parser": "pandas", "as_frame": True}, "Sparse ARFF datasets cannot be loaded with as_frame=True.", ), ], )
70
https://github.com/scikit-learn/scikit-learn.git
306
def test_fetch_openml_requires_pandas_in_future(monkeypatch): params = {"as_frame": False, "parser": "auto"} data_id = 1119 try: check_pandas_support("test_fetch_openml_requires_pandas") except ImportError: _monkey_patc
17
247
test_fetch_openml_requires_pandas_in_future
53
0
1
20
tests/sentry/api/endpoints/test_organization_metric_details.py
97,252
feat(metrics): Support for DM in Details Endpoint (#32744) Adds support for derived metrics in metrics detail endpoint
sentry
15
Python
49
test_organization_metric_details.py
def test_derived_metric_details(self): # 3rd Test: Test for derived metrics when indexer and dataset have data self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status="ok", release="foobar@2.0", ) ) response = self.get_success_response( self.organization.slug, "session.crash_free_rate", ) assert response.data == { "name": "session.crash_free_rate", "type": "numeric", "operations": [], "unit": "percentage", "tags": [{"key": "environment"}, {"key": "release"}, {"key": "session.status"}], }
a3254cf73734a7f6a91a8ab58d5615b82f98a2f9
101
https://github.com/getsentry/sentry.git
260
def test_derived_metric_details(self): # 3rd Test: Test for derived metrics when indexer and dataset have data self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status="ok", release="foobar@2.0", ) ) response = self.get_success_response( self.organization.slug, "session.crash_free_rate", ) assert response.data == { "name":
16
188
test_derived_metric_details
15
0
2
4
tests/orion/test_app.py
54,480
Pass `ephemeral` flag to `create_app` to drop analytics and UI
prefect
13
Python
14
test_app.py
def test_app_generates_correct_api_openapi_schema(): schema = create_app(ephemeral=True).openapi() assert len(schema["paths"].keys()) > 1 assert all([p.startswith("/api/") for p in schema["paths"].keys()])
3d60f99313923009d554cca0f310dc5dd582e22d
54
https://github.com/PrefectHQ/prefect.git
27
def test_app_generates_correct_api_openapi_schema(): schema = create_app(ephemeral=True).openapi() assert len(schema["paths"].keys()) > 1 assert all([p.startswith("/api/") for p in schema["paths"].keys()])
10
95
test_app_generates_correct_api_openapi_schema
167
0
10
45
tests/freqai/test_freqai_interface.py
151,796
fix custom_info
freqtrade
16
Python
112
test_freqai_interface.py
def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) freqai_conf['runmode'] = RunMode.BACKTEST if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") Trade.use_db = False freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180120-20180130"}) freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) strategy.freqai_info = freqai_conf.get("freqai", {}) freqai = strategy.freqai freqai.live = False freqai.dk = FreqaiDataKitchen(freqai_conf) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) sub_timerange = TimeRange.parse_timerange("20180110-20180130") corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") df = freqai.cache_corr_pairlist_dfs(df, freqai.dk) for i in range(5): df[f'%-constant_{i}'] = i metadata = {"pair": "LTC/BTC"} freqai.start_backtesting(df, metadata, freqai.dk) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] assert len(model_folders) == num_files Trade.use_db = True assert log_has_re( "Removed features ", caplog, ) assert log_has_re( "Removed 5 features from prediction features, ", caplog, ) Backtesting.cleanup() shutil.rmtree(Path(freqai.dk.full_path))
62c69bf2b5285196ce80760160712c04b339bad1
377
https://github.com/freqtrade/freqtrade.git
334
def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog): freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) freqai_conf['runmode'] = RunMode.BACKTEST if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM") if is_mac() and 'Reinforcement' in model: pytest.skip("Reinforcement learning module not available on intel based Mac OS") Trade.use_db = False freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180120-20180130"}) freqai_conf.update({"strategy": strat}) if 'ReinforcementLearner' in model: freqai_conf = make_rl_config(freqai_conf) if 'test_4ac' in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) strategy.freqai_info = freqai_conf.get("freqai", {}) freqai = strategy.freqai freqai.live = False freqai.dk = FreqaiDataKitchen(freqai_conf) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) sub_timerange = TimeRange.parse_timerange("20180110-20180130") corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") df = freqai.cache_corr_pairlist_dfs(df, freqai.dk) for i in range(5): df[f'%-constant_{i}'] = i
60
633
test_start_backtesting
21
0
3
4
python/ray/tune/execution/trial_runner.py
142,863
[tune/structure] Introduce execution package (#26015) Execution-specific packages are moved to tune.execution. Co-authored-by: Xiaowei Jiang <xwjiang2010@gmail.com>
ray
12
Python
20
trial_runner.py
def _reconcile_live_trials(self): for trial in list(self._live_trials): # Only for TERMINATED trials. ERRORed trials might be retried. if trial.status == Trial.TERMINATED: self._live_trials.remove(trial)
0959f44b6fc217a4f2766ed46a721eb79b067b2c
33
https://github.com/ray-project/ray.git
72
def _reconcile_live_trials(self): for trial in list(self._live_trials): # Only for TERMINATED trials. ERRORed trials might be retried. if trial.status == Trial.TERMINATE
9
56
_reconcile_live_trials
14
0
3
6
jax/experimental/pjit.py
121,148
Convert everything in pjit to the `Sharding` interface. The following contains the things that have changed in this CL: * All in_axis_resources and out_axis_resources are instances of `Sharding`. When `config.jax_array` is enabled, `in_shardings` is inferred from the inputs. * `out_shardings` are still instances of `MeshPspecSharding` even if `Array` are used. In a follow up CL, I will change out_axis_resources to accept `Sharding` instances. * This is also a reason why you still need a mesh context manager when `config.jax_array` is enabled. * cl/458267790 is WIP for this. It adds a couple of checks in MeshPspecSharding too when `AUTO` is used. * Checking of sharding with `aval` has a handler system to deal with sharding instances. * The reason for creating a `pjit` specific system rather than putting this check on the sharding instances is because each transformation has a different way of checking the sharding. The best example for this is `pjit` and `xmap`. They both have different way to check if an aval is sharded properly with respect to the given sharding because `pjit` and `xmap` has different ways to express sharding. * `MeshPspecSharding` and `SingleDeviceSharding` have `__hash__` and `__eq__`. So now we don't have to pass around canonicalized pspecs in the new path to get cache hits. The `Sharding` instances should handle that for us. * _pjit_lower still depends on mesh which is the major reason why I haven't removed `resource_env` from `params`. But in the interest of keep this CL small (LOL), I'll make those changes in a follow up CL. * Also the private functions in pxla.py are used by pathways and automap so I'll have to modify those too. * Also it has `pxla.resource_typecheck` which I haven't figured out how to move it to sharding interface. * `_to_xla_op_sharding` takes in `axis_ctx` as an extra **optional** parameter. This is required for `with_sharding_constraint`. * `with_sharding_constraint` uses the MLIR `ctx` here: cl/458042998 * `pjit`'s batching handlers add an extra dimension to the axis_resources. Since this is dependent on how each transformation adds the extra dimension and it also differs on how each sharding instance will handle it, I added a handler system for this too. Again `xmap` and `pjit` differ a lot here. This is why I went with the handler approach. * MeshPspecSharding handles this `insert_axis_partitions` on the parsed partition spec. I have added more detailed comments in the place where this is done. PiperOrigin-RevId: 459548974
jax
8
Python
10
pjit.py
def _create_mesh_pspec_sharding(mesh, x): if _is_unspecified(x): return x if _is_from_gda(x): return x return sharding.MeshPspecSharding._from_parsed_pspec(mesh, x)
231495166929be4a6ee3a0fd843858abeeca3694
34
https://github.com/google/jax.git
22
def _create_mesh_pspec_sharding(mesh, x): if _is_unspecified(x): return x if _is_from_gda(x): return x return sharding.MeshPspecSharding._from_parsed_pspec(mesh, x)
8
53
_create_mesh_pspec_sharding
124
0
11
30
keras/engine/base_layer.py
277,253
reduct too long lines
keras
19
Python
90
base_layer.py
def _flatten_modules(self, recursive=True, include_self=True): if include_self: yield self # Only instantiate set and deque if needed. trackables = getattr(self, "_self_tracked_trackables", None) if trackables: seen_object_ids = set() deque = collections.deque(trackables) while deque: trackable_obj = deque.popleft() trackable_id = id(trackable_obj) if trackable_id in seen_object_ids: continue seen_object_ids.add(trackable_id) # Metrics are not considered part of the Layer's topology. if isinstance(trackable_obj, tf.Module) and not isinstance( trackable_obj, metrics_mod.Metric ): yield trackable_obj # Introspect recursively through sublayers. if recursive: subtrackables = getattr( trackable_obj, "_self_tracked_trackables", None ) if subtrackables: deque.extendleft(reversed(subtrackables)) elif isinstance( trackable_obj, tf.__internal__.tracking.TrackableDataStructure, ): # Data structures are introspected even with # `recursive=False`. tracked_values = trackable_obj._values if tracked_values: deque.extendleft(reversed(tracked_values)) # This is a hack so that the is_layer (within # training/trackable/layer_utils.py) check doesn't get the weights attr. # TODO(b/110718070): Remove when fixed.
fa6d9107a498f7c2403ff28c7b389a1a0c5cc083
152
https://github.com/keras-team/keras.git
702
def _flatten_modules(self, recursive=True, include_self=True): if include_self: yield self # Only instantiate set and deque if needed. trackables = getattr(self, "_self_tracked_trackables", None) if trackables: seen_object_ids = set() deque = collections.deque(trackables) while deque: trackable_obj = deque.popleft() trackable_id = id(trackable_obj) if trackable_id in seen_object_ids: continue seen_object_ids.add(trackable_id) # Metrics are not considered part of the Layer's topology. if isinstance(trackable_obj, tf.Module) and not isinstance( trackable_obj, metrics_mod.Metric ): yield trackable_obj # Introspect recursively through sublayers. if recursive: subtrackables = getattr(
28
255
_flatten_modules
27
0
3
7
scapy/contrib/automotive/scanner/executor.py
209,580
Add Automotive Logger for all debug outputs of the automotive layer
scapy
13
Python
24
executor.py
def check_new_states(self, test_case): # type: (AutomotiveTestCaseABC) -> None if isinstance(test_case, StateGenerator): edge = test_case.get_new_edge(self.socket, self.configuration) if edge: log_automotive.debug("Edge found %s", edge) tf = test_case.get_transition_function(self.socket, edge) self.state_graph.add_edge(edge, tf)
495b21f2867e48286767085c8cf2918e4092e9dc
62
https://github.com/secdev/scapy.git
107
def check_new_states(self, test_case): # type: (AutomotiveTestCaseABC) -> None if isinstance(test_case, StateGenerator): edge = test_case.get_new_edge(self.socket, self.configuration) if ed
15
97
check_new_states
128
0
7
24
awx/api/serializers.py
81,347
Optimize object creation by getting fewer empty relationships (#12508) This optimizes the ActivityStreamSerializer by only getting many-to-many relationships that are speculatively non-empty based on information we have in other fields We run this every time we create an object as an on_commit action so it is expected this will have a major impact on response times for launching jobs
awx
13
Python
86
serializers.py
def _local_summarizable_fk_fields(self, obj): summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS) # Special requests summary_dict['group'] = summary_dict['group'] + ('inventory_id',) for key in summary_dict.keys(): if 'id' not in summary_dict[key]: summary_dict[key] = summary_dict[key] + ('id',) field_list = list(summary_dict.items()) # Needed related fields that are not in the default summary fields field_list += [ ('workflow_job_template_node', ('id', 'unified_job_template_id')), ('label', ('id', 'name', 'organization_id')), ('notification', ('id', 'status', 'notification_type', 'notification_template_id')), ('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')), ('o_auth2_application', ('id', 'name', 'description')), ('credential_type', ('id', 'name', 'description', 'kind', 'managed')), ('ad_hoc_command', ('id', 'name', 'status', 'limit')), ('workflow_approval', ('id', 'name', 'unified_job_id')), ('instance', ('id', 'hostname')), ] # Optimization - do not attempt to summarize all fields, pair down to only relations that exist if not obj: return field_list existing_association_types = [obj.object1, obj.object2] if 'user' in existing_association_types: existing_association_types.append('role') return [entry for entry in field_list if entry[0] in existing_association_types]
2d310dc4e50c6f7cd298f9fb8af69da258cd9ea6
234
https://github.com/ansible/awx.git
365
def _local_summarizable_fk_fields(self, obj): summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS) # Special requests summary_dict['group'] = summary_dict['group'] + ('inventory_id',) for key in summary_dict.keys(): if 'id' not in summary_dict[key]: summary_dict[key] = summary_dict[key] + ('id',) field_list = list(summary_dict.items()) # Needed related fields that are not in the default summary fields field_list += [ ('workflow_job_template_node', ('id', 'unified_job_template_id')), ('label', (
16
411
_local_summarizable_fk_fields
55
0
1
17
t/unit/utils/test_local.py
208,187
[pre-commit.ci] pre-commit autoupdate (#7625) * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/asottile/pyupgrade: v2.34.0 → v2.38.0](https://github.com/asottile/pyupgrade/compare/v2.34.0...v2.38.0) - [github.com/PyCQA/flake8: 4.0.1 → 5.0.4](https://github.com/PyCQA/flake8/compare/4.0.1...5.0.4) - [github.com/asottile/yesqa: v1.3.0 → v1.4.0](https://github.com/asottile/yesqa/compare/v1.3.0...v1.4.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * autopep8 Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Omer Katz <omer.katz@kcg.tech>
celery
9
Python
31
test_local.py
def test_listproxy(self): v = [] x = Proxy(lambda: v) x.append(1) x.extend([2, 3, 4]) assert x[0] == 1 assert x[:-1] == [1, 2, 3] del (x[-1]) assert x[:-1] == [1, 2] x[0] = 10 assert x[0] == 10 assert 10 in x assert len(x) == 3 assert iter(x) x[0:2] = [1, 2] del (x[0:2]) assert str(x)
777698c746e4d1aa8af0a7974b0559bf3b86b14a
133
https://github.com/celery/celery.git
166
def test_listproxy(self): v = [] x = Proxy(lambda: v) x.append(1)
10
200
test_listproxy
33
0
1
20
tests/acceptance/test_onboarding.py
98,309
feat(onboarding): remove welcome page experiment and use new experience (#33616) This PR copies the welcome page component from the targeted onboarding flow into the default onboarding flow and removes the TargetedOnboardingWelcomePageExperimentV2 experiment. There are some minor differences to handle the different prop types but everything else is the same.
sentry
10
Python
28
test_onboarding.py
def test_onboarding(self, generate_api_key): self.browser.get("/onboarding/%s/" % self.org.slug) # Welcome step self.browser.wait_until('[data-test-id="onboarding-step-welcome"]') self.browser.snapshot(name="onboarding - welcome") # Platform selection step self.browser.click('[aria-label="Start"]') self.browser.wait_until('[data-test-id="onboarding-step-select-platform"]') self.browser.snapshot(name="onboarding - select platform") # Select and create node JS project self.browser.click('[data-test-id="platform-node"]') self.browser.wait_until_not('[data-test-id="platform-select-next"][aria-disabled="true"]') self.browser.wait_until('[data-test-id="platform-select-next"][aria-disabled="false"]')
c407626bafad657529022fcc11ea7915d71e0c61
177
https://github.com/getsentry/sentry.git
116
def test_onboarding(self, generate_api_key): self.browser.get("/onboarding/%s/" % self.org.slug) # Welcome step self.browser.wait_until('[data-test-id="onboarding-step-welcome"]') self.browser.snapshot(name="onboarding - welcome") # Platform selection step self.browser.click('[aria-label="Start"]') self.brow
12
164
test_onboarding
26
0
3
8
scripts/validate_min_versions_in_sync.py
163,582
MISC: Check that min versions are aligned in CI and import_optional_dependency (#45219)
pandas
11
Python
24
validate_min_versions_in_sync.py
def get_versions_from_code() -> dict[str, str]: install_map = _optional.INSTALL_MAPPING versions = _optional.VERSIONS return { install_map.get(k, k).casefold(): v for k, v in versions.items() if k != "pytest" }
388ecf3d0804d7596876b53d96eb34de5bdcf8a3
52
https://github.com/pandas-dev/pandas.git
58
def get_versions_from_code() -> dict[str, str]: install_map = _optional.INSTALL_MAPPING versions = _optional
13
82
get_versions_from_code
130
0
4
27
recommenders/models/ncf/dataset.py
39,114
fix static analysis
recommenders
22
Python
95
dataset.py
def _create_test_file(self): logger.info("Creating full leave-one-out test file {} ...".format(self.test_file_full)) # create empty csv pd.DataFrame( columns=[self.col_user, self.col_item, self.col_rating, self.col_test_batch] ).to_csv(self.test_file_full, index=False) batch_idx = 0 with self.train_datafile as train_datafile: with self.test_datafile as test_datafile: for user in test_datafile.users: if user in train_datafile.users: user_test_data = test_datafile.load_data(user) user_train_data = train_datafile.load_data(user) # for leave-one-out evaluation, exclude items seen in both training and test sets # when sampling negatives user_positive_item_pool = set( user_test_data[self.col_item].unique()).union(user_train_data[self.col_item].unique() ) user_negative_item_pool = self._get_user_negatives_pool(user_positive_item_pool) n_samples = self.n_neg_test n_samples = self._check_sample_size(user, n_samples, user_negative_item_pool, training=False) user_examples_dfs = [] # sample n_neg_test negatives for each positive example and assign a batch index for positive_example in np.array_split(user_test_data, user_test_data.shape[0]): negative_examples = self._get_negative_examples(user, user_negative_item_pool, n_samples) examples = pd.concat([positive_example, negative_examples]) examples[self.col_test_batch] = batch_idx user_examples_dfs.append(examples) batch_idx += 1 # append user test data to file user_examples = pd.concat(user_examples_dfs) user_examples.to_csv(self.test_file_full, mode='a', index=False, header=False)
fd2bf8bfb71aa94fe70e1fe462d317b9bbaa6c52
248
https://github.com/microsoft/recommenders.git
734
def _create_test_file(self): logger.info("Creating full leave-one-out test file {} ...".format(self.test_file_full)) # create empty csv pd.DataFrame( columns=[self.col_user, self.col_item, self.col_rating, self.col_test_batch] ).to_csv(self.test_file_full, index=False) batch_idx = 0 with self.train_datafile as train_datafile: with self.test_datafile as test_datafile: for user in test_datafile.users: if user in train_datafile.users: user_test_data = test_datafile.load_data(user) user_train_data = train_datafile.load_data(user) # for leave-one-out evaluation, exclude items seen in both training and test sets # when sampling negatives user_positive_item_pool = set( user_test_data[self.col_item].unique()).union(user_train_data[self.col_item].unique() ) user_negative_item_pool = self._get_user_negatives_pool(user_positive_item_pool) n_samples = self.n_neg_test n_samples = self._check_sample_size(user, n_samples, user_negative_item_pool, training=False) user_examples_dfs = [] # sample n_neg_test negatives for each positive example and assign a batch index for positive_example in np.array_split(user_test_data, user_test_data.shape[0]): negative_examples = self._get_negative_examples(user, user_negative_item_pool, n_samples) examples = pd.concat([positive_example, negative_examples]) examples[self.col_test_batch] = batch_idx user_examples_dfs.append(examples)
46
392
_create_test_file
56
0
1
11
onnx/backend/test/case/node/softmaxcrossentropy.py
255,069
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
46
softmaxcrossentropy.py
def export_softmaxcrossentropy_sum_log_prob() -> None: # Define operator attributes. reduction = 'sum' # Create operator. node = onnx.helper.make_node('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs=['z', 'log_prob'], reduction=reduction) # Define operator inputs. np.random.seed(0) x = np.random.rand(3, 5).astype(np.float32) labels = np.random.randint(0, high=5, size=(3, )).astype(np.int64) # Compute SoftmaxCrossEntropyLoss loss, log_prob = softmaxcrossentropy(x, labels, reduction='sum', get_log_prob=True) # Check results expect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_sce_sum_log_prob')
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
136
https://github.com/onnx/onnx.git
247
def export_softmaxcrossentropy_sum_log_prob() -> None: # Define operator attributes. reduction = 'sum' # Create operator. node = onnx.helper.make_node('SoftmaxCrossEntropyLoss', inputs=['x', 'y'], outputs=['z', 'log_prob'], reduction=reduction) # Define operator inputs. np.random.seed(0) x = np.random.rand(3, 5).astype(np.float32) labels = np.random.randint(0, high=5, size=(3, )).astype(np.int64) # Compute SoftmaxCrossEntropyLoss loss, log_prob = softmaxcrossentropy(x, labels, reduction='sum', get_log_prob=True) # Check results expect(node, inputs=[x, labels], outputs=[loss, log_prob], name='test_sce_sum_log_prob')
26
218
export_softmaxcrossentropy_sum_log_prob
11
1
1
9
erpnext/accounts/doctype/sales_invoice/sales_invoice.py
65,046
style: format code with black
erpnext
8
Python
11
sales_invoice.py
def get_mode_of_payment_info(mode_of_payment, company): return frappe.db.sql( , (company, mode_of_payment), as_dict=1, ) @frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
27
https://github.com/frappe/erpnext.git
4
def get_mode_of_payment_info(mode_of_payment, company): return frappe.db.sql( , (company, mode_of_payment
8
51
get_mode_of_payment_info
11
0
2
5
homeassistant/components/egardia/alarm_control_panel.py
314,637
Use attributes in egardia alarm (#74098)
core
7
Python
10
alarm_control_panel.py
def should_poll(self) -> bool: if not self._rs_enabled: return True return False
bc33818b20d145cba370247f5bb3b69d078cd9f3
18
https://github.com/home-assistant/core.git
43
def should_poll(self) -> bool:
4
32
should_poll
108
1
9
34
tests/profiler/test_profiler.py
241,548
Remove `profile("training_step_and_backward")` (#11222)
lightning
13
Python
64
test_profiler.py
def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler): model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, enable_progress_bar=False, max_epochs=1, limit_train_batches=5, limit_val_batches=5, profiler=pytorch_profiler, strategy="ddp", gpus=2, ) trainer.fit(model) expected = {"[Strategy]DDPStrategy.validation_step"} if not _KINETO_AVAILABLE: expected |= { "[Strategy]DDPStrategy.training_step", "[Strategy]DDPStrategy.backward", } for name in expected: assert sum(e.name == name for e in pytorch_profiler.function_events), name files = set(os.listdir(pytorch_profiler.dirpath)) expected = f"fit-profiler-{trainer.local_rank}.txt" assert expected in files path = pytorch_profiler.dirpath / expected assert path.read_text("utf-8") if _KINETO_AVAILABLE: files = os.listdir(pytorch_profiler.dirpath) files = [file for file in files if file.endswith(".json")] assert len(files) == 2, files local_rank = trainer.local_rank assert any(f"{local_rank}-optimizer_step_with_closure_" in f for f in files) assert any(f"{local_rank}-[Strategy]DDPStrategy.validation_step" in f for f in files) @pytest.mark.parametrize("fast_dev_run", [1, 2, 3, 4, 5]) @pytest.mark.parametrize("boring_model_cls", [ManualOptimBoringModel, BoringModel])
7fa1aebcc99297e4d7eb8dcf2deb22e6da814edf
@pytest.mark.parametrize("fast_dev_run", [1, 2, 3, 4, 5]) @pytest.mark.parametrize("boring_model_cls", [ManualOptimBoringModel, BoringModel])
199
https://github.com/Lightning-AI/lightning.git
289
def test_pytorch_profiler_trainer_ddp(tmpdir, pytorch_profiler): model = BoringModel() trainer = Trainer( default_root_dir=tmpdir, enable_progress_bar=False, max_epochs=1, limit_train_batches=5, limit_val_batches=5, profiler=pytorch_profiler, strategy="ddp", gpus=2, ) trainer.fit(model) expected = {"[Strategy]DDPStrategy.validation_step"} if not _KINETO_AVAILABLE: expected |= { "[Strategy]DDPStrategy.training_step", "[Strategy]DDPStrategy.backward", } for name in expected: assert sum(e.name == name for e in pytorch_profiler.function_events), name files = set(os.listdir(pytorch_profiler.dirpath)) expected = f"fit-profiler-{trainer.local_rank}.txt" assert expected in files path = pytorch_profiler.dirpath / expected assert path.read_text("utf-8") if _KINETO_AVAILABLE: files = os.listdir(pytorch_profiler.dirpath) files = [fi
39
378
test_pytorch_profiler_trainer_ddp
217
1
1
49
tests/components/recorder/test_websocket_api.py
289,566
Use US_CUSTOMARY_SYSTEM in tests (#80658) * Use US_CUSTOMARY_SYSTEM in tests * Don't update test_unit_system
core
18
Python
113
test_websocket_api.py
async def test_statistics_during_period(recorder_mock, hass, hass_ws_client): now = dt_util.utcnow() hass.config.units = US_CUSTOMARY_SYSTEM await async_setup_component(hass, "sensor", {}) await async_recorder_block_till_done(hass) hass.states.async_set("sensor.test", 10, attributes=POWER_SENSOR_KW_ATTRIBUTES) await async_wait_recording_done(hass) do_adhoc_statistics(hass, start=now) await async_wait_recording_done(hass) client = await hass_ws_client() await client.send_json( { "id": 1, "type": "recorder/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} await client.send_json( { "id": 2, "type": "recorder/statistics_during_period", "start_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "5minute", } ) response = await client.receive_json() assert response["success"] assert response["result"] == { "sensor.test": [ { "statistic_id": "sensor.test", "start": now.isoformat(), "end": (now + timedelta(minutes=5)).isoformat(), "mean": approx(10), "min": approx(10), "max": approx(10), "last_reset": None, "state": None, "sum": None, } ] } @pytest.mark.parametrize( "attributes, state, value, custom_units, converted_value", [ (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "cm"}, 1000), (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "m"}, 10), (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "in"}, 10 / 0.0254), (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {"power": "W"}, 10000), (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {"power": "kW"}, 10), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "Pa"}, 1000), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "hPa"}, 10), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "psi"}, 1000 / 6894.757), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "m/s"}, 2.77778), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "km/h"}, 10), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "mph"}, 6.21371), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "°C"}, 10), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "°F"}, 50), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "K"}, 283.15), (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {"volume": "m³"}, 10), (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {"volume": "ft³"}, 353.14666), ], )
e84e5f134ee6ccd04ad098a16c41dd2ed141371c
@pytest.mark.parametrize( "attributes, state, value, custom_units, converted_value", [ (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "cm"}, 1000), (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "m"}, 10), (DISTANCE_SENSOR_M_ATTRIBUTES, 10, 10, {"distance": "in"}, 10 / 0.0254), (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {"power": "W"}, 10000), (POWER_SENSOR_KW_ATTRIBUTES, 10, 10, {"power": "kW"}, 10), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "Pa"}, 1000), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "hPa"}, 10), (PRESSURE_SENSOR_HPA_ATTRIBUTES, 10, 10, {"pressure": "psi"}, 1000 / 6894.757), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "m/s"}, 2.77778), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "km/h"}, 10), (SPEED_SENSOR_KPH_ATTRIBUTES, 10, 10, {"speed": "mph"}, 6.21371), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "°C"}, 10), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "°F"}, 50), (TEMPERATURE_SENSOR_C_ATTRIBUTES, 10, 10, {"temperature": "K"}, 283.15), (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {"volume": "m³"}, 10), (VOLUME_SENSOR_M3_ATTRIBUTES, 10, 10, {"volume": "ft³"}, 353.14666), ], )
263
https://github.com/home-assistant/core.git
719
async def test_statistics_during_period(recorder_mock, hass, hass_ws_client): now = dt_util.utcnow() hass.config.units = US_CUSTOMARY_SYSTEM await async_setup_component(hass, "sensor", {}) await async_recorder_block_till_done(hass) hass.states.async_set("sensor.test", 10, attributes=POWER_SENSOR_KW_ATTRIBUTES) await async_wait_recording_done(hass) do_adhoc_statistics(hass, start=now) await async_wait_recording_done(hass) client = await hass_ws_client() await client.send_json( { "id": 1, "type": "recorder/statistics_during_period", "start_time": now.isoformat(), "end_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "hour", } ) response = await client.receive_json() assert response["success"] assert response["result"] == {} await client.send_json( { "id": 2, "type": "recorder/statistics_during_period", "start_time": now.isoformat(), "statistic_ids": ["sensor.test"], "period": "5minute",
35
863
test_statistics_during_period
5
0
1
2
homeassistant/components/mazda/sensor.py
309,021
Use SensorEntityDescription in Mazda integration (#63423) * Use SensorEntityDescription in Mazda integration * Change lambdas to functions * Minor fixes * Address review comments
core
11
Python
5
sensor.py
def _front_right_tire_pressure_value(data, unit_system):
    return round(data["status"]["tirePressure"]["frontRightTirePressurePsi"])
8915b73f724b58e93284a823c0d2e99fbfc13e84
22
https://github.com/home-assistant/core.git
11
def _front_right_tire_pressure_value(data, unit_system): return round(data["status"]["tirePressure"]["frontRightTirePressurePsi"])
4
41
_front_right_tire_pressure_value
39
0
1
8
deploy/python/preprocess.py
210,497
add YOLOX codes (#5727)
PaddleDetection
10
Python
30
preprocess.py
def apply_image(self, image, offsets, im_size, size):
    x, y = offsets
    im_h, im_w = im_size
    h, w = size
    canvas = np.ones((h, w, 3), dtype=np.float32)
    canvas *= np.array(self.fill_value, dtype=np.float32)
    canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
    return canvas
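For context, a minimal standalone sketch of the same canvas-paste idea, using only NumPy; the helper name and the grey fill_value default are illustrative assumptions, not part of the record above.

import numpy as np

def paste_on_canvas(image, offsets, canvas_size, fill_value=(114, 114, 114)):
    # Build a canvas of the target size filled with a constant color,
    # then copy the image into it at the (x, y) offset -- the same idea
    # as the apply_image step shown in this record.
    x, y = offsets
    im_h, im_w = image.shape[:2]
    h, w = canvas_size
    canvas = np.ones((h, w, 3), dtype=np.float32) * np.array(fill_value, dtype=np.float32)
    canvas[y:y + im_h, x:x + im_w, :] = image.astype(np.float32)
    return canvas

# Example: place a 2x2 image at offset (1, 1) inside a 4x4 canvas.
img = np.zeros((2, 2, 3), dtype=np.uint8)
out = paste_on_canvas(img, offsets=(1, 1), canvas_size=(4, 4))
print(out.shape)  # (4, 4, 3)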
4984ff0ffe6ce0996907f1a6b47bbdfbd4b1a879
92
https://github.com/PaddlePaddle/PaddleDetection.git
87
def apply_image(self, image, offsets, im_size, size): x, y = offsets im_h, im_w = im_size h, w = size canvas = np.ones((h, w, 3), dtype=np.float32) canvas *=
20
133
apply_image
35
1
1
6
tests/openbb_terminal/forecast/test_forecast_controller.py
285,905
Forecasting Menu [Work in Progress] (#1933) * Gave forecasting memory * Fixed scripts, refactored * FIxed poetry lock * edge case check for forecast target * Improved combine and load functionality * Cleaned up translations * Fixed issue with covariates * Fixed issue checking covariates * Another covariates check fix * Ignored regr and linregr warnings * Fixed covariate issues * switched from forecasting to forecast * Finished transition to forecast * Can add entire dataset with one command * Improved combine description * Removed naming covariates * Created new installation * typo * Make plot show dates if available * Added better handling or users without the menu * Removed unused file * Fix * Better handling for nontraditional datasets * Fixed black and pylint * Fixed tests * Added darts install to main tests * Working on darts with CI * Added back test file * Made large tables print better * naive baseline * typo * Finished naive * no dollar on prediction * fixed positive MAPE bug * quick refactoring * Fixed two different args for same thing * added extra patience * linreg mape fix * info fix * Refactored API, bumped to Darts 0.21.0 * Added fixes * Increased verbosity for wrong column * Updated dependencies * Hid warnings * Fixed importing * Fixed tests * Fixed ugly seasonal plotting * Fixed forecast line color * Switched chart output to blue * Simplified lambda_price_prediction_color * fixed residuals * Chnage * Removed darts from CI per Chavi * Added fixes to tests * Added knnfix * Fixed issue where n!= o * Added changes * Added changes * Imrpoved forecast dash * Added Theo notebook * Added enhancements to dash * Added notebook * Added fix for jupyter lab * Added debug stuff * Change * Updated docs * Fixed formatting * Fixed formatting * Removed prints * Filtered some info * Added button to run model * Improved api * Added secret feautr (no peeking Martin) * Cleaned code * Fixed tests * Added test fixes * Added fixes * Fixes * FIxes for pres * Remove bad tests * Removed knn * Fixed issues with removing mc * doc for conda * Added forecast improvements * Added streamlit support * Fixed issues * fix expo with streamlit due to quantile() * fixed performance issues with streamlit for now.. 
* clean up historical forecast with new trainer * quick fix for regression trainer params * Added fixes * quick fix for other fix for regression trainer params * table formatting for timestamp * potential fix for inf in feature engineered datasets * Basic working in new format * dw * Trying * Fixed issues * Improved graphing * fixing trainer for LR and formatting * doge and linting * page break * automatic cleaning of datasets * automatic cleaning of datasets- fix * Fixed forecast dates * Made dashboard prettier * Added fixes * Added fixes * Added options * Fixed error * remove caching * adding in spinner * Added vairable n_predict in streamlit * Added mypy fix * renaming and range change * new index for n predict * check positive float for window size * Update _index.md * Update _index.md * Update _index.md * Update _index.md * Update _index.md * Update _index.md * Update _index.md * Update _index.md * Update _index.md * renaming * reorg files * Update _index.md * hidden which command for versions * Update _index.md * Update _index.md * which: ns parser * hugo for: which * hugo for: forecasting fix * formatting black * update stock controller test * Lay groundwork for better residual plotting * improved delete to allow for periods in title * improved automatic cleaning of inf * Added new API * Added new API * Added new API * formatting for black * Updated our testing CI * Reverted changes * Added forecast docs * Fixed mypy issues * Fixes tests * Did some refactoring, added a report * new api in streamlit * Added integrated tests * Update _index.md * improved loading in custom dataset * menu spacing * installer fixes * Added docs fixes * Adding comments to test if commit working * Fixed report * naming conventions * formatting * removing unused var * Made last report imporvements * Update README.md * Added fix * Switched to warning * Added fixes * Added fixes * Added fixes * Added fixes * Update economy av view test * Remove forgotten print statement * Update depencencies * Added verbosity to pytest * Added fixes * Fixed pylint * Fixed actions checkout * Added fixes Co-authored-by: colin99d <colin99delahunty@gmail.com> Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com> Co-authored-by: James Simmons <simmonsj330@gmail.com> Co-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com> Co-authored-by: Theodore Aptekarev <aptekarev@gmail.com>
OpenBBTerminal
10
Python
32
test_forecast_controller.py
def test_models(mocker, opt, func):
    mocker.patch(base + "helpers.check_parser_input", return_value=True)
    mocker.patch(base + func)
    cont = fc.ForecastController()
    cont.datasets = {"data": df}
    getattr(cont, f"call_{opt}")(["data"])


@pytest.mark.parametrize(
    "opt",
    [
        "expo",
        "theta",
        "rnn",
        "nbeats",
        "tcn",
        "regr",
        "linregr",
        "brnn",
        "trans",
        "tft",
    ],
)
7fd72d9ee1e8847717195859bf6d608268a94e2f
@pytest.mark.parametrize( "opt", [ "expo", "theta", "rnn", "nbeats", "tcn", "regr", "linregr", "brnn", "trans", "tft", ], )
57
https://github.com/OpenBB-finance/OpenBBTerminal.git
126
def test_models(mocker, opt, func): mocker.patch(base + "helpers.check_parser_input", return_valu
16
161
test_models
12
0
1
2
freqtrade/rpc/replicate/serializer.py
150,469
minor improvements and pairlist data transmission
freqtrade
7
Python
12
serializer.py
def _deserialize(self, data):
    # The WebSocketSerializer gives bytes not string
    return json.loads(data)
6834db11f3ec4d0b9d9a6540633e1b363c11c889
14
https://github.com/freqtrade/freqtrade.git
25
def _deserialize(self, data): # The WebSocketSerializer gives bytes not string return json.lo
5
23
_deserialize
66
0
4
13
tests/builtin_server/tests.py
201,910
Refs #33476 -- Reformatted code with Black.
django
11
Python
58
tests.py
def write(self, data):
    assert isinstance(data, bytes), "write() argument must be bytestring"

    if not self.status:
        raise AssertionError("write() before start_response()")

    elif not self.headers_sent:
        # Before the first output, send the stored headers
        self.bytes_sent = len(data)  # make sure we know content-length
        self.send_headers()
    else:
        self.bytes_sent += len(data)

    # XXX check Content-Length and truncate if too many bytes written?
    data = BytesIO(data)
    for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b""):
        self._write(chunk)
        self._flush()
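The write() above streams a bytes payload in fixed-size chunks by pairing iter() with a b"" sentinel. A small self-contained sketch of just that chunking pattern follows; the helper name and the tiny chunk size are illustrative choices, not taken from the record.

from io import BytesIO

CHUNK_SIZE = 4  # deliberately small so the chunking is visible

def iter_chunks(payload: bytes):
    # Wrap the payload in BytesIO and read fixed-size chunks until
    # read() returns b"", which is the sentinel that stops iter().
    buf = BytesIO(payload)
    yield from iter(lambda: buf.read(CHUNK_SIZE), b"")

print(list(iter_chunks(b"hello world")))  # [b'hell', b'o wo', b'rld']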
9c19aff7c7561e3a82978a272ecdaad40dda5c00
92
https://github.com/django/django.git
200
def write(self, data): assert isinstance(data, bytes), "write() argument must be bytestring" if not self.status: raise AssertionError("write() before start_response()") elif not self.headers_sent: # Before the first output, send the stored headers self.bytes_sent = len(data) # make sure we know content-length self.send_headers() else: self.bytes_sent += len(data)
18
157
write
14
0
2
5
gradio/state.py
179,632
state fixes; deprecation
gradio
12
Python
12
state.py
def __setattr__(self, name, value):
    if name.startswith("_"):
        self.__dict__[name] = value
    else:
        StateHolder.state_dict[(self.__id, name)] = value
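The setter above routes "public" attributes into a class-level dict keyed by an instance id while keeping underscore-prefixed names on the instance. A minimal sketch of that pattern follows; the class and attribute names here are illustrative stand-ins, not gradio's actual implementation.

class StateHolder:
    state_dict = {}

class SessionState:
    def __init__(self, session_id):
        self._id = session_id  # leading underscore -> stored on the instance

    def __setattr__(self, name, value):
        if name.startswith("_"):
            self.__dict__[name] = value
        else:
            # everything else is stored per (session, attribute) pair
            StateHolder.state_dict[(self._id, name)] = value

    def __getattr__(self, name):
        return StateHolder.state_dict.get((self._id, name))

s = SessionState("abc")
s.counter = 3
print(StateHolder.state_dict)  # {('abc', 'counter'): 3}
print(s.counter)               # 3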
8e1577e6debd76caffac1b1102a00f94348d7a3f
41
https://github.com/gradio-app/gradio.git
49
def __setattr__(self, name, value): if name.startswith("_"): self.__dict__
9
64
__setattr__
41
1
1
13
python/ray/serve/tests/test_cli.py
147,088
[serve] Implement `serve.run()` and `Application` (#23157) These changes expose `Application` as a public API. They also introduce a new public method, `serve.run()`, which allows users to deploy their `Applications` or `DeploymentNodes`. Additionally, the Serve CLI's `run` command and Serve's REST API are updated to use `Applications` and `serve.run()`. Co-authored-by: Edward Oakes <ed.nmi.oakes@gmail.com>
ray
12
Python
38
test_cli.py
def test_run_deployment_node(ray_start_stop):
    # Tests serve run with specified args and kwargs

    # Deploy via import path
    p = subprocess.Popen(
        [
            "serve",
            "run",
            "--address=auto",
            "ray.serve.tests.test_cli.molly_macaw",
        ]
    )
    wait_for_condition(lambda: ping_endpoint("Macaw") == "Molly is green!", timeout=10)
    p.send_signal(signal.SIGINT)
    p.wait()
    assert ping_endpoint("Macaw") == "connection error"


@serve.deployment
aaf47b2493beb985bfbc52dbdf1f52fc48377d74
@serve.deployment
57
https://github.com/ray-project/ray.git
121
def test_run_deployment_node(ray_start_stop): # Tests serve run with specified args and kwargs # Deploy via import path p = subprocess.Popen( [ "serve", "run", "--address=auto", "ray.serve.tests.test_cli.molly_macaw", ] ) wait_for_condition(lambda: ping_endpoint("Macaw") == "Molly is green!", timeout=10) p.send_signal(signal.SIGINT) p.wait() assert ping_endpoint("Macaw") == "connection error" @serve.deployment
14
113
test_run_deployment_node
121
0
1
40
zerver/tests/test_widgets.py
84,776
tests: Consistently JSON-encode ‘to’ parameter Although our POST /messages handler accepts the ‘to’ parameter with or without JSON encoding, there are two problems with passing it as an unencoded string. Firstly, you’d fail to send a message to a stream named ‘true’ or ‘false’ or ‘null’ or ‘2022’, as the JSON interpretation is prioritized over the plain string interpretation. Secondly, and more importantly for our tests, it violates our OpenAPI schema, which requires the parameter to be JSON-encoded. This is because OpenAPI has no concept of a parameter that’s “optionally JSON-encoded”, nor should it: such a parameter cannot be unambiguously decoded for the reason above. Our version of openapi-core doesn’t currently detect this schema violation, but after the next upgrade it will. Signed-off-by: Anders Kaseorg <anders@zulip.com>
zulip
14
Python
77
test_widgets.py
def test_poll_command_extra_data(self) -> None: sender = self.example_user("cordelia") stream_name = "Verona" # We test for both trailing and leading spaces, along with blank lines # for the poll options. content = "/poll What is your favorite color?\n\nRed\nGreen \n\n Blue\n - Yellow" payload = dict( type="stream", to=orjson.dumps(stream_name).decode(), topic="whatever", content=content, ) result = self.api_post(sender, "/api/v1/messages", payload) self.assert_json_success(result) message = self.get_last_message() self.assertEqual(message.content, content) expected_submessage_content = dict( widget_type="poll", extra_data=dict( options=["Red", "Green", "Blue", "Yellow"], question="What is your favorite color?", ), ) submessage = SubMessage.objects.get(message_id=message.id) self.assertEqual(submessage.msg_type, "widget") self.assertEqual(orjson.loads(submessage.content), expected_submessage_content) # Now don't supply a question. content = "/poll" payload["content"] = content result = self.api_post(sender, "/api/v1/messages", payload) self.assert_json_success(result) expected_submessage_content = dict( widget_type="poll", extra_data=dict( options=[], question="", ), ) message = self.get_last_message() self.assertEqual(message.content, content) submessage = SubMessage.objects.get(message_id=message.id) self.assertEqual(submessage.msg_type, "widget") self.assertEqual(orjson.loads(submessage.content), expected_submessage_content)
bd9a1dc9710293e36d2d47d970d7afb95100c2e6
263
https://github.com/zulip/zulip.git
489
def test_poll_command_extra_data(self) -> None: sender = self.example_user("cordelia") stream_name = "Verona" # We test for both trailing and leading spaces, alo
33
445
test_poll_command_extra_data
17
0
4
4
openbb_terminal/custom/prediction_techniques/pred_controller.py
283,255
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <andrew.kenreich@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt> Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
14
Python
16
pred_controller.py
def update_runtime_choices(self):
    if session and obbff.USE_PROMPT_TOOLKIT:
        self.choices["pick"] = {c: None for c in list(self.df.columns)}
        self.completer = NestedCompleter.from_nested_dict(self.choices)
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
48
https://github.com/OpenBB-finance/OpenBBTerminal.git
49
def update_runtime_choices(self): if session and obbff.USE_PROMPT_TOOLKIT: self.choices["pick"] = {c: None for c in list(self.df.
13
79
update_runtime_choices
20
0
3
9
gamestonk_terminal/stocks/stocks_controller.py
281,020
improve usage of timezone in terminal (#1126) * improve usage of timezone in terminal * lint * update dependencies * address James review comments * remove seconds from time on cmd line * skip test
OpenBBTerminal
15
Python
14
stocks_controller.py
def call_reset(self, _):
    if self.ticker:
        if self.suffix:
            self.queue.insert(0, f"load {self.ticker}.{self.suffix}")
        else:
            self.queue.insert(0, f"load {self.ticker}")
    self.queue.insert(0, "stocks")
    self.queue.insert(0, "reset")
    self.queue.insert(0, "quit")
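call_reset rebuilds the pending command queue by pushing commands at position 0, so the last insert is executed first. A tiny sketch of the resulting order, with a hypothetical ticker value, assuming the queue is consumed left to right as in the record above.

queue = []          # pending commands, consumed from the front
ticker = "AAPL"     # hypothetical loaded ticker

if ticker:
    queue.insert(0, f"load {ticker}")
queue.insert(0, "stocks")
queue.insert(0, "reset")
queue.insert(0, "quit")

# The terminal quits, resets, re-enters the stocks menu, then reloads the ticker.
print(queue)  # ['quit', 'reset', 'stocks', 'load AAPL']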
d5d581b59b614d45f105f3bda91645667ad623b8
72
https://github.com/OpenBB-finance/OpenBBTerminal.git
107
def call_reset(self, _): if self.ticker: if self.suffix: self.queue.insert(0, f"load {self.ticker}.{self.suffix}") else: self.queue.insert(0, f"load {self.ticker}") self.queue.in
7
141
call_reset
433
0
1
183
deploy/python/utils.py
211,310
[deploy] alter save coco format json in deploy/python/infer.py (#6705)
PaddleDetection
11
Python
238
utils.py
def argsparser(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "--model_dir", type=str, default=None, help=("Directory include:'model.pdiparams', 'model.pdmodel', " "'infer_cfg.yml', created by tools/export_model.py."), required=True) parser.add_argument( "--image_file", type=str, default=None, help="Path of image file.") parser.add_argument( "--image_dir", type=str, default=None, help="Dir of image file, `image_file` has a higher priority.") parser.add_argument( "--batch_size", type=int, default=1, help="batch_size for inference.") parser.add_argument( "--video_file", type=str, default=None, help="Path of video file, `video_file` or `camera_id` has a highest priority." ) parser.add_argument( "--camera_id", type=int, default=-1, help="device id of camera to predict.") parser.add_argument( "--threshold", type=float, default=0.5, help="Threshold of score.") parser.add_argument( "--output_dir", type=str, default="output", help="Directory of output visualization files.") parser.add_argument( "--run_mode", type=str, default='paddle', help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)") parser.add_argument( "--device", type=str, default='cpu', help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU." ) parser.add_argument( "--use_gpu", type=ast.literal_eval, default=False, help="Deprecated, please use `--device`.") parser.add_argument( "--run_benchmark", type=ast.literal_eval, default=False, help="Whether to predict a image_file repeatedly for benchmark") parser.add_argument( "--enable_mkldnn", type=ast.literal_eval, default=False, help="Whether use mkldnn with CPU.") parser.add_argument( "--enable_mkldnn_bfloat16", type=ast.literal_eval, default=False, help="Whether use mkldnn bfloat16 inference with CPU.") parser.add_argument( "--cpu_threads", type=int, default=1, help="Num of threads with CPU.") parser.add_argument( "--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.") parser.add_argument( "--trt_max_shape", type=int, default=1280, help="max_shape for TensorRT.") parser.add_argument( "--trt_opt_shape", type=int, default=640, help="opt_shape for TensorRT.") parser.add_argument( "--trt_calib_mode", type=bool, default=False, help="If the model is produced by TRT offline quantitative " "calibration, trt_calib_mode need to set True.") parser.add_argument( '--save_images', action='store_true', default=False, help='Save visualization image results.') parser.add_argument( '--save_mot_txts', action='store_true', help='Save tracking results (txt).') parser.add_argument( '--save_mot_txt_per_img', action='store_true', help='Save tracking results (txt) for each image.') parser.add_argument( '--scaled', type=bool, default=False, help="Whether coords after detector outputs are scaled, False in JDE YOLOv3 " "True in general detector.") parser.add_argument( "--tracker_config", type=str, default=None, help=("tracker donfig")) parser.add_argument( "--reid_model_dir", type=str, default=None, help=("Directory include:'model.pdiparams', 'model.pdmodel', " "'infer_cfg.yml', created by tools/export_model.py.")) parser.add_argument( "--reid_batch_size", type=int, default=50, help="max batch_size for reid model inference.") parser.add_argument( '--use_dark', type=ast.literal_eval, default=True, help='whether to use darkpose to get better keypoint position predict ') parser.add_argument( "--action_file", type=str, default=None, help="Path of input file for action recognition.") parser.add_argument( "--window_size", type=int, default=50, 
help="Temporal size of skeleton feature for action recognition.") parser.add_argument( "--random_pad", type=ast.literal_eval, default=False, help="Whether do random padding for action recognition.") parser.add_argument( "--save_results", action='store_true', default=False, help="Whether save detection result to file using coco format") parser.add_argument( '--use_coco_category', action='store_true', default=False, help='Whether to use the coco format dictionary `clsid2catid`') parser.add_argument( "--slice_infer", action='store_true', help="Whether to slice the image and merge the inference results for small object detection." ) parser.add_argument( '--slice_size', nargs='+', type=int, default=[640, 640], help="Height of the sliced image.") parser.add_argument( "--overlap_ratio", nargs='+', type=float, default=[0.25, 0.25], help="Overlap height ratio of the sliced image.") parser.add_argument( "--combine_method", type=str, default='nms', help="Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat']." ) parser.add_argument( "--match_threshold", type=float, default=0.6, help="Combine method matching threshold.") parser.add_argument( "--match_metric", type=str, default='iou', help="Combine method matching metric, choose in ['iou', 'ios'].") return parser
10e7fe232c83dacee0f517d78644b705e5d24a57
739
https://github.com/PaddlePaddle/PaddleDetection.git
1,542
def argsparser(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( "--model_dir", type=str, default=None, help=("Directory include:'model.pdiparams', 'model.pdmodel', " "'infer_cfg.yml', created by tools/export_model.py."), required=True) parser.add_argument( "--image_file", type=str, default=None, help="Path of image file.") parser.add_argument( "--image_dir", type=str, default=None, help="Dir of image file, `image_file` has a higher priority.") parser.add_argument( "--batch_size", type=int, default=1, help="batch_size for inference.") parser.add_argument( "--video_file", type=str, default=None, help="Path of video file, `video_file` or `camera_id` has a highest priority." ) parser.add_argument( "--camera_id", type=int, default=-1, help="device id of camera to predict.") parser.add_argument( "--threshold", type=float, default=0.5, help="Threshold of score.") parser.add_argument( "--output_dir", type=str, default="output", help="Directory of output visualization files.") parser.add_argument( "--run_mode", type=str, default='paddle', help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)") parser.add_argument( "--device", type=str, default='cpu', help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU." ) parser.add_argument( "--use_gpu", type=ast.literal_eval, default=False, help="Deprecated, please use `--device`.") parser.add_argument( "--run_benchmark", type=ast.literal_eval, default=False, help="Whether to predict a image_file repeatedly for benchmark") parser.add_argument( "--enable_mkldnn", type=ast.literal_eval, default=False, help="Whether use mkldnn with CPU.") parser.add_argument( "--enable_mkldnn_bfloat16", type=ast.literal_eval, default=False, help="Whether use mkldnn bfloat16 inference with CPU.") parser.add_argument( "--cpu_threads", type=int, default=1, help="Num of threads with CPU.") parser.add_argument( "--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.") parser.add_argument( "--trt_max_shape", type=int, default=1280, help="max_shape for TensorRT.") parser.add_argument( "--trt_opt_shape", type=int, default=640, help="opt_shape for TensorRT.") parser.add_argument( "--trt_calib_mode", type=bool, default=False, help="If the model is
19
1,210
argsparser
9
0
1
3
tests/sentry/models/test_groupsnooze.py
86,151
fix(tests): Use `RedisSnubaTSDB` by default in all tests (#39297) `RedisSnubaTSDB` has been the default in productions. To make our tests reflect production we should use it there as well. Removed most uses of `tsdb.incr` from the tests. The only ones left are places that are actually still using `tsdb.incr`.
sentry
10
Python
9
test_groupsnooze.py
def test_user_rate_without_test(self):
    snooze = GroupSnooze.objects.create(group=self.group, count=100, window=60)
    assert snooze.is_valid(test_rates=False)
1449643f60404c3ec50ec4eab11bc1c3b3bfe1ab
36
https://github.com/getsentry/sentry.git
22
def test_user_rate_without_test(self): sno
11
55
test_user_rate_without_test
71
0
4
25
tests/ludwig/utils/test_defaults.py
7,237
feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)
ludwig
11
Python
54
test_defaults.py
def test_merge_with_defaults_early_stop(use_train, use_hyperopt_scheduler):
    all_input_features = [
        binary_feature(),
        category_feature(),
        number_feature(),
        text_feature(),
    ]
    all_output_features = [
        category_feature(),
        sequence_feature(),
        vector_feature(),
    ]

    # validate config with all features
    config = {
        "input_features": all_input_features,
        "output_features": all_output_features,
        HYPEROPT: HYPEROPT_CONFIG,
    }
    config = copy.deepcopy(config)

    if use_train:
        config[TRAINER] = {"batch_size": 42}

    if use_hyperopt_scheduler:
        # hyperopt scheduler cannot be used with early stopping
        config[HYPEROPT]["executor"][SCHEDULER] = SCHEDULER_DICT

    merged_config = merge_with_defaults(config)

    expected = -1 if use_hyperopt_scheduler else ECDTrainerConfig().early_stop
    assert merged_config[TRAINER]["early_stop"] == expected
aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173
123
https://github.com/ludwig-ai/ludwig.git
200
def test_merge_with_defaults_early_stop(use_train, use_hyperopt_scheduler): all_input_features = [ binary_feature(), category_feature(), number_feature(), text_feature(), ] all_output_features = [ category_feature(), sequence_feature(), vector_feature(), ] # validate config with all features config = { "input_features": all_input_features, "output_features": all_output_features, HYPEROPT: HYPEROPT_CONFIG, } config = copy.deepcopy(config) if use_train: config[TRAINER] = {"batch_size": 42} if use_hyperopt_scheduler: # hyperopt sched
24
199
test_merge_with_defaults_early_stop
48
0
3
23
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/candidates.py
61,098
upd; format
transferlearning
13
Python
40
candidates.py
def make_install_req_from_dist(dist, template):
    # type: (Distribution, InstallRequirement) -> InstallRequirement
    project_name = canonicalize_name(dist.project_name)
    if template.req:
        line = str(template.req)
    elif template.link:
        line = f"{project_name} @ {template.link.url}"
    else:
        line = f"{project_name}=={dist.parsed_version}"
    ireq = install_req_from_line(
        line,
        user_supplied=template.user_supplied,
        comes_from=template.comes_from,
        use_pep517=template.use_pep517,
        isolated=template.isolated,
        constraint=template.constraint,
        options=dict(
            install_options=template.install_options,
            global_options=template.global_options,
            hashes=template.hash_options,
        ),
    )
    ireq.satisfied_by = dist
    return ireq
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
111
https://github.com/jindongwang/transferlearning.git
184
def make_install_req_from_dist(dist, template): # type: (Distribution, InstallRequirement) -> InstallRequirement project_name = canonicalize_name(dist.project_name) if template.req: line = str(template.req) elif template.link: line = f"{project_name} @ {template.link.url}" else: line = f"{project_name}=={dist.parsed_version}" ireq = install_req_from_line( line, user_supplied=template.user_supplied, comes_from=template.comes_from, use_pep517=t
25
192
make_install_req_from_dist
45
0
1
16
tests/sentry/api/endpoints/test_organization_environments.py
99,954
ref(tests): Remove `get_valid_response()` (#34822)
sentry
12
Python
26
test_organization_environments.py
def test_project_filter(self):
    other_project = self.create_project()
    project_env = self.create_environment(name="project", project=self.project)
    other_project_env = self.create_environment(name="other", project=other_project)

    response = self.get_success_response(
        self.project.organization.slug, project=[self.project.id]
    )
    assert response.data == serialize([project_env])

    response = self.get_success_response(
        self.project.organization.slug, project=[other_project.id]
    )
    assert response.data == serialize([other_project_env])

    response = self.get_success_response(
        self.project.organization.slug, project=[self.project.id, other_project.id]
    )
    assert response.data == serialize([other_project_env, project_env])
096b5511e244eecd8799b2a0324655207ce8985e
151
https://github.com/getsentry/sentry.git
161
def test_project_filter(self): other_project = self.create_project() project_env = self.create_environment(name="project", project=self.project) other_project_env = self.create_environment(name="other", project=other_project) response = self.get_success_response( self.project.organization.slug, project=[self.project.id] ) assert r
16
233
test_project_filter
116
0
8
28
erpnext/manufacturing/doctype/bom_update_log/bom_update_log.py
68,739
feat: Track progress in Log Batch/Job wise - This was done due to stale reads while the background jobs tried updating status of the log - Added a table where all bom jobs within log will be tracked with what level they are processing - Cron job will check if table jobs are all processed every 5 mins - If yes, it will prepare parents and call `process_boms_cost_level_wise` to start next level - If pending jobs, do nothing - Current BOM Level is being tracked that helps adding rows to the table - Individual bom cost jobs (that are queued) will process and update boms > will update BOM Update Batch table row with list of updated BOMs
erpnext
15
Python
80
bom_update_log.py
def resume_bom_cost_update_jobs():
    in_progress_logs = frappe.db.get_all(
        "BOM Update Log",
        {"update_type": "Update Cost", "status": "In Progress"},
        ["name", "processed_boms", "current_level"],
    )
    if not in_progress_logs:
        return

    for log in in_progress_logs:
        # check if all log batches of current level are processed
        bom_batches = frappe.db.get_all(
            "BOM Update Batch", {"parent": log.name, "level": log.current_level}, ["name", "boms_updated"]
        )
        incomplete_level = any(not row.get("boms_updated") for row in bom_batches)
        if not bom_batches or incomplete_level:
            continue

        # Prep parent BOMs & updated processed BOMs for next level
        current_boms, processed_boms = get_processed_current_boms(log, bom_batches)
        parent_boms = get_next_higher_level_boms(child_boms=current_boms, processed_boms=processed_boms)
        set_values_in_log(
            log.name,
            values={
                "processed_boms": json.dumps(processed_boms),
                "parent_boms": json.dumps(parent_boms),
                "status": "Completed" if not parent_boms else "In Progress",
            },
            commit=True,
        )

        if parent_boms:  # there is a next level to process
            process_boms_cost_level_wise(update_doc=frappe.get_doc("BOM Update Log", log.name))
62857e3e080b3888f40a09112be63238974dd175
180
https://github.com/frappe/erpnext.git
87
def resume_bom_cost_update_jobs(): in_progress_logs = frappe.db.get_all( "BOM Update Log", {"update_type": "Update Cost", "status": "In Progress"}, ["name", "processed_boms", "current_level"], ) if not in_progress_logs: return for log in in_progress_logs: # check if all log batches of current level are processed bom_batches = frappe.db.get_all( "BOM Update Batch", {"parent": log.name, "level": log.current_level}, ["name", "boms_updated"] ) incomplete_level = any(not row.get("boms_updated") for row in bom_batches) if not bom_batches or incomplete_level: continue # Prep parent BOMs & updated processed BOMs for next level current_boms, processed_boms = get_processed_current_boms(log, bom_batches) parent_boms = get_next_higher_level_boms(child_boms=current_boms, processed_boms=processed_boms) set_values_in_log( log.name, values={ "processed_boms": json.dumps(processed_boms), "parent_boms": json.dumps(parent_boms), "status": "Completed" if not parent_boms else "In Progress", }, commit=True, ) if parent_boms: # there is a next l
27
311
resume_bom_cost_update_jobs
20
0
3
4
airflow/providers/docker/operators/docker_swarm.py
46,966
Fix new MyPy errors in main (#22884) Those MyPe errors are side effect of some new dependencies.
airflow
11
Python
17
docker_swarm.py
def on_kill(self) -> None:
    if self.cli is not None and self.service is not None:
        self.log.info('Removing docker service: %s', self.service['ID'])
        self.cli.remove_service(self.service['ID'])
6933022e94acf139b2dea9a589bb8b25c62a5d20
50
https://github.com/apache/airflow.git
48
def on_kill(self) -> None:
7
82
on_kill
41
0
2
8
tests/core/full_node/test_mempool.py
102,720
Merge standalone wallet into main (#9793) * wallet changes from pac * cat changes * pool tests * pooling tests passing * offers * lint * mempool_mode * black * linting * workflow files * flake8 * more cleanup * renamed * remove obsolete test, don't cast announcement * memos are not only bytes32 * trade renames * fix rpcs, block_record * wallet rpc, recompile settlement clvm * key derivation * clvm tests * lgtm issues and wallet peers * stash * rename * mypy linting * flake8 * bad initializer * flaky tests * Make CAT wallets only create on verified hints (#9651) * fix clvm tests * return to log lvl warn * check puzzle unhardened * public key, not bytes. api caching change * precommit changes * remove unused import * mypy ci file, tests * ensure balance before creating a tx * Remove CAT logic from full node test (#9741) * Add confirmations and sleeps for wallet (#9742) * use pool executor * rever merge mistakes/cleanup * Fix trade test flakiness (#9751) * remove precommit * older version of black * lint only in super linter * Make announcements in RPC be objects instead of bytes (#9752) * Make announcements in RPC be objects instead of bytes * Lint * misc hint'ish cleanup (#9753) * misc hint'ish cleanup * unremove some ci bits * Use main cached_bls.py * Fix bad merge in main_pac (#9774) * Fix bad merge at 71da0487b9cd5564453ec24b76f1ac773c272b75 * Remove unused ignores * more unused ignores * Fix bad merge at 3b143e705057d6c14e2fb3e00078aceff0552d7e * One more byte32.from_hexstr * Remove obsolete test * remove commented out * remove duplicate payment object * remove long sync * remove unused test, noise * memos type * bytes32 * make it clear it's a single state at a time * copy over asset ids from pacr * file endl linter * Update chia/server/ws_connection.py Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com> Co-authored-by: Matt Hauff <quexington@gmail.com> Co-authored-by: Kyle Altendorf <sda@fstab.net> Co-authored-by: dustinface <35775977+xdustinface@users.noreply.github.com>
chia-blockchain
18
Python
33
test_mempool.py
def test_agg_sig_mixed(self):
    npc_list = [
        NPC(self.h1, self.h2, [(self.ASM, [ConditionWithArgs(self.ASM, [bytes(self.pk1), b"msg1"])])]),
        NPC(self.h1, self.h2, [(self.ASU, [ConditionWithArgs(self.ASU, [bytes(self.pk2), b"msg2"])])]),
    ]
    pks, msgs = pkm_pairs(npc_list, b"foobar")
    assert [bytes(pk) for pk in pks] == [bytes(self.pk1), bytes(self.pk2)]
    assert msgs == [b"msg1" + self.h1 + b"foobar", b"msg2"]
89f15f591cc3cc3e8ae40e95ffc802f7f2561ece
144
https://github.com/Chia-Network/chia-blockchain.git
97
def test_agg_sig_mixed(self): npc_list = [ NPC(self.h1, self.h2, [(self.ASM, [ConditionWithArgs(self.ASM, [bytes(self.pk1), b"msg1"])])]), NPC(self.h1, self.h2, [(self.ASU, [ConditionWithArgs(self.ASU, [bytes(self.pk2), b"msg2"])])]), ] pks, msgs = pkm_pairs(npc_list, b"foobar")
16
211
test_agg_sig_mixed
26
0
2
5
homeassistant/components/life360/device_tracker.py
315,032
Convert life360 integration to entity based (#72461) * Convert life360 integration to entity based * Improve config_flow.py type checking * Add tests for config flow Fix form defaults for reauth flow. * Cover reauth when config entry loaded * Update per review (except for dataclasses) * Restore check for missing location information This is in current code but was accidentally removed in this PR. * Fix updates from review * Update tests per review changes * Change IntegData to a dataclass * Use dataclasses to represent fetched Life360 data * Always add extra attributes * Update per review take 2 * Tweak handling of bad last_seen or location_accuracy * Fix type of Life360Member.gps_accuracy * Update per review take 3 * Update .coveragerc * Parametrize successful reauth flow test * Fix test coverage failure * Update per review take 4 * Fix config schema
core
10
Python
25
device_tracker.py
def entity_picture(self) -> str | None:
    if self.available:
        self._attr_entity_picture = self._data.entity_picture
    return super().entity_picture

# All of the following will only be called if self.available is True.
0a65f53356e124592cae37ea1f1873b789e0726b
30
https://github.com/home-assistant/core.git
61
def entity_picture(self) -> str | None: if self.available: self._attr_entity_picture = self._data.entity_picture return super().entity_picture # All of the following will only be called if self.available is True
7
52
entity_picture
94
0
1
20
python/ray/tests/test_resource_demand_scheduler.py
131,772
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
ray
18
Python
78
test_resource_demand_scheduler.py
def test_packing(self):
    provider = MockProvider()
    scheduler = ResourceDemandScheduler(
        provider, TYPES_A, 10, head_node_type="p2.8xlarge"
    )

    provider.create_node({}, {TAG_RAY_USER_NODE_TYPE: "p2.8xlarge"}, 1)
    # At this point our cluster has 1 p2.8xlarge instances (8 GPUs) and is
    # fully idle.
    nodes = provider.non_terminated_nodes({})

    resource_demands = [{"GPU": 1}] * 2
    pending_placement_groups = [
        PlacementGroupTableData(
            state=PlacementGroupTableData.PENDING,
            strategy=PlacementStrategy.STRICT_PACK,
            bundles=[Bundle(unit_resources={"GPU": 2})] * 3,
        ),
    ]
    # The 2 resource demand gpus should still be packed onto the same node
    # as the 6 GPU placement group.
    to_launch, rem = scheduler.get_nodes_to_launch(
        nodes, {}, resource_demands, {}, pending_placement_groups, {}
    )
    assert to_launch == {}
    assert not rem
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
127
https://github.com/ray-project/ray.git
294
def test_packing(self): provider = MockProvider() scheduler = ResourceDemandScheduler( provider, TYPES_A, 10, head_node_type="p2.8xlarge" ) provider.create_node({}, {TAG_RAY_USER_NODE_TYPE: "p2.8xlarge"}, 1) # At this point our cluster has 1 p2.8xlarge i
26
202
test_packing
21
0
1
7
tests/sentry/auth/test_access.py
88,511
feature(hybrid-cloud): Access with silo tests (#41305) Goal of this PR is implement a secondary interface for creating `Access` objects that work on service dataclasses only. It validates that secondary interface by running the access test suite against both implementations *in all silo modes* ensuring full compatibility. Notably, while most of the org member access logic is left untouched, some parts of existing logic have been slightly refactored: 1. Organizationless Access objects no longer need the DB, and act on shared logic from the service layer. 2. sso state and permissions querying is now extracted into the service layer, and even the existing access uses that.
sentry
10
Python
12
test_access.py
def test_superuser(self):
    request = self.make_request(user=self.superuser, is_superuser=False)
    result = self.from_request(request)
    assert not result.has_permission("test.permission")

    request = self.make_request(user=self.superuser, is_superuser=True)
    result = self.from_request(request)
    assert result.has_permission("test.permission")
fef9c695a1a7d3384fb3ce7ec6c264632e77061d
68
https://github.com/getsentry/sentry.git
62
def test_superuser(self): request = self.make_request(user=self.superuser, is_superuser=False) result = self.from_request(request) assert not result.has_permission("test.permission") request = self.make_request(user=self.superuser, is_superuser=True) result = self.from_request(request) assert result.has_permission("te
10
111
test_superuser
16
0
2
6
plugins/extract/recognition/vgg_face2_keras.py
101,600
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
faceswap
8
Python
14
vgg_face2_keras.py
def _integer_iterator(cls) -> Generator[int, None, None]:
    i = -1
    while True:
        i += 1
        yield i
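The generator above simply counts upward from zero forever, equivalent in spirit to itertools.count(0). A short standalone usage sketch, pulling the first few values lazily with itertools.islice; the module-level function name is an illustrative stand-in for the classmethod in the record.

from itertools import islice
from typing import Generator

def integer_iterator() -> Generator[int, None, None]:
    # Count upward from zero indefinitely; callers pull values lazily.
    i = -1
    while True:
        i += 1
        yield i

print(list(islice(integer_iterator(), 5)))  # [0, 1, 2, 3, 4]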
98d01760e469fd2108eed8d0b0a1ba6297c3177c
27
https://github.com/deepfakes/faceswap.git
59
def _integer_iterator(cls) -> Generator[int, None, None]: i = -1 while True: i += 1 yield i
5
45
_integer_iterator
77
0
3
18
freqtrade/freqai/data_drawer.py
150,159
use cloudpickle in place of pickle. define Paths once in data_drawer.
freqtrade
14
Python
64
data_drawer.py
def load_historic_predictions_from_disk(self):
    exists = self.historic_predictions_path.is_file()  # resolve().exists()
    if exists:
        with open(self.historic_predictions_path, "rb") as fp:
            self.historic_predictions = cloudpickle.load(fp)
        logger.info(
            f"Found existing historic predictions at {self.full_path}, but beware "
            "that statistics may be inaccurate if the bot has been offline for "
            "an extended period of time."
        )
    elif not self.follow_mode:
        logger.info("Could not find existing historic_predictions, starting from scratch")
    else:
        logger.warning(
            f"Follower could not find historic predictions at {self.full_path} "
            "sending null values back to strategy"
        )

    return exists
40f00196ebe4abc91b9987bf4365ea43f48c0eee
73
https://github.com/freqtrade/freqtrade.git
276
def load_historic_predictions_from_disk(self): exists = self.historic_predictions_path.is_file() # resolve().exists() if exists: with open(self.historic_predictions_path, "rb") as fp: self.historic_predictions = cloudpickle.load(fp) logger.info( f"Found existing historic predictions at {self.full_path}, but beware " "that statistics may be inaccurate if the bot has been offline for " "an extended period of time." ) elif not self.follow_mode: logger.info("Could not find existing historic_predictions, starting from scratch") else: logger.warning(
15
151
load_historic_predictions_from_disk
18
0
2
6
kitty_tests/datatypes.py
103,763
Use a regex for bracketed paste sanitization
kitty
13
Python
17
datatypes.py
def test_bracketed_paste_sanitizer(self):
    from kitty.utils import sanitize_for_bracketed_paste
    for x in ('\x1b[201~ab\x9b201~cd', '\x1b[201\x1b[201~~ab'):
        q = sanitize_for_bracketed_paste(x.encode('utf-8'))
        self.assertNotIn(b'\x1b[201~', q)
        self.assertNotIn('\x9b201~'.encode('utf-8'), q)
26b8ab9adf28dd2cab8614ec223d0cb4519763fa
53
https://github.com/kovidgoyal/kitty.git
64
def test_bracketed_paste_sanitizer(self): from kitty.utils import sanitize_for_bracketed_paste for x
9
98
test_bracketed_paste_sanitizer
60
0
3
17
tests/integration/reduce/test_reduce.py
11,385
fix: remove return_results (#4347)
jina
18
Python
46
test_reduce.py
def test_reduce_needs():
    flow = (
        Flow(port_expose=exposed_port)
        .add(uses=Executor1, name='pod0')
        .add(uses=Executor2, needs='gateway', name='pod1')
        .add(uses=Executor3, needs='gateway', name='pod2')
        .add(needs=['pod0', 'pod1', 'pod2'], name='pod3')
    )

    with flow as f:
        da = DocumentArray([Document() for _ in range(5)])
        resp = Client(port=exposed_port, return_responses=True).post('/', inputs=da)

    assert len(resp[0].docs) == 5
    for doc in resp[0].docs:
        assert doc.text == 'exec1'
        assert doc.tags == {'a': 'b'}
        assert doc.modality == 'image'
        assert (doc.embedding == np.zeros(3)).all()
ae6df58f80d20fe4d8a11dbd3927593f228e990f
176
https://github.com/jina-ai/jina.git
151
def test_reduce_needs(): flow = ( Flow(port_expose=exposed_port) .add(uses=Executor1, name='pod0') .add(uses=Executor2
34
294
test_reduce_needs
20
0
1
10
tests/test_serializers.py
59,154
Remove deep serialization from `PickleSerializer` and add tests (#7044)
prefect
10
Python
18
test_serializers.py
def test_picklelib_is_used(self, monkeypatch):
    dumps = MagicMock(return_value=b"test")
    loads = MagicMock(return_value="test")
    monkeypatch.setattr("pickle.dumps", dumps)
    monkeypatch.setattr("pickle.loads", loads)
    serializer = PickleSerializer(picklelib="pickle")

    serializer.dumps("test")
    dumps.assert_called_once_with("test")

    serializer.loads(b"test")
    loads.assert_called_once_with(base64.decodebytes(b"test"))
7092f0403a97154d3c3909e3fcd95e7db5776246
79
https://github.com/PrefectHQ/prefect.git
82
def test_picklelib_is_used(self, monkeypatch):
14
140
test_picklelib_is_used
17
0
1
7
networkx/algorithms/tests/test_distance_measures.py
177,098
Add weight distance metrics (#5305) Adds the weight keyword argument to allow users to compute weighted distance metrics e.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the weight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable, compute the weight via the function. Default is None, meaning return unweighted result which is the current behavior. Co-authored-by: Dan Schult <dschult@colgate.edu> Co-authored-by: Ross Barnowski <rossbar@berkeley.edu>
networkx
13
Python
13
test_distance_measures.py
def test_bound_center_weight_attr(self):
    result = {0}
    assert (
        set(nx.center(self.G, usebounds=True, weight="weight"))
        == set(nx.center(self.G, usebounds=True, weight="cost"))
        == result
    )
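The test above exercises the weight keyword that this commit adds to the distance measures. A small sketch of how a weighted center can differ from the unweighted one, assuming a networkx version that includes this change; the graph and weights are invented for illustration.

import networkx as nx

G = nx.path_graph(4)                    # 0 - 1 - 2 - 3
nx.set_edge_attributes(G, 1, "weight")
G[0][1]["weight"] = 10                  # make the leftmost edge expensive

print(nx.center(G))                     # unweighted hop count: [1, 2]
print(nx.center(G, weight="weight"))    # weighted: [1], pulled toward the cheap side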
28f78cfa9a386620ee1179582fda1db5ffc59f84
54
https://github.com/networkx/networkx.git
70
def test_bound_center_weight_attr(self): result = {0} assert ( set(nx.center(self.G, usebounds=True, weight="weight")) == set(nx.ce
9
84
test_bound_center_weight_attr
6
0
1
2
dash/testing/browser.py
40,109
:hocho: deprecated find_element(s)_by_css_selector
dash
8
Python
6
browser.py
def find_element(self, selector):
    return self.driver.find_element(By.CSS_SELECTOR, selector)
5dfa6b0782803cb0635119ee1dcf8775dd76c8a7
21
https://github.com/plotly/dash.git
20
def find_element(self, selector): return self.driv
6
34
find_element
19
0
2
6
tests/components/risco/test_sensor.py
304,733
Support for local push in Risco integration (#75874) * Local config flow * Local entities * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Address code review comments * More type hints * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * More annotations * Even more annonations * New entity naming * Move fixtures to conftest * Improve state tests for local * Remove mutable default arguments * Remove assertions for lack of state * Add missing file * Switch setup to fixtures * Use error fixtures in test_config_flow * Apply suggestions from code review Co-authored-by: Martin Hjelmare <marhje52@gmail.com> Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
10
Python
18
test_sensor.py
async def test_error_on_login(hass, login_with_error, cloud_config_entry):
    await hass.config_entries.async_setup(cloud_config_entry.entry_id)
    await hass.async_block_till_done()

    registry = er.async_get(hass)
    for id in ENTITY_IDS.values():
        assert not registry.async_is_registered(id)
635eda584dc8f932af235b72bb36ad76e74662f5
52
https://github.com/home-assistant/core.git
41
async def test_error_on_login(hass, login_with_error, cloud_config_entry): await hass.config_entries.async_setup(cloud_config_entry.entry_id) await hass.async_block_till_done() registry = er.async_get(hass) for id in ENT
15
87
test_error_on_login
53
0
3
18
mmdet/models/roi_heads/standard_roi_head.py
244,375
Simplify api of one-stage detector
mmdetection
14
Python
40
standard_roi_head.py
def aug_test(self, x, proposal_list, aug_batch_img_metas, rescale=False):
    det_bboxes, det_labels = self.aug_test_bboxes(x, aug_batch_img_metas,
                                                  proposal_list, self.test_cfg)
    if rescale:
        _det_bboxes = det_bboxes
    else:
        _det_bboxes = det_bboxes.clone()
        _det_bboxes[:, :4] *= det_bboxes.new_tensor(
            aug_batch_img_metas[0][0]['scale_factor'])
    bbox_results = bbox2result(_det_bboxes, det_labels,
                               self.bbox_head.num_classes)

    # det_bboxes always keep the original scale
    if self.with_mask:
        segm_results = self.aug_test_mask(x, aug_batch_img_metas, det_bboxes,
                                          det_labels)
        return [(bbox_results, segm_results)]
    else:
        return [bbox_results]
9c5b3331ac8edbfa328922fbab45c382380da540
120
https://github.com/open-mmlab/mmdetection.git
375
def aug_test(self, x, proposal_list, aug_batch_img_metas, rescale=False): det_bboxes, det_labels = self.aug_test_bboxes(x, aug_batch_img_metas, proposal_list, self.test_cfg) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= det_bboxes.new_tensor( aug_batch_img_metas[0][0]['scale_factor']) bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) # det_bboxes always keep the original scale if self.with_mask: segm_results = self.aug_test_mask(x, aug_batch_img_metas, det_bboxes, det_labels)
20
180
aug_test
11
0
1
3
tests/components/zwave_js/test_init.py
309,854
Avoid removing zwave_js devices for non-ready nodes (#59964) * Only replace a node if the mfgr id / prod id / prod type differ * Prefer original device name for unready node * move register_node_in_dev_reg into async_setup_entry * simplify get_device_id_ext * Don't need hex ids * Revert "move register_node_in_dev_reg into async_setup_entry" This reverts commit f900e5fb0c67cc81657a1452b51c313bccb6f9e1. * Revert Callable change * Revert device backup name * Add test fixtures * Update existing not ready test with new fixture data * Check device properties after node added event * Add entity check * Check for extended device id * better device info checks * Use receive_event to properly setup components * Cleanup tests * improve test_replace_different_node * improve test_replace_same_node * add test test_node_model_change * Clean up long comments and strings * Format * Reload integration to detect node device config changes * update assertions * Disable entities on "value removed" event * Disable node status sensor on node replacement * Add test for disabling entities on remove value event * Add test for disabling node status sensor on node replacement * disable entity -> remove entity Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
10
Python
11
test_init.py
async def test_null_name(hass, client, null_name_check, integration):
    node = null_name_check
    assert hass.states.get(f"switch.node_{node.node_id}")
cb89c23c0ffd7beba1ecc0cb84d80e8842f9a571
25
https://github.com/home-assistant/core.git
20
async def test_null_name(hass, client, null_name_check, integration): node = null_name_check assert hass.states.get(f"switch.node_{node.node_id}")
9
48
test_null_name
26
0
3
12
tests/model_test_utils.py
214,851
refactor sequence tagger
flair
10
Python
24
model_test_utils.py
def build_model(self, embeddings, label_dict, **kwargs):
    model_args = dict(self.model_args)
    for k in kwargs.keys():
        if k in model_args:
            del model_args[k]
    return self.model_cls(
        embeddings=embeddings,
        label_dictionary=label_dict,
        label_type=self.train_label_type,
        **model_args,
        **kwargs,
    )
5d210c14f5b903291cde509d34142c220c06de9e
65
https://github.com/flairNLP/flair.git
134
def build_model(self, embeddings, label_dict, **kwargs): model_args = dict(self.model_args) for k in kwargs.keys(): if k in model_args: del model_args[k] return self.model_cls( embeddings=embeddings, label_dictionary=label_dict, label_type=self.train_label_type, **model_args, **kwargs,
13
95
build_model
19
0
4
6
src/transformers/models/mobilevit/modeling_mobilevit.py
31,932
add MobileViT model (#17354) * add MobileViT * fixup * Update README.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * remove empty line Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * use clearer variable names * rename to MobileViTTransformerLayer * no longer inherit from nn.Sequential * fixup * fixup * not sure why this got added twice * rename organization for checkpoints * fix it up * Update src/transformers/models/mobilevit/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/configuration_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/configuration_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/configuration_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update tests/models/mobilevit/test_modeling_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * code style improvements * fixup * Update docs/source/en/model_doc/mobilevit.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/model_doc/mobilevit.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/mobilevit/configuration_mobilevit.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/mobilevit/configuration_mobilevit.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * download labels from hub * rename layers * rename more layers * don't compute loss in separate function * remove some nn.Sequential * replace nn.Sequential with new MobileViTTransformer class * replace nn.Sequential with MobileViTMobileNetLayer * fix pruning since model structure changed * fixup * fix doc comment * remove custom resize from feature extractor * fix ONNX import * add to doc tests * use center_crop from image_utils * move RGB->BGR flipping into image_utils * fix broken tests * wrong type hint * small tweaks Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
14
Python
17
modeling_mobilevit.py
def _prune_heads(self, heads_to_prune):
    for layer_index, heads in heads_to_prune.items():
        mobilevit_layer = self.encoder.layer[layer_index]
        if isinstance(mobilevit_layer, MobileViTLayer):
            for transformer_layer in mobilevit_layer.transformer.layer:
                transformer_layer.attention.prune_heads(heads)
fbc7598babd06a49797db7142016f0029cdc41b2
54
https://github.com/huggingface/transformers.git
89
def _prune_heads(self, heads_to_prune): for layer_index, heads in heads_to_prune.items(): mobilevit_layer = self.encoder.layer[lay
15
85
_prune_heads
12
0
2
5
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/metadata.py
62,080
upd; format
transferlearning
11
Python
10
metadata.py
def provides(self, value):
    if self._legacy:
        self._legacy['Provides-Dist'] = value
    else:
        self._data['provides'] = value
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
30
https://github.com/jindongwang/transferlearning.git
47
def provides(self, value): if self._legacy: self._legacy['
5
51
provides
23
0
1
6
freqtrade/freqai/data_handler.py
149,782
use logger in favor of print
freqtrade
11
Python
21
data_handler.py
def compute_distances(self) -> float:
    logger.info("computing average mean distance for all training points")
    pairwise = pairwise_distances(self.data_dictionary["train_features"], n_jobs=-1)
    avg_mean_dist = pairwise.mean(axis=1).mean()
    logger.info("avg_mean_dist", avg_mean_dist)

    return avg_mean_dist
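The helper above computes, for every training point, its mean pairwise distance to all points, then averages those means into one "spread" scalar. A standalone sketch of the same computation on a small random matrix, assuming scikit-learn and NumPy are available; the data shape is arbitrary.

import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.default_rng(0)
train_features = rng.normal(size=(100, 5))   # 100 points, 5 features

# Full pairwise distance matrix, then the mean distance per row,
# then the average of those row means.
pairwise = pairwise_distances(train_features, n_jobs=-1)
avg_mean_dist = pairwise.mean(axis=1).mean()
print(f"avg_mean_dist: {avg_mean_dist:.3f}")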
29c2d1d1891f7e804a133908702f435ff4fd8f32
53
https://github.com/freqtrade/freqtrade.git
57
def compute_distances(self) -> float: logger.info("computing average mean distance for all training points") pairwise = pairwise_distances(self.data_dictionary["train_features"], n_jobs=-1) avg_mean_dist = pairwise.mean(axis=1).mean() logger.info("avg_mean_dist", avg_mean_dist) return avg_mean_dist
12
90
compute_distances
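A minimal standalone sketch of the same average-mean-distance computation with scikit-learn's pairwise_distances; the feature matrix here is random and only illustrative:

import numpy as np
from sklearn.metrics import pairwise_distances

train_features = np.random.rand(100, 5)          # stand-in for data_dictionary["train_features"]
pairwise = pairwise_distances(train_features, n_jobs=-1)
avg_mean_dist = pairwise.mean(axis=1).mean()     # mean distance per point, then the overall average
print(f"avg_mean_dist: {avg_mean_dist:.4f}")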
19
1
1
11
tests/fixtures/database.py
55,379
Blocks Refactor (PrefectHQ/orion#1670) * Rename BlockSpec to BlockSchema * Renames API Block to Block Document
prefect
14
Python
17
database.py
async def block_schema(session):
    block_schema = await models.block_schemas.create_block_schema(
        session=session,
        block_schema=schemas.core.BlockSchema(
            name="x",
            version="1.0",
            type="abc",
        ),
    )
    await session.commit()
    return block_schema


@pytest.fixture
b9f2761989e5b324beb9a5b88688f9a75c50312b
@pytest.fixture
49
https://github.com/PrefectHQ/prefect.git
83
async def block_schema(session): block_schema = await models.block_schemas.cr
14
89
block_schema
86
1
2
23
sklearn/linear_model/tests/test_ridge.py
259,355
Fix Ridge sparse + sample_weight + intercept (#22899) Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org>
scikit-learn
14
Python
71
test_ridge.py
def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed):
    X, y = _make_sparse_offset_regression(
        n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0
    )
    if with_sample_weight:
        rng = np.random.RandomState(global_random_seed)
        sample_weight = 1.0 + rng.uniform(size=X.shape[0])
    else:
        sample_weight = None
    X_csr = sp.csr_matrix(X)

    params = dict(
        alpha=1.0, solver="sag", fit_intercept=True, tol=1e-10, max_iter=100000
    )
    dense_ridge = Ridge(**params)
    sparse_ridge = Ridge(**params)
    dense_ridge.fit(X, y, sample_weight=sample_weight)
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        sparse_ridge.fit(X_csr, y, sample_weight=sample_weight)
    assert_allclose(dense_ridge.intercept_, sparse_ridge.intercept_, rtol=1e-4)
    assert_allclose(dense_ridge.coef_, sparse_ridge.coef_, rtol=1e-4)
    with pytest.warns(UserWarning, match='"sag" solver requires.*'):
        Ridge(solver="sag").fit(X_csr, y)


@pytest.mark.parametrize("return_intercept", [False, True])
@pytest.mark.parametrize("sample_weight", [None, np.ones(1000)])
@pytest.mark.parametrize("arr_type", [np.array, sp.csr_matrix])
@pytest.mark.parametrize(
    "solver", ["auto", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"]
)
d76f87c8eb5a50da917cab8ea87ed0bfdfb7dd3c
@pytest.mark.parametrize("return_intercept", [False, True]) @pytest.mark.parametrize("sample_weight", [None, np.ones(1000)]) @pytest.mark.parametrize("arr_type", [np.array, sp.csr_matrix]) @pytest.mark.parametrize( "solver", ["auto", "sparse_cg", "cholesky", "lsqr", "sag", "saga", "lbfgs"] )
214
https://github.com/scikit-learn/scikit-learn.git
181
def test_ridge_fit_intercept_sparse_sag(with_sample_weight, global_random_seed): X, y = _make_sparse_offset_regression( n_features=5, n_samples=20, random_state=global_random_seed, X_offset=5.0 ) if with_sample_weight: rng = np.random.RandomState(global_random_seed) sample_weight = 1.0 + rng.uniform(size=X.shape[0]) else: sample_weight = None X_csr = sp.csr_matrix(X) params = dict( alpha=1.0, solver="sag", fit_intercept=True, tol=1e-10, max_iter=100000 ) dense_ridge = Ridge(**params) sparse_ridge = Ridge(**params) dense_ridge.fit(X, y, sample_weight=sample_weight) with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) sparse_ridge.fit(X_csr, y, sample_weight=sample_weight
47
447
test_ridge_fit_intercept_sparse_sag
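A minimal sketch of the behaviour this test pins down — Ridge with the "sag" solver and an intercept should reach the same solution for dense and CSR input. The data below is random and purely illustrative:

import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import Ridge

rng = np.random.RandomState(0)
X, y = rng.rand(20, 5), rng.rand(20)

params = dict(alpha=1.0, solver="sag", fit_intercept=True, tol=1e-10, max_iter=100000)
dense_ridge = Ridge(**params).fit(X, y)
sparse_ridge = Ridge(**params).fit(sp.csr_matrix(X), y)

# The two fits should land on (nearly) the same intercept and coefficients
print(dense_ridge.intercept_, sparse_ridge.intercept_)
print(np.max(np.abs(dense_ridge.coef_ - sparse_ridge.coef_)))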
42
0
6
11
django/utils/encoding.py
206,638
Refs #33476 -- Reformatted code with Black.
django
15
Python
32
encoding.py
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"): # Handle the common case first for performance reasons. if isinstance(s, bytes): if encoding == "utf-8": return s else: return s.decode("utf-8", errors).encode(encoding, errors) if strings_only and is_protected_type(s): return s if isinstance(s, memoryview): return bytes(s) return str(s).encode(encoding, errors)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
86
https://github.com/django/django.git
110
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"): # Handle the common case first for performance reasons. if isinstance(s, bytes): if encoding == "utf-8":
12
141
force_bytes
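Assuming Django is installed, the helper can be exercised directly; a short usage sketch:

from django.utils.encoding import force_bytes

assert force_bytes("abc") == b"abc"                    # str is encoded with UTF-8 by default
assert force_bytes(b"abc") == b"abc"                   # bytes pass through untouched
assert force_bytes(memoryview(b"abc")) == b"abc"       # memoryview is materialised to bytes
assert force_bytes("café", encoding="latin-1") == "café".encode("latin-1")
assert force_bytes(42) == b"42"                        # anything else goes through str() first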
31
0
1
8
pandas/tests/util/test_assert_frame_equal.py
163,813
REGR: check_flags not respected in assert_frame_equal (#45565)
pandas
11
Python
22
test_assert_frame_equal.py
def test_assert_frame_equal_checking_allow_dups_flag():
    # GH#45554
    left = DataFrame([[1, 2], [3, 4]])
    left.flags.allows_duplicate_labels = False

    right = DataFrame([[1, 2], [3, 4]])
    right.flags.allows_duplicate_labels = True
    tm.assert_frame_equal(left, right, check_flags=False)

    with pytest.raises(AssertionError, match="allows_duplicate_labels"):
        tm.assert_frame_equal(left, right, check_flags=True)
49bddad8b16d7c881a3440340035b1b83854e55e
90
https://github.com/pandas-dev/pandas.git
58
def test_assert_frame_equal_checking_allow_dups_flag(): # GH#45554 left = DataFrame([[1, 2], [3, 4]]) left.flags.allows_duplicate_labels = False right = DataFrame([[1, 2], [3, 4]]) right.flags.allows_duplicate_labels = True tm.assert_
13
137
test_assert_frame_equal_checking_allow_dups_flag
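The user-facing behaviour covered here, as a standalone sketch (requires pandas >= 1.2, where DataFrame.flags and the check_flags argument exist):

import pandas as pd
import pandas.testing as tm

left = pd.DataFrame([[1, 2], [3, 4]])
left.flags.allows_duplicate_labels = False
right = pd.DataFrame([[1, 2], [3, 4]])
right.flags.allows_duplicate_labels = True

tm.assert_frame_equal(left, right, check_flags=False)   # values match, flags ignored
try:
    tm.assert_frame_equal(left, right, check_flags=True)
except AssertionError as err:
    print("flags differ:", err)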
42
0
5
14
paddlenlp/taskflow/knowledge_mining.py
322,196
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <chenzeyu01@baidu.com> Co-authored-by: tianxin <tianxin04@baidu.com>
PaddleNLP
11
Python
25
knowledge_mining.py
def _load_task_resources(self):
    if self._tag_path is None:
        self._tag_path = os.path.join(self._task_path, "tags.txt")
    self._tags_to_index, self._index_to_tags, self._all_tags = self._load_labels(
        self._tag_path)
    if self._term_schema_path is None:
        self._term_schema_path = os.path.join(self._task_path,
                                              "termtree_type.csv")
    if self._term_data_path is None:
        self._term_data_path = os.path.join(self._task_path, "termtree_data")
    if self._linking is True:
        self._termtree = TermTree.from_dir(
            self._term_schema_path, self._term_data_path, self._linking)
621357338437ee420eabbbf5ab19065bc85e73a5
121
https://github.com/PaddlePaddle/PaddleNLP.git
242
def _load_task_resources(self): if self._tag_path is None: self._tag_path = os.path.join(self._task_path, "tags.txt") self._tags_to_index, self._index_to_tags, self._all_tags = self._load_labels( self._tag_path) if self._term_schema_path is None: self._term_schema_path = os.path.join(self._task_path, "termtree_type.csv") if self._term_data_path is None: self._term_data_path = os.path.join(self._task_path, "termtree_data") if self._linking
17
190
_load_task_resources
31
0
1
11
tests/components/insteon/test_api_device.py
299,414
Insteon Device Control Panel (#70834) Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
core
13
Python
28
test_api_device.py
async def test_cancel_add_device(hass, hass_ws_client):
    ws_client, devices, _, _ = await _async_setup(hass, hass_ws_client)

    with patch.object(insteon.api.aldb, "devices", devices):
        await ws_client.send_json(
            {
                ID: 2,
                TYPE: "insteon/device/add/cancel",
            }
        )
        msg = await ws_client.receive_json()
        assert msg["success"]
a9ca774e7ed1d8fe502a53d5b765c1d9b393a524
68
https://github.com/home-assistant/core.git
120
async def test_cancel_add_device(hass, hass_ws_client): ws_client, devices, _, _ = await _async_setup(hass, hass_ws_client) with patch.object(insteon.api.aldb, "dev
17
113
test_cancel_add_device
187
0
1
59
tests/handlers/test_appservice.py
250,255
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
synapse
16
Python
119
test_appservice.py
def test_application_services_receive_local_to_device(self) -> None:
    interested_appservice = self._register_application_service(
        namespaces={
            ApplicationService.NS_USERS: [
                {
                    "regex": "@exclusive_as_user:.+",
                    "exclusive": True,
                }
            ],
        },
    )

    # Have local_user send a to-device message to exclusive_as_user
    message_content = {"some_key": "some really interesting value"}
    chan = self.make_request(
        "PUT",
        "/_matrix/client/r0/sendToDevice/m.room_key_request/3",
        content={
            "messages": {
                self.exclusive_as_user: {
                    self.exclusive_as_user_device_id: message_content
                }
            }
        },
        access_token=self.local_user_token,
    )
    self.assertEqual(chan.code, 200, chan.result)

    # Have exclusive_as_user send a to-device message to local_user
    chan = self.make_request(
        "PUT",
        "/_matrix/client/r0/sendToDevice/m.room_key_request/4",
        content={
            "messages": {
                self.local_user: {self.local_user_device_id: message_content}
            }
        },
        access_token=self.exclusive_as_user_token,
    )
    self.assertEqual(chan.code, 200, chan.result)

    # Check if our application service - that is interested in exclusive_as_user - received
    # the to-device message as part of an AS transaction.
    # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS.
    #
    # The uninterested application service should not have been notified at all.
    self.send_mock.assert_called_once()
    (
        service,
        _events,
        _ephemeral,
        to_device_messages,
        _otks,
        _fbks,
        _device_list_summary,
    ) = self.send_mock.call_args[0]

    # Assert that this was the same to-device message that local_user sent
    self.assertEqual(service, interested_appservice)
    self.assertEqual(to_device_messages[0]["type"], "m.room_key_request")
    self.assertEqual(to_device_messages[0]["sender"], self.local_user)

    # Additional fields 'to_user_id' and 'to_device_id' specifically for
    # to-device messages via the AS API
    self.assertEqual(to_device_messages[0]["to_user_id"], self.exclusive_as_user)
    self.assertEqual(
        to_device_messages[0]["to_device_id"], self.exclusive_as_user_device_id
    )
    self.assertEqual(to_device_messages[0]["content"], message_content)
652d1669c5a103b1c20478770c4aaf18849c09a3
262
https://github.com/matrix-org/synapse.git
871
def test_application_services_receive_local_to_device(self) -> None: interested_appservice = self._register_application_service( namespaces={ ApplicationService.NS_USERS: [ { "regex": "@exclusive_as_user:.+", "exclusive": True, } ], }, ) # Have local_user send a to-device message to exclusive_as_user message_content = {"some_key": "some really interesting value"} chan = self.make_request( "PUT", "/_matrix/client/r0/sendToDevice/m.room_key_request/3", content={ "messages": { self.exclusive_as_user: { self.exclusive_as_user_device_id: message_content } } }, access_token=self.local_user_token, ) self.assertEqual(chan.code, 200, chan.result) # Have exclusive_as_user send a to-device message to local_user chan = self.make_request( "PUT", "/_matrix/client/r0/sendToDevice/m.room_key_request/4", content={ "messages": { self.local_user: {self.local_user_device_id: message_content} } }, access_token=self.exclusive_as_user_token, ) self.assertEqual(chan.code, 200, chan.result) # Check if our application service - that is interested in exclusive_as_user - received # the to-device message as part of an AS transaction. # Only the local_user -> exclusive_as_user to-device message should have been forwarded to the AS. # # The uninterested application service should not have been notified at all. self.send_mock.assert_called_once() ( service, _events, _ephemeral, to_device_messages, _otks, _fbks, _device_list_summary, ) = self.send_mock.call_args[0] # Assert that this was the same to-device message that local_user sent self.assertEqual(service, interested_appservice) self.assertEqual(to_device_messages[0]["type"], "m.room_key_request") s
31
423
test_application_services_receive_local_to_device
17
0
1
5
erpnext/assets/doctype/asset/depreciation.py
69,242
fix: calculate depreciation properly on asset sale entry and scrap entry
erpnext
8
Python
17
depreciation.py
def reset_depreciation_schedule(asset, date):
    asset.flags.ignore_validate_update_after_submit = True

    # recreate original depreciation schedule of the asset
    asset.prepare_depreciation_data(date_of_return=date)

    modify_depreciation_schedule_for_asset_repairs(asset)
    asset.save()
ff5cad1cd617a23d6ffc9903f29d713a8db8d949
31
https://github.com/frappe/erpnext.git
11
def reset_depreciation_schedule(asset, date): asset.flags.ignore_validate_update_after_submit = True # recreate original depreciation sche
9
52
reset_depreciation_schedule
45
0
1
14
Tests/test_imagefont.py
243,073
update test_imagefont to use textbbox
Pillow
13
Python
31
test_imagefont.py
def test_multiline_width(self):
    ttf = self.get_font()
    im = Image.new(mode="RGB", size=(300, 100))
    draw = ImageDraw.Draw(im)
    assert (
        draw.textbbox((0, 0), "longest line", font=ttf)[2]
        == draw.multiline_textbbox((0, 0), "longest line\nline", font=ttf)[2]
    )
    with pytest.warns(DeprecationWarning) as log:
        assert (
            draw.textsize("longest line", font=ttf)[0]
            == draw.multiline_textsize("longest line\nline", font=ttf)[0]
        )
    assert len(log) == 2
e2158344a0b4b4016a39dcf40c7220aa77b60579
127
https://github.com/python-pillow/Pillow.git
167
def test_multiline_width(self): ttf = self.get_font() im = Image.new(mode="RGB", size=(300, 100)) draw = ImageDraw.Draw(im) assert (
22
202
test_multiline_width
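A self-contained sketch of the same textbbox/multiline_textbbox comparison, using Pillow's built-in default font so no TTF file is needed (illustrative; the two widths are expected to agree):

from PIL import Image, ImageDraw, ImageFont

font = ImageFont.load_default()
im = Image.new(mode="RGB", size=(300, 100))
draw = ImageDraw.Draw(im)

single = draw.textbbox((0, 0), "longest line", font=font)[2]
multi = draw.multiline_textbbox((0, 0), "longest line\nline", font=font)[2]
print(single, multi)   # the right edge of both bounding boxes should match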
29
0
2
8
src/paperless/serialisers.py
320,228
feat: add users and groups API routes
paperless-ngx
14
Python
25
serialisers.py
def get_permissions(self, obj):
    # obj.get_user_permissions() returns more permissions than desired
    permission_natural_keys = []
    permissions = obj.user_permissions.all()
    for permission in permissions:
        permission_natural_keys.append(
            permission.natural_key()[1] + "." + permission.natural_key()[0],
        )
    return permission_natural_keys
4333bd58cfeec5c613a8b9b5d3a3b713964f5c8e
52
https://github.com/paperless-ngx/paperless-ngx.git
100
def get_permissions(self, obj): # obj.get_user_permissions() returns more perm
10
85
get_permissions
12
0
1
3
dev/breeze/src/airflow_breeze/shell/shell_params.py
46,779
Prepare Breeze2 for prime time :) (#22713) This is a review and clean-up for all the parameters and commands for Breeze2 in order to prepare it for being used by the contribugors. There are various small fixes here and there, removal of duplicated code, refactoring and moving code around as well as cleanup and review all the parameters used for all implemented commands. The parameters, default values and their behaviours were updated to match "new" life of Breeze rather than old one. Some improvements are made to the autocomplete and click help messages printed. Full list of choices is always displayed, parameters are groups according to their target audience, and they were sorted according to importance and frequency of use. Various messages have been colourised according to their meaning - warnings as yellow, errors as red and informational messages as bright_blue. The `dry-run` option has been added to just show what would have been run without actually running some potentially "write" commands (read commands are still executed) so that you can easily verify and manually copy and execute the commands with option to modify them before. The `dry_run` and `verbose` options are now used for all commands. The "main" command now runs "shell" by default similarly as the original Breeze. All "shortcut" parameters have been standardized - i.e common options (verbose/dry run/help) have one and all common flags that are likely to be used often have an assigned shortcute. The "stop" and "cleanup" command have been added as they are necessary for average user to complete the regular usage cycle. Documentation for all the important methods have been updated.
airflow
9
Python
11
shell_params.py
def md5sum_cache_dir(self) -> Path:
    cache_dir = Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, self.the_image_type)
    return cache_dir
4ffd4f09532fceb67675fce4c1f5cd383eff992e
27
https://github.com/apache/airflow.git
25
def md5sum_cache_dir(self) -> Path: cache_dir = Path(BUILD_CACHE_DIR, self.airflow_branch, self.python, self.the_image_type) r
8
40
md5sum_cache_dir
7
0
1
2
src/transformers/testing_utils.py
37,492
Update all require decorators to use skipUnless when possible (#16999)
transformers
10
Python
7
testing_utils.py
def require_tokenizers(test_case):
    return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
57e6464ac9a31156f1c93e59107323e6ec01309e
20
https://github.com/huggingface/transformers.git
13
def require_tokenizers(test_case): return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)
5
37
require_tokenizers
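The same skipUnless pattern in isolation — a hedged, standalone sketch in which an importlib check stands in for is_tokenizers_available:

import importlib.util
import unittest


def require_tokenizers(test_case):
    """Skip the decorated test unless the `tokenizers` package is importable."""
    available = importlib.util.find_spec("tokenizers") is not None
    return unittest.skipUnless(available, "test requires tokenizers")(test_case)


class TokenizersTests(unittest.TestCase):
    @require_tokenizers
    def test_tokenizers_is_usable(self):
        import tokenizers  # only runs when the package is present
        self.assertTrue(hasattr(tokenizers, "Tokenizer"))


if __name__ == "__main__":
    unittest.main()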
11
0
1
6
src/streamlink/stream/ffmpegmux.py
187,901
stream.ffmpegmux: validate FFmpeg version and log FFmpeg version output on the debug logging level
streamlink
13
Python
11
ffmpegmux.py
def command(cls, session):
    with _lock_resolve_command:
        return cls._resolve_command(
            session.options.get("ffmpeg-ffmpeg"),
            not session.options.get("ffmpeg-no-validation"),
        )
d82184af1d8dfddd5e4ddcf4ee5f141e2e398d5e
35
https://github.com/streamlink/streamlink.git
69
def command(cls, session): with _lock_resolve_command:
7
60
command
9
0
20
103
yt_dlp/extractor/tiktok.py
162,176
[TikTok] Misc fixes (#2271) Closes #2265 Authored by: MinePlayersPE
yt-dlp
8
Python
8
tiktok.py
def _parse_aweme_video_app(self, aweme_detail):
    aweme_id = aweme_detail['aweme_id']
    video_info = aweme_detail['video']
be1f331f2103e6c89c8d25e47e1b445072b498dd
838
https://github.com/yt-dlp/yt-dlp.git
22
def _parse_aweme_video_app(self, aweme_detail): aweme_id = aweme_detail['aweme_id'] video_info = aweme_detail['video']
5
33
_parse_aweme_video_app
97
1
1
9
test/test_file_converter.py
257,021
Change return types of indexing pipeline nodes (#2342) * Change return types of file converters * Change return types of preprocessor * Change return types of crawler * Adapt utils to functions to new return types * Adapt __init__.py to new method names * Prevent circular imports * Update Documentation & Code Style * Let DocStores' run method accept Documents * Adapt tests to new return types * Update Documentation & Code Style * Put "# type: ignore" to right place * Remove id_hash_keys property from Document primitive * Update Documentation & Code Style * Adapt tests to new return types and missing id_hash_keys property * Fix mypy * Fix mypy * Adapt PDFToTextOCRConverter * Remove id_hash_keys from RestAPI tests * Update Documentation & Code Style * Rename tests * Remove redundant setting of content_type="text" * Add DeprecationWarning * Add id_hash_keys to elasticsearch_index_to_document_store * Change document type from dict to Docuemnt in PreProcessor test * Fix file path in Tutorial 5 * Remove added output in Tutorial 5 * Update Documentation & Code Style * Fix file_paths in Tutorial 9 + fix gz files in fetch_archive_from_http * Adapt tutorials to new return types * Adapt tutorial 14 to new return types * Update Documentation & Code Style * Change assertions to HaystackErrors * Import HaystackError correctly Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
13
Python
68
test_file_converter.py
def test_convert(Converter):
    converter = Converter()
    document = converter.convert(file_path=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")[0]
    pages = document.content.split("\f")
    assert len(pages) == 4  # the sample PDF file has four pages.
    assert pages[0] != ""  # the page 1 of PDF contains text.
    assert pages[2] == ""  # the page 3 of PDF file is empty.
    # assert text is retained from the document.
    # As whitespace can differ (\n," ", etc.), we standardize all to simple whitespace
    page_standard_whitespace = " ".join(pages[0].split())
    assert "Adobe Systems made the PDF specification available free of charge in 1993." in page_standard_whitespace


@pytest.mark.tika
@pytest.mark.parametrize("Converter", [PDFToTextConverter, TikaConverter])
834f8c49024063ce17a63e50a9d7cff12f1c4f91
@pytest.mark.tika
@pytest.mark.parametrize("Converter", [PDFToTextConverter, TikaConverter])
77
https://github.com/deepset-ai/haystack.git
127
def test_convert(Converter): converter = Converter() document = converter.convert(file_path=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")[0] pages = document.content.split("\f") assert len(pages) == 4 # the sample PDF file has four pages. assert pages[0] != "" # t
19
174
test_convert
46
0
1
6
analytics/tests/test_counts.py
83,356
docs: Add missing space in “time zone”. Signed-off-by: Anders Kaseorg <anders@zulip.com>
zulip
13
Python
39
test_counts.py
def test_bad_fill_to_time(self) -> None:
    stat = self.make_dummy_count_stat("test stat")
    with self.assertRaises(ValueError):
        process_count_stat(stat, installation_epoch() + 65 * self.MINUTE)
    with self.assertRaises(TimeZoneNotUTCException):
        process_count_stat(stat, installation_epoch().replace(tzinfo=None))

    # This tests the LoggingCountStat branch of the code in do_delete_counts_at_hour.
    # It is important that do_delete_counts_at_hour not delete any of the collected
    # logging data!
21cd1c10b3f12467f8f7d9b98b0589f31c2da852
60
https://github.com/zulip/zulip.git
97
def test_bad_fill_to_time(self) -> None: stat = self.make_dummy_count_stat("test stat") with self.assertRaises(ValueError): process_count_stat(stat, installation_epoch() + 65 * self.MINUTE) with se
12
106
test_bad_fill_to_time
25
0
1
6
code/deep/BJMMD/caffe/examples/pycaffe/tools.py
60,220
Balanced joint maximum mean discrepancy for deep transfer learning
transferlearning
9
Python
21
tools.py
def deprocess(self, im):
    im = im.transpose(1, 2, 0)
    im /= self.scale
    im += self.mean
    im = im[:, :, ::-1]  # change to RGB
    return np.uint8(im)
cc4d0564756ca067516f71718a3d135996525909
50
https://github.com/jindongwang/transferlearning.git
68
def deprocess(self, im): im = im.transpose(1, 2, 0) im /= self.scale im += self.mean im = im[:, :, ::-1
8
80
deprocess
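A self-contained sketch of the same CHW-to-HWC, un-scale, re-mean, BGR-to-RGB deprocessing on a dummy array; the scale and mean values are made up, the real ones come from the transformer configuration:

import numpy as np

scale = 1.0 / 255.0
mean = np.array([104.0, 117.0, 123.0])

im = np.random.rand(3, 224, 224)   # a Caffe-style CHW blob
im = im.transpose(1, 2, 0)         # CHW -> HWC
im /= scale                        # undo the input scaling
im += mean                         # add the channel means back
im = im[:, :, ::-1]                # BGR -> RGB
im = np.uint8(np.clip(im, 0, 255))
print(im.shape, im.dtype)          # (224, 224, 3) uint8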
45
0
1
17
keras/metrics/base_metric.py
274,628
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
29
base_metric.py
def update_state(self, y_true, y_pred, sample_weight=None):
    y_true = tf.cast(y_true, self._dtype)
    y_pred = tf.cast(y_pred, self._dtype)
    [
        y_true,
        y_pred,
    ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_true, y_pred], sample_weight
    )
    y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
        y_pred, y_true
    )

    ag_fn = tf.__internal__.autograph.tf_convert(
        self._fn, tf.__internal__.autograph.control_status_ctx()
    )
    matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
    return super().update_state(matches, sample_weight=sample_weight)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
121
https://github.com/keras-team/keras.git
184
def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, self._dtype) y_pred = tf.cast(y_pred, self._dtype) [ y_true, y_pred, ], sample_weight = metric
21
181
update_state
9
0
1
32
tests/gamestonk_terminal/stocks/screener/test_yahoofinance_view.py
281,015
Tests : Stocks > Research + Screener (#1131) * Updating tests : stocks/research * Updating tests : stocks/screener * Updating tests : stocks/screener
OpenBBTerminal
8
Python
9
test_yahoofinance_view.py
def test_historical_no_d_signals(mocker):
    # FORCE SINGLE THREADING
    yf_download = yahoofinance_view.yf.download
8f8147c3af76f03223943fe630a94dfb326b13c7
146
https://github.com/OpenBB-finance/OpenBBTerminal.git
14
def test_historical_no_d_signals(mocker): # FORCE SINGLE THREADING yf_download = yahoofinance_view.yf.download
6
21
test_historical_no_d_signals
17
0
2
11
airflow/utils/db.py
46,158
Enhance `db upgrade` args (#22102) Make `db upgrade` args more like `db downgrade`. ``` usage: airflow db upgrade [-h] [--from-revision FROM_REVISION] [--from-version FROM_VERSION] [-r REVISION] [-s] [-n VERSION] Upgrade the schema of the metadata database. To print but not execute commands, use option ``--show-sql-only``. If using options ``--from-revision`` or ``--from-version``, you must also use ``--show-sql-only``, because if actually *running* migrations, we should only migrate from the *current* revision. optional arguments: -h, --help show this help message and exit --from-revision FROM_REVISION (Optional) If generating sql, may supply a *from* revision --from-version FROM_VERSION (Optional) If generating sql, may supply a *from* version -r REVISION, --revision REVISION (Optional) The airflow revision to upgrade to. Note: must provide either `--revision` or `--version`. -s, --show-sql-only Don't actually run migrations; just print out sql scripts for offline migration. Required if using either `--from-version` or `--from-version`. -n VERSION, --version VERSION (Optional) The airflow version to upgrade to. Note: must provide either `--revision` or `--version`. ```
airflow
11
Python
12
db.py
def print_happy_cat(message):
    if sys.stdout.isatty():
        size = os.get_terminal_size().columns
    else:
        size = 0
    print(message.center(size))
    print(.center(size))
    print(.center(size))
    print(.center(size))
    print(.center(size))
    return
3452f7ce45607af04bade5e5edebaa18fdc13819
74
https://github.com/apache/airflow.git
54
def print_happy_cat(message): if sy
11
135
print_happy_cat
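The string literals (the ASCII-art lines) are missing from the print(.center(size)) calls above; the underlying pattern is simply str.center against the detected terminal width. A standalone sketch with made-up art standing in for the lost literals:

import os
import sys

message = "database upgraded"
size = os.get_terminal_size().columns if sys.stdout.isatty() else 0

print(message.center(size))
for art_line in (r" /\_/\ ", r"( o.o )", r" > ^ < "):   # placeholder cat, not the original art
    print(art_line.center(size))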
8
0
1
4
tests/sentry/api/endpoints/test_organization_teams.py
99,994
ref(tests): Remove `get_valid_response()` (#34822)
sentry
9
Python
8
test_organization_teams.py
def test_missing_permission(self):
    user = self.create_user()
    self.login_as(user=user)
    self.get_error_response(self.organization.slug, status_code=403)
096b5511e244eecd8799b2a0324655207ce8985e
34
https://github.com/getsentry/sentry.git
28
def test_missing_permission(self): user = self.create_user() self.login_as
9
55
test_missing_permission
10
0
1
4
homeassistant/components/motion_blinds/cover.py
294,227
Motion request update till stop (#68580) * update untill stop * fixes * fix spelling
core
8
Python
10
cover.py
def set_cover_position(self, **kwargs):
    position = kwargs[ATTR_POSITION]
    self._blind.Set_position(100 - position)
    self.request_position_till_stop()
83983bc875445d7147cb98e70f1214c6ed270da9
30
https://github.com/home-assistant/core.git
38
def set_cover_position(self, **kwargs): position = kwargs[ATTR_POSITION] self._blind.Set_position(100 - position) se
8
51
set_cover_position
8
0
1
6
wagtail/contrib/settings/forms.py
73,473
Reformat with black
wagtail
12
Python
8
forms.py
def media(self):
    return forms.Media(
        js=[
            versioned_static("wagtailsettings/js/site-switcher.js"),
        ]
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
20
https://github.com/wagtail/wagtail.git
58
def media(self): return forms.Media( js=[ versioned_static("wagtailsettings/js/site-switcher.js"), ] )
6
34
media
16
0
1
10
tests/unit/orchestrate/flow/flow-construct/test_flow.py
12,410
test: fix tests because join disappeared (#4832)
jina
17
Python
16
test_flow.py
def test_dry_run_with_two_pathways_diverging_at_non_gateway():
    f = (
        Flow()
        .add(name='r1')
        .add(name='r2')
        .add(name='r3', needs='r1')
        .needs(['r2', 'r3'])
    )

    with f:
        _validate_flow(f)
0a8a4fa6d9aeddc2a1271b7db16c8cac8b66b2b5
52
https://github.com/jina-ai/jina.git
66
def test_dry_run_with_two_pathways_diverging_at_non_gateway(): f = ( Flow()
7
97
test_dry_run_with_two_pathways_diverging_at_non_gateway
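Roughly the same diverging/merging topology with the public Flow API, assuming jina is installed and that the needs() merge helper behaves as in the version this test targets (sketch only):

from jina import Flow

f = (
    Flow()
    .add(name='r1')
    .add(name='r2')
    .add(name='r3', needs='r1')   # r3 branches off r1, diverging from r2
    .needs(['r2', 'r3'])          # merge the two pathways again
)

with f:   # entering the context builds and starts the Flow
    print('Flow with two diverging pathways is up')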
14
0
1
2
python3.10.4/Lib/email/_header_value_parser.py
223,594
add python 3.10.4 for windows
XX-Net
8
Python
14
_header_value_parser.py
def fold(self, policy):
    # message-id tokens may not be folded.
    return str(self) + policy.linesep
8198943edd73a363c266633e1aa5b2a9e9c9f526
16
https://github.com/XX-net/XX-Net.git
27
def fold(self, policy): # message-id tokens may not be folded.
5
26
fold
26
0
1
6
onnx/test/shape_inference_test.py
255,848
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
13
Python
25
shape_inference_test.py
def test_einsum_sum_along_dim(self) -> None:
    graph = self._make_graph(
        [('x', TensorProto.FLOAT, (3, 4))],
        [make_node('Einsum', ['x'], ['y'], equation='i j->i ')],
        [],)
    self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, ))])  # type: ignore
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
74
https://github.com/onnx/onnx.git
73
def test_einsum_sum_along_dim(self) -> None: graph = self._make_graph( [('x', TensorProto.FLOAT, (3, 4))], [make_node('Einsum', ['x'], ['y'], equation='i j->i ')], [],) self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.
10
117
test_einsum_sum_along_dim
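The equation being shape-checked, 'i j->i ', is a sum over the second axis; numpy shows the value-level behaviour the inferred output shape corresponds to:

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)
y = np.einsum("ij->i", x)   # sum along the second dimension
print(y.shape)              # (3,)
print(y)                    # [ 6. 22. 38.]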
21
0
1
3
pandas/core/computation/ops.py
167,746
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
pandas
8
Python
20
ops.py
def __call__(self, env) -> MathCall:
    operand = self.operand(env)
    # error: Cannot call function of unknown type
    return self.func(operand)  # type: ignore[operator]
f65417656ba8c59438d832b6e2a431f78d40c21c
24
https://github.com/pandas-dev/pandas.git
42
def __call__(self, env) -> MathCall: operand = self.operand(env) # error: Cannot call function of unknown type return self.func(operand) #
6
40
__call__
13
0
1
4
test/document_stores/test_base.py
258,086
Document Store test refactoring (#3449) * add new marker * start using test hierarchies * move ES tests into their own class * refactor test workflow * job steps * add more tests * move more tests * more tests * test labels * add more tests * Update tests.yml * Update tests.yml * fix * typo * fix es image tag * map es ports * try * fix * default port * remove opensearch from the markers sorcery * revert * skip new tests in old jobs * skip opensearch_faiss
haystack
13
Python
13
test_base.py
def test_get_all_documents_with_incorrect_filter_value(self, ds, documents):
    ds.write_documents(documents)
    result = ds.get_all_documents(filters={"year": ["nope"]})
    assert len(result) == 0
b694c7b5cbf612926fea3b0bf79ac9b12b136a2e
38
https://github.com/deepset-ai/haystack.git
33
def test_get_all_documents_with_incorrect_filter_value(self, ds, documents): ds.write_documents(documents) result = ds.get_all_documents(filt
9
63
test_get_all_documents_with_incorrect_filter_value
10
0
1
3
keras/saving/experimental/serialization_lib_test.py
279,744
Remaster serialization logic. There were several significant flaws, most prominently: - We had 2 separate serialization systems partially overlapping and interacting with each other: the JSON encoder/decoder one, and serialize/deserialize_keras_objects. The new system is fully standalone. - We ignored objects passed via `custom_objects` most of the time. PiperOrigin-RevId: 473794783
keras
8
Python
10
serialization_lib_test.py
def test_simple_objects(self, obj):
    serialized, _, reserialized = self.roundtrip(obj)
    self.assertEqual(serialized, reserialized)
e3e3a428f0a7955040c8a8fb8b2ad6f3e16d29eb
27
https://github.com/keras-team/keras.git
23
def test_simple_objects(self, obj): serialized, _, reserialized = self.roundtrip(obj) self.assertEqual(s
8
41
test_simple_objects
23
0
1
7
tests/integration_tests/flows/test_http.py
114,433
test file upload
mindsdb
10
Python
11
test_http.py
def test_7_utils(self):
    response = requests.get(f'{root}/util/ping')
    assert response.status_code == 200

    response = requests.get(f'{root}/util/ping_native')
    assert response.status_code == 200

    response = requests.get(f'{root}/config/vars')
    assert response.status_code == 200
e641c0c6b79558388d5f0d019fd9015f0ed17f8f
51
https://github.com/mindsdb/mindsdb.git
72
def test_7_utils(self): response = requests.get(f'{root}/util/ping') assert response.status_code == 200 response = requests.get(f'{root}/util/ping_native') assert response.status_code == 200 res
7
97
test_7_utils
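The same health-check calls against a locally running instance, as a hedged sketch (the base URL and port are assumptions, not taken from the test config):

import requests

root = "http://127.0.0.1:47334/api"
for endpoint in ("/util/ping", "/util/ping_native", "/config/vars"):
    response = requests.get(f"{root}{endpoint}")
    assert response.status_code == 200, endpoint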
12
0
1
22
tests/components/recorder/test_util.py
300,937
Tune sqlite based on configured settings (#72016)
core
9
Python
10
test_util.py
def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number):
    instance_mock = MagicMock(_db_supports_row_number=True)
    execute_args = []
    close_mock = MagicMock()
a4c1bcefb9d2a6f2aa0bc189fca496d46c78e3b0
143
https://github.com/home-assistant/core.git
24
def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number): instance_mock = MagicMock(_db_supports_row_number=True) execute_args = [] close_mock = MagicMock()
8
44
test_setup_connection_for_dialect_sqlite
64
0
4
24
seaborn/_marks/lines.py
41,830
Differentiate Line/Path and add Lines/Paths alternatives (#2822) * Add lines module and differentiate Path/Line * Add markers to Line/Path and add Lines/Paths * Implement unstatisfying but workable approach to keep_na * Add tests for Line(s)/Path(s) * Add backcompat for matplotlib<3.3.0
seaborn
14
Python
49
lines.py
def _plot(self, split_gen, scales, orient):

    for keys, data, ax in split_gen(keep_na=not self._sort):

        vals = resolve_properties(self, keys, scales)
        vals["color"] = resolve_color(self, keys, scales=scales)
        vals["fillcolor"] = resolve_color(self, keys, prefix="fill", scales=scales)
        vals["edgecolor"] = resolve_color(self, keys, prefix="edge", scales=scales)

        # https://github.com/matplotlib/matplotlib/pull/16692
        if Version(mpl.__version__) < Version("3.3.0"):
            vals["marker"] = vals["marker"]._marker

        if self._sort:
            data = data.sort_values(orient)

        line = mpl.lines.Line2D(
            data["x"].to_numpy(),
            data["y"].to_numpy(),
            color=vals["color"],
            linewidth=vals["linewidth"],
            linestyle=vals["linestyle"],
            marker=vals["marker"],
            markersize=vals["pointsize"],
            markerfacecolor=vals["fillcolor"],
            markeredgecolor=vals["edgecolor"],
            markeredgewidth=vals["edgewidth"],
            **self.artist_kws,
        )
        ax.add_line(line)
fefd94023aa2238a6971a4cbe3a37362e3205bc6
222
https://github.com/mwaskom/seaborn.git
375
def _plot(self, split_gen, scales, orient): for keys, data, ax in split_gen(keep_na=not self._sort): vals = resolve_properties(self, keys, scales) vals["color"] = resolve_color(self, keys, scales=scales) vals["fil
33
352
_plot
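At its core the mark hand-builds a matplotlib Line2D and attaches it to the Axes; a stripped-down sketch of that pattern with made-up data and styling:

import matplotlib as mpl
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line = mpl.lines.Line2D(
    [0, 1, 2, 3],
    [0, 1, 4, 9],
    color="C0",
    linewidth=2,
    linestyle="-",
    marker="o",
    markersize=6,
    markerfacecolor="C1",
    markeredgecolor="k",
    markeredgewidth=1,
)
ax.add_line(line)
ax.autoscale_view()   # make sure the view limits take the new line into account
plt.show()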
42
0
1
26
pipenv/patched/notpip/_internal/commands/completion.py
19,847
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
pipenv
9
Python
26
completion.py
def add_options(self) -> None:
    self.cmd_opts.add_option(
        "--bash",
        "-b",
        action="store_const",
        const="bash",
        dest="shell",
        help="Emit completion code for bash",
    )
    self.cmd_opts.add_option(
        "--zsh",
        "-z",
        action="store_const",
        const="zsh",
        dest="shell",
        help="Emit completion code for zsh",
    )
    self.cmd_opts.add_option(
        "--fish",
        "-f",
        action="store_const",
        const="fish",
        dest="shell",
        help="Emit completion code for fish",
    )

    self.parser.insert_option_group(0, self.cmd_opts)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
100
https://github.com/pypa/pipenv.git
288
def add_options(self) -> None: self.cmd_opts.add_option( "--bash", "-b", action="store_const", const="bash", dest="shell", help="Emit completion code for bash", ) self.cmd_opts.add_option( "--zsh", "-z", action="store_const", const="zsh", dest="shell", help="Emit completion code for zsh", ) self.cmd_opts.add_option( "--fish", "-f", action="store_const", const="fish", dest="shell", help="Emit completion co
10
174
add_options
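The same store_const pattern with plain optparse, which pip's option machinery builds on (standalone sketch):

import optparse

parser = optparse.OptionParser(prog="completion-demo")
for long_opt, short_opt, shell in (
    ("--bash", "-b", "bash"),
    ("--zsh", "-z", "zsh"),
    ("--fish", "-f", "fish"),
):
    parser.add_option(
        long_opt,
        short_opt,
        action="store_const",
        const=shell,
        dest="shell",
        help=f"Emit completion code for {shell}",
    )

options, _ = parser.parse_args(["--zsh"])
print(options.shell)   # -> "zsh"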
303
1
6
62
openbb_terminal/portfolio/portfolio_optimization/po_view.py
286,794
Portfolio optimization controller/sdk fixes (#3604) * fix plot and show * clean duplicated code * fix msg * fix if no portfolios * improve error msg * fix msg and add integration test * final fixes * Portfolio/po | alloc : fix paths * Linting Co-authored-by: Chavithra PARANA <chavithra@gmail.com>
OpenBBTerminal
14
Python
104
po_view.py
def display_heat(**kwargs):
    weights = kwargs.get("weights", None)
    data = kwargs.get("data", None)
    category = kwargs.get("category", None)
    title = kwargs.get("title", "")
    external_axes = kwargs.get("external_axes", None)

    if len(weights) == 1:
        console.print(f"Heatmap needs at least two values for '{category}'.")
        return

    if external_axes is None:
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
    else:
        ax = external_axes[0]

    if len(weights) <= 3:
        number_of_clusters = len(weights)
    else:
        number_of_clusters = None

    ax = rp.plot_clusters(
        returns=data,
        codependence="pearson",
        linkage="ward",
        k=number_of_clusters,
        max_k=10,
        leaf_order=True,
        dendrogram=True,
        cmap="RdYlBu",
        # linecolor='tab:purple',
        ax=ax,
    )

    ax = ax.get_figure().axes
    ax[0].grid(False)
    ax[0].axis("off")

    if category is None:
        # Vertical dendrogram
        l, b, w, h = ax[4].get_position().bounds
        l1 = l * 0.5
        w1 = w * 0.2
        b1 = h * 0.05
        ax[4].set_position([l - l1, b + b1, w * 0.8, h * 0.95])
        # Heatmap
        l, b, w, h = ax[1].get_position().bounds
        ax[1].set_position([l - l1 - w1, b + b1, w * 0.8, h * 0.95])
        w2 = w * 0.2
        # colorbar
        l, b, w, h = ax[2].get_position().bounds
        ax[2].set_position([l - l1 - w1 - w2, b, w, h])
        # Horizontal dendrogram
        l, b, w, h = ax[3].get_position().bounds
        ax[3].set_position([l - l1 - w1, b, w * 0.8, h])
    else:
        # Vertical dendrogram
        l, b, w, h = ax[4].get_position().bounds
        l1 = l * 0.5
        w1 = w * 0.4
        b1 = h * 0.2
        ax[4].set_position([l - l1, b + b1, w * 0.6, h * 0.8])
        # Heatmap
        l, b, w, h = ax[1].get_position().bounds
        ax[1].set_position([l - l1 - w1, b + b1, w * 0.6, h * 0.8])
        w2 = w * 0.05
        # colorbar
        l, b, w, h = ax[2].get_position().bounds
        ax[2].set_position([l - l1 - w1 - w2, b, w, h])
        # Horizontal dendrogram
        l, b, w, h = ax[3].get_position().bounds
        ax[3].set_position([l - l1 - w1, b, w * 0.6, h])

    title = "Portfolio - " + title + "\n"
    title += ax[3].get_title(loc="left")
    ax[3].set_title(title)

    if external_axes is None:
        theme.visualize_output(force_tight_layout=True)


@log_start_end(log=logger)
2ef3f86b835f31d71c4349d97fdd4bd1dadc2736
@log_start_end(log=logger)
657
https://github.com/OpenBB-finance/OpenBBTerminal.git
707
def display_heat(**kwargs): weights = kwargs.get("weights", None) data = kwargs.get("data", None) category = kwargs.get("category", None) title = kwargs.get("title", "") external_axes = kwargs.get("external_axes", None) if len(weights) == 1: console.print(f"Heatmap needs at least two values for '{category}'.") return if external_axes is None: _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI) else: ax = external_axes[0] if len(weights) <= 3: number_of_clusters = len(weights) else: number_of_clusters = None ax = rp.plot_clusters( returns=data, codependence="pearson", linkage="ward", k=number_of_clusters, max_k=10, leaf_order=True, dendrogram=True, cmap="RdYlBu", # linecolor='tab:purple', ax=ax, ) ax = ax.get_figure().axes ax[0].grid(False) ax[0].axis("off") if category is None: # Vertical de
54
970
display_heat
14
0
1
4
jina/jaml/parsers/__init__.py
10,560
refactor: use absolute imports (#4167)
jina
7
Python
11
__init__.py
def _get_flow_parser():
    from jina.jaml.parsers.flow.legacy import LegacyParser
    from jina.jaml.parsers.flow.v1 import V1Parser

    return [V1Parser, LegacyParser], V1Parser
cea300655ed8be70d74c390ca12e8b09fb741665
36
https://github.com/jina-ai/jina.git
22
def _get_flow_parser(): from jina.jaml.parsers.flow.legacy import LegacyParser from jina.jaml.parsers.flow.v1 import V1Parser return [V1Parser, LegacyParser], V
9
49
_get_flow_parser