Column           Type            Range
n_words          int64           3 - 1.95k
n_ast_errors     int64           0 - 2
complexity       int64           1 - 151
nloc             int64           2 - 546
path             stringlengths   8 - 125
id               int64           280 - 339k
commit_message   stringlengths   3 - 18.1k
repo             stringlengths   3 - 28
ast_levels       int64           4 - 28
language         stringclasses   1 value
vocab_size       int64           3 - 677
file_name        stringlengths   5 - 67
code             stringlengths   101 - 24k
commit_id        stringlengths   40 - 40
ast_errors       stringlengths   0 - 2.76k
token_counts     int64           7 - 3.77k
url              stringlengths   31 - 61
n_whitespaces    int64           4 - 13.9k
random_cut       stringlengths   21 - 13.9k
n_identifiers    int64           1 - 157
n_ast_nodes      int64           10 - 3.6k
fun_name         stringlengths   3 - 72
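Each record below lists its field values one per line, roughly in the column order above, with empty fields (such as a blank ast_errors) omitted. As a minimal sketch of working with this schema programmatically, assuming the rows have been exported to a local JSON Lines file (the file name commit_functions.jsonl is a placeholder, not part of the dataset description) and that pandas is an acceptable tool:

import pandas as pd

# Hypothetical local export of the records shown below.
df = pd.read_json("commit_functions.jsonl", lines=True)

# Columns listed as int64 in the schema above.
numeric_cols = [
    "n_words", "n_ast_errors", "complexity", "nloc", "id", "ast_levels",
    "vocab_size", "token_counts", "n_whitespaces", "n_identifiers", "n_ast_nodes",
]

# Compare observed ranges against the min/max values quoted in the schema.
print(df[numeric_cols].agg(["min", "max"]))

# Example query: simple test functions (cyclomatic complexity 1, at most 10 lines).
simple_tests = df[
    (df["complexity"] == 1)
    & (df["nloc"] <= 10)
    & df["fun_name"].str.startswith("test_")
]
print(simple_tests[["repo", "path", "fun_name", "url"]])

The same filters could be expressed with the datasets library or SQL; pandas is used here only because a dump of this size fits comfortably in memory.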
66
0
1
15
tests/test_styles.py
182,278
Update tests/test_styles.py Co-authored-by: Darren Burns <darrenburns@users.noreply.github.com>
textual
11
Python
40
test_styles.py
def test_render_styles_border(): base = Styles() inline = Styles() styles_view = RenderStyles(None, base, inline) base.border_top = ("heavy", "red") # Base has border-top: heavy red assert styles_view.border_top == ("heavy", Color.parse("red")) inline.border_left = ("rounded", "green") # Base has border-top heavy red, inline has border-left: rounded green assert styles_view.border_top == ("heavy", Color.parse("red")) assert styles_view.border_left == ("rounded", Color.parse("green")) assert styles_view.border == ( ("heavy", Color.parse("red")), ("", Color.default()), ("", Color.default()), ("rounded", Color.parse("green")), )
927d04d6b08ff0983d4486733ec8d728cad43bf0
136
https://github.com/Textualize/textual.git
129
def test_render_styles_border(): base = Styles() inline = Styles() styles_view = RenderStyles(None, base, inline) base.border_top = ("heavy", "red") # Base has border-top: heavy red assert styles_view.border_top == ("heavy", Color.parse("red")) inline.border_left = ("rounded", "green") # Base has border-top heavy red, inline has border-left: rounded green assert styles_view.border_top == ("heavy", Color.parse("red")) assert styles_view.border_left == ("rounded", Color.parse("green")) assert styles_
12
235
test_render_styles_border
158
0
1
38
wagtail/admin/tests/pages/test_copy_page.py
71,429
Reformat with black
wagtail
14
Python
107
test_copy_page.py
def test_page_copy_alias_post_copy_subpages(self): post_data = { "new_title": "Hello world 2", "new_slug": "hello-world-2", "new_parent_page": str(self.root_page.id), "copy_subpages": True, "publish_copies": False, "alias": True, } response = self.client.post( reverse("wagtailadmin_pages:copy", args=(self.test_page.id,)), post_data ) # Check that the user was redirected to the parents explore page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Get copy page_copy = self.root_page.get_children().get(slug="hello-world-2") # Check the copy is an alias of the original self.assertEqual(page_copy.alias_of, self.test_page.page_ptr) # Check that the copy is live # Note: publish_copies is ignored. Alias pages always keep the same state as their original self.assertTrue(page_copy.live) self.assertFalse(page_copy.has_unpublished_changes) # Check that the owner of the page is set correctly self.assertEqual(page_copy.owner, self.user) # Check that the children were copied self.assertEqual(page_copy.get_children().count(), 2) # Check the the child pages # Neither of them should be live child_copy = page_copy.get_children().filter(slug="child-page").first() self.assertIsNotNone(child_copy) self.assertEqual(child_copy.alias_of, self.test_child_page.page_ptr) self.assertTrue(child_copy.live) self.assertFalse(child_copy.has_unpublished_changes) unpublished_child_copy = ( page_copy.get_children().filter(slug="unpublished-child-page").first() ) self.assertIsNotNone(unpublished_child_copy) self.assertEqual( unpublished_child_copy.alias_of, self.test_unpublished_child_page.page_ptr ) self.assertFalse(unpublished_child_copy.live) self.assertTrue(unpublished_child_copy.has_unpublished_changes) # treebeard should report no consistency problems with the tree self.assertFalse( any(Page.find_problems()), "treebeard found consistency problems" )
d10f15e55806c6944827d801cd9c2d53f5da4186
287
https://github.com/wagtail/wagtail.git
530
def test_page_copy_alias_post_copy_subpages(self): post_data = { "new_title": "Hello world 2", "new_slug": "hello-world-2", "new_parent_page": str(self.root_page.id), "copy_subpages": True, "publish_copies": False, "alias": True, } response = self.client.post( reverse("wagtailadmin_pages:copy", args=(self.test_page.id,)), post_data ) # Check that the user was redirected to the parents explore page self.assertRedirects( response, reverse("wagtailadmin_explore", args=(self.root_page.id,)) ) # Get copy page_copy = self.root_page.get_children().get(slug="hello-world-2") # Check the copy is an alias of the original self.assertEqual(page_copy.alias_of, self.test_page.page_ptr) # Check that the copy is live # Note: publish_copies is ignored. Alias pages always keep the same state as their original self.assertTrue(page_copy.live) self.assertFalse(page_copy.has_unpublished_changes) # Check that the owner of the page is set correctly
37
483
test_page_copy_alias_post_copy_subpages
61
1
1
12
dask/dataframe/io/tests/test_parquet.py
156,617
Enfore consistent schema in `to_parquet` pyarrow (#9131) * Enfore consistent schema in `to_parquet` pyarrow Previously each partition written in `to_parquet` with `engine="pyarrow"` had their schema inferred based solely on data within that partition alone. This leads to a few common problems: - Mismatched inferred schemas between partitions aren't caught, leading to issues in downstream consumers. Most readers assume a uniform parquet schema across files, which wasn't enforced by default by the pyarrow engine. - Inference was supported with `schema="infer"`, but: - It was expensive, requiring computing at least one partition (sometimes more) to get a sample of any `object` dtypes. - Inference here for non-string object dtypes is silently incorrect, users should really be explicit here when trying to write more complicated schemas containing structured fields. - Inference was off by default - When writing a `_metadata` file, differences in per-file schemas would result in opaque errors when merging the metadata before writing. This error is deep in the `arrow` codebase, and doesn't provide enough information for the user to debug where the schema mismatch occured. Turning on schema enforcement by default lets us generate a better error message, This PR changes the `schema` parameter in the following way: - `schema="infer"` is the default - schema inference assumes object fields are strings (common), and makes no attempt at more complicated inference. Users with more complicated schemas should specify those explicitly. This means that schema inference is now cheap. - A better error message is generated for mismatched partition dtypes in `to_parquet` for the `pyarrow` engine. The fastparquet engine already does this.
dask
12
Python
47
test_parquet.py
def test_pyarrow_schema_mismatch_error(tmpdir): df1 = pd.DataFrame({"x": [1, 2, 3], "y": [4.5, 6, 7]}) df2 = pd.DataFrame({"x": [4, 5, 6], "y": ["a", "b", "c"]}) ddf = dd.from_delayed( [dask.delayed(df1), dask.delayed(df2)], meta=df1, verify_meta=False ) with pytest.raises(ValueError) as rec: ddf.to_parquet(str(tmpdir), engine="pyarrow") msg = str(rec.value) assert "Failed to convert partition to expected pyarrow schema" in msg assert "y: double" in str(rec.value) assert "y: string" in str(rec.value) @PYARROW_MARK
73c985c2bed5d61be6d05d67f7f381435fe8d1e2
@PYARROW_MARK
146
https://github.com/dask/dask.git
100
def test_pyarrow_schema_mismatch_error(tmpdir): df1 = pd.DataFrame({"x": [1, 2, 3], "y": [4.5, 6, 7]}) df2 = pd.DataFrame({"x": [4, 5, 6], "y": ["a", "b", "c"]}) ddf = dd.from_delayed( [dask.delayed(df1), dask.delayed(df2)], meta=df1, verify_meta=False ) with pytest.raises(ValueError)
23
241
test_pyarrow_schema_mismatch_error
15
0
2
5
homeassistant/components/hassio/__init__.py
315,621
Remove hassio from mypy ignore list (#74603) * Remove hassio from mypy ignore list * Avoid if TYPE_CHECKING
core
10
Python
14
__init__.py
def get_supervisor_ip() -> str | None: if "SUPERVISOR" not in os.environ: return None return os.environ["SUPERVISOR"].partition(":")[0]
6540ba623978813668a30e5822b97e076fc05a93
34
https://github.com/home-assistant/core.git
31
def get_supervisor_ip() -> str | None: if "SUPERVISOR" not in os.environ: return None return os.environ["SUPERVISOR"].partition(":")[0]
5
61
get_supervisor_ip
9
0
1
4
netbox/dcim/tests/test_models.py
264,889
Update Cable instantiations to match new signature
netbox
11
Python
9
test_models.py
def test_cable_cannot_have_the_same_terminination_on_both_ends(self): cable = Cable(a_terminations=[self.interface1], b_terminations=[self.interface1]) with self.assertRaises(ValidationError): cable.clean()
3a461d02793e6f9d41c2b1a92647e691de1abaac
39
https://github.com/netbox-community/netbox.git
41
def test_cable_cannot_have_the_same_terminination_on_both_ends(self): cable = Cable(a_terminations=[self.interface1], b_terminations=[self.interface1]) with self.assertRaises(ValidationError): cable.clea
10
67
test_cable_cannot_have_the_same_terminination_on_both_ends
28
0
1
16
tests/providers/google/cloud/transfers/test_bigquery_to_mssql.py
48,048
Bigquery assets (#23165)
airflow
11
Python
24
test_bigquery_to_mssql.py
def test_execute_good_request_to_bq(self, mock_hook): destination_table = 'table' operator = BigQueryToMsSqlOperator( task_id=TASK_ID, source_project_dataset_table=f'{TEST_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}', mssql_table=destination_table, replace=False, ) operator.execute(context=mock.MagicMock()) # fmt: off mock_hook.return_value.list_rows.assert_called_once_with( dataset_id=TEST_DATASET, table_id=TEST_TABLE_ID, max_results=1000, selected_fields=None, start_index=0, ) # fmt: on
511d0ee256b819690ccf0f6b30d12340b1dd7f0a
73
https://github.com/apache/airflow.git
182
def test_execute_good_request_to_bq(self, mock_hook): destination_table = 'table' operator =
26
123
test_execute_good_request_to_bq
27
0
4
8
homeassistant/components/text/__init__.py
291,310
Add `text` platform (#79454) Co-authored-by: Franck Nijhof <frenck@frenck.nl> Co-authored-by: Franck Nijhof <git@frenck.dev>
core
11
Python
18
__init__.py
def pattern_cmp(self) -> re.Pattern | None: if self.pattern is None: self.__pattern_cmp = None return None if not self.__pattern_cmp or self.pattern != self.__pattern_cmp.pattern: self.__pattern_cmp = re.compile(self.pattern) return self.__pattern_cmp
003e4224c89a6da381960dc5347750d1521d85c9
58
https://github.com/home-assistant/core.git
88
def pattern_cmp(self) -> re.Pattern | None: if self.pattern is None: self.__pattern_cmp = None
7
94
pattern_cmp
20
0
1
10
wagtail/core/tests/test_tests.py
74,587
Reformat with black
wagtail
13
Python
18
test_tests.py
def test_nested_form_data(self): result = nested_form_data( { "foo": "bar", "parent": { "child": "field", }, } ) self.assertEqual(result, {"foo": "bar", "parent-child": "field"})
d10f15e55806c6944827d801cd9c2d53f5da4186
41
https://github.com/wagtail/wagtail.git
126
def test_nested_form_data(self): result = nested_form_data( { "foo": "bar", "parent": {
5
79
test_nested_form_data
43
1
1
10
tests/ludwig/decoders/test_sequence_decoder.py
6,054
Squeeze explicilty. (#1726)
ludwig
11
Python
37
test_sequence_decoder.py
def test_sequence_rnn_decoder(cell_type, num_layers, batch_size): hidden_size = 256 vocab_size = 50 max_sequence_length = 10 combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])} sequence_rnn_decoder = SequenceRNNDecoder( hidden_size, vocab_size, max_sequence_length, cell_type, num_layers=num_layers ) output = sequence_rnn_decoder(combiner_outputs, target=None) assert list(output.size()) == [batch_size, max_sequence_length, vocab_size] @pytest.mark.parametrize("num_layers", [1, 2]) @pytest.mark.parametrize("batch_size", [20, 1])
0fea5903b211823b5319ec03cd5262aadf97969e
@pytest.mark.parametrize("num_layers", [1, 2]) @pytest.mark.parametrize("batch_size", [20, 1])
77
https://github.com/ludwig-ai/ludwig.git
71
def test_sequence_rnn_decoder(cell_type, num_layers, batch_size): hidden_size = 256 vocab_size = 50 max_sequence_length = 10 combiner_outputs = {HIDDEN: torch.rand([batch_size, hidden_size])} sequence_rnn_decoder = SequenceRNNDecoder( hidden_size, vocab_size, max_sequence_length, cell_type, num_layers=num_layers ) output = sequence_rnn_decoder(combiner_outputs, target=None) assert list(output.size()) == [batch_size, max_sequence_length, vocab_size] @pytest.mark.parametrize("num_layers", [1, 2]) @pytest.mark.parametrize("batch_size", [20, 1])
20
160
test_sequence_rnn_decoder
14
0
1
5
modules/sd_hijack.py
152,293
Complete cross attention update
stable-diffusion-webui
8
Python
11
sd_hijack.py
def nonlinearity_hijack(x): # swish t = torch.sigmoid(x) x *= t del t return x
3b1b1444d4d90415fb42252406437b3d2ceb2110
21
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
28
def nonlinearity_hijack(x): # swish t = torch.sigmoid(x) x *= t del t return x
5
34
nonlinearity_hijack
27
0
3
11
src/datasets/builder.py
104,670
Avoid writing empty license files (#4090) * Avoid writing empty license files * Fix empty license file for MetricInfo
datasets
18
Python
21
builder.py
def _save_info(self): if os.path.exists(self._cache_dir): super()._save_info() else: import apache_beam as beam fs = beam.io.filesystems.FileSystems with fs.create(os.path.join(self._cache_dir, config.DATASET_INFO_FILENAME)) as f: self.info._dump_info(f) if self.info.license: with fs.create(os.path.join(self._cache_dir, config.LICENSE_FILENAME)) as f: self.info._dump_license(f)
bf08ea3f95e8209a7afd2b50410ad5db51409d11
108
https://github.com/huggingface/datasets.git
144
def _save_info(self): if os.path.exists(self._cache_dir): super()._save_info() else: import apache_beam as beam fs = beam.io.filesystems.FileSystems with fs.create(os.pa
23
179
_save_info
143
0
13
36
sympy/simplify/hyperexpand.py
198,668
Code optimizations
sympy
14
Python
75
hyperexpand.py
def try_shifted_sum(func, z): abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1) if len(abuckets[S.Zero]) != 1: return None r = abuckets[S.Zero][0] if r <= 0: return None if S.Zero not in bbuckets: return None l = list(bbuckets[S.Zero]) l.sort() k = l[0] if k <= 0: return None nap = list(func.ap) nap.remove(r) nbq = list(func.bq) nbq.remove(k) k -= 1 nap = [x - k for x in nap] nbq = [x - k for x in nbq] ops = [] for n in range(r - 1): ops.append(ShiftA(n + 1)) ops.reverse() fac = factorial(k)/z**k fac *= Mul(*[rf(b, k) for b in nbq]) fac /= Mul(*[rf(a, k) for a in nap]) ops += [MultOperator(fac)] p = 0 for n in range(k): m = z**n/factorial(n) m *= Mul(*[rf(a, n) for a in nap]) m /= Mul(*[rf(b, n) for b in nbq]) p += m return Hyper_Function(nap, nbq), ops, -p
19114acf6514bc87f5c8cfde35e0fcab88965be7
314
https://github.com/sympy/sympy.git
287
def try_shifted_sum(func, z): abuckets, bbuckets = sift(func.ap, _mod1), sift(func.bq, _mod1) if len(abuckets[S.Zero]) != 1: return None r = abuckets[S.Zero][0] if r <= 0: return None if S.Zero not in bbuckets: return None l = list(bbuckets[S.Zero]) l.sort() k = l[0] if k <= 0: return None nap = list(func.ap) nap.remove(r) nbq =
37
481
try_shifted_sum
63
0
1
16
onnx/backend/test/case/node/shape.py
255,020
Use Python type annotations rather than comments (#3962) * These have been supported since Python 3.5. ONNX doesn't support Python < 3.6, so we can use the annotations. Diffs generated by https://pypi.org/project/com2ann/. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Remove MYPY conditional logic in gen_proto.py It breaks the type annotations and shouldn't be needed. Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * Get rid of MYPY bool from more scripts Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * move Descriptors class above where its referenced in type annotation Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fixes Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * remove extra blank line Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotations Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix type annotation in gen_docs Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix Operators.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix TestCoverage.md Signed-off-by: Gary Miguel <garymiguel@microsoft.com> * fix protoc-gen-mypy.py Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
onnx
12
Python
44
shape.py
def export() -> None: x = np.array([ [1, 2, 3], [4, 5, 6], ]).astype(np.float32) test_shape('_example', x) # preserve names of original test cases x = np.random.randn(3, 4, 5).astype(np.float32) test_shape('', x) # preserve names of original test cases test_shape('_start_1', x, start=1) test_shape('_end_1', x, end=1) test_shape('_start_negative_1', x, start=-1) test_shape('_end_negative_1', x, end=-1) test_shape('_start_1_end_negative_1', x, start=1, end=-1) test_shape('_start_1_end_2', x, start=1, end=2) test_shape('_clip_start', x, start=-10) test_shape('_clip_end', x, end=10)
83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd
163
https://github.com/onnx/onnx.git
177
def export() -> None: x = np.array([ [1, 2, 3], [4, 5, 6], ]).astype(np.float32) test_shape('_example', x) # preserve names of original test cases x = np.random.randn(3, 4, 5).astype(np.float32) test_shape('', x) # preserve names of original test cases test_shape('_start_1', x, start=1) test_shape('_end_1', x, end=1) test_shape('_start_negative_1', x, start=-1) test_shape('_end_negative_1', x, end=-1) test_shape('_start_1_end_negative_1', x, start=1, end=-1) test_shape('_start_1_end_2', x, start=1, end=2) test_shape('_clip_
11
256
export
26
0
1
9
tests/sentry/api/endpoints/test_project_profiling_profile.py
93,618
feat(profiling): Use new functions endpoint for suspect functions query (#36922) This change uses the new endpoint to query for suspect functions which is backed by the functions table.
sentry
12
Python
24
test_project_profiling_profile.py
def test_sort_missing(self): with self.feature(PROFILING_FEATURES): response = self.client.get(self.url) assert response.status_code == 400 assert response.data == { "detail": ErrorDetail( string="Invalid query: Missing value for sort", code="parse_error" ) }
f2fd5c3780796affcf1d7685ba6a3a0634bb6ceb
50
https://github.com/getsentry/sentry.git
101
def test_sort_missing(self): with self.f
13
86
test_sort_missing
76
0
1
34
tests/aggregation/tests.py
200,888
Refs #33476 -- Reformatted code with Black.
django
15
Python
43
tests.py
def test_backwards_m2m_annotate(self): authors = ( Author.objects.filter(name__contains="a") .annotate(Avg("book__rating")) .order_by("name") ) self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 4.5), ("Brad Dayley", 3.0), ("Jacob Kaplan-Moss", 4.5), ("James Bennett", 4.0), ("Paul Bissex", 4.0), ("Stuart Russell", 4.0), ], lambda a: (a.name, a.book__rating__avg), ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 1), ("Brad Dayley", 1), ("Jacob Kaplan-Moss", 1), ("James Bennett", 1), ("Jeffrey Forcier", 1), ("Paul Bissex", 1), ("Peter Norvig", 2), ("Stuart Russell", 1), ("Wesley J. Chun", 1), ], lambda a: (a.name, a.num_books), )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
200
https://github.com/django/django.git
470
def test_backwards_m2m_annotate(self): authors = ( Author.objects.filter(name__contains="a") .annotate(Avg("book__rating")) .order_by("name") ) self.assertQuerysetEqual( authors, [ ("Adrian Holovaty", 4.5), ("Brad Dayley", 3.0), ("Jacob Kaplan-Moss", 4.5), ("James Bennett", 4.0),
16
295
test_backwards_m2m_annotate
29
0
1
7
tpot/base.py
181,816
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
tpot
8
Python
26
base.py
def _combine_individual_stats(self, operator_count, cv_score, individual_stats): stats = deepcopy( individual_stats ) # Deepcopy, since the string reference to predecessor should be cloned stats["operator_count"] = operator_count stats["internal_cv_score"] = cv_score return stats
388616b6247ca4ea8de4e2f340d6206aee523541
32
https://github.com/EpistasisLab/tpot.git
83
def _combine_individual_stats(self, operator_count, cv_score, individual_stats): stats = deepcopy( individual_stats ) # Deepcopy, since the string reference to predecessor
7
55
_combine_individual_stats
56
0
4
35
erpnext/accounts/report/tax_detail/tax_detail.py
65,357
style: format code with black
erpnext
12
Python
44
tax_detail.py
def execute(filters=None): if not filters: return [], [] fieldlist = required_sql_fields fieldstr = get_fieldstr(fieldlist) gl_entries = frappe.db.sql( .format( fieldstr=fieldstr ), filters, as_dict=1, ) report_data = modify_report_data(gl_entries) summary = None if filters["mode"] == "run" and filters["report_name"] != "Tax Detail": report_data, summary = run_report(filters["report_name"], report_data) # return columns, data, message, chart, report_summary return get_columns(fieldlist), report_data, None, None, summary
494bd9ef78313436f0424b918f200dab8fc7c20b
100
https://github.com/frappe/erpnext.git
38
def execute(filters=None): if not filters: return [], [] fieldlist = required_sql_fields fieldstr = get_fieldstr(fieldlist) gl_entries = frappe.db.sql( .format( fieldstr=fieldstr ), filters, as_dict=1, ) report_data = modify_report_data(gl_entries) summary = None if filters["mode"] == "run" and filters["report_name"] != "Tax Detail": report_data, summary = run_rep
17
165
execute
282
0
10
38
python/ccxt/async_support/okcoin.py
17,415
1.72.15 [ci skip]
ccxt
19
Python
123
okcoin.py
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) duration = self.parse_timeframe(timeframe) request = { 'instrument_id': market['id'], 'granularity': self.timeframes[timeframe], } options = self.safe_value(self.options, 'fetchOHLCV', {}) defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles type = self.safe_string(params, 'type', defaultType) params = self.omit(params, 'type') method = market['type'] + 'GetInstrumentsInstrumentId' + type if type == 'Candles': if since is not None: if limit is not None: request['end'] = self.iso8601(self.sum(since, limit * duration * 1000)) request['start'] = self.iso8601(since) else: if limit is not None: now = self.milliseconds() request['start'] = self.iso8601(now - limit * duration * 1000) request['end'] = self.iso8601(now) elif type == 'HistoryCandles': if market['option']: raise NotSupported(self.id + ' fetchOHLCV does not have ' + type + ' for ' + market['type'] + ' markets') if since is not None: if limit is None: limit = 300 # default request['start'] = self.iso8601(self.sum(since, limit * duration * 1000)) request['end'] = self.iso8601(since) else: if limit is not None: now = self.milliseconds() request['end'] = self.iso8601(now - limit * duration * 1000) request['start'] = self.iso8601(now) response = await getattr(self, method)(self.extend(request, params)) # # spot markets # # [ # { # close: "0.02683401", # high: "0.02683401", # low: "0.02683401", # open: "0.02683401", # time: "2018-12-17T23:47:00.000Z", # volume: "0" # }, # { # close: "0.02684545", # high: "0.02685084", # low: "0.02683312", # open: "0.02683894", # time: "2018-12-17T20:28:00.000Z", # volume: "101.457222" # } # ] # # futures # # [ # [ # 1545090660000, # 0.3171, # 0.3174, # 0.3171, # 0.3173, # 1648, # 51930.38579450868 # ], # [ # 1545072720000, # 0.3159, # 0.3161, # 0.3144, # 0.3149, # 22886, # 725179.26172331 # ] # ] # return self.parse_ohlcvs(response, market, timeframe, since, limit)
e01461d06258d5c0956f03f4f4b858a183116276
373
https://github.com/ccxt/ccxt.git
1,437
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) duration = self.parse_timeframe(timeframe) request = { 'instrument_id': market['id'], 'granularity': self.timeframes[timeframe], } options = self.safe_value(self.options, 'fetchOHLCV', {}) defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles type = self.safe_string(params, 'type', defaultType) params = self.omit(params, 'type') method = market['type'] + 'GetInstrumentsInstrumentId' + type if type == 'Candles': if since is not None: if limit is not None: request['end'] = self.iso8601(self.sum(since, limit * duration * 1000)) request['start'] = self.iso8601(since) else: if limit is not None: now = self.milliseconds() request['start'] = self.iso8601(now - limit * duration * 1000) request['end'] = self.iso8601(now) elif type == '
30
663
fetch_ohlcv
30
1
1
9
tests/bots/stocks/options/test_opt_chain.py
283,865
Add tests for bots/stocks (#1616) * Added test_quote * Added dps tests * Added more tests * Added government tests * Added insider tests * Added options tests * Added sia tests * Added ta tests * Readd coverage rc * Added test
OpenBBTerminal
10
Python
30
test_opt_chain.py
def vcr_config(): return { "filter_headers": [("User-Agent", None)], "filter_query_parameters": [ ("period1", "MOCK_PERIOD_1"), ("period2", "MOCK_PERIOD_2"), ("date", "MOCK_DATE"), ], } @pytest.mark.vcr @pytest.mark.bots @pytest.mark.parametrize( "opt_type, min_sp, max_sp", [("Calls", None, None), ("Puts", 100.0, 1000.0)] )
826cd8a723d8e2b810c51bf8266c09e8e55059c4
@pytest.mark.vcr @pytest.mark.bots @pytest.mark.parametrize( "opt_type, min_sp, max_sp", [("Calls", None, None), ("Puts", 100.0, 1000.0)] )
40
https://github.com/OpenBB-finance/OpenBBTerminal.git
88
def vcr_config(): return { "filter_headers": [("User-Agent", None)], "filter_query_parameters": [ ("period1", "MOCK_PERIOD_1"), ("period2", "MOCK_PERIOD_2
6
132
vcr_config
42
0
1
16
sklearn/utils/tests/test_utils.py
259,690
MNT Replace pytest.warns(None) in test_utils (#23137)
scikit-learn
13
Python
25
test_utils.py
def test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory, expected): with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) actual = get_chunk_n_rows( row_bytes=row_bytes, max_n_rows=max_n_rows, working_memory=working_memory, ) assert actual == expected assert type(actual) is type(expected) with config_context(working_memory=working_memory): with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) actual = get_chunk_n_rows(row_bytes=row_bytes, max_n_rows=max_n_rows) assert actual == expected assert type(actual) is type(expected)
a739f6ca7cd54bd7a8a3c1e22b54d194098d85af
106
https://github.com/scikit-learn/scikit-learn.git
150
def test_get_chunk_n_rows(row_bytes, max_n_rows, working_memory, expected): with warnings.catch_warnings(): warnings.simplefilter("error", UserWarning) actual = get_chunk_n_rows( row_bytes=row_bytes, max_n_rows=max_n_rows, working_memory=working_memory, ) assert actual == expected assert type(actual) is type(expected) with config_context(working_memory=working_memory): with warnings.catch_warnings(): warnings.simplefilter("e
13
172
test_get_chunk_n_rows
22
0
2
6
homeassistant/components/skybell/binary_sensor.py
303,247
Add config flow to skybell (#70887)
core
10
Python
18
binary_sensor.py
def extra_state_attributes(self) -> dict[str, str | int | tuple[str, str]]: attrs = super().extra_state_attributes if event := self._event.get(CONST.CREATED_AT): attrs["event_date"] = event return attrs
a502a8798ff74eb6185473df7f69553fc4663634
51
https://github.com/home-assistant/core.git
61
def extra_state_attributes(self) -> dict[str, str | int | tuple[str, str]]: attrs = super(
13
82
extra_state_attributes
18
0
2
5
homeassistant/components/recorder/core.py
299,979
Complete strict typing for recorder (#71274) * Complete strict typing for recorder * update tests * Update tests/components/recorder/test_migrate.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Update tests/components/recorder/test_migrate.py Co-authored-by: Martin Hjelmare <marhje52@gmail.com> * Remove the asserts * remove ignore comments Co-authored-by: Martin Hjelmare <marhje52@gmail.com>
core
10
Python
18
core.py
def get_session(self) -> Session: if self._get_session is None: raise RuntimeError("The database connection has not been established") return self._get_session()
eb77f8db8559dba95e5e36c8a9314f89e1ae82b1
26
https://github.com/home-assistant/core.git
50
def get_session(self) -> Session:
5
47
get_session
64
0
1
7
Tests/test_image_thumbnail.py
243,365
Do not call load() before draft()
Pillow
11
Python
45
test_image_thumbnail.py
def test_load_first(): # load() may change the size of the image # Test that thumbnail() is calling it before performing size calculations with Image.open("Tests/images/g4_orientation_5.tif") as im: im.thumbnail((64, 64)) assert im.size == (64, 10) # Test thumbnail(), without draft(), # on an image that is large enough once load() has changed the size with Image.open("Tests/images/g4_orientation_5.tif") as im: im.thumbnail((590, 88), reducing_gap=None) assert im.size == (590, 88)
38b53a9fd704570fb29abd10910ea7939b1185e1
68
https://github.com/python-pillow/Pillow.git
109
def test_load_first(): # load() may change the size of the image # Test that thumbnail() is calling it before performing size calculations with Image.open("Tests/images/g4_orientation_5.tif") as im: im.thumbnail((64, 64)) assert im.si
7
117
test_load_first
88
0
1
29
python/ray/tune/tests/test_utils.py
127,362
[tune] Add timeout ro retry_fn to catch hanging syncs (#28155) Syncing sometimes hangs in pyarrow for unknown reasons. We should introduce a timeout for these syncing operations. Signed-off-by: Kai Fricke <kai@anyscale.com>
ray
14
Python
56
test_utils.py
def test_format_vars(): # Format brackets correctly assert ( format_vars( { ("a", "b", "c"): 8.1234567, ("a", "b", "d"): [7, 8], ("a", "b", "e"): [[[3, 4]]], } ) == "c=8.1235,d=7_8,e=3_4" ) # Sorted by full keys, but only last key is reported assert ( format_vars( { ("a", "c", "x"): [7, 8], ("a", "b", "x"): 8.1234567, } ) == "x=8.1235,x=7_8" ) # Filter out invalid chars. It's ok to have empty keys or values. assert ( format_vars( { ("a c?x",): " <;%$ok ", ("some",): " ", } ) == "a_c_x=ok,some=" )
3590a86db0369ce8a8f9c3965cddc9e4c817c2b8
118
https://github.com/ray-project/ray.git
349
def test_format_vars(): # Format brackets correctly assert ( format_vars( {
2
202
test_format_vars
56
0
1
28
tests/pytests/functional/utils/win_dacl/test_file.py
216,382
Add tests
salt
15
Python
38
test_file.py
def test_applies_to_this_folder_subfolders_files(test_dir): result = win_dacl.set_permissions( obj_name=test_dir, principal="Backup Operators", permissions="full_control", access_mode="grant", applies_to="this_folder_subfolders_files", obj_type="file", reset_perms=False, protected=None, ) assert result is True expected = { "Not Inherited": { "Backup Operators": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } } } } result = win_dacl.get_permissions( obj_name=test_dir, principal="Backup Operators", obj_type="file", ) assert result == expected
72bffdc59f2f62bbaa5e4686d4a408802cec1e89
96
https://github.com/saltstack/salt.git
264
def test_applies_to_this_folder_subfolders_files(test_dir): result = win_dacl.set_permissions( obj_name=test_dir, principal="Backup Operators", permissions="full_control", access_mode="grant", applies_to="this_folder_subfolders_files", obj_type="file", reset_perms=False, protected=None, ) assert result is True expected = { "Not Inherited": { "Backup Operators": { "grant": { "applies to": "This folder, subfolders and files", "permissions": "Full control", } } } } result = win_dacl.get_permissions( obj_name=test_dir, principal="Backup Operators", obj_type="file",
15
167
test_applies_to_this_folder_subfolders_files
9
0
1
4
Lib/test/test_enum.py
175,355
bpo-40066: [Enum] update str() and format() output (GH-30582) Undo rejected PEP-663 changes: - restore `repr()` to its 3.10 status - restore `str()` to its 3.10 status New changes: - `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result - zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == '<Color: 0>'` - update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type - added `_numeric_repr_` to `Flag` to control display of unnamed values - enums without doc strings have a more comprehensive doc string added - `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`
cpython
11
Python
8
test_enum.py
def test_overridden_str(self): NS = self.NewStrEnum self.assertEqual(str(NS.first), NS.first.name.upper()) self.assertEqual(format(NS.first), NS.first.name.upper())
acf7403f9baea3ae1119fc6b4a3298522188bf96
52
https://github.com/python/cpython.git
29
def test_overridden_str(self): NS = self.NewSt
10
83
test_overridden_str
27
0
1
12
tests/sentry/integrations/gitlab/test_issues.py
91,369
ref: replace self.assertRaises with pytest.raises (#35685) * add flake8 plugin to detect assertRaises * ref: replace self.assertRaises with pytest.raises * non-sed fixes
sentry
11
Python
25
test_issues.py
def test_after_link_issue_failure(self): responses.add( responses.POST, "https://example.gitlab.com/api/v4/projects/2/issues/321/notes", status=502, ) data = {"externalIssue": "2#321", "comment": "This is not good."} external_issue = ExternalIssue.objects.create( organization_id=self.organization.id, integration_id=self.integration.id, key="2#321" ) with pytest.raises(IntegrationError): self.installation.after_link_issue(external_issue, data=data)
284e980df0018f8baee659999268bdd4c7d08255
79
https://github.com/getsentry/sentry.git
123
def test_after_link_issue_failure(self): responses.add( responses.POST, "https://example.gitlab.com/api/v4/projects/2/issues/321/notes", status=502, ) data = {"externalIssue": "2#321", "comment": "This is not good."} external_issue = ExternalIssue.objects.create( organizatio
22
132
test_after_link_issue_failure
85
0
7
22
sympy/integrals/intpoly.py
196,328
Updated import locations
sympy
20
Python
57
intpoly.py
def left_integral2D(m, index, facets, x0, expr, gens): value = S.Zero for j in range(0, m): intersect = () if j in ((index - 1) % m, (index + 1) % m): intersect = intersection(facets[index], facets[j], "segment2D") if intersect: distance_origin = norm(tuple(map(lambda x, y: x - y, intersect, x0))) if is_vertex(intersect): if isinstance(expr, Expr): if len(gens) == 3: expr_dict = {gens[0]: intersect[0], gens[1]: intersect[1], gens[2]: intersect[2]} else: expr_dict = {gens[0]: intersect[0], gens[1]: intersect[1]} value += distance_origin * expr.subs(expr_dict) else: value += distance_origin * expr return value
498015021131af4dbb07eb110e5badaba8250c7b
197
https://github.com/sympy/sympy.git
455
def left_integral2D(m, index, facets, x0, expr, gens): value = S.Zero for j in range(0, m): intersect = () if j in ((index - 1) % m, (index + 1) % m): intersect = intersection(facets[index], facets[j], "segment2D") if intersect: distance_origin = norm(tuple(map(lambda x, y: x - y, intersect, x0))) if is_vertex(intersect): if isinstance(expr, Expr): if len(gens) == 3: expr_dict = {gens[0]: intersect[0], gens[1]: intersect[1], gens[2]: intersect[2]} else: expr_dict = {gens[0]: intersect[0],
26
293
left_integral2D
279
0
22
62
flair/datasets/treebanks.py
214,403
Label logic and new unit tests
flair
20
Python
143
treebanks.py
def _read_next_sentence(self, file): line = file.readline() sentence: Sentence = Sentence([]) # current token ID token_idx = 0 # handling for the awful UD multiword format current_multiword_text = "" current_multiword_sequence = "" current_multiword_first_token = 0 current_multiword_last_token = 0 while line: line = line.strip() fields: List[str] = re.split("\t+", line) # end of sentence if line == "": if len(sentence) > 0: break # comments elif line.startswith("#"): line = file.readline() continue # ellipsis elif "." in fields[0]: line = file.readline() continue # if token is a multi-word elif "-" in fields[0]: line = file.readline() current_multiword_first_token = int(fields[0].split("-")[0]) current_multiword_last_token = int(fields[0].split("-")[1]) current_multiword_text = fields[1] current_multiword_sequence = "" if self.split_multiwords: continue else: token = Token(fields[1]) token.add_label("lemma", str(fields[2])) if len(fields) > 9 and "SpaceAfter=No" in fields[9]: token.whitespace_after = False sentence.add_token(token) token_idx += 1 # normal single-word tokens else: # if we don't split multiwords, skip over component words if not self.split_multiwords and token_idx < current_multiword_last_token: token_idx += 1 line = file.readline() continue # add token token = Token(fields[1], head_id=int(fields[6])) token.add_label("lemma", str(fields[2])) token.add_label("upos", str(fields[3])) token.add_label("pos", str(fields[4])) token.add_label("dependency", str(fields[7])) if len(fields) > 9 and "SpaceAfter=No" in fields[9]: token.whitespace_after = False # add morphological tags for morph in str(fields[5]).split("|"): if "=" not in morph: continue token.add_label(morph.split("=")[0].lower(), morph.split("=")[1]) if len(fields) > 10 and str(fields[10]) == "Y": token.add_label("frame", str(fields[11])) token_idx += 1 # derive whitespace logic for multiwords if token_idx <= current_multiword_last_token: current_multiword_sequence += token.text # print(token) # print(current_multiword_last_token) # print(current_multiword_first_token) # if multi-word equals component tokens, there should be no whitespace if token_idx == current_multiword_last_token and current_multiword_sequence == current_multiword_text: # go through all tokens in subword and set whitespace_after information for i in range(current_multiword_last_token - current_multiword_first_token): # print(i) sentence[-(i + 1)].whitespace_after = False sentence.add_token(token) line = file.readline() return sentence
660b3006dfa49841c553ac952624aac5d18b634d
488
https://github.com/flairNLP/flair.git
1,408
def _read_next_sentence(self, file): line = file.readline() sentence: Sentence = Sentence([]) # current token ID token_idx = 0 # handling for the awful UD multiword format current_multiword_text = "" current_multiword_sequence = "" current_multiword_first_token = 0 current_multiword_last_token = 0 while line: line = line.strip() fields: List[str] = re.split("\t+", line) # end of sentence if line == "": if len(sentence) > 0: break # comments elif line.startswith("#"): line = file.readline() continue # ellipsis elif
33
831
_read_next_sentence
12
0
1
5
gamestonk_terminal/cryptocurrency/crypto_controller.py
281,220
Baseclass (#1141) * A working decorator * Basic intro * Added more * Refactor * Refactor * Cleaned code * Simplified function (thanks Chavi) * Small change * Updating tests : fix issue with mock * Updating tests : fix remaining mocks after merging * Updating tests : black * Cleaned up * Finished base cases * Notes * Slight changes * Added dynamic options handling, error persists * Fixed pylint issues * Fixed mock * fix decorator with dynamic dictionary of args * move choices from dynamic to const in crypto/ov * Updated var names * Check * Moved decorators * Fixed import issues * Fixed tests, update payoff controller * Fixed tests * Fixed pylint * Updated files * Added base class * Added reset * Improved base class * For James * More menues converted * Added contexts * 24 controllers left * 18 Controllers left * Changes choices * 9 controllers left * Added all controllers * Fixed glitch * Replaced all improper callings of class * Removed menu decorator * refactored try_except * Last commit * Black fix * Bug fix * Added James' new menus * Fixed tests * Fixed 8 tests * Fixing mypy issue * Updating tests : stocks/options * Fixed options * Fixed tests * Updating tests : stocks/options * Fixed tests * More test fixes * Updating tests : stocks/ba * Fixed options test * More bug fixes * Fixed tests * fixed pylint * Skipped test_call_load * Add typings to base class * Fix issue with appending auto completer options + bugfixes * Add typings to base class * Terminal throws error for bad path * sexy solution to auto completer in runtime * more sexy reset with reset_level stored * no so sexy jump between indirect menus * Removing choices argument * refactor custom_reset * Fixed tests * Theo fixes * Added back function * Fixed tests Co-authored-by: Chavithra PARANA <chavithra@gmail.com> Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt>
OpenBBTerminal
12
Python
12
crypto_controller.py
def call_disc(self, _): from gamestonk_terminal.cryptocurrency.discovery.discovery_controller import ( DiscoveryController, ) self.queue = DiscoveryController(queue=self.queue).menu()
006b3570b795215a17c64841110b649b03db9a98
37
https://github.com/OpenBB-finance/OpenBBTerminal.git
51
def call_disc(self, _): from gamestonk_terminal.cryptocurrency.discovery.discovery_controller import ( DiscoveryController, ) self.queue =
10
57
call_disc
14
0
1
5
wagtail/core/tests/test_blocks.py
73,999
Reformat with black
wagtail
9
Python
12
test_blocks.py
def test_get_searchable_content_whitespace(self): block = blocks.RichTextBlock() value = RichText("<p>mashed</p><p>po<i>ta</i>toes</p>") result = block.get_searchable_content(value) self.assertEqual(result, ["mashed potatoes"])
d10f15e55806c6944827d801cd9c2d53f5da4186
36
https://github.com/wagtail/wagtail.git
41
def test_get_searchable_content_whitespace(self): block = blocks.RichTextBlock() value = RichText("<p>mashed</p><p>po<i>ta</i>toes</
10
63
test_get_searchable_content_whitespace
17
0
1
11
tests/providers/google/cloud/hooks/test_dlp.py
47,083
Fix new MyPy errors in main (#22884) Those MyPe errors are side effect of some new dependencies.
airflow
9
Python
17
test_dlp.py
def test_list_stored_info_types_with_project_id(self, get_conn): result = self.hook.list_stored_info_types(project_id=PROJECT_ID) assert isinstance(result, list) get_conn.return_value.list_stored_info_types.assert_called_once_with( parent=PROJECT_PATH, page_size=None, order_by=None, retry=DEFAULT, timeout=None, metadata=(), )
6933022e94acf139b2dea9a589bb8b25c62a5d20
60
https://github.com/apache/airflow.git
110
def test_list_stored_info_types_with_project_id(self, get_conn): result = self.hook.list_stored_info_types(project_id=PROJECT_ID) assert isinstance(result, list) get_conn.return_value.list_stored_info_types.assert_called_once_with( parent=PROJECT_PATH, page_size=None, or
20
87
test_list_stored_info_types_with_project_id
26
0
1
14
python/ray/serve/tests/test_schema.py
139,700
[Serve] Add deployment graph `import_path` and `runtime_env` to `ServeApplicationSchema` (#24814) A newly planned version of the Serve schema (used in the REST API and CLI) requires the user to pass in their deployment graph's`import_path` and optionally a runtime_env containing that graph. This new schema can then pick up any `init_args` and `init_kwargs` values directly from the graph, instead of requiring them to be serialized and passed explicitly into the REST request. This change: * Adds the `import_path` and `runtime_env` fields to the `ServeApplicationSchema`. * Updates or disables outdated unit tests. Follow-up changes should: * Update the status schemas (i.e. `DeploymentStatusSchema` and `ServeApplicationStatusSchema`). * Remove deployment-level `import_path`s. * Process the new `import_path` and `runtime_env` fields instead of silently ignoring them. * Remove `init_args` and `init_kwargs` from `DeploymentSchema` afterwards. Co-authored-by: Edward Oakes <ed.nmi.oakes@gmail.com>
ray
12
Python
25
test_schema.py
def get_invalid_runtime_envs() -> List[Dict]: return [ # Local URIs in working_dir and py_modules { "working_dir": ".", "py_modules": [ "/Desktop/my_project", ( "https://github.com/shrekris-anyscale/" "test_deploy_group/archive/HEAD.zip" ), ], } ]
3a2bd16ecae15d6e26585c32c113dcfe7469ccd7
31
https://github.com/ray-project/ray.git
172
def get_invalid_runtime_envs() -> List[Dict]: return [ # Local URIs in working_dir and py_modules { "working_dir": ".", "py_modules": [ "/Desktop/my_project",
3
61
get_invalid_runtime_envs
31
1
1
8
Tests/test_file_mpo.py
243,216
Parametrize tests
Pillow
13
Python
25
test_file_mpo.py
def test_app(test_file): # Test APP/COM reader (@PIL135) with Image.open(test_file) as im: assert im.applist[0][0] == "APP1" assert im.applist[1][0] == "APP2" assert ( im.applist[1][1][:16] == b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00" ) assert len(im.applist) == 2 @pytest.mark.parametrize("test_file", test_files)
0ed03d4a58d5f31d570fc9fc391298ce032ad7ce
@pytest.mark.parametrize("test_file", test_files)
67
https://github.com/python-pillow/Pillow.git
81
def test_app(test_file): # Test APP/COM reader (@PIL135) with Image.open(test_file) as im: assert im.applist[0][0] == "APP1" assert im.applist[1][0] == "APP2" assert ( im.applist[1][1][:16] == b"MPF\x00MM\x00*\x00\x00\x00\x08\x00\x03\xb0\x00" ) assert len(im.applist) == 2 @pytest.mark.pa
11
138
test_app
50
0
7
14
sympy/vector/integrals.py
198,671
Code optimizations
sympy
16
Python
36
integrals.py
def _bounds_case(cls, parameters, limits): V = list(limits.keys()) E = [] for p in V: lower_p = limits[p][0] upper_p = limits[p][1] lower_p = lower_p.atoms() upper_p = upper_p.atoms() E.extend((p, q) for q in V if p != q and (lower_p.issuperset({q}) or upper_p.issuperset({q}))) if not E: return parameters else: return topological_sort((V, E), key=default_sort_key)
19114acf6514bc87f5c8cfde35e0fcab88965be7
119
https://github.com/sympy/sympy.git
181
def _bounds_case(cls, parameters, limits): V = list(limits.keys()) E = [] for p in V: lower_p = limits[p][0] upper_p = limits[p][1] lower_p = lower_p.atoms() upper_p = upper_p.atoms() E.extend((p, q) for q in V if p != q and (lower_p.issuperset({q}) or upper_p.issuperset({q}))) if not E: return parameters else: return topological_sort((V, E), key=default_so
18
183
_bounds_case
22
0
1
15
tests/cli/test_block.py
59,568
Remove console, update client logs, fix tests, and add docs
prefect
11
Python
21
test_block.py
def test_listing_blocks_after_saving_a_block(): system.JSON(value="a casual test block").save("wildblock") expected_output = ( "ID", "Type", "Name", "Slug", "wildblock", ) invoke_and_assert( ["block", "ls"], expected_code=0, expected_output_contains=expected_output, expected_line_count=9, )
447f475d95da0b19b9d94e9367dee05dd248ed53
52
https://github.com/PrefectHQ/prefect.git
99
def test_listing_blocks_after_saving_a_block(): system.JSON(value="a casual test block").save("wildblock") expected_output = ( "ID", "Type", "Name", "Slug", "wildblock", ) invoke_and_assert( ["bl
10
92
test_listing_blocks_after_saving_a_block
95
0
6
27
python3.10.4/Lib/http/server.py
217,875
add python 3.10.4 for windows
XX-Net
13
Python
70
server.py
def handle_one_request(self): try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) return if not self.raw_requestline: self.close_connection = True return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error( HTTPStatus.NOT_IMPLEMENTED, "Unsupported method (%r)" % self.command) return method = getattr(self, mname) method() self.wfile.flush() #actually send the response if not already done. except TimeoutError as e: #a read or a write timed out. Discard this connection self.log_error("Request timed out: %r", e) self.close_connection = True return
8198943edd73a363c266633e1aa5b2a9e9c9f526
143
https://github.com/XX-net/XX-Net.git
463
def handle_one_request(self): try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG) return if not self.raw_requestline: self.close_connection = True return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error( HTTPStatus.NOT_IMPLEMENTED, "Unsupported method (%r)" % self.command) return method = getattr(self, mn
24
251
handle_one_request
12
0
1
4
test/test_components.py
181,326
gr.ScatterPlot component (#2764) * Try clean install * Resolve peer dependencies? * CHANGELOG * Add outbreak_forcast notebook * generate again * CHANGELOG * Add image to changelog * Color palette * Fix colors + legend * Tooltip * Add axis titles * Clean up code a bit + quant scales * Add code * Add size, shape + rename legend title * Fix demo * Add update + demo * Handle darkmode better * Try new font * Use sans-serif * Add caption * Changelog + tests * More tests * Address comments * Make caption fontsize smaller and enable interactivity * Add docstrings + add height + width * Use normal font weight * Make last values keyword only Co-authored-by: Abubakar Abid <abubakar@huggingface.co> * Fix typo * Accept value as fn * reword changelog a bit Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
gradio
9
Python
11
test_components.py
def test_update_visibility(self): output = gr.ScatterPlot.update(visible=False) assert not output["visible"] assert output["value"] is gr.components._Keywords.NO_VALUE
f60053d85ffb7f0fd4bb648906914370b7aa4598
36
https://github.com/gradio-app/gradio.git
32
def test_update_visibility(self): output = gr.ScatterPlot.update(visible=False) assert not output["visible"] assert output["value"] is
10
60
test_update_visibility
63
0
7
28
dashboard/modules/node/node_head.py
135,534
[Dashboard] Remove the view data (#29701) View data is unnecessary in the new dashboard. Same for node detail view. When the node is idle at 250 nodes, CPU usage is about 20~30%, and I found most of usage is from MessageToDict. Since we have lots of view data, I assume most of overhead is from view data.
ray
16
Python
46
node_head.py
async def get_all_nodes(self, req) -> aiohttp.web.Response: view = req.query.get("view") if view == "summary": all_node_summary = await DataOrganizer.get_all_node_summary() return dashboard_optional_utils.rest_response( success=True, message="Node summary fetched.", summary=all_node_summary ) elif view is not None and view.lower() == "hostNameList".lower(): alive_hostnames = set() for node in DataSource.nodes.values(): if node["state"] == "ALIVE": alive_hostnames.add(node["nodeManagerHostname"]) return dashboard_optional_utils.rest_response( success=True, message="Node hostname list fetched.", host_name_list=list(alive_hostnames), ) else: return dashboard_optional_utils.rest_response( success=False, message=f"Unknown view {view}" )
9e3e3280e440c3ce026e29c1cc8d1f73f124e21a
172
https://github.com/ray-project/ray.git
298
async def get_all_nodes(self, req) -> aiohttp.web.Response: view = req.query.get("view") if view == "summary": all_node_summary = await DataOrganizer.get_all_node_summary() return dashboard_optional_utils.rest_response( success=True, message="Node summary fetched.", summary=all_node_summary ) elif view is not None and view.lower() == "hostNameList".lower(): alive_hostnames = set() for node in DataSource.nodes.values(): if node["state"] == "ALIVE": alive_hostnames.add(node["nodeManagerHostname"]) return dashboard_optional_utils.rest_response( success=True, message="Node hostname list fetched.", host_name_list=list(alive_hostnames), ) else: return dashboard_optional_utils.rest_response( success=False, message=f"Unknown view {view}"
27
239
get_all_nodes
8
0
1
4
homeassistant/components/vlc/media_player.py
307,021
Use new media player enums [u-w] (#78067)
core
8
Python
8
media_player.py
def media_play(self) -> None: self._vlc.play() self._state = MediaPlayerState.PLAYING
8bdeb3ca5b27b5d92163a14c7dd7c5eca37cfe13
22
https://github.com/home-assistant/core.git
29
def media_play(self) -> None: self._vlc.play() self._state = MediaP
7
39
media_play
394
1
2
119
tests/components/recorder/test_statistics.py
290,750
Reduce size of get_statistics_during_period WS API response (#82131)
core
14
Python
126
test_statistics.py
def test_compile_hourly_statistics(hass_recorder): hass = hass_recorder() instance = recorder.get_instance(hass) setup_component(hass, "sensor", {}) zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four) assert dict(states) == dict(hist) # Should not fail if there is nothing there yet stats = get_latest_short_term_statistics( hass, ["sensor.test1"], {"last_reset", "max", "mean", "min", "state", "sum"} ) assert stats == {} for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) do_adhoc_statistics(hass, start=four) wait_recording_done(hass) expected_1 = { "start": process_timestamp(zero), "end": process_timestamp(zero + timedelta(minutes=5)), "mean": approx(14.915254237288135), "min": approx(10.0), "max": approx(20.0), "last_reset": None, "state": None, "sum": None, } expected_2 = { "start": process_timestamp(four), "end": process_timestamp(four + timedelta(minutes=5)), "mean": approx(20.0), "min": approx(20.0), "max": approx(20.0), "last_reset": None, "state": None, "sum": None, } expected_stats1 = [expected_1, expected_2] expected_stats2 = [expected_1, expected_2] # Test statistics_during_period stats = statistics_during_period(hass, zero, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} # Test statistics_during_period with a far future start and end date future = dt_util.as_utc(dt_util.parse_datetime("2221-11-01 00:00:00")) stats = statistics_during_period(hass, future, end_time=future, period="5minute") assert stats == {} # Test statistics_during_period with a far future end date stats = statistics_during_period(hass, zero, end_time=future, period="5minute") assert stats == {"sensor.test1": expected_stats1, "sensor.test2": expected_stats2} stats = statistics_during_period( hass, zero, statistic_ids=["sensor.test2"], period="5minute" ) assert stats == {"sensor.test2": expected_stats2} stats = statistics_during_period( hass, zero, statistic_ids=["sensor.test3"], period="5minute" ) assert stats == {} # Test get_last_short_term_statistics and get_latest_short_term_statistics stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} stats = get_last_short_term_statistics( hass, 1, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": [expected_2]} stats = get_latest_short_term_statistics( hass, ["sensor.test1"], {"last_reset", "max", "mean", "min", "state", "sum"} ) assert stats == {"sensor.test1": [expected_2]} metadata = get_metadata(hass, statistic_ids=['sensor.test1"']) stats = get_latest_short_term_statistics( hass, ["sensor.test1"], {"last_reset", "max", "mean", "min", "state", "sum"}, metadata=metadata, ) assert stats == {"sensor.test1": [expected_2]} stats = get_last_short_term_statistics( hass, 2, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": expected_stats1[::-1]} stats = get_last_short_term_statistics( hass, 3, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {"sensor.test1": expected_stats1[::-1]} stats = get_last_short_term_statistics( hass, 1, "sensor.test3", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} instance.get_session().query(StatisticsShortTerm).delete() # Should not fail there is nothing in the table stats = get_latest_short_term_statistics( hass, ["sensor.test1"], {"last_reset", "max", "mean", "min", "state", "sum"} ) assert stats == {} @pytest.fixture
607a0e7697a640e524405f5560868125781bdf0c
@pytest.fixture
763
https://github.com/home-assistant/core.git
996
def test_compile_hourly_statistics(hass_recorder): hass = hass_recorder() instance = recorder.get_instance(hass) setup_component(hass, "sensor", {}) zero, four, states = record_states(hass) hist = history.get_significant_states(hass, zero, four) assert dict(states) == dict(hist) # Should not fail if there is nothing there yet stats = get_latest_short_term_statistics( hass, ["sensor.test1"], {"last_reset", "max", "mean", "min", "state", "sum"} ) assert stats == {} for kwargs in ({}, {"statistic_ids": ["sensor.test1"]}): stats = statistics_during_period(hass, zero, period="5minute", **kwargs) assert stats == {} stats = get_last_short_term_statistics( hass, 0, "sensor.test1", True, {"last_reset", "max", "mean", "min", "state", "sum"}, ) assert stats == {} do_adhoc_statistics(hass, start=zero) do_adhoc_statistics(hass, start=four) wait_recording_done(hass) expected_1 = { "start": process_time
46
1,277
test_compile_hourly_statistics
21
0
1
5
tests/orion/models/test_orm.py
54,326
Add ORM repr
prefect
11
Python
17
test_orm.py
async def test_repr(self, db, session, flow):
    assert repr(flow) == f"Flow(id={flow.id})"
    assert repr(db.Flow()) == f"Flow(id=None)"
    flow_id = uuid4()
    assert repr(db.Flow(id=flow_id)) == f"Flow(id={flow_id})"
763390cded874b2427017347c75f0fd35b743e1a
51
https://github.com/PrefectHQ/prefect.git
48
async def test_repr(self, db, session, flow): assert repr(flow) == f"Flow(id={flow.id})" assert repr(db.Flow()) == f"Flow(id=None)" flow_id = uuid4() assert repr(db.Flow(id=flow_id)) == f"Flo
10
93
test_repr
37
0
3
33
python/ray/data/tests/test_stats.py
124,971
[Datasets] Refactor `split_at_indices()` to minimize number of split tasks and data movement. (#26363) The current Dataset.split_at_indices() implementation suffers from O(n^2) memory usage in the small-split case (see issue) due to recursive splitting of the same blocks. This PR implements a split_at_indices() algorithm that minimizes the number of split tasks and data movement while ensuring that at most one block is used in each split task, for the sake of memory stability. Co-authored-by: scv119 <scv119@gmail.com>
ray
12
Python
25
test_stats.py
def test_dataset_split_stats(ray_start_regular_shared, tmp_path): ds = ray.data.range(100, parallelism=10).map(lambda x: x + 1) dses = ds.split_at_indices([49]) dses = [ds.map(lambda x: x + 1) for ds in dses] for ds_ in dses: stats = canonicalize(ds_.stats()) assert ( stats == )
fb54679a239e4c7368a72a2fe3023cac04380827
81
https://github.com/ray-project/ray.git
92
def test_dataset_split_stats(ray_start_regular_shared, tmp_path): ds = ray.data.range(100, parallelism=10).map(lambda x: x + 1) dses = ds.split_at_indices([49]) dses = [ds.map(lambda x: x + 1) for ds in dses] for ds_ in dses: stats = canonicalize(ds_.stats()) assert (
15
128
test_dataset_split_stats
56
0
3
12
wagtail/images/fields.py
79,979
Enforce the use of a single string formatting mechanism for translation source strings Close #9377
wagtail
13
Python
45
fields.py
def check_image_pixel_size(self, f):
    # Upload pixel size checking can be disabled by setting max upload pixel to None
    if self.max_image_pixels is None:
        return
    # Check the pixel size
    width, height = f.image.get_size()
    frames = f.image.get_frame_count()
    num_pixels = width * height * frames
    if num_pixels > self.max_image_pixels:
        raise ValidationError(
            self.error_messages["file_too_many_pixels"]
            % {"num_pixels": num_pixels, "max_pixels_count": self.max_image_pixels},
            code="file_too_many_pixels",
        )
5c1c2c8f531d96f4568f6dfa6ce71bc32dd9d16c
76
https://github.com/wagtail/wagtail.git
182
def check_image_pixel_size(self, f): # Upload pixel size checking can be disabled by setting max upload pixel to None if self.max_image_pixels is None: return # Check the pixel size width, height = f.image.get_size() frames = f.image.get_frame_cou
14
127
check_image_pixel_size
40
0
1
13
tests/sentry/models/test_group.py
89,912
feat(issue-platform): Include the `IssueOccurrence` with the `GroupEvent` when fetching latest event (#42279) This ensures that when we fetch the latest event for a `Group` that if an `IssueOccurrence` exists and is associated with the event that we fetch it and include it in the `GroupEvent`. This also adds various other necessary work to be able to query this dataset in snuba. I haven't included all the columns, that can happen as needed.
sentry
12
Python
27
test_group.py
def test_get_latest_event(self):
    self.store_event(
        data={"event_id": "a" * 32, "fingerprint": ["group-1"], "timestamp": self.two_min_ago},
        project_id=self.project.id,
    )
    self.store_event(
        data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago},
        project_id=self.project.id,
    )
    group = Group.objects.first()
    group_event = group.get_latest_event()
    assert group_event.event_id == "b" * 32
    assert group_event.occurrence is None
32f7a18046786e84cd852334178a3ecedefb12cc
105
https://github.com/getsentry/sentry.git
139
def test_get_latest_event(self): self.store_event( data={"event_id": "a" * 32, "fingerprint": ["group-1"], "timestamp": self.two_min_ago}, project_id=self.project.id, ) self.store_event( data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago}, project_id=self.project.id, ) group = Group.objects.first() group_event = group.
17
179
test_get_latest_event
10
0
2
3
apps/common/permissions.py
188,186
fix: fix rbac to dev (#7636) * feat: add RBAC application module * feat: add RBAC Model, API * feat: add RBAC Model, API 2 * feat: add RBAC Model, API 3 * feat: add RBAC Model, API 4 * feat: RBAC * feat: RBAC * feat: RBAC * feat: RBAC * feat: RBAC * feat: RBAC tidy up permission bits * feat: RBAC tidy up permission bits 2 * feat: RBAC tidy up permission bits 2 * feat: RBAC tidy up permission bits * feat: RBAC add default roles * feat: RBAC add migration files; migrate user roles -> user role bindings * feat: RBAC add migration files; migrate user roles -> user role bindings * feat: RBAC update user module API * feat: RBAC add org module migration files & update org module API * feat: RBAC add org module migration files & update org module API * feat: RBAC update usage of the user role attribute * feat: RBAC No.1 * xxx * perf: stash * perf: ... * perf(rbac): add perms to the profile serializer * stash * perf: use init * perf: update migrations * perf: rbac * stash * stash * pref: update rbac * stash it * stash: go fix other bugs first * perf: update role, add users * pref: update RBAC Model * feat: add permission tree api * stash: stash for now * stash: stash for now * perf: update model verbose name * feat: add various model verbose names * perf: generate migrations * perf: optimize permission bits * perf: add migration script * feat: add org role migration * perf: add migration script * stash * perf: add migrateion * perf: stash for now * perf: update rbac * perf: stash it * fix: migration conflict * fix: migration conflict * perf: stash for now * perf: update rbac logic * stash: stash for now * perf: update built-in roles * perf: solve the root org problem * perf: stash it * perf: optimize rbac * perf: optimize rolebinding handling * perf: finish handling users leaving an org * perf: stash for now * perf: update translations * perf: removed IsSuperUser * perf: IsAppUser removal done * perf: update connection token permissions * perf: fix import issue * perf: perms define format, update app user permissions * perf: update permission * perf: remove some org admin * perf: remove part of org admin * perf: remove a bit more org admin role * perf: remove more org admin * perf: user role search * perf: remove a lot of js * perf: add permission bits * perf: update permissions * perf: remove a todo * merge: with dev * fix: resolve conflicts Co-authored-by: Bai <bugatti_it@163.com> Co-authored-by: Michael Bai <baijiangjie@gmail.com> Co-authored-by: ibuler <ibuler@qq.com>
jumpserver
10
Python
10
permissions.py
def has_permission(self, request, view):
    return super().has_permission(request, view) \
        and request.user.is_superuser
e259d2a9e9167c58fa75a78d1050dd5dcfde96f4
27
https://github.com/jumpserver/jumpserver.git
30
def has_permission(self, request, view): return super().has_permission(request, view) \
7
39
has_permission
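The `has_permission` override in this record layers a superuser requirement on top of the parent class's check. A minimal sketch of the same composition pattern with Django REST Framework; the class name below is illustrative, not taken from the repo:

from rest_framework.permissions import IsAuthenticated


class IsSuperUser(IsAuthenticated):
    """Allow access only to authenticated superusers."""

    def has_permission(self, request, view):
        # Reuse the parent check (authentication), then add the superuser condition.
        return super().has_permission(request, view) \
            and request.user.is_superuser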
13
0
2
6
python3.10.4/Lib/asyncio/base_events.py
220,347
add python 3.10.4 for windows
XX-Net
11
Python
13
base_events.py
def _do_shutdown(self, future):
    try:
        self._default_executor.shutdown(wait=True)
        self.call_soon_threadsafe(future.set_result, None)
    except Exception as ex:
        self.call_soon_threadsafe(future.set_exception, ex)
8198943edd73a363c266633e1aa5b2a9e9c9f526
44
https://github.com/XX-net/XX-Net.git
59
def _do_shutdown(self, future): try: self._default_executor.shutdown(wait=True) self.call_soon_threadsafe(future.set_result, None) except Exception as ex: self.call_soon_threadsafe(future.set_exception, ex)
11
70
_do_shutdown
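`_do_shutdown` runs a blocking executor shutdown off the event loop thread and reports the outcome back with `call_soon_threadsafe`. A small self-contained sketch of that hand-off pattern, with illustrative names:

import asyncio
import concurrent.futures
import threading


def shutdown_pool(loop, pool, future):
    # Runs in a worker thread: block here, then publish the result thread-safely.
    try:
        pool.shutdown(wait=True)
        loop.call_soon_threadsafe(future.set_result, None)
    except Exception as ex:
        loop.call_soon_threadsafe(future.set_exception, ex)


async def main():
    loop = asyncio.get_running_loop()
    pool = concurrent.futures.ThreadPoolExecutor()
    done = loop.create_future()
    threading.Thread(target=shutdown_pool, args=(loop, pool, done)).start()
    await done  # resolves once the pool has shut down


asyncio.run(main())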
10
0
1
11
tests/components/webhook/test_init.py
308,837
Allow registering a webhook as local only (#63516)
core
8
Python
9
test_init.py
async def test_webhook_put(hass, mock_client):
    hooks = []
    webhook_id = webhook.async_generate_id()
7872f87dd74fb4e2b610bb589facc0f763f153ae
93
https://github.com/home-assistant/core.git
19
async def test_webhook_put(hass, mock_client): hooks = [] webhook_i
7
35
test_webhook_put
22
0
3
7
erpnext/accounts/doctype/bank_statement_import/bank_statement_import.py
64,795
style: format code with black
erpnext
12
Python
19
bank_statement_import.py
def update_mapping_db(bank, template_options):
    bank = frappe.get_doc("Bank", bank)
    for d in bank.bank_transaction_mapping:
        d.delete()
    for d in json.loads(template_options)["column_to_field_map"].items():
        bank.append("bank_transaction_mapping", {"bank_transaction_field": d[1], "file_field": d[0]})
    bank.save()
494bd9ef78313436f0424b918f200dab8fc7c20b
73
https://github.com/frappe/erpnext.git
15
def update_mapping_db(bank, template_options): bank = frappe.get
13
121
update_mapping_db
26
0
2
5
netbox/extras/forms/customfields.py
264,259
Merge v3.1.6
netbox
11
Python
21
customfields.py
def _append_customfield_fields(self):
    for customfield in self._get_custom_fields(self._get_content_type()):
        field_name = f'cf_{customfield.name}'
        self.fields[field_name] = self._get_form_field(customfield)
        # Annotate the field in the list of CustomField form fields
        self.custom_fields[field_name] = customfield
3e3880823b6f2fb528cd64c00acb863f17e96bae
45
https://github.com/netbox-community/netbox.git
84
def _append_customfield_fields(self): for customfield in self._get_cu
10
82
_append_customfield_fields
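The method in this record adds one form field per custom field at runtime. A generic sketch of extending a Django form dynamically in the same way; the field names are made up and this is meant to run inside a configured Django project:

from django import forms


class DynamicForm(forms.Form):
    def __init__(self, extra_fields=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # e.g. extra_fields = {"cf_serial": forms.CharField(required=False)}
        for name, field in (extra_fields or {}).items():
            self.fields[name] = field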
50
0
1
25
tests/components/aws/test_init.py
309,555
Upgrade boto3 to 1.20.24 + aiobotocore to 2.1.0 (#64045)
core
18
Python
42
test_init.py
async def test_credential_skip_validate(hass):
    with async_patch("aiobotocore.session.AioSession", new=MockAioSession):
        await async_setup_component(
            hass,
            "aws",
            {
                "aws": {
                    "credentials": [
                        {
                            "name": "key",
                            "aws_access_key_id": "not-valid",
                            "aws_secret_access_key": "dont-care",
                            "validate": False,
                        }
                    ]
                }
            },
        )
        await hass.async_block_till_done()
    sessions = hass.data[aws.DATA_SESSIONS]
    assert sessions is not None
    assert len(sessions) == 1
    session = sessions.get("key")
    assert isinstance(session, MockAioSession)
    session.get_user.assert_not_awaited()
b17860a7dd283d54bc452e5dca23532d05822589
103
https://github.com/home-assistant/core.git
361
async def test_credential_skip_validate(hass): with async_patch("aiobotocore.session.AioSession", new=MockAioSession): await async_setup_component(
17
185
test_credential_skip_validate
99
0
3
6
awx/sso/pipeline.py
81,375
Allow multiple values in SOCIAL_AUTH_SAML_USER_FLAGS_BY_ATTR.is_*_[value|role] settings (#12558)
awx
9
Python
72
pipeline.py
def _get_matches(list1, list2):
    # Because we are just doing an intersection here we don't really care which list is in which parameter
    # A SAML provider could return either a string or a list of items so we need to coerce the SAML value into a list (if needed)
    if not isinstance(list1, (list, tuple)):
        list1 = [list1]
    # In addition, we used to allow strings in the SAML config instead of Lists. The migration should take case of that but just in case, we will convert our list too
    if not isinstance(list2, (list, tuple)):
        list2 = [list2]
    return set(list1).intersection(set(list2))
782667a34ee45bfe825b29db39c67d4465391bdb
56
https://github.com/ansible/awx.git
130
def _get_matches(list1, list2): # Because we are just doing an intersection here we don't really care which list is in which parameter # A SAML provider could return either a string or a list of items so we need to coerce the SA
8
89
_get_matches
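`_get_matches` normalizes scalar-or-list inputs and intersects them as sets. The same idea as a standalone helper with a quick check:

def get_matches(a, b):
    # Coerce scalars into single-element lists, then intersect as sets.
    if not isinstance(a, (list, tuple)):
        a = [a]
    if not isinstance(b, (list, tuple)):
        b = [b]
    return set(a).intersection(set(b))


assert get_matches("admin", ["user", "admin"]) == {"admin"}
assert get_matches(["a", "b"], ["b", "c"]) == {"b"}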
17
0
1
4
modin/pandas/series.py
154,060
FEAT-#4147: Add partial compatibility with Python 3.6 and pandas 1.1 (#4301) Signed-off-by: Devin Petersohn <devin.petersohn@gmail.com> Signed-off-by: Vasily Litvinov <fam1ly.n4me@yandex.ru> Co-authored-by: Alexey Prutskov <lehaprutskov@gmail.com> Co-authored-by: Rehan Durrani <rehan@ponder.io> Co-authored-by: Igoshev, Yaroslav <Poolliver868@mail.ru> Co-authored-by: Myachev, Anatoly <anatoly.myachev@intel.com>
modin
9
Python
15
series.py
def _between(self, left, right, inclusive):  # noqa: PR01, RT01, D200
    return self._default_to_pandas(
        pandas.Series.between, left, right, inclusive=inclusive
    )
6ce9cf4daec7f9996038205289bce2186be87611
31
https://github.com/modin-project/modin.git
50
def _between(self, left, right, inclusive): # noqa: PR01, RT01, D200 return self._default_to_pandas( pandas.Series.between, left, right, inclusive=inc
9
46
_between
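The Modin method above simply defers to `pandas.Series.between`. For reference, a plain pandas sketch of what that call computes, with arbitrary values:

import pandas as pd

s = pd.Series([1, 3, 5, 7])
mask = s.between(2, 6)  # endpoints are included by default
print(mask.tolist())  # [False, True, True, False]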
264
0
1
52
tests/components/hue/test_light_v2.py
294,272
Add effects feature to Hue lights (#68567)
core
11
Python
124
test_light_v2.py
async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, "light") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1") assert light_1 is not None assert ( light_1.attributes["friendly_name"] == "Hue light with color and color temperature 1" ) assert light_1.state == "on" assert light_1.attributes["brightness"] == int(46.85 / 100 * 255) assert light_1.attributes["mode"] == "normal" assert light_1.attributes["color_mode"] == COLOR_MODE_XY assert set(light_1.attributes["supported_color_modes"]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes["xy_color"] == (0.5614, 0.4058) assert light_1.attributes["min_mireds"] == 153 assert light_1.attributes["max_mireds"] == 500 assert light_1.attributes["dynamics"] == "dynamic_palette" assert light_1.attributes["effect_list"] == ["None", "candle", "fire"] assert light_1.attributes["effect"] == "None" # test light which supports color temperature only light_2 = hass.states.get("light.hue_light_with_color_temperature_only") assert light_2 is not None assert ( light_2.attributes["friendly_name"] == "Hue light with color temperature only" ) assert light_2.state == "off" assert light_2.attributes["mode"] == "normal" assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes["min_mireds"] == 153 assert light_2.attributes["max_mireds"] == 454 assert light_2.attributes["dynamics"] == "none" assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"] # test light which supports color only light_3 = hass.states.get("light.hue_light_with_color_only") assert light_3 is not None assert light_3.attributes["friendly_name"] == "Hue light with color only" assert light_3.state == "on" assert light_3.attributes["brightness"] == 128 assert light_3.attributes["mode"] == "normal" assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY] assert light_3.attributes["color_mode"] == COLOR_MODE_XY assert light_3.attributes["dynamics"] == "dynamic_palette" # test light which supports on/off only light_4 = hass.states.get("light.hue_on_off_light") assert light_4 is not None assert light_4.attributes["friendly_name"] == "Hue on/off light" assert light_4.state == "off" assert light_4.attributes["mode"] == "normal" assert light_4.attributes["supported_color_modes"] == []
dbef90654f3693401a2df88fa00afbbffbdffcd2
423
https://github.com/home-assistant/core.git
458
async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, "light") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1") assert light_1 is not None assert ( light_1.attributes["friendly_name"] == "Hue light with color and color temperature 1" ) assert light_1.state == "on" assert light_1.attributes["brightness"] == int(46.85 / 100 * 255) assert light_1.attributes["mode"] == "normal" assert light_1.attributes["color_mode"] == COLOR_MODE_XY assert set(light_1.attributes["supported_color_modes"]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes["xy_color"] == (0.5614, 0.4058) assert light_1.attributes["min_mireds"] == 153 assert light_1.attributes["max_mireds"] == 500 assert light_1.attributes["dynamics"] == "dynamic_palette" assert light_1.attributes["effect_list"] == ["None", "candle", "fire"] assert light_1.attributes["effect"] == "None" # test light which supports color temperature only light_2 = hass.states.get("light.hue_light_with_color_temperature_only") assert light_2 is not None assert ( light_2.attributes["friendly_name"] == "Hue light with color temperature only" ) assert light_2.state == "off" assert light_2.attributes["mode"] == "normal" assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes["min_mireds"] == 153 assert light_2.attributes["max_mireds"] == 454 assert light_2.attributes["dynamics"] == "none" assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"] # test light which supports color only light_3 = hass.states.get("light.hue_light_with_color_only") assert light_3 is not None assert light_3.attributes["friendly_name"] == "Hue light with color only" assert light_3.state == "on" assert light_3.attributes["brightness"] == 128 assert light_3.attributes["mode"] == "normal" assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY] assert light_3.attributes["color_mode"] == COLOR_MODE_XY assert light_3.attributes["dynamics"] == "dynamic_palette" # test light which supports on/off only light_4 = hass.states.get("light.hue_on_off_light") assert light_4 is not None assert light_4.attributes["friendl
22
729
test_lights
85
0
1
30
tests/test_scene_rendering/opengl/test_cli_flags_opengl.py
190,019
Migrate more `os.path` to `pathlib` in tests (#2991) * Migrate more `os.path` to `pathlib` in tests * Convert test fixtures to pathlib * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix mypy errors in tests * migrate another pathlib instance Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <devel@benjamin-hackl.at>
manim
12
Python
56
test_cli_flags_opengl.py
def test_mov_can_be_set_as_output_format(tmp_path, manim_cfg_file, simple_scenes_path): scene_name = "SquareToCircle" command = [ sys.executable, "-m", "manim", "--renderer", "opengl", "-ql", "--media_dir", str(tmp_path), "--format", "mov", str(simple_scenes_path), scene_name, ] out, err, exit_code = capture(command) assert exit_code == 0, err unexpected_webm_path = ( tmp_path / "videos" / "simple_scenes" / "480p15" / "SquareToCircle.webm" ) assert not unexpected_webm_path.exists(), "unexpected webm file found at " + str( unexpected_webm_path, ) expected_mov_path = ( tmp_path / "videos" / "simple_scenes" / "480p15" / "SquareToCircle.mov" ) assert expected_mov_path.exists(), "expected .mov file not found at " + str( expected_mov_path, )
206db54af53a87985c0d243d75304ea620dad520
120
https://github.com/ManimCommunity/manim.git
239
def test_mov_can_be_set_as_output_format(tmp_path, manim_cfg_file, simple_scenes_path): scene_name = "SquareToCircle" command = [ sys.executable, "-m
16
211
test_mov_can_be_set_as_output_format
20
0
2
4
tests/sentry/api/endpoints/test_organization_member_details.py
98,436
feat(access): Add retirement flag for org roles (#33603) Add an `is_retired` attribute to organization-level roles. Roles marked as retired will be hidden in the UI, and the role can no longer be assigned to members. Members who already have the role will keep it, receiving the associated permissions and minimum team role. Role retirement is gated by the "organizations:team-roles" feature flag. Organizations without the flag will see retired roles as normal.
sentry
10
Python
18
test_organization_member_details.py
def test_lists_organization_roles(self):
    response = self.get_success_response(self.organization.slug, "me")
    role_ids = [role["id"] for role in response.data["roles"]]
    assert role_ids == ["member", "admin", "manager", "owner"]
6621356bbf73f25d6e0df98a78360c7db0f9ee42
48
https://github.com/getsentry/sentry.git
40
def test_lists_organization_roles(self): response = self.get_success_response(self.organization.slug, "me") role_ids = [role["id"] for role in response.data["roles"]]
9
83
test_lists_organization_roles
63
0
2
22
airbyte-integrations/connectors/source-google-ads/unit_tests/test_google_ads.py
3,668
Source Google Ads: handle page token expired exception (#9812) * dynamic date range * raise exception if exites the cycle without error * if range days is 1 already do not retry * added unit tests * added comments * added comments * common mock classes are moved to common module * change read_records * refactored get_date_params * handle corner case * added parse_dates function * added test_streams * check mock calls * fix unit tests for chunk date range refactoring * removed commented codes * remove commented line * refactor test_streams * refactor CustomQuery.get_query * remove TODO * deleted unused json * format * fix chunk_date_range * added docstring * set range_days to 15 for ShoppingPerformanceReport * refactor chunk_date_range * format code 2 * call parent read_records method * add return type in get_date_params * change e to exception * set start_date as end_date * log page token has expired * bump version * updated spec and def yaml Co-authored-by: auganbay <auganenu@gmail.com>
airbyte
13
Python
45
test_google_ads.py
def test_get_date_params_with_time_zone():
    time_zone_chatham = Timezone("Pacific/Chatham")  # UTC+12:45
    mock_start_date_chatham = pendulum.today(tz=time_zone_chatham).subtract(days=1).to_date_string()
    time_zone_honolulu = Timezone("Pacific/Honolulu")  # UTC-10:00
    mock_start_date_honolulu = pendulum.today(tz=time_zone_honolulu).subtract(days=1).to_date_string()
    mock_conversion_window_days = 14
    incremental_stream_config = dict(
        conversion_window_days=mock_conversion_window_days,
        start_date=mock_start_date_chatham,
        api=MockGoogleAdsClient(SAMPLE_CONFIG),
        time_zone=time_zone_chatham,
    )
    stream = IncrementalGoogleAdsStream(**incremental_stream_config)
    start_date_chatham, end_date_chatham = get_date_params(
        start_date=mock_start_date_chatham, time_zone=stream.time_zone, range_days=stream.range_days
    )
    incremental_stream_config.update({"start_date": mock_start_date_honolulu, "time_zone": time_zone_honolulu})
    stream_2 = IncrementalGoogleAdsStream(**incremental_stream_config)
    start_date_honolulu, end_date_honolulu = get_date_params(
        start_date=mock_start_date_honolulu, time_zone=stream_2.time_zone, range_days=stream_2.range_days
    )
    assert start_date_honolulu != start_date_chatham and end_date_honolulu != end_date_chatham
359fcd801128239b39297828d39821f631ce00c0
165
https://github.com/airbytehq/airbyte.git
151
def test_get_date_params_with_time_zone(): time_zone_chatham = Timezone("Pacific/Chatham") # UTC+12:45 mock_start_date_chatham = pendulum.today(tz=time_zone_chatham).subtract(days=1).to_date_string() time_zone_honolulu = Timezone("Pacific/Honolulu") # UTC-10:00 mock_start_date_honolulu = pendulum.today(tz=time_zone_honolulu).subtract(days=1).to_date_string() mock_conversion_window_days = 14 incremental_stream_config = dict( conversion_window_days=mock_conversion_window_days, start_date=mock_start_date_chatham, api=MockGoogleAdsClient(SAMPLE_CONFIG), time_zone=time_zone_chatham, ) stream = IncrementalGoogleAdsStream(**incremental_str
31
266
test_get_date_params_with_time_zone
17
0
1
4
tests/pipelines/test_pipelines_common.py
31,992
[Pipelines] Add revision tag to all default pipelines (#17667) * trigger test failure * upload revision poc * Update src/transformers/pipelines/base.py Co-authored-by: Julien Chaumond <julien@huggingface.co> * up * add test * correct some stuff * Update src/transformers/pipelines/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * correct require flag Co-authored-by: Julien Chaumond <julien@huggingface.co> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
transformers
10
Python
17
test_pipelines_common.py
def test_load_default_pipelines_tf_table_qa(self):
    import tensorflow as tf

    set_seed_fn = lambda: tf.random.set_seed(0)  # noqa: E731
    self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf)
e4d2588573f2c68eb792f2d11f092eb2c562bef5
35
https://github.com/huggingface/transformers.git
38
def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) #
9
59
test_load_default_pipelines_tf_table_qa
116
0
5
33
tests/snuba/api/endpoints/test_organization_events_v2.py
91,295
fix(discover): Handle unicode values in parameters (#35272) - This handles unicode values in parameters becoming aliased and used in clickhouse which doesn't support that - Uses the ascii values instead eg. u716e - Which works out for the new events endpoint since we transform them back to what was passed in
sentry
16
Python
67
test_organization_events_v2.py
def test_count_if(self): unicode_phrase1 = "\u716e\u6211\u66f4\u591a\u7684\u98df\u7269\uff0c\u6211\u9913\u4e86" for i in range(5): data = load_data( "transaction", timestamp=before_now(minutes=(1 + i)), start_timestamp=before_now(minutes=(1 + i), milliseconds=100 if i < 3 else 200), ) data["tags"] = { "sub_customer.is-Enterprise-42": "yes" if i == 0 else "no", "unicode-phrase": unicode_phrase1 if i == 0 else "no", } self.store_event(data, project_id=self.project.id) query = { "field": [ "count_if(transaction.duration, less, 150)", "count_if(transaction.duration, greater, 150)", "count_if(sub_customer.is-Enterprise-42, equals, yes)", "count_if(sub_customer.is-Enterprise-42, notEquals, yes)", f"count_if(unicode-phrase, equals, {unicode_phrase1})", ], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200 assert len(response.data["data"]) == 1 assert response.data["data"][0]["count_if(transaction.duration, less, 150)"] == 3 assert response.data["data"][0]["count_if(transaction.duration, greater, 150)"] == 2 assert response.data["data"][0]["count_if(sub_customer.is-Enterprise-42, equals, yes)"] == 1 assert ( response.data["data"][0]["count_if(sub_customer.is-Enterprise-42, notEquals, yes)"] == 4 ) assert response.data["data"][0][f"count_if(unicode-phrase, equals, {unicode_phrase1})"] == 1
d6da9e3f9a72428db7de318fd6c13641dbb41825
234
https://github.com/getsentry/sentry.git
455
def test_count_if(self): unicode_phrase1 = "\u716e\u6211\u66f4\u591a\u7684\u98df\u7269\uff0c\u6211\u9913\u4e86" for i in range(5): data = load_data( "transaction", timestamp=before_now(minutes=(1 + i)), start_timestamp=before_now(minutes=(1 + i), milliseconds=100 if i < 3 else 200), ) data["tags"] = { "sub_customer.is-Enterprise-42": "yes" if i == 0 else "no", "unicode-phrase": unicode_phrase1 if i == 0 else "no", } self.store_event(data, project_id=self.project.id) query = { "field": [ "count_if(transaction.duration, less, 150)", "count_if(transaction.duration, greater, 150)", "count_if(sub_customer.is-Enterprise-42, equals, yes)", "count_if(sub_customer.is-Enterprise-42, notEquals, yes)", f"count_if(unicode-phrase, equals, {unicode_phrase1})", ], "project": [self.project.id], } response = self.do_request(query) assert response.status_code == 200 assert len(response.data["data"]) == 1 assert response.data["data"][0]["count_if(transaction.duration, less, 150)"] == 3 assert response.data["data"][0]["count_if(transaction.duration, greater, 150)"] == 2 assert response.data["data"][0]["count_if(sub_customer.is-Enterprise-42, equals, yes)"] == 1 assert ( response.data["data"][0]["count_if(sub_customer.is-Enterprise-42, notEquals, yes)
21
403
test_count_if
24
1
1
7
tests/helpers/fixtures.py
321,539
tests: Adjust most imports
qutebrowser
9
Python
22
fixtures.py
def webengineview(qtbot, monkeypatch, web_tab_setup):
    QtWebEngineWidgets = pytest.importorskip('qutebrowser.qt.webenginewidgets')
    monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine)
    view = QtWebEngineWidgets.QWebEngineView()
    qtbot.add_widget(view)
    yield view
    view.setPage(None)  # Avoid warning if using QWebEngineProfile


@pytest.fixture
d387b1a1084b9649009e5cffb9d71facc80bb41f
@pytest.fixture
53
https://github.com/qutebrowser/qutebrowser.git
45
def webengineview(qtbot, monkeypatch, web_tab_setup): QtWebEngineWidgets = pytest.importorskip('qutebrowser.qt.webenginewidgets') monkeypatch.setattr(objects, 'backend', usertypes.Backend.QtWebEngine) view = QtWebEngineWidgets.QWebEngineView() qtbot.add_widget(view) yield view view.setPage(No
17
98
webengineview
51
0
1
17
docs/examples/styles/link_hover_color.py
186,246
Add example for link hover color.
textual
10
Python
33
link_hover_color.py
def compose(self):
    yield Label(
        "Visit the [link=https://textualize.io]Textualize[/link] website.",
        id="lbl1",  # (1)!
    )
    yield Label(
        "Click [@click=app.bell]here[/] for the bell sound.",
        id="lbl2",  # (2)!
    )
    yield Label(
        "You can also click [@click=app.bell]here[/] for the bell sound.",
        id="lbl3",  # (3)!
    )
    yield Label(
        "[@click=app.quit]Exit this application.[/]",
        id="lbl4",  # (4)!
    )


app = LinkHoverColorApp(css_path="link_hover_color.css")
e68e02405f813a5e7c4dc7b8e11bd8cc742d8055
45
https://github.com/Textualize/textual.git
197
def compose(self): yield Label( "Visit the [link=https://textualize.io]Textualize[/link] website.", id="lbl1", # (1)! ) yield Label( "Click [@click=app.bell]here[/] for the bell sound.", id="lbl2", # (2)! ) yield Label( "You can also click [@click=app.bell]here[/] for the bell sound.", id="lbl3", # (3)! ) yield Label( "[@click=app.quit]Exit this application.[/]", id="lbl4", # (4)! ) app = LinkHoverColo
7
104
compose
152
0
6
27
lib/model/layers.py
100,351
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
faceswap
13
Python
71
layers.py
def call(self, inputs, *args, **kwargs):
    input_shape = K.int_shape(inputs)
    if len(input_shape) != 4:
        raise ValueError('Inputs should have rank ' + str(4) +
                         '; Received input shape:', str(input_shape))
    if self.data_format == 'channels_first':
        batch_size, channels, height, width = input_shape
        if batch_size is None:
            batch_size = -1
        r_height, r_width = self.size
        o_height, o_width = height * r_height, width * r_width
        o_channels = channels // (r_height * r_width)
        out = K.reshape(inputs, (batch_size, r_height, r_width, o_channels, height, width))
        out = K.permute_dimensions(out, (0, 3, 4, 1, 5, 2))
        out = K.reshape(out, (batch_size, o_channels, o_height, o_width))
    elif self.data_format == 'channels_last':
        batch_size, height, width, channels = input_shape
        if batch_size is None:
            batch_size = -1
        r_height, r_width = self.size
        o_height, o_width = height * r_height, width * r_width
        o_channels = channels // (r_height * r_width)
        out = K.reshape(inputs, (batch_size, height, width, r_height, r_width, o_channels))
        out = K.permute_dimensions(out, (0, 1, 3, 2, 4, 5))
        out = K.reshape(out, (batch_size, o_height, o_width, o_channels))
    return out
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
267
https://github.com/deepfakes/faceswap.git
467
def call(self, inputs, *args, **kwargs): input_shape = K.int_shape(inputs) if len(input_shape) != 4: raise ValueError('Inputs should have rank ' + str(4) + '; Received input shape:', str(input_shape)) if self.data_format == 'channels_first': batch_size, channels, height, width = input_shape if batch_size is None: batch_size = -1 r_height, r_width = self.size o_height, o_width = height * r_height, width * r_width o_channels = channels // (r_height * r_width) out = K.reshape(inputs, (batch
25
406
call
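The `call` method in this record implements a pixel-shuffle (depth-to-space) upscale via a reshape-permute-reshape sequence. A NumPy sketch of the channels_last branch, useful for checking the shape bookkeeping; the sizes are arbitrary:

import numpy as np

batch, height, width, channels = 2, 4, 4, 8
r_h, r_w = 2, 2
out_channels = channels // (r_h * r_w)

x = np.random.rand(batch, height, width, channels)
out = x.reshape(batch, height, width, r_h, r_w, out_channels)
out = out.transpose(0, 1, 3, 2, 4, 5)          # interleave the block factors with H and W
out = out.reshape(batch, height * r_h, width * r_w, out_channels)
print(out.shape)  # (2, 8, 8, 2)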
98
0
12
38
homeassistant/components/openuv/sensor.py
310,537
Fix small inconsistency in OpenUV data storage (#64717)
core
16
Python
54
sensor.py
def update_from_latest_data(self) -> None:
    if (data := self.openuv.data[DATA_UV]) is None:
        self._attr_available = False
        return
    self._attr_available = True
    if self.entity_description.key == TYPE_CURRENT_OZONE_LEVEL:
        self._attr_native_value = data["ozone"]
    elif self.entity_description.key == TYPE_CURRENT_UV_INDEX:
        self._attr_native_value = data["uv"]
    elif self.entity_description.key == TYPE_CURRENT_UV_LEVEL:
        if data["uv"] >= 11:
            self._attr_native_value = UV_LEVEL_EXTREME
        elif data["uv"] >= 8:
            self._attr_native_value = UV_LEVEL_VHIGH
        elif data["uv"] >= 6:
            self._attr_native_value = UV_LEVEL_HIGH
        elif data["uv"] >= 3:
            self._attr_native_value = UV_LEVEL_MODERATE
        else:
            self._attr_native_value = UV_LEVEL_LOW
    elif self.entity_description.key == TYPE_MAX_UV_INDEX:
        self._attr_native_value = data["uv_max"]
        if uv_max_time := parse_datetime(data["uv_max_time"]):
            self._attr_extra_state_attributes.update(
                {ATTR_MAX_UV_TIME: as_local(uv_max_time)}
            )
    elif self.entity_description.key in (
        TYPE_SAFE_EXPOSURE_TIME_1,
        TYPE_SAFE_EXPOSURE_TIME_2,
        TYPE_SAFE_EXPOSURE_TIME_3,
        TYPE_SAFE_EXPOSURE_TIME_4,
        TYPE_SAFE_EXPOSURE_TIME_5,
        TYPE_SAFE_EXPOSURE_TIME_6,
    ):
        self._attr_native_value = data["safe_exposure_time"][
            EXPOSURE_TYPE_MAP[self.entity_description.key]
        ]
a70c9802839e7e5d57230d9315dc48a8b7124590
220
https://github.com/home-assistant/core.git
509
def update_from_latest_data(self) -> None: if (data := self.openuv.data[DATA_UV]) is None: self._attr_available = False return self._attr_available = True if self.entity_description.key == TYPE_CURRENT_OZONE_LEVEL: self._attr_native_value = data["ozone"] elif self.entity_description.key == TYPE_CURRENT_UV_INDEX: self._attr_native_value = data["uv"] elif self.entity_description.key == TYPE_CURRENT_UV_LEVEL: if data["uv"] >=
31
355
update_from_latest_data
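The sensor update above maps a numeric UV index onto named exposure levels with simple thresholds. The same mapping as a small pure function; the level strings here are illustrative, not the integration's constants:

def uv_level(uv: float) -> str:
    # Thresholds mirror the branch order in the sensor code above.
    if uv >= 11:
        return "Extreme"
    if uv >= 8:
        return "Very High"
    if uv >= 6:
        return "High"
    if uv >= 3:
        return "Moderate"
    return "Low"


assert uv_level(9.2) == "Very High"
assert uv_level(2.5) == "Low"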
58
0
6
16
d2l/paddle.py
157,924
[Paddle]Add chapter_computational-performance (#1167) * [Paddle]Add chapter_computational-performance * add Residual * Fix sgd bugs * remove the number * Fix gpu config * Fix some syntax and describe issue * Update gpu configuration Co-authored-by: w5688414 <w5688414@gmail.com>
d2l-zh
17
Python
44
paddle.py
def evaluate_accuracy_gpu(net, data_iter, device=None):
    if isinstance(net, nn.Layer):
        net.eval()  # Set the model to evaluation mode
        if not device:
            device = next(iter(net.parameters())).place
            paddle.set_device("gpu:{}".format(str(device)[-2]))
    # Number of correct predictions, total number of predictions
    metric = d2l.Accumulator(2)
    with paddle.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):  # Required for BERT fine-tuning
                X = [paddle.to_tensor(x, place=device) for x in X]
            else:
                X = paddle.to_tensor(X, place=device)
            y = paddle.to_tensor(y, place=device)
            metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
b0cb7bed0f74c8c263c38b2a5d9c0fca70db4d56
171
https://github.com/d2l-ai/d2l-zh.git
202
def evaluate_accuracy_gpu(net, data_iter, device=None): if isinstance(net, nn.Layer): net.eval() # 设置为评估模式
28
274
evaluate_accuracy_gpu
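`evaluate_accuracy_gpu` relies on `d2l.Accumulator(2)` to keep a running (correct, total) count. A minimal sketch of such an accumulator, assuming the usual d2l-style interface:

class Accumulator:
    """Accumulate sums over n variables."""

    def __init__(self, n):
        self.data = [0.0] * n

    def add(self, *args):
        self.data = [a + float(b) for a, b in zip(self.data, args)]

    def __getitem__(self, idx):
        return self.data[idx]


metric = Accumulator(2)
metric.add(3, 10)   # 3 correct out of 10
metric.add(7, 10)   # 7 correct out of 10
print(metric[0] / metric[1])  # 0.5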
36
0
2
28
tests/providers/google/cloud/operators/test_bigquery.py
46,813
Add autodetect arg in BQCreateExternalTable Operator (#22710) * Add autodetect parameter * Update docstring * Update google provider documentation
airflow
11
Python
32
test_bigquery.py
def test_execute(self, mock_hook):
    operator = BigQueryCreateExternalTableOperator(
        task_id=TASK_ID,
        destination_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',
        schema_fields=[],
        bucket=TEST_GCS_BUCKET,
        source_objects=TEST_GCS_DATA,
        source_format=TEST_SOURCE_FORMAT,
        autodetect=True,
    )
    operator.execute(None)
    mock_hook.return_value.create_external_table.assert_called_once_with(
        external_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',
        schema_fields=[],
        source_uris=[f'gs://{TEST_GCS_BUCKET}/{source_object}' for source_object in TEST_GCS_DATA],
        source_format=TEST_SOURCE_FORMAT,
        autodetect=True,
        compression='NONE',
        skip_leading_rows=0,
        field_delimiter=',',
        max_bad_records=0,
        quote_character=None,
        allow_quoted_newlines=False,
        allow_jagged_rows=False,
        src_fmt_configs={},
        labels=None,
        encryption_configuration=None,
    )
f9e18472c0c228fc3de7c883c7c3d26d7ee49e81
127
https://github.com/apache/airflow.git
312
def test_execute(self, mock_hook): operator = BigQueryCreateExternalTableOperator( task_id=TASK_ID, destination_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}', schema_fields=[], bucket=TEST_GCS_BUCKET, source_objects=TEST_GCS_DATA,
35
203
test_execute
26
0
1
8
freqtrade/freqai/data_handler.py
149,760
add freqao backend machinery, user interface, documentation
freqtrade
10
Python
21
data_handler.py
def append_predictions(self, predictions, do_predict, len_dataframe):
    ones = np.ones(len_dataframe)
    s_mean, s_std = ones*self.data['s_mean'], ones*self.data['s_std']
    self.predictions = np.append(self.predictions,predictions)
    self.do_predict = np.append(self.do_predict,do_predict)
    self.target_mean = np.append(self.target_mean,s_mean)
    self.target_std = np.append(self.target_std,s_std)
    return
fc837c4daa27a18ff0e86128f4d52089b88fa5fb
98
https://github.com/freqtrade/freqtrade.git
82
def append_predictions(self, predictions, do_predict, len_dataframe): ones = np.ones(len_dataframe) s_mean, s_std = ones*self.data['s_mean'], ones*self.data['s_std'] self.predictions = np.append(self.predictions,predictions) self.do_predict = np.append(self.do_predict,do_predict) self.target_mean = np.append(self.target_mean,s_mean) self.target_std = np.append(self.target_std,s_std) return
13
153
append_predictions
14
0
1
5
tests/test_context.py
53,847
Fix errors with multiple tokens and ban behavior
prefect
11
Python
13
test_context.py
def test_exiting_a_context_more_than_entering_raises():
    context = ExampleContext(x=1)
    with pytest.raises(RuntimeError, match="Asymmetric use of context"):
        with context:
            context.__exit__()
51da0df3edbbf2f812b34616ef2b0fa83c676e04
32
https://github.com/PrefectHQ/prefect.git
37
def test_exiting_a_context_more_than_entering_raises(): context = ExampleContext(x=1) with pytest.raises(Run
9
59
test_exiting_a_context_more_than_entering_raises
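The test above expects a guard against calling `__exit__` more times than `__enter__`. A generic sketch of a context object that enforces that symmetry; the class below is illustrative, not Prefect's implementation:

class BalancedContext:
    def __init__(self):
        self._depth = 0

    def __enter__(self):
        self._depth += 1
        return self

    def __exit__(self, *exc):
        if self._depth <= 0:
            raise RuntimeError("Asymmetric use of context")
        self._depth -= 1


ctx = BalancedContext()
with ctx:
    pass
# ctx.__exit__()  # would now raise RuntimeError: Asymmetric use of context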
12
0
1
6
src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
336,702
Fix `disable_attention_slicing` in pipelines (#498) Fix `disable_attention_slicing` in pipelines.
diffusers
7
Python
12
pipeline_stable_diffusion_img2img.py
def disable_attention_slicing(self): r # set slice_size = `None` to disable `set_attention_slice` self.enable_attention_slicing(None)
f7cd6b87e1ee8c7909de760f22f1a6b0c6ae0592
13
https://github.com/huggingface/diffusers.git
32
def disable_attention_slicing(self): r #
3
24
disable_attention_slicing
72
0
1
11
erpnext/regional/india/e_invoice/utils.py
67,112
style: format code with black
erpnext
13
Python
54
utils.py
def raise_document_name_too_long_error():
    title = _("Document ID Too Long")
    msg = _("As you have E-Invoicing enabled, to be able to generate IRN for this invoice")
    msg += ", "
    msg += _("document id {} exceed 16 letters.").format(bold(_("should not")))
    msg += "<br><br>"
    msg += _("You must {} your {} in order to have document id of {} length 16.").format(
        bold(_("modify")), bold(_("naming series")), bold(_("maximum"))
    )
    msg += _("Please account for ammended documents too.")
    frappe.throw(msg, title=title)
494bd9ef78313436f0424b918f200dab8fc7c20b
88
https://github.com/frappe/erpnext.git
61
def raise_document_name_too_long_error(): title = _("Document ID Too Long") msg = _("As you have E-Invoicing enabled, to be able to generate IRN for this invoice") msg += ", " msg += _("document id {} exceed 16 letters.").format(bold(_("should not"))) msg
8
165
raise_document_name_too_long_error
32
0
2
17
wagtail/documents/views/chooser.py
78,170
Further reshuffles to match generic views as closely as possible
wagtail
11
Python
30
chooser.py
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    context.update(
        {
            "results": self.documents,
            "table": self.table,
            "results_url": self.get_results_url(),
            "is_searching": self.is_searching,
            "search_query": self.search_query,
            "can_create": self.can_create(),
            "collection_id": self.collection_id,
        }
    )
    if context["can_create"]:
        creation_form = self.get_creation_form()
        context.update(self.get_creation_form_context_data(creation_form))
    return context
b301fb17a70ad6494a391f17277f8a5410313d69
98
https://github.com/wagtail/wagtail.git
215
def get_context_data(self, **kwargs): context = super().get_context_dat
16
166
get_context_data
13
0
1
3
tests/freqai/conftest.py
150,204
isolate data_drawer functions from data_kitchen, accommodate tests, add new test
freqtrade
8
Python
10
conftest.py
def get_patched_data_drawer(mocker, freqaiconf):
    # dd = mocker.patch('freqtrade.freqai.data_drawer', MagicMock())
    dd = FreqaiDataDrawer(freqaiconf)
    return dd
e213d0ad55db09d83a172019234398b64469de6f
15
https://github.com/freqtrade/freqtrade.git
21
def get_patched_data_drawer(mocker, freqaiconf): # dd = mocker.patch('freqtrade.freqai.data_drawer', MagicMock())
5
25
get_patched_data_drawer
19
0
3
7
modin/pandas/groupby.py
154,653
REFACTOR-#5026: Change exception names to simplify grepping (#5027) Signed-off-by: Myachev <anatoly.myachev@intel.com>
modin
12
Python
18
groupby.py
def __getattr__(self, key):
    try:
        return object.__getattribute__(self, key)
    except AttributeError as err:
        if key in self._columns:
            return self.__getitem__(key)
        raise err
0a2c0de4451f7e2e8f337a9478d7595473aa348e
40
https://github.com/modin-project/modin.git
88
def __getattr__(self, key): try: return object.__getattribute__(self, key) except Attr
9
66
__getattr__
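The `__getattr__` in this record falls back to column access when normal attribute lookup fails. A generic sketch of that fallback pattern with an illustrative class, not Modin's:

class Record:
    def __init__(self, columns):
        self._columns = columns

    def __getitem__(self, key):
        return self._columns[key]

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails.
        try:
            return object.__getattribute__(self, key)
        except AttributeError as err:
            if key in self._columns:
                return self[key]
            raise err


r = Record({"price": 9.5})
print(r.price)  # 9.5, served through the __getitem__ fallback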
91
0
1
35
tests/sentry/utils/suspect_resolutions/test_commit_correlation.py
94,757
fix(suspect-resolutions): Tweak commit correlation logic and track total events (#37891)
sentry
13
Python
56
test_commit_correlation.py
def test_no_files_changed(self): project = self.create_project() group1 = self.create_group(project=project, resolved_at=timezone.now()) group2 = self.create_group(project=project, status=GroupStatus.UNRESOLVED) release = self.create_release(project=project, version="1") release2 = self.create_release(project=project, version="2") repo = self.create_repo(project=project, name=project.name) commit = Commit.objects.create( organization_id=project.organization_id, repository_id=repo.id, key="1" ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release, commit=commit, order=1 ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release2, commit=commit, order=1 ) GroupRelease.objects.create( project_id=project.id, group_id=group1.id, release_id=release.id, last_seen=(group1.resolved_at - timedelta(hours=2)), ) GroupRelease.objects.create( project_id=project.id, group_id=group2.id, release_id=release2.id, last_seen=(group1.resolved_at - timedelta(hours=2)), ) res1 = get_files_changed_in_releases(group1.resolved_at, group1.id, project.id) res2 = get_files_changed_in_releases(group1.resolved_at, group2.id, project.id) assert res1.files_changed == set() assert res2.files_changed == set() assert res1.release_ids.first().id == release.id assert res2.release_ids.first().id == release2.id assert not is_issue_commit_correlated(group1.id, group2.id, project.id).is_correlated
04193742b952f3ebedfc36857771fc15489f7cd0
336
https://github.com/getsentry/sentry.git
372
def test_no_files_changed(self): project = self.create_project() group1 = self.create_group(project=project, resolved_at=timezone.now()) group2 = self.create_group(project=project, status=GroupStatus.UNRESOLVED) release = self.create_release(project=project, version="1") release2 = self.create_release(project=project, version="2") repo = self.create_repo(project=project, name=project.name) commit = Commit.objects.create( organization_id=project.organization_id, repository_id=repo.id, key="1" ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release, commit=commit, order=1 ) ReleaseCommit.objects.create( organization_id=project.organization_id, release=release2, commit=commit, order=1 )
46
506
test_no_files_changed
11
0
1
5
homeassistant/components/lacrosse_view/sensor.py
288,415
Fix LaCrosse View not updating (#79474)
core
10
Python
11
sensor.py
def native_value(self) -> float | str:
    return self.entity_description.value_fn(
        self.coordinator.data[self.index], self.entity_description.key
    )
d6a6d0d7548307c143fd2c44a589bd29f729f1e6
34
https://github.com/home-assistant/core.git
43
def native_value(self) -> float | str: return
10
54
native_value
154
0
8
22
lib/ansible/template/__init__.py
268,350
refactor and fixes for doc parsing (#77719) * refactor and remove redundant code in documentation allow location and building api to be more accessible fix issues with displaying ansible.legacy and ansible.builtin ensure we don't x2 process tokens (some modules reference them also) fixes #77764 move to constants vs hardcoded more informative errors and comments now have actual filter/test plugins, which expose the filter/test functions moved filter/test loading/finding logic into jinja2pluginloader, removed dupe implementations added tests for case in which we unique by basename when listing Update lib/ansible/utils/plugin_docs.py Co-authored-by: Sloane Hertel <19572925+s-hertel@users.noreply.github.com>
ansible
16
Python
99
__init__.py
def __getitem__(self, key): if not isinstance(key, string_types): raise ValueError('key must be a string, got %s instead' % type(key)) if key not in self._loaded_builtins: plugin = None try: plugin = self._pluginloader.get(key) except (AnsibleError, KeyError) as e: raise TemplateSyntaxError('Could not load "%s": %s' % (key, to_native(e)), 0) except Exception as e: display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e))) raise e # if a plugin was found/loaded if plugin: # set in filter cache and avoid expensive plugin load self._delegatee[key] = plugin.j2_function self._loaded_builtins.add(key) # let it trigger keyerror if we could not find ours or jinja2 one func = self._delegatee[key] # if i do have func and it is a filter, it nees wrapping if self._pluginloader.type == 'filter': # filter need wrapping if key in C.STRING_TYPE_FILTERS: # avoid litera_eval when you WANT strings func = _wrap_native_text(func) else: # conditionally unroll iterators/generators to avoid having to use `|list` after every filter func = _unroll_iterator(func) return func
4260b71cc77b7a44e061668d0d408d847f550156
157
https://github.com/ansible/ansible.git
477
def __getitem__(self, key): if not isinstance(key, string_types): raise ValueError('key must be a string, got %s instead' % type(key)) if key not in self._loaded_builtins: plugin = None try: plugin = self._pluginloader.get(key) except (AnsibleError, KeyError) as e: raise TemplateSyntaxError('Could not load "%s": %s' % (key, to_native(e)), 0) except Exception as e: display.vvvv('Unexpected plugin load (%s) exception: %s' % (key, to_native(e))) raise e # if a plugin was found/loaded if plugin: # set in filter cache and avoid expensive plugin load self._delegatee[key] = plugin.j2_function self._loaded_builtins.add(key) # let it trigger keyerror if we could not find ours or jinja2 one func = self._delegatee[key] # if i do have func and it is a filter, it nees wrapping if self._pluginloader.type == 'filter': # filter need wrappin
27
262
__getitem__
30
0
3
7
lib/matplotlib/axis.py
107,482
DOC: More cleanup axes -> Axes
matplotlib
12
Python
28
axis.py
def tick_bottom(self):
    label = True
    if 'label1On' in self._major_tick_kw:
        label = (self._major_tick_kw['label1On']
                 or self._major_tick_kw['label2On'])
    self.set_ticks_position('bottom')
    # If labels were turned off before this was called, leave them off.
    self.set_tick_params(which='both', labelbottom=label)
f156db08eee54d285ab0fb4e031e48d078ba6aa3
51
https://github.com/matplotlib/matplotlib.git
103
def tick_bottom(self): label = True if 'label1On' in self._major_tick_kw: label = (self._major_tick_kw['label1On'] or self._major_tick_kw['label2On'])
8
93
tick_bottom
45
0
1
21
flair/embeddings/transformer.py
214,627
move transformer embeddings to own file
flair
13
Python
42
transformer.py
def __getstate__(self):
    config_dict = self.model.config.to_dict()
    tokenizer_data = self._tokenizer_bytes()
    model_state = {
        "model": self.base_model_name,
        "fine_tune": self.fine_tune,
        "layers": ",".join(map(str, self.layer_indexes)),
        "layer_mean": self.layer_mean,
        "subtoken_pooling": self.subtoken_pooling,
        "cls_pooling": self.cls_pooling,
        "is_token_embedding": self.token_embedding,
        "is_document_embedding": self.document_embedding,
        "allow_long_sentences": self.allow_long_sentences,
        "config_state_dict": config_dict,
        "tokenizer_data": tokenizer_data,
        "name": self.name,
        "context_length": self.context_length,
        "respect_document_boundaries": self.respect_document_boundaries,
        "context_dropout": self.context_dropout,
    }
    return model_state
6ffedc473b3d89bff82dc45c5fd16c9003111f86
125
https://github.com/flairNLP/flair.git
244
def __getstate__(self): config_dict = self.model.config.to_dict()
25
210
__getstate__
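`__getstate__` above swaps heavyweight members for a serializable state dict before pickling. A small generic sketch of the `__getstate__`/`__setstate__` pair, with an illustrative class:

import pickle


class Embedder:
    def __init__(self, name):
        self.name = name
        self.cache = {}  # large state we do not want to serialize

    def __getstate__(self):
        # Return only what is needed to rebuild the object.
        return {"name": self.name}

    def __setstate__(self, state):
        self.name = state["name"]
        self.cache = {}  # rebuilt lazily after unpickling


restored = pickle.loads(pickle.dumps(Embedder("bert-base")))
print(restored.name)  # bert-base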
48
0
1
14
tests/rpc/test_rpc.py
150,887
add /stopentry alias for /stopbuy
freqtrade
10
Python
38
test_rpc.py
def test_rpc_stopentry(mocker, default_conf) -> None:
    mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())
    mocker.patch.multiple(
        'freqtrade.exchange.Exchange',
        fetch_ticker=MagicMock()
    )
    freqtradebot = get_patched_freqtradebot(mocker, default_conf)
    patch_get_signal(freqtradebot)
    rpc = RPC(freqtradebot)
    freqtradebot.state = State.RUNNING
    assert freqtradebot.config['max_open_trades'] != 0
    result = rpc._rpc_stopentry()
    assert {'status': 'No more entries will occur from now. Run /reload_config to reset.'} == result
    assert freqtradebot.config['max_open_trades'] == 0
b9f35cadb330763e70c52dd867ab74dc4555a94e
91
https://github.com/freqtrade/freqtrade.git
94
def test_rpc_stopentry(mocker, default_conf) -> None: mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) mocker.patch.multiple( 'freq
18
154
test_rpc_stopentry
58
0
1
18
examples/tutorial/jupyter/execution/pandas_on_ray/test/test_notebooks.py
153,226
REFACTOR-#4213: Refactor `modin/examples/tutorial/` directory (#4214) Signed-off-by: Igoshev, Yaroslav <yaroslav.igoshev@intel.com>
modin
10
Python
45
test_notebooks.py
def test_exercise_2(): modified_notebook_path = ( "examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2_test.ipynb" ) nb = nbformat.read( "examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2.ipynb", as_version=nbformat.NO_CONVERT, ) _replace_str( nb, 'path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"', '# path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"', ) new_optional_cell = f'path = "{test_dataset_path}"\n' + download_taxi_dataset optional_cell_idx = _find_code_cell_idx(nb, "[Optional] Download data locally.") nb["cells"][optional_cell_idx]["source"] = new_optional_cell nbformat.write(nb, modified_notebook_path) _execute_notebook(modified_notebook_path) # in this notebook user should add custom mad implementation # to make notebook work
6f20abdf65515b7dd0d451259f41ccc010c1f7a4
71
https://github.com/modin-project/modin.git
130
def test_exercise_2(): modified_notebook_path = ( "examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2_test.ipynb" ) nb = nbformat.read( "examples/tutorial/jupyter/execution/pandas_on_ray/local/exercise_2.ipynb", as_version=nbformat.NO_CONVERT, ) _replace_str( nb, 'path = "s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv"', '# path = "s3://dask-data/nyc-taxi/2015/
15
129
test_exercise_2
12
1
1
4
saleor/tests/fixtures.py
26,633
Refactor app tokens (#9438) * Save last_4 chars of current app tokens and store their hashes * Update app mutations, commands and tests * Update changelog Co-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>
saleor
10
Python
10
fixtures.py
def app_with_token(db):
    app = App.objects.create(name="Sample app objects", is_active=True)
    app.tokens.create(name="Test")
    return app


@pytest.fixture
81d02e76b22e3d3b3603e5ae27c5788033ac01b3
@pytest.fixture
33
https://github.com/saleor/saleor.git
19
def app_with_token(db): app = App.objects.create(name=
11
63
app_with_token
32
1
1
5
tests/test_settings.py
53,756
Using new settings access pattern everywhere
prefect
10
Python
29
test_settings.py
def test_nested_settings(monkeypatch):
    assert get_current_settings().get(PREFECT_ORION_DATABASE_ECHO) is False
    monkeypatch.setenv("PREFECT_ORION_DATABASE_ECHO", "1")
    new_settings = Settings()
    assert new_settings.get(PREFECT_ORION_DATABASE_ECHO) is True


@pytest.mark.parametrize(
    "value,expected",
    [
        ("foo", ["foo"]),
        ("foo,bar", ["foo", "bar"]),
        ("foo, bar, foobar ", ["foo", "bar", "foobar"]),
    ],
)
38a964d05aba99c743802b158ffb7f16201d85aa
@pytest.mark.parametrize( "value,expected", [ ("foo", ["foo"]), ("foo,bar", ["foo", "bar"]), ("foo, bar, foobar ", ["foo", "bar", "foobar"]), ], )
38
https://github.com/PrefectHQ/prefect.git
71
def test_nested_settings(monkeypatch): assert get_current_settings().get(PREFECT_ORION_DATABASE_ECHO) is False monkeypatch.setenv("PREFECT_ORION_DATABASE_ECHO", "1") new_settings = S
11
142
test_nested_settings
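The test above drives a settings reload through an environment variable via pytest's `monkeypatch`. A self-contained sketch of that pattern; the setting name and loader are made up for illustration:

import os

import pytest


def load_echo_flag() -> bool:
    # Stand-in for a settings loader that reads the environment at call time.
    return os.environ.get("MYAPP_DATABASE_ECHO", "0") == "1"


def test_echo_flag_from_env(monkeypatch):
    monkeypatch.delenv("MYAPP_DATABASE_ECHO", raising=False)
    assert load_echo_flag() is False
    monkeypatch.setenv("MYAPP_DATABASE_ECHO", "1")
    assert load_echo_flag() is True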
40
0
6
13
erpnext/accounts/general_ledger.py
68,423
test: Unit test for round off entry dimensions
erpnext
12
Python
28
general_ledger.py
def update_accounting_dimensions(round_off_gle):
    dimensions = get_accounting_dimensions()
    meta = frappe.get_meta(round_off_gle["voucher_type"])
    has_all_dimensions = True
    for dimension in dimensions:
        if not meta.has_field(dimension):
            has_all_dimensions = False
    if dimensions and has_all_dimensions:
        dimension_values = frappe.db.get_value(
            round_off_gle["voucher_type"], round_off_gle["voucher_no"], dimensions, as_dict=1
        )
        for dimension in dimensions:
            round_off_gle[dimension] = dimension_values.get(dimension)
3fa1c634790095bf7eabc135ed717e124efa4ff0
86
https://github.com/frappe/erpnext.git
27
def update_accounting_dimensions(round_off_gle): dimensions = get_accounting_dimensions() meta = frappe.get_met
15
138
update_accounting_dimensions
429
0
29
156
tools/infer/predict_rec.py
24,726
fix sar export
PaddleOCR
19
Python
173
predict_rec.py
def __call__(self, img_list): img_num = len(img_list) # Calculate the aspect ratio of all text bars width_list = [] for img in img_list: width_list.append(img.shape[1] / float(img.shape[0])) # Sorting can speed up the recognition process indices = np.argsort(np.array(width_list)) rec_res = [['', 0.0]] * img_num batch_num = self.rec_batch_num st = time.time() if self.benchmark: self.autolog.times.start() for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] imgC, imgH, imgW = self.rec_image_shape[:3] max_wh_ratio = imgW / imgH # max_wh_ratio = 0 for ino in range(beg_img_no, end_img_no): h, w = img_list[indices[ino]].shape[0:2] wh_ratio = w * 1.0 / h max_wh_ratio = max(max_wh_ratio, wh_ratio) for ino in range(beg_img_no, end_img_no): if self.rec_algorithm == "SAR": norm_img, _, _, valid_ratio = self.resize_norm_img_sar( img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] valid_ratio = np.expand_dims(valid_ratio, axis=0) valid_ratios = [] valid_ratios.append(valid_ratio) norm_img_batch.append(norm_img) elif self.rec_algorithm == "SRN": norm_img = self.process_image_srn( img_list[indices[ino]], self.rec_image_shape, 8, 25) encoder_word_pos_list = [] gsrm_word_pos_list = [] gsrm_slf_attn_bias1_list = [] gsrm_slf_attn_bias2_list = [] encoder_word_pos_list.append(norm_img[1]) gsrm_word_pos_list.append(norm_img[2]) gsrm_slf_attn_bias1_list.append(norm_img[3]) gsrm_slf_attn_bias2_list.append(norm_img[4]) norm_img_batch.append(norm_img[0]) elif self.rec_algorithm == "SVTR": norm_img = self.resize_norm_img_svtr(img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == "VisionLAN": norm_img = self.resize_norm_img_vl(img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == 'SPIN': norm_img = self.resize_norm_img_spin(img_list[indices[ino]]) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == "ABINet": norm_img = self.resize_norm_img_abinet( img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) else: norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) norm_img_batch = np.concatenate(norm_img_batch) norm_img_batch = norm_img_batch.copy() if self.benchmark: self.autolog.times.stamp() if self.rec_algorithm == "SRN": encoder_word_pos_list = np.concatenate(encoder_word_pos_list) gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list) gsrm_slf_attn_bias1_list = np.concatenate( gsrm_slf_attn_bias1_list) gsrm_slf_attn_bias2_list = np.concatenate( gsrm_slf_attn_bias2_list) inputs = [ norm_img_batch, encoder_word_pos_list, gsrm_word_pos_list, gsrm_slf_attn_bias1_list, gsrm_slf_attn_bias2_list, ] if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = {"predict": outputs[2]} else: input_names = self.predictor.get_input_names() for i in range(len(input_names)): input_tensor = self.predictor.get_input_handle( input_names[i]) input_tensor.copy_from_cpu(inputs[i]) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) if self.benchmark: self.autolog.times.stamp() preds = {"predict": outputs[2]} elif 
self.rec_algorithm == "SAR": valid_ratios = np.concatenate(valid_ratios) inputs = [ norm_img_batch, np.array( [valid_ratios], dtype=np.float32), ] if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = outputs[0] else: input_names = self.predictor.get_input_names() for i in range(len(input_names)): input_tensor = self.predictor.get_input_handle( input_names[i]) input_tensor.copy_from_cpu(inputs[i]) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) if self.benchmark: self.autolog.times.stamp() preds = outputs[0] else: if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = outputs[0] else: self.input_tensor.copy_from_cpu(norm_img_batch) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) if self.benchmark: self.autolog.times.stamp() if len(outputs) != 1: preds = outputs else: preds = outputs[0] rec_result = self.postprocess_op(preds) for rno in range(len(rec_result)): rec_res[indices[beg_img_no + rno]] = rec_result[rno] if self.benchmark: self.autolog.times.end(stamp=True) return rec_res, time.time() - st
6e89ec8d09c06453edeee3874a826e750a6947d6
1,116
https://github.com/PaddlePaddle/PaddleOCR.git
3,313
def __call__(self, img_list): img_num = len(img_list) # Calculate the aspect ratio of all text bars width_list = [] for img in img_list: width_list.append(img.shape[1] / float(img.shape[0])) # Sorting can speed up the recognition process indices = np.argsort(np.array(width_list)) rec_res = [['', 0.0]] * img_num batch_num = self.rec_batch_num st = time.time() if self.benchmark: self.autolog.times.start() for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] imgC, imgH, imgW = self.rec_image_shape[:3] max_wh_ratio = imgW / imgH # max_wh_ratio = 0 for ino in range(beg_img_no, end_img_no): h, w = img_list[indices[ino]].shape[0:2] wh_ratio = w * 1.0 / h max_wh_ratio = max(max_wh_ratio, wh_ratio) for ino in range(beg_img_no, end_img_no): if self.rec_algorithm == "SAR": norm_img, _, _, valid_ratio = self.resize_norm_img_sar( img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] valid_ratio = np.expand_dims(valid_ratio, axis=0) valid_ratios = [] valid_ratios.append(valid_ratio) norm_img_batch.append(norm_img) elif self.rec_algorithm == "SRN": norm_img = self.process_image_srn( img_list[indices[ino]], self.rec_image_shape, 8, 25) encoder_word_pos_list = [] gsrm_word_pos_list = [] gsrm_slf_attn_bias1_list = [] gsrm_slf_attn_bias2_list = [] encoder_word_pos_list.append(norm_img[1]) gsrm_word_pos_list.append(norm_img[2]) gsrm_slf_attn_bias1_list.append(norm_img[3]) gsrm_slf_attn_bias2_list.append(norm_img[4]) norm_img_batch.append(norm_img[0]) elif self.rec_algorithm == "SVTR": norm_img = self.resize_norm_img_svtr(img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == "VisionLAN": norm_img = self.resize_norm_img_vl(img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == 'SPIN': norm_img = self.resize_norm_img_spin(img_list[indices[ino]]) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) elif self.rec_algorithm == "ABINet": norm_img = self.resize_norm_img_abinet( img_list[indices[ino]], self.rec_image_shape) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) else: norm_img = self.resize_norm_img(img_list[indices[ino]], max_wh_ratio) norm_img = norm_img[np.newaxis, :] norm_img_batch.append(norm_img) norm_img_batch = np.concatenate(norm_img_batch) norm_img_batch = norm_img_batch.copy() if self.benchmark: self.autolog.times.stamp() if self.rec_algorithm == "SRN": encoder_word_pos_list = np.concatenate(encoder_word_pos_list) gsrm_word_pos_list = np.concatenate(gsrm_word_pos_list) gsrm_slf_attn_bias1_list = np.concatenate( gsrm_slf_attn_bias1_list) gsrm_slf_attn_bias2_list = np.concatenate( gsrm_slf_attn_bias2_list) inputs = [ norm_img_batch, encoder_word_pos_list, gsrm_word_pos_list, gsrm_slf_attn_bias1_list, gsrm_slf_attn_bias2_list, ] if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = {"predict": outputs[2]} else: input_names = self.predictor.get_input_names() for i in range(len(input_names)): input_tensor = self.predictor.get_input_handle( input_names[i]) input_tensor.copy_from_cpu(inputs[i]) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) if self.benchmark: self.autolog.times.stamp() preds = {"predict": outputs[2]} elif 
self.rec_algorithm == "SAR": valid_ratios = np.concatenate(valid_ratios) inputs = [ norm_img_batch, np.array( [valid_ratios], dtype=np.float32), ] if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = outputs[0] else: input_names = self.predictor.get_input_names() for i in range(len(input_names)): input_tensor = self.predictor.get_input_handle( input_names[i]) input_tensor.copy_from_cpu(inputs[i]) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) if self.benchmark: self.autolog.times.stamp() preds = outputs[0] else: if self.use_onnx: input_dict = {} input_dict[self.input_tensor.name] = norm_img_batch outputs = self.predictor.run(self.output_tensors, input_dict) preds = outputs[0] else: self.input_tensor.copy_from_cpu(norm_img_batch) self.predictor.run() outputs = [] for output_tensor in self.output_tensors: ou
84
1,751
__call__
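Note on the record above: the recognizer sorts text crops by width/height ratio before batching, so that padding each batch to a common width wastes less compute. Below is a minimal, standalone sketch of that sort-and-batch idea using plain NumPy only; the batch size and crop shapes are illustrative assumptions, not PaddleOCR's values.

import numpy as np

def batch_by_aspect_ratio(images, batch_size=6):
    """Yield index batches ordered by width/height ratio.

    Sorting keeps similarly shaped crops together, so padding each batch
    to a single width (as the recognizer above does) wastes less space.
    """
    ratios = np.array([img.shape[1] / float(img.shape[0]) for img in images])
    order = np.argsort(ratios)
    for start in range(0, len(images), batch_size):
        yield order[start:start + batch_size]

# Example with dummy "text crops" of a fixed height and varying widths.
crops = [np.zeros((32, w, 3), dtype=np.uint8) for w in (50, 320, 80, 200)]
for batch in batch_by_aspect_ratio(crops, batch_size=2):
    print(batch, [crops[i].shape[1] for i in batch])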
15
0
1
6
packages/syft/src/syft/core/adp/data_subject_list.py
1,710
Implement DSL setitem properly AT LONG LAST
PySyft
9
Python
15
data_subject_list.py
def __getitem__(self, item) -> DataSubjectList:
    result = self.data_subjects_indexed[item]
    return DataSubjectList(
        one_hot_lookup=self.one_hot_lookup,  # np.unique(self.one_hot_lookup[result]),
        data_subjects_indexed=result,
    )
d171fcd8726dccfffd7d13f5188a7a91cffc5b6b
31
https://github.com/OpenMined/PySyft.git
58
def __getitem__(self, item) -> DataSubjectList: result = self.data_subjects_indexed[item] return DataSubjectList( one_ho
7
47
__getitem__
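A minimal sketch of the pattern in the record above: __getitem__ slices only the index array and keeps the shared lookup table as-is. The class here is a toy stand-in, not PySyft's real DataSubjectList; only the field names come from the record.

import numpy as np

class ToyDataSubjectList:
    """Toy stand-in: integer codes index into a shared one_hot_lookup table."""

    def __init__(self, one_hot_lookup, data_subjects_indexed):
        self.one_hot_lookup = one_hot_lookup
        self.data_subjects_indexed = data_subjects_indexed

    def __getitem__(self, item) -> "ToyDataSubjectList":
        result = self.data_subjects_indexed[item]
        return ToyDataSubjectList(
            one_hot_lookup=self.one_hot_lookup,  # lookup is shared, not re-sliced
            data_subjects_indexed=result,
        )

dsl = ToyDataSubjectList(np.array(["alice", "bob"]), np.array([0, 1, 1, 0]))
print(dsl[1:3].data_subjects_indexed)  # [1 1]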
20
0
1
6
code/default/launcher/tests/integrate_testing.py
219,312
Improve testing.
XX-Net
9
Python
16
integrate_testing.py
def xtunnel_logout(self):
    xlog.info("Start testing XTunnel logout")
    res = simple_http_client.request("POST", "http://127.0.0.1:8085/module/x_tunnel/control/logout", timeout=10)
    self.assertEqual(res.status, 200)
    self.xtunnel_login_status = False
    xlog.info("Finished testing XTunnel logout")
1bfb2da67c76758578eff6f3a7c3e6cf4967bc77
46
https://github.com/XX-net/XX-Net.git
54
def xtunnel_logout(self): xlog.info("Start testing XTunnel logout") res = simple_http_client.request("P
11
80
xtunnel_logout
34
0
2
12
mkdocs/commands/gh_deploy.py
224,214
Format code with `black -l100 --skip-string-normalization`
mkdocs
12
Python
34
gh_deploy.py
def _is_cwd_git_repo():
    try:
        proc = subprocess.Popen(
            ['git', 'rev-parse', '--is-inside-work-tree'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
    except FileNotFoundError:
        log.error("Could not find git - is it installed and on your path?")
        raise Abort('Deployment Aborted!')
    proc.communicate()
    return proc.wait() == 0
dca7cbb43fcd6ea7c677c98ba585395b070d387b
60
https://github.com/mkdocs/mkdocs.git
106
def _is_cwd_git_repo(): try: proc = subprocess.Popen( ['git', 'rev-parse', '--is-inside-work-tree'], stdout=sub
13
104
_is_cwd_git_repo
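For illustration, the same inside-a-work-tree check can be written with subprocess.run; this is a hedged sketch, not mkdocs' code, and it returns False instead of aborting when git is missing.

import subprocess

def is_cwd_git_repo() -> bool:
    """Return True if the current working directory is inside a git work tree."""
    try:
        result = subprocess.run(
            ["git", "rev-parse", "--is-inside-work-tree"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except FileNotFoundError:
        # git is not installed or not on PATH
        return False
    return result.returncode == 0

print(is_cwd_git_repo())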
12
0
2
5
thumbor/engines/__init__.py
191,047
Reformat to 80 chars and mypy.ini
thumbor
10
Python
12
__init__.py
def is_multiple(self):
    return (
        hasattr(self, "multiple_engine")
        and self.multiple_engine is not None
    )
301124c5b377fa56b940d298900dbc5816dbc24e
21
https://github.com/thumbor/thumbor.git
47
def is_multiple(self): return ( hasattr(self, "multiple_engine")
4
34
is_multiple
32
0
1
6
test/mitmproxy/utils/test_magisk.py
252,656
Magisk module onboarding for Android (#5547) * Added magisk module generation * Fixed typo * changelog * Fixed mypy bug * Changed action based on ubuntu 18.04 due to https://bit.ly/3QOw87Z * Workflow pinned to ubuntu 20.04 * Moved magisk code to utils and gen on download * Styling * Removed magisk from git repo * Added tests * Fixed dead line * Update CHANGELOG.md * Hardcoded hash Co-authored-by: Joran van Apeldoorn <joran@bitsoffreedom.nl> Co-authored-by: Maximilian Hils <github@maximilianhils.com>
mitmproxy
11
Python
27
test_magisk.py
def test_subject_hash_old(tdata):
    # checks if the hash is the same as that coming from openssl
    with taddons.context() as tctx:
        tctx.options.confdir = tdata.path("mitmproxy/data/confdir")
        ca = magisk.get_ca_from_files()
        our_hash = magisk.subject_hash_old(ca)
        assert our_hash == "efb15d7d"
cba66953a303c4411a47f987170e08f30110c6ed
45
https://github.com/mitmproxy/mitmproxy.git
65
def test_subject_hash_old(tdata): # ch
13
82
test_subject_hash_old
60
0
4
28
erpnext/stock/report/incorrect_serial_no_valuation/incorrect_serial_no_valuation.py
68,867
refactor: DB independent quoting and truthy/falsy values (#31358) * refactor: DB independent quoting and truthy/falsy values * style: reformat to black spec * fix: ifnull -> coalesce * fix: coalesce -> Coalesce * fix: revert pypika comparison * refactor: convert queries to QB * fix: incorrect value types for query `=` query makes no sense with list of values * fix: remove warehouse docstatus condition * fix: keep using base rate as rate Co-authored-by: Ankush Menat <ankush@frappe.io>
erpnext
13
Python
53
incorrect_serial_no_valuation.py
def get_stock_ledger_entries(report_filters):
    fields = [
        "name",
        "voucher_type",
        "voucher_no",
        "item_code",
        "serial_no as serial_nos",
        "actual_qty",
        "posting_date",
        "posting_time",
        "company",
        "warehouse",
        "(stock_value_difference / actual_qty) as valuation_rate",
    ]

    filters = {"serial_no": ("is", "set"), "is_cancelled": 0}

    if report_filters.get("item_code"):
        filters["item_code"] = report_filters.get("item_code")

    if report_filters.get("from_date") and report_filters.get("to_date"):
        filters["posting_date"] = (
            "between",
            [report_filters.get("from_date"), report_filters.get("to_date")],
        )

    return frappe.get_all(
        "Stock Ledger Entry",
        fields=fields,
        filters=filters,
        order_by="posting_date asc, posting_time asc, creation asc",
    )
74a782d81d8f8c4a4d9214a9c06377e5e6e464dd
125
https://github.com/frappe/erpnext.git
32
def get_stock_ledger_entries(report_filters): fields = [ "name", "voucher_type", "voucher_no", "item_code", "serial_no as serial_nos", "actual_qty", "posting_date", "posting_time", "company", "warehouse", "(stock_value_difference / actual_qty) as valuation_rate", ] filters = {"serial_no": ("is", "set"), "is_cancelled": 0} if report_filters.get("item_code"): filters["item_code"] = report_filters.get("item_code") if report_filters.get("from_date") and report_filters.get("to_date"): filters["posting_date"] = ( "between", [report_filters.get("from_date"), report_filters.get("to_date")], ) return frappe.get_all( "Stock Ledger Entry", fields=fields, filters=filters
8
228
get_stock_ledger_entries
42
0
5
28
dash/_validate.py
40,176
f-strings everywhere! fffff
dash
18
Python
30
_validate.py
def validate_callback(outputs, inputs, state, extra_args, types): Input, Output, State = types if extra_args: if not isinstance(extra_args[0], (Output, Input, State)): raise exceptions.IncorrectTypeException( dedent( f ) ) raise exceptions.IncorrectTypeException( dedent( f ) ) for args in [outputs, inputs, state]: for arg in args: validate_callback_arg(arg)
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
83
https://github.com/plotly/dash.git
197
def validate_callback(outputs, inputs, state, extra_args, types): Input, Output, State = types if extra_args: if not isinstance(extra_args[0], (Output, Input, State)): raise exceptions.IncorrectTypeException( dedent( f ) ) raise exceptions.Incorr
17
163
validate_callback
114
0
1
38
keras/feature_column/sequence_feature_column_test.py
272,119
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
15
Python
64
sequence_feature_column_test.py
def test_shared_embedding_column_with_non_sequence_categorical(self):
    with tf.Graph().as_default():
        vocabulary_size = 3
        sparse_input_a = tf.compat.v1.SparseTensorValue(
            # example 0, ids [2]
            # example 1, ids [0, 1]
            indices=((0, 0), (1, 0), (1, 1)),
            values=(2, 0, 1),
            dense_shape=(2, 2),
        )
        sparse_input_b = tf.compat.v1.SparseTensorValue(
            # example 0, ids [2]
            # example 1, ids [0, 1]
            indices=((0, 0), (1, 0), (1, 1)),
            values=(2, 0, 1),
            dense_shape=(2, 2),
        )
        categorical_column_a = (
            tf.feature_column.categorical_column_with_identity(
                key="aaa", num_buckets=vocabulary_size
            )
        )
        categorical_column_b = (
            tf.feature_column.categorical_column_with_identity(
                key="bbb", num_buckets=vocabulary_size
            )
        )
        shared_embedding_columns = tf.feature_column.shared_embeddings(
            [categorical_column_a, categorical_column_b], dimension=2
        )
        sequence_input_layer = ksfc.SequenceFeatures(
            shared_embedding_columns
        )
        with self.assertRaisesRegex(
            ValueError,
            r"In embedding_column: aaa_shared_embedding\. "
            r"categorical_column must "
            r"be of type SequenceCategoricalColumn to use SequenceFeatures\.",
        ):
            _, _ = sequence_input_layer(
                {"aaa": sparse_input_a, "bbb": sparse_input_b}
            )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
216
https://github.com/keras-team/keras.git
680
def test_shared_embedding_column_with_non_sequence_categorical(self): with tf.Graph().as_default(): vocabulary_size = 3 sparse_input_a = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) sparse_input_b = tf.compat.v1.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2), ) categorical_column_a = ( tf.feature_column.categorical_column_with_identity( key="aaa", num_buckets=vocabulary_size ) ) categorical_column_b = ( tf.feature_column.categorical_column_with_identity( key="bbb", num_buckets=vocabulary_size ) ) shared_embedding_columns = tf.feature_column.shared_embeddings( [categorical_column_
29
329
test_shared_embedding_column_with_non_sequence_categorical
34
0
2
16
tests/sentry/api/endpoints/test_accept_organization_invite.py
89,537
chore(hybrid-cloud): use organization_slug in AcceptOrganizationInvite API (#42138)
sentry
11
Python
28
test_accept_organization_invite.py
def test_cannot_accept_unapproved_invite(self):
    self.login_as(self.user)

    om = OrganizationMember.objects.create(
        email="newuser@example.com",
        role="member",
        token="abc",
        organization=self.organization,
        invite_status=InviteStatus.REQUESTED_TO_JOIN.value,
    )
    for path in self._get_paths([om.id, om.token]):
        resp = self.client.post(path)
        assert resp.status_code == 400

    om = OrganizationMember.objects.get(id=om.id)
    assert not om.invite_approved
    assert om.is_pending
    assert om.token
e94d7cd092d813d88c2216fca3ca6bd48e0747a3
109
https://github.com/getsentry/sentry.git
166
def test_cannot_accept_unapproved_invite(self): self.login_as(self.user) om = OrganizationMember.objects.create( email="newuser@example.com", role="member", token="abc", organization=self.organization, invite_status=InviteStatus.REQUESTED_TO_JOIN.value,
26
170
test_cannot_accept_unapproved_invite
15
0
1
6
src/sentry/testutils/cases.py
89,491
ref: ban exam.patcher (#42222) blocked on https://github.com/getsentry/getsentry/pull/9091
sentry
15
Python
15
cases.py
def _setup_today(self):
    with mock.patch(
        "django.utils.timezone.now",
        return_value=(datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc)),
    ):
        yield
458900af44ec0ceb675ce8159d33c4b361847471
42
https://github.com/getsentry/sentry.git
61
def _setup_today(self): with mock.patch( "django.utils.timezone.now", return_value=(datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc)),
9
63
_setup_today
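The fixture above pins django.utils.timezone.now with mock.patch to get deterministic dates in tests. Here is a framework-free sketch of the same pattern; the Clock class is a hypothetical stand-in for whatever time source the code under test reads.

from datetime import datetime, timezone
from unittest import mock

class Clock:
    """Hypothetical stand-in for a time source such as django.utils.timezone."""

    @staticmethod
    def now() -> datetime:
        return datetime.now(timezone.utc)

def report_today() -> str:
    return Clock.now().date().isoformat()

FIXED = datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc)

# Freeze the clock for the duration of the block, as the fixture above does
# for django.utils.timezone.now.
with mock.patch.object(Clock, "now", return_value=FIXED):
    assert report_today() == "2013-05-18"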
7
0
1
5
tests/components/melnor/__init__.py
305,270
Add Melnor Bluetooth valve watering Integration (#70457)
core
8
Python
7
__init__.py
def patch_async_setup_entry(return_value=True):
    return patch(
        "homeassistant.components.melnor.async_setup_entry",
        return_value=return_value,
    )
8d94c8f74aea9a6a75dbc5ffbb8fb6b8ad4442d7
18
https://github.com/home-assistant/core.git
30
def patch_async_setup_entry(return_value=True): return patch( "homeassistant.components.melnor.async
3
32
patch_async_setup_entry
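A sketch of how such a patch factory is typically consumed in a test. The _FakeIntegration class is a stand-in target so the example runs without Home Assistant installed; the real target in the record is homeassistant.components.melnor.async_setup_entry.

from unittest import mock

class _FakeIntegration:
    """Stand-in for the module whose setup hook a test wants to short-circuit."""

    @staticmethod
    def async_setup_entry(hass, entry) -> bool:
        raise RuntimeError("real setup must not run in unit tests")

def patch_setup_entry(return_value=True):
    """Factory returning a patcher, mirroring the helper in the record above."""
    return mock.patch.object(
        _FakeIntegration, "async_setup_entry", return_value=return_value
    )

with patch_setup_entry() as setup_mock:
    # Inside the block the hook is a MagicMock that just reports success.
    assert _FakeIntegration.async_setup_entry(None, None) is True
assert setup_mock.call_count == 1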
28
0
4
8
salt/modules/linux_shadow.py
215,952
Update to latest ``pyupgrade`` hook. Stop skipping it on CI. Signed-off-by: Pedro Algarvio <palgarvio@vmware.com>
salt
12
Python
25
linux_shadow.py
def list_users(root=None):
    if root is not None:
        getspall = functools.partial(_getspall, root=root)
    else:
        getspall = functools.partial(spwd.getspall)

    return sorted(
        user.sp_namp if hasattr(user, "sp_namp") else user.sp_nam
        for user in getspall()
    )
f2a783643de61cac1ff3288b40241e5ce6e1ddc8
62
https://github.com/saltstack/salt.git
64
def list_users(root=None): if root is not None: getspall = functools.partial(_getspall, root=root) else: getspall = functools.partial(spwd.getspall) return sorted( user.sp_namp if hasattr(user, "sp_namp") else user.sp_nam for user in getspall() )
12
100
list_users
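The detail worth noting above is binding the optional root argument with functools.partial so the rest of the function calls one callable either way. A self-contained sketch of that pattern follows; the user database here is a plain dict rather than spwd.

import functools

USERS = {"/": ["root", "daemon", "alice"], "/mnt/chroot": ["bob"]}

def _get_users(root="/"):
    """Toy replacement for spwd.getspall(), keyed by an alternative root."""
    return USERS.get(root, [])

def list_users(root=None):
    # Bind the alternate root only when one was given, mirroring the
    # functools.partial switch in the record above.
    if root is not None:
        get_users = functools.partial(_get_users, root=root)
    else:
        get_users = functools.partial(_get_users)
    return sorted(get_users())

print(list_users())              # ['alice', 'daemon', 'root']
print(list_users("/mnt/chroot")) # ['bob']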
24
0
1
14
tests/rest/media/v1/test_filepath.py
247,356
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
synapse
10
Python
16
test_filepath.py
def test_url_cache_thumbnail(self) -> None:
    self.assertEqual(
        self.filepaths.url_cache_thumbnail_rel(
            "2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale"
        ),
        "url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
    )
    self.assertEqual(
        self.filepaths.url_cache_thumbnail(
            "2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale"
        ),
        "/media_store/url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale",
    )
7e91107be1a4287873266e588a3c5b415279f4c8
56
https://github.com/matrix-org/synapse.git
155
def test_url_cache_thumbnail(self) -> None: self.assertEqual( self.filepaths.url_cache_thumbnail_rel( "2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale" ), "url_cache_thumbnails/2020-01-02/GerZNDnDZVjsOtar/800-600-image-jpeg-scale", ) self.assertEqual( self.filepaths.url_cache_thumbnail( "2020-01-02_GerZNDnDZVjsOtar", 800, 600, "image/jpeg", "scale" ), "/media_
6
96
test_url_cache_thumbnail
103
0
9
28
flair/models/tars_model.py
214,395
Fix TARS models
flair
20
Python
57
tars_model.py
def _print_predictions(self, batch, gold_label_type):
    lines = []
    if self.tars_model.predict_spans:
        for datapoint in batch:
            # all labels default to "O"
            for token in datapoint:
                token.set_label("gold_bio", "O")
                token.set_label("predicted_bio", "O")

            # set gold token-level
            for gold_label in datapoint.get_labels(gold_label_type):
                gold_span: Span = gold_label.data_point
                prefix = "B-"
                for token in gold_span:
                    token.set_label("gold_bio", prefix + gold_label.value)
                    prefix = "I-"

            # set predicted token-level
            for predicted_label in datapoint.get_labels("predicted"):
                predicted_span: Span = predicted_label.data_point
                prefix = "B-"
                for token in predicted_span:
                    token.set_label("predicted_bio", prefix + predicted_label.value)
                    prefix = "I-"

            # now print labels in CoNLL format
            for token in datapoint:
                eval_line = (
                    f"{token.text} "
                    f"{token.get_label('gold_bio').value} "
                    f"{token.get_label('predicted_bio').value}\n"
                )
                lines.append(eval_line)
            lines.append("\n")
    return lines
538a531926c36124593a0afedaf8a24f44a11c31
155
https://github.com/flairNLP/flair.git
639
def _print_predictions(self, batch, gold_label_type): lines = [] if self.tars_model.predict_spans: for datapoint in batch: # all labels default to "O" for token in datapoint: token.set_label("gold_bio", "O") token.set_label("predicted_bio", "O") # set gold token-level for gold_label in datapoint.get_labels(gold_label_type): gold_span: Span = gold_label.data_point prefix = "B-" for token in gold_span: token.set_label("gold_bio", prefix + gold_label.value) prefix = "I-" # set predicted token-level for predicted_label in datapoint.get_labels("predicted"): predicted_span: Span = predicted_label.data_point prefix = "B-" for token in predicted_span: token.set_label("predicted_bio", prefix + predicted_label.value) prefix = "I-" # now print labels in CoNLL format for token in datapoint: eval_line = ( f"{token.text} "
23
309
_print_predictions
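The core of the function above is converting labelled spans back to token-level BIO tags for CoNLL-style output. A stripped-down sketch of that conversion with plain lists and tuples instead of flair's Span and Token types:

def spans_to_bio(num_tokens, spans):
    """spans: (start, end_exclusive, label) triples over token positions."""
    tags = ["O"] * num_tokens
    for start, end, label in spans:
        prefix = "B-"
        for i in range(start, end):
            tags[i] = prefix + label
            prefix = "I-"  # first token gets B-, the rest I-, as above
    return tags

tokens = ["George", "Washington", "went", "to", "Washington"]
gold = [(0, 2, "PER"), (4, 5, "LOC")]
predicted = [(0, 2, "PER")]

for tok, g, p in zip(tokens,
                     spans_to_bio(len(tokens), gold),
                     spans_to_bio(len(tokens), predicted)):
    print(f"{tok} {g} {p}")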
21
0
1
2
src/sentry/testutils/helpers/api_gateway.py
86,643
feat(api-gateway): Initial plumbing for api gateway (#39739) This sets up the initial plumbing for middleware -> proxy -> region silo. The API Gateway isn't active as long as the silo's SiloMode is set to MONOLITH. Also the middleware hasn't been added to settings.MIDDLEWARE. There are various fixes scattered around to get the gateway test passing. Still to be done in future PRs: - Checking if the API needs to be proxied - Adding proxy headers to responses - Handling of other methods - Handling non-json responses
sentry
10
Python
19
api_gateway.py
def get(self, request, organization):
    return Response({"proxy": False})


urlpatterns = [
    url(
        r"^organizations/(?P<organization_slug>[^\/]+)/control/$",
        ControlEndpoint.as_view(),
        name="control-endpoint",
    ),
    url(
        r"^organizations/(?P<organization_slug>[^\/]+)/region/$",
        RegionEndpoint.as_view(),
        name="region-endpoint",
    ),
]
6c1cb91778860eeb8141f9d7df788519c5ef9319
18
https://github.com/getsentry/sentry.git
79
def get(self, request, organization): return Response({"proxy": False}) urlpatterns = [ url( r"^organizations/(?P<organization_slug>[^\/]+)/control/$", ControlEndpoint.as_view(), n
11
88
get
83
0
3
24
freqtrade/configuration/configuration.py
149,500
extract load_from_files to load_config
freqtrade
12
Python
62
configuration.py
def load_config(self) -> Dict[str, Any]:
    # Load all configs
    config: Dict[str, Any] = load_from_files(self.args.get("config", []))

    # Load environment variables
    env_data = enironment_vars_to_dict()
    config = deep_merge_dicts(env_data, config)

    # Normalize config
    if 'internals' not in config:
        config['internals'] = {}

    if 'pairlists' not in config:
        config['pairlists'] = []

    # Keep a copy of the original configuration file
    config['original_config'] = deepcopy(config)

    self._process_logging_options(config)
    self._process_runmode(config)
    self._process_common_options(config)
    self._process_trading_options(config)
    self._process_optimize_options(config)
    self._process_plot_options(config)
    self._process_data_options(config)

    # Check if the exchange set by the user is supported
    check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True))

    self._resolve_pairs_list(config)

    process_temporary_deprecated_settings(config)

    return config
1347107c1e4c77daa7ddf11520d3ae020a43a5d1
159
https://github.com/freqtrade/freqtrade.git
266
def load_config(self) -> Dict[str, Any]: # Load all configs config: Dict[str, Any] = load_from_files(self.args.get("config", [])) # Load environment variables env_data = enironment_vars_to_dict() config = deep_merge_dicts(env_data, config) # Normalize config if 'internals' not in config: config['internals'] = {} if 'pairlists' not in config: conf
23
273
load_config
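One step above worth illustrating is layering environment-derived settings over file-based ones with a deep merge. The sketch below is a generic recursive merge, not necessarily freqtrade's exact deep_merge_dicts; the sample config keys are made up.

def deep_merge(overlay: dict, base: dict) -> dict:
    """Return base updated recursively with overlay (overlay wins on conflicts)."""
    merged = dict(base)
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(value, merged[key])
        else:
            merged[key] = value
    return merged

file_config = {"exchange": {"name": "binance", "key": ""}, "dry_run": True}
env_config = {"exchange": {"key": "from-env"}}

config = deep_merge(env_config, file_config)
print(config)  # {'exchange': {'name': 'binance', 'key': 'from-env'}, 'dry_run': True}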
143
0
2
37
jina/parsers/orchestrate/deployment.py
11,854
refactor: rename switch feature (#4494)
jina
10
Python
94
deployment.py
def mixin_base_deployment_parser(parser):
    gp = add_arg_group(parser, title='Deployment')

    gp.add_argument(
        '--uses-before',
        type=str,
        help='The executor attached after the Pods described by --uses, typically before sending to all '
        'shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--uses-after',
        type=str,
        help='The executor attached after the Pods described by --uses, typically used for receiving from '
        'all shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--when',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help='The condition that the documents need to fulfill before reaching the Executor.'
        'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
    )
    gp.add_argument(
        '--external',
        action='store_true',
        default=False,
        help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'
        'This Deployment will not be context managed by the Flow.',
    )

    # hidden CLI used for internal only
    gp.add_argument(
        '--deployment-role',
        type=DeploymentRoleType.from_string,
        choices=list(DeploymentRoleType),
        help='The role of this deployment in the flow'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
efff15494d3a955b2211dcd2abcd8659c0d006c0
122
https://github.com/jina-ai/jina.git
357
def mixin_base_deployment_parser(parser): gp = add_arg_group(parser, title='Deployment') gp.add_argument( '--uses-before', type=str, help='The executor attached after the Pods described by --uses, typically before sending to all ' 'shards, accepted type follows `--uses`', ) gp.add_argument( '--uses-after', type=str, help='The executor attached after the Pods described by --uses, typically used for receiving from ' 'all shards, accepted type follows `--uses`', ) gp.add_argument( '--when', action=KVAppendAction, metavar='KEY: VALUE', nargs='*', help='The condition that the documents need to fulfill before reaching the Executor.' 'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`', ) gp.add_argument( '--external', action='store_true', default=False, help='The Deployment will be considered an external Deployment that has been started independently from the Flow.' 'This Deployment will not be context managed by the Flow.', ) # hidden CLI used for internal only gp.add_argument( '--dep
21
214
mixin_base_deployment_parser
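The record above attaches CLI options to a titled argument group, a mixin style shared by several parsers in that codebase. A minimal sketch of the same pattern with plain argparse; the option names and help texts here are illustrative, not Jina's full set.

import argparse

def mixin_example_parser(parser: argparse.ArgumentParser) -> None:
    """Attach a titled group of deployment-style options to an existing parser."""
    group = parser.add_argument_group(title="Deployment")
    group.add_argument(
        "--uses-before",
        type=str,
        help="executor to run before the main one",
    )
    group.add_argument(
        "--external",
        action="store_true",
        default=False,
        help="treat the deployment as externally managed",
    )

parser = argparse.ArgumentParser()
mixin_example_parser(parser)
args = parser.parse_args(["--uses-before", "normalizer", "--external"])
print(args.uses_before, args.external)  # normalizer True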