Dataset schema (column, dtype, and the observed minimum/maximum shown by the viewer;
for string columns the bounds are string lengths):

column          dtype   min        max
n_words         int64   3          1.95k
n_ast_errors    int64   0          2
complexity      int64   1          151
nloc            int64   2          546
path            string  8 chars    125 chars
id              int64   280        339k
commit_message  string  3 chars    18.1k chars
repo            string  3 chars    28 chars
ast_levels      int64   4          28
language        string  1 class (Python)
vocab_size      int64   3          677
file_name       string  5 chars    67 chars
code            string  101 chars  24k chars
commit_id       string  40 chars   40 chars
ast_errors      string  0 chars    2.76k chars
token_counts    int64   7          3.77k
url             string  31 chars   61 chars
n_whitespaces   int64   4          13.9k
random_cut      string  21 chars   13.9k chars
n_identifiers   int64   1          157
n_ast_nodes     int64   10         3.6k
fun_name        string  3 chars    72 chars

Each record below lists its field values in this column order; the ast_errors value is
omitted from a record when it is empty.
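As a minimal sketch of how a dataset with this schema could be loaded and inspected with the
Hugging Face datasets library (the identifier "org/code-functions" below is a placeholder,
since the actual dataset id is not part of this preview):

# Minimal sketch, assuming the datasets library is installed and that
# "org/code-functions" stands in for the real dataset id (not given here).
from datasets import load_dataset

ds = load_dataset("org/code-functions", split="train")

print(ds.features)                                # column names and dtypes, as listed above
row = ds[0]                                       # one record: per-function metrics plus text fields
print(row["repo"], row["path"], row["fun_name"])  # which repository and function the row describes
print(row["code"][:200])                          # start of the stored function body

The code field holds the full function source and random_cut appears to be a truncated prefix of
the same text; the integer columns are per-function metrics (token, identifier, and AST-node
counts, and so on).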
121
0
9
32
src/textual/_compositor.py
182,523
box model
textual
17
Python
85
_compositor.py
def _get_renders(self) -> Iterable[tuple[Region, Region, Lines]]:
    # If a renderable throws an error while rendering, the user likely doesn't care about the traceback
    # up to this point.
    _rich_traceback_guard = True
    if self.map:
        widget_regions = sorted(
            [
                (widget, region, order, clip)
                for widget, (region, order, clip) in self.map.items()
                if widget.is_visual and widget.visible
            ],
            key=itemgetter(2),
            reverse=True,
        )
    else:
        widget_regions = []
    for widget, region, _order, clip in widget_regions:
        lines = widget._get_lines()
        if region in clip:
            yield region, clip, lines
        elif clip.overlaps(region):
            new_region = region.intersection(clip)
            delta_x = new_region.x - region.x
            delta_y = new_region.y - region.y
            splits = [delta_x, delta_x + new_region.width]
            lines = lines[delta_y : delta_y + new_region.height]
            divide = Segment.divide
            lines = [list(divide(line, splits))[1] for line in lines]
            yield region, clip, lines
ef99069cf4cc0782f207a0a53689567bf85110ee
203
https://github.com/Textualize/textual.git
488
def _get_renders(self) -> Iterable[tuple[Region, Region, Lines]]: # If a renderable throws an error while rendering, the user likely doesn't care about the traceback # up to this point. _rich_traceback_guard = True if self.map: widget_regions = sorted( [ (widget, region, order, clip) for widget, (region, order, clip) in self.map.items() if widget.is_visual and widget.visible ], key=itemgetter(2), reverse=True, ) else: widget_regions = [] for widget, region, _order, clip in widget_regions: lines = widget._get_lines() if region in clip: yield region, clip, lines elif clip.overlaps(region): new_region = region.intersection(clip) delta_x = new_region.x - region.x delta_y = new_region.y - region.y splits = [delta_x, delta_x + new_region.width] lines = lines[delta_y : delta_y + new_region.h
37
306
_get_renders
30
0
1
8
tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py
33,785
Add support for Japanese GPT-NeoX-based model by ABEJA, Inc. (#18814) * add gpt-neox-japanese model and tokenizer as new model * Correction to PR's comment for GPT NeoX Japanese - Fix to be able to use gpu - Add comment # Copied... at the top of RotaryEmbedding - Implement nn.Linear instead of original linear class - Add generation test under @slow * fix bias treatment for gpt-neox-japanese * Modidy gpt-neox-japanese following PR - add doc for bias_dropout_add - style change following a PR comment * add document for gpt-neox-japanese * remove unused import from gpt-neox-japanese * fix README for gpt-neox-japanese
transformers
9
Python
18
test_tokenization_gpt_neox_japanese.py
def test_sequence_builders(self):
    tokenizer = self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b")

    ids_1 = tokenizer.encode("ありがとう。", add_special_tokens=False)
    ids_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

    encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2)

    assert encoded_sentence == ids_1
    assert encoded_pair == ids_1 + ids_2
f5f430e5c80b85b57bb910435e45d84746210133
67
https://github.com/huggingface/transformers.git
78
def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b") ids_1 = tokenizer.encode("ありがとう。", add
12
110
test_sequence_builders
82
0
5
20
pandas/tests/computation/test_eval.py
164,623
TST: Don't use autouse fixture in test_eval (#45832)
pandas
12
Python
53
test_eval.py
def test_pow(self, lhs, rhs, engine, parser):
    # odd failure on win32 platform, so skip
    ex = "lhs ** rhs"
    expected = _eval_single_bin(lhs, "**", rhs, engine)
    result = pd.eval(ex, engine=engine, parser=parser)

    if (
        is_scalar(lhs)
        and is_scalar(rhs)
        and isinstance(expected, (complex, np.complexfloating))
        and np.isnan(result)
    ):
        msg = "(DataFrame.columns|numpy array) are different"
        with pytest.raises(AssertionError, match=msg):
            tm.assert_numpy_array_equal(result, expected)
    else:
        tm.assert_almost_equal(result, expected)

        ex = "(lhs ** rhs) ** rhs"
        result = pd.eval(ex, engine=engine, parser=parser)

        middle = _eval_single_bin(lhs, "**", rhs, engine)
        expected = _eval_single_bin(middle, "**", rhs, engine)
        tm.assert_almost_equal(result, expected)
a1ce6a0eb07e5f969ab192b792083cb1c1f702d5
161
https://github.com/pandas-dev/pandas.git
277
def test_pow(self, lhs, rhs, engine, parser): # odd failure on win32 platform, so skip ex = "lhs ** rhs" expected = _eval_single_bin(lhs, "**", rhs, engine) result = pd.eval(ex, engine=engine, parser=parser) if ( is_scalar(lhs) and is_scalar(rhs) and isinstance(expected, (complex, np.complexfloating)) and np.isnan(result) ): msg = "(DataFrame.columns|numpy array) are different" with pytest.raises(AssertionError, match=msg): tm.assert_numpy_array_equal(result, expected) else: tm.assert_almost_equal(result, expected) ex = "(lhs ** rhs) ** rhs" result = pd.eval(ex, engine=engine, parser=parser) middle = _eval_single_bin(lhs, "**", rhs, engine)
27
249
test_pow
30
0
1
5
tests/test_region_group.py
182,508
Convert method inline_ranges to function, remove RegionGroup class
textual
10
Python
23
test_region_group.py
def test_inline_ranges_fully_overlapping_regions():
    regions = [Region(1, 1, 3, 3), Region(2, 2, 1, 1), Region(0, 2, 3, 1)]
    assert list(inline_ranges(regions)) == [
        InlineRange(1, 1, 3), InlineRange(2, 0, 3), InlineRange(3, 1, 3)
    ]
f541c26587ba6e1eb07e5d7b6030082c729e9f2e
77
https://github.com/Textualize/textual.git
45
def test_inline_ranges_fully_overlapping_regions(): regions
6
103
test_inline_ranges_fully_overlapping_regions
38
0
5
9
src/sentry_plugins/slack/client.py
95,409
ref(slack plugin): Don't raise errors for unactionable things (#30998) * ref(slack plugin): Don't raise errors for unactionable things
sentry
12
Python
35
client.py
def request(self, data):
    try:
        return self._request(
            path=self.webhook, method="post", data=data, json=False, allow_text=True
        )
    except ApiError as e:
        # Ignore 404 and ignorable errors from slack webhooks
        if e.text and e.text in IGNORABLE_SLACK_ERRORS or e.code == 404:
            return
        raise e
2a7a0aac94e3b612cb49ca24c8882b7290c788f8
61
https://github.com/getsentry/sentry.git
136
def request(self, data): try: return self._request( path=self.webhook,
14
95
request
22
0
1
5
modules/deepbooru.py
152,693
deepdanbooru interrogator
stable-diffusion-webui
10
Python
20
deepbooru.py
def get_deepbooru_tags(pil_image, threshold=0.5):
    with ProcessPoolExecutor() as executor:
        f = executor.submit(_load_tf_and_return_tags, pil_image, threshold)
        ret = f.result()  # will rethrow any exceptions
    return ret
59a2b9e5afc27d2fda72069ca0635070535d18fe
39
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
42
def get_deepbooru_tags(pil_image, threshold=0.5): with ProcessPoolExecutor() as execut
10
62
get_deepbooru_tags
19
0
1
5
sympy/combinatorics/permutations.py
196,160
Updated import locations
sympy
11
Python
15
permutations.py
def __add__(self, other):
    rank = (self.rank() + other) % self.cardinality
    rv = self.unrank_lex(self.size, rank)
    rv._rank = rank
    return rv
498015021131af4dbb07eb110e5badaba8250c7b
42
https://github.com/sympy/sympy.git
54
def __add__(self, other): rank = (self.rank() + other) % self.cardinality rv = self.unrank_lex(self.size, rank)
9
68
__add__
73
1
3
17
pandas/tests/io/test_stata.py
164,631
TST: Don't use autouse fixture in test_stata (#45831)
pandas
19
Python
57
test_stata.py
def test_chunked_categorical_partial(datapath):
    dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
    values = ["a", "b", "a", "b", 3.0]
    with StataReader(dta_file, chunksize=2) as reader:
        with tm.assert_produces_warning(CategoricalConversionWarning):
            for i, block in enumerate(reader):
                assert list(block.cats) == values[2 * i : 2 * (i + 1)]
                if i < 2:
                    idx = pd.Index(["a", "b"])
                else:
                    idx = pd.Index([3.0], dtype="float64")
                tm.assert_index_equal(block.cats.cat.categories, idx)

    with tm.assert_produces_warning(CategoricalConversionWarning):
        with StataReader(dta_file, chunksize=5) as reader:
            large_chunk = reader.__next__()
    direct = read_stata(dta_file)
    tm.assert_frame_equal(direct, large_chunk)


@pytest.mark.parametrize("chunksize", (-1, 0, "apple"))
c055dc4e6be9fc1b68d873a1ace286322dadd5e1
@pytest.mark.parametrize("chunksize", (-1, 0, "apple"))
174
https://github.com/pandas-dev/pandas.git
223
def test_chunked_categorical_partial(datapath): dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta") values = ["a", "b", "a", "b", 3.0] with StataReader(dta_file, chunksize=2) as reader: with tm.assert_produces_warning(CategoricalConversionWarning): for i, block in enumerate(reader): assert list(block.cats) == values[2 * i : 2 * (i + 1)] if i < 2: idx = pd.Index(["a", "b"]) else: idx = pd.Index([3.0], dtype="float64") tm.assert_index_equal(block.cats.cat.categories, idx) wit
30
319
test_chunked_categorical_partial
93
0
8
21
mmdet/datasets/samplers/batch_sampler.py
244,407
[Refactor] Refactor samplers.
mmdetection
14
Python
66
batch_sampler.py
def __iter__(self) -> Sequence[int]:
    for idx in self.sampler:
        data_info = self.sampler.dataset.get_data_info(idx)
        width, height = data_info['width'], data_info['height']
        bucket_id = 0 if width < height else 1
        bucket = self._aspect_ratio_buckets[bucket_id]
        bucket.append(idx)
        # yield a batch of indices in the same aspect ratio group
        if len(bucket) == self.batch_size:
            yield bucket[:]
            del bucket[:]

    # yield the rest data and reset the bucket
    left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[
        1]
    self._aspect_ratio_buckets = [[] for _ in range(2)]
    while len(left_data) > 0:
        if len(left_data) <= self.batch_size:
            if not self.drop_last:
                yield left_data[:]
            left_data = []
        else:
            yield left_data[:self.batch_size]
            left_data = left_data[self.batch_size:]
19441631117425b5521655b23cd4d885a7858478
167
https://github.com/open-mmlab/mmdetection.git
346
def __iter__(self) -> Sequence[int]: for idx in self.sampler: data_info = self.sampler.dataset.get_data_info(idx) width, height = data_info['width'], data_info['height'] bucket_id = 0 if width < height else 1 bucket = self._aspect_ratio_buckets[bucket_id] bucket.append(idx) # yield a batch of indices in the same aspect ratio group if len(bucket) == self.batch_size: yield bucket[:] del bucket[:] # yield the rest data and reset the bucket left_data = self._aspect_ratio_buckets[0] + self._aspect_ratio_buckets[ 1] se
21
269
__iter__
5
0
1
3
manim/mobject/opengl/opengl_vectorized_mobject.py
189,767
Reuse shader wrappers and shader data (#2062) * reuse shader wrappers and shader data arrays * Update uniforms Co-authored-by: Laith Bahodi <70682032+hydrobeam@users.noreply.github.com> Co-authored-by: Darylgolden <darylgolden@gmail.com>
manim
7
Python
5
opengl_vectorized_mobject.py
def get_stroke_shader_wrapper(self):
    self.update_stroke_shader_wrapper()
    return self.stroke_shader_wrapper
10a5f40aa5c51bd6822742b707acd2390ce2cc23
14
https://github.com/ManimCommunity/manim.git
18
def get_stroke_shader_wrapper(self): self.update_stroke_shader_wrapper() return
4
24
get_stroke_shader_wrapper
23
0
1
8
python/ray/data/tests/test_dataset.py
124,140
[dataset] Support push-based shuffle in groupby operations (#25910) Allows option for push-based shuffle in groupby operations, to improve scalability to larger Datasets.
ray
17
Python
23
test_dataset.py
def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle):
    # Test empty dataset.
    agg_ds = (
        ray.data.range_table(10)
        .filter(lambda r: r["value"] > 10)
        .groupby("value")
        .count()
    )
    assert agg_ds.count() == 0
68b893369cada42be61d05843a9b7267b4f0b353
49
https://github.com/ray-project/ray.git
62
def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle): # Test empty dataset. agg_ds = ( ray.data.range_table(10)
11
83
test_groupby_arrow
177
0
19
49
mindsdb/api/http/namespaces/config.py
116,380
storage
mindsdb
15
Python
105
config.py
def put(self, name): params = {} params.update((request.json or {}).get('params', {})) params.update(request.form or {}) if len(params) == 0: abort(400, "type of 'params' must be dict") files = request.files temp_dir = None if files is not None and len(files) > 0: temp_dir = tempfile.mkdtemp(prefix='integration_files_') for key, file in files.items(): temp_dir_path = Path(temp_dir) file_name = Path(file.filename) file_path = temp_dir_path.joinpath(file_name).resolve() if temp_dir_path not in file_path.parents: raise Exception(f'Can not save file at path: {file_path}') file.save(file_path) params[key] = file_path is_test = params.get('test', False) if is_test: del params['test'] handler = request.integration_controller.create_tmp_handler( handler_type=params.get('type'), connection_data=params ) status = handler.check_connection() if temp_dir is not None: shutil.rmtree(temp_dir) return status, 200 integration = request.integration_controller.get(name, sensitive_info=False) if integration is not None: abort(400, f"Integration with name '{name}' already exists") try: engine = params['type'] if engine is not None: del params['type'] request.integration_controller.add(name, engine, params) if is_test is False and params.get('publish', False) is True: stream_controller = StreamController(request.company_id) if engine in stream_controller.known_dbs and params.get('publish', False) is True: stream_controller.setup(name) except Exception as e: log.error(str(e)) if temp_dir is not None: shutil.rmtree(temp_dir) abort(500, f'Error during config update: {str(e)}') if temp_dir is not None: shutil.rmtree(temp_dir) return '', 200
25201296fe9944420667cb5fed4d676b869f48ff
368
https://github.com/mindsdb/mindsdb.git
712
def put(self, name): params = {} params.update((request.json or {}).get('params', {})) params.update(request.form or {}) if len(params) == 0: abort(400, "type of 'params' must be dict") files = request.files temp_dir = None if files is not None and len(files) > 0: temp_dir = tempfile.mkdtemp(prefix='integration_files_') for key, file in files.items(): temp_dir_path = Path(temp_dir) file_name = Path(file.filename) file_path = temp_dir_path.joinpath(file_name).resolve() if temp_dir_path not in file_path.parents: raise Exception(f'Can not save file at path: {file_path}') file.save(file_path) params[key] = file_path is_test = params.get('test', False) if is_test: del params['test'] handler = request.integration_controller.create_tmp_handler( handler_type=params.get('type'), connection_data=params ) status = handler.check_connection() if temp_dir is not None: shutil.rmtree(temp_dir) return status, 200 integration = request.integration_controller.get(name, sensitive_info=False) if integration is not None: abort(400, f"Integration with name '{name}' already exists") try: engine = params['type'] if engine is not None: del params['type'] request.integration_controller.add(name, engine, params) if is_test is False and params.get('publish', False) is True: stream_controller = StreamController(request.company_id) if engine in stream_controller.known_dbs and params.get('publish', False) is True: stream_controller.setup(name) except Exception as e: lo
52
616
put
50
0
1
16
tests/orion/api/test_task_runs.py
54,988
Use status constants instead of hardcoded values Closes: PrefectHQ/orion#1673
prefect
17
Python
37
test_task_runs.py
async def test_set_task_run_state(self, task_run, client, session):
    response = await client.post(
        f"/task_runs/{task_run.id}/set_state",
        json=dict(state=dict(type="RUNNING", name="Test State")),
    )
    assert response.status_code == status.HTTP_201_CREATED

    api_response = OrchestrationResult.parse_obj(response.json())
    assert api_response.status == responses.SetStateStatus.ACCEPT

    task_run_id = task_run.id
    session.expire_all()
    run = await models.task_runs.read_task_run(
        session=session, task_run_id=task_run_id
    )
    assert run.state.type == states.StateType.RUNNING
    assert run.state.name == "Test State"
    assert run.run_count == 1
37549d157007f6eef07ed8b1e2e14efb73134840
123
https://github.com/PrefectHQ/prefect.git
166
async def test_set_task_run_state(self, task_run, client, session): response = await client.post( f"/task_runs/{task_run.id}/set_state", json=dict(state=dict(type="RUNNING", name="Test State")), ) assert response.status_code == status.HTTP_201_CREATED api_response = OrchestrationResult.parse_obj(response.json()) assert api_response.status == respons
32
202
test_set_task_run_state
27
0
1
4
keras/dtensor/test_util.py
270,640
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
9
Python
24
test_util.py
def tearDown(self):
    super().tearDown()
    # Make sure all async ops finish.
    context.async_wait()
    # TODO(hthu): Remove the reset once we fixed the CopyToMesh with
    # DefaultMesh placement issue.
    reset_dtensor()
84afc5193d38057e2e2badf9c889ea87d80d8fbf
20
https://github.com/keras-team/keras.git
68
def tearDown(self): super().tearDown() #
6
40
tearDown
25
0
1
13
tests/rest/media/v1/test_media_storage.py
247,522
Add type hints to `tests/rest`. (#12208) Co-authored-by: Patrick Cloke <clokep@users.noreply.github.com>
synapse
14
Python
22
test_media_storage.py
def default_config(self) -> Dict[str, Any]:
    config = default_config("test")
    config.update(
        {
            "spam_checker": [
                {
                    "module": TestSpamChecker.__module__ + ".TestSpamChecker",
                    "config": {},
                }
            ]
        }
    )
    return config
32c828d0f760492711a98b11376e229d795fd1b3
46
https://github.com/matrix-org/synapse.git
188
def default_config(self) -> Dict[str, Any]: config = default_config("test") config.update( { "spam_checker": [ { "module": TestSpamChecker.__module__ + ".TestSpamChecker", "config": {}, } ] } ) return config
9
80
default_config
14
0
1
5
keras/tests/model_subclassing_test_util.py
276,547
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
8
Python
10
model_subclassing_test_util.py
def call(self, inputs):
    x = self.dense1(inputs)
    x = self.bn(x)
    x = self.test_net(x)
    return self.dense2(x)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
38
https://github.com/keras-team/keras.git
41
def call(self, inputs): x = self.dense1(inputs) x = self.bn(x) x = self.test_net(x) return self.d
8
61
call
116
0
1
39
tests/orion/api/test_deployments.py
54,916
Use status constants instead of hardcoded values Closes: PrefectHQ/orion#1673
prefect
19
Python
67
test_deployments.py
async def test_delete_deployment(self, session, client, deployment): # schedule both an autoscheduled and manually scheduled flow run # for this deployment id, these should be deleted when the deployment is deleted flow_run_1 = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=deployment.flow_id, deployment_id=deployment.id, flow_version="1.0", auto_scheduled=False, state=schemas.states.Scheduled( scheduled_time=pendulum.now("UTC"), message="Flow run scheduled", ), ), ) flow_run_2 = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=deployment.flow_id, deployment_id=deployment.id, flow_version="1.0", auto_scheduled=True, state=schemas.states.Scheduled( scheduled_time=pendulum.now("UTC"), message="Flow run scheduled", ), ), ) await session.commit() # delete the deployment response = await client.delete(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_204_NO_CONTENT # make sure it's deleted response = await client.get(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_404_NOT_FOUND # make sure scheduled flow runs are deleted n_runs = await models.flow_runs.count_flow_runs( session, flow_run_filter=schemas.filters.FlowRunFilter( id={"any_": [flow_run_1.id, flow_run_2.id]} ), ) assert n_runs == 0
37549d157007f6eef07ed8b1e2e14efb73134840
228
https://github.com/PrefectHQ/prefect.git
604
async def test_delete_deployment(self, session, client, deployment): # schedule both an autoscheduled and manually scheduled flow run # for this deployment id, these should be deleted when the deployment is deleted flow_run_1 = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=deployment.flow_id, deployment_id=deployment.id, flow_version="1.0", auto_scheduled=False, state=schemas.states.Scheduled( scheduled_time=pendulum.now("UTC"), message="Flow run scheduled", ), ), ) flow_run_2 = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=deployment.flow_id, deployment_id=deployment.id, flow_version="1.0", auto_scheduled=True, state=schemas.states.Scheduled( scheduled_time=pendulum.now("UTC"), message="Flow run scheduled", ), ), ) await session.commit() # delete the deployment response = await client.delete(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_204_NO_CONTENT # make sure it's deleted response = await client.get(f"/deployments/{deployment.id}") assert response.status_code == status.HTTP_404_NOT_FOUND # make sure scheduled flow runs are deleted n_runs = await models.flow_runs.count_flo
39
371
test_delete_deployment
22
0
1
6
wagtail/admin/tests/test_templatetags.py
72,176
Reformat with black
wagtail
10
Python
17
test_templatetags.py
def test_timesince_last_update_before_today_shows_timeago(self):
    dt = timezone.now() - timedelta(weeks=1, days=2)

    timesince = timesince_last_update(dt, use_shorthand=False)
    self.assertEqual(timesince, "1\xa0week, 2\xa0days ago")

    timesince = timesince_last_update(dt)
    self.assertEqual(timesince, "1\xa0week ago")
d10f15e55806c6944827d801cd9c2d53f5da4186
55
https://github.com/wagtail/wagtail.git
56
def test_timesince_last_update_before_today_shows_timeago(self): dt = timezone.now() - timedelta(weeks=1, days=2) timesince = timesince_last_update(dt, use_shorthand=False) self.assertEqual(timesince, "1\xa0week, 2\xa0days ago") timesince = timesince_last_update(dt) self.assertEqual(timesince, "1\xa0week ago")
12
93
test_timesince_last_update_before_today_shows_timeago
38
0
5
11
lib/matplotlib/bezier.py
110,252
DOC: improve grammar and consistency
matplotlib
10
Python
28
bezier.py
def split_de_casteljau(beta, t):
    beta = np.asarray(beta)
    beta_list = [beta]
    while True:
        beta = _de_casteljau1(beta, t)
        beta_list.append(beta)
        if len(beta) == 1:
            break
    left_beta = [beta[0] for beta in beta_list]
    right_beta = [beta[-1] for beta in reversed(beta_list)]
    return left_beta, right_beta
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
79
https://github.com/matplotlib/matplotlib.git
91
def split_de_casteljau(beta, t): beta = np.asarray(beta) beta_list = [beta] while True: beta = _de_casteljau1(beta, t) beta_list.append(beta) if len(bet
12
125
split_de_casteljau
290
0
15
66
lib/matplotlib/patches.py
110,059
Simplify some patches path definitions. - When a Path ends with a CLOSEPOLY, it is not necessary to put a LINETO to the closing position before it (in fact that can result in an incorrect line join at that position), and the xy position associated with the CLOSEPOLY can just be (0, 0), as it is irrelevant. - For defining the codes arrays, for short paths (such as the patch shapes here), one can just use list unpacking for shorter definitions. - Rename the _path and _fillable lists in ArrowStyle to plural names. - Rely on the default tolerance of split_bezier_intersecting_with_closedpath (which is 0.01) rather than re-specifying the same magic value everywhere. - Remove inapplicable comment re: make_compound_path_from_polys (which only applies to polygons all of with the same number of sides, which is not the case when clipping to a bbox).
matplotlib
15
Python
150
patches.py
def transmute(self, path, mutation_size, linewidth): if self._beginarrow_head or self._endarrow_head: head_length = self.head_length * mutation_size head_width = self.head_width * mutation_size head_dist = np.hypot(head_length, head_width) cos_t, sin_t = head_length / head_dist, head_width / head_dist scaleA = mutation_size if self.scaleA is None else self.scaleA scaleB = mutation_size if self.scaleB is None else self.scaleB # begin arrow x0, y0 = path.vertices[0] x1, y1 = path.vertices[1] # If there is no room for an arrow and a line, then skip the arrow has_begin_arrow = self._beginarrow_head and (x0, y0) != (x1, y1) verticesA, codesA, ddxA, ddyA = ( self._get_arrow_wedge(x1, y1, x0, y0, head_dist, cos_t, sin_t, linewidth) if has_begin_arrow else ([], [], 0, 0) ) # end arrow x2, y2 = path.vertices[-2] x3, y3 = path.vertices[-1] # If there is no room for an arrow and a line, then skip the arrow has_end_arrow = self._endarrow_head and (x2, y2) != (x3, y3) verticesB, codesB, ddxB, ddyB = ( self._get_arrow_wedge(x2, y2, x3, y3, head_dist, cos_t, sin_t, linewidth) if has_end_arrow else ([], [], 0, 0) ) # This simple code will not work if ddx, ddy is greater than the # separation between vertices. paths = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)], path.vertices[1:-1], [(x3 + ddxB, y3 + ddyB)]]), path.codes)] fills = [False] if has_begin_arrow: if self.fillbegin: paths.append( Path([*verticesA, (0, 0)], [*codesA, Path.CLOSEPOLY])) fills.append(True) else: paths.append(Path(verticesA, codesA)) fills.append(False) elif self._beginarrow_bracket: x0, y0 = path.vertices[0] x1, y1 = path.vertices[1] verticesA, codesA = self._get_bracket(x0, y0, x1, y1, self.widthA * scaleA, self.lengthA * scaleA, self.angleA) paths.append(Path(verticesA, codesA)) fills.append(False) if has_end_arrow: if self.fillend: fills.append(True) paths.append( Path([*verticesB, (0, 0)], [*codesB, Path.CLOSEPOLY])) else: fills.append(False) paths.append(Path(verticesB, codesB)) elif self._endarrow_bracket: x0, y0 = path.vertices[-1] x1, y1 = path.vertices[-2] verticesB, codesB = self._get_bracket(x0, y0, x1, y1, self.widthB * scaleB, self.lengthB * scaleB, self.angleB) paths.append(Path(verticesB, codesB)) fills.append(False) return paths, fills
73622a0173916bfcb4cb7b9b393929be025e18c9
575
https://github.com/matplotlib/matplotlib.git
1,632
def transmute(self, path, mutation_size, linewidth): if self._beginarrow_head or self._endarrow_head: head_length = self.head_length * mutation_size head_width = self.head_width * mutation_size head_dist = np.hypot(head_length, head_width) cos_t, sin_t = head_length / head_dist, head_width / head_dist scaleA = mutation_size if self.scaleA is None else self.scaleA scaleB = mutation_size if self.scaleB is None else self.scaleB # begin arrow x0, y0 = path.vertices[
54
842
transmute
11
0
1
2
freqtrade/rpc/replicate/channel.py
150,519
DataFrame transmission, strategy follower logic
freqtrade
9
Python
11
channel.py
async def send(self, data):
    # logger.info(f"Serialized Send - {self._wrapped_ws._serialize(data)}")
    await self._wrapped_ws.send(data)
6f5478cc029bc146e3980affa61dd7956c5cb416
17
https://github.com/freqtrade/freqtrade.git
32
async def send(self, data): # logger.info(f"Serialized Send - {self._wrapped_ws._serialize(
4
32
send
160
0
14
49
scapy/contrib/automotive/scanner/executor.py
209,485
Add assert to GMLAN Scanner to enforce fast fail on to many open TestSockets Fix bugs in TestSocket Fix bugs in the AutomotiveScanner execution_time handling Simplify test code for UDS_Scanner and reuse ObjectPipes to avoid mass creation
scapy
17
Python
116
executor.py
def scan(self, timeout=None): # type: (Optional[int]) -> None kill_time = time.time() + (timeout or 0xffffffff) log_interactive.debug("[i] Set kill_time to %s" % time.ctime(kill_time)) while kill_time > time.time(): test_case_executed = False log_interactive.debug("[i] Scan paths %s", self.state_paths) for p, test_case in product( self.state_paths, self.configuration.test_cases): log_interactive.info("[i] Scan path %s", p) terminate = kill_time <= time.time() if terminate: log_interactive.debug( "[-] Execution time exceeded. Terminating scan!") break final_state = p[-1] if test_case.has_completed(final_state): log_interactive.debug("[+] State %s for %s completed", repr(final_state), test_case) continue try: if not self.enter_state_path(p): log_interactive.error( "[-] Error entering path %s", p) continue log_interactive.info( "[i] Execute %s for path %s", str(test_case), p) self.execute_test_case(test_case, kill_time) test_case_executed = True except (OSError, ValueError, Scapy_Exception) as e: log_interactive.critical("[-] Exception: %s", e) if self.configuration.debug: raise e if isinstance(e, OSError): log_interactive.critical( "[-] OSError occurred, closing socket") self.socket.close() if cast(SuperSocket, self.socket).closed and \ self.reconnect_handler is None: log_interactive.critical( "Socket went down. Need to leave scan") raise e finally: self.cleanup_state() if not test_case_executed: log_interactive.info( "[i] Execute failure or scan completed. Exit scan!") break self.cleanup_state() self.reset_target()
e6eaa484b8fa3d10051e82f5a784fe8dedbd5592
280
https://github.com/secdev/scapy.git
1,044
def scan(self, timeout=None): # type: (Optional[int]) -> None kill_time = time.time() + (timeout or 0xffffffff) log_interactive.debug("[i] Set kill_time to %s" % time.ctime(kill_time)) while kill_time > time.time(): test_case_executed = False log_interactive.debug("[i] Scan paths %s", self.state_paths) for p, test_case in product( self.state_paths, self.configuration.test_cases): log_interactive.info("[i] Scan path %s", p) terminate = kill_time <= time.time() if terminate: log_interactive.debug( "[-] Execution time exceeded. Terminating scan!") break final_state = p[-1] if test_case.has_completed(final_state): log_interactive.debug("[+] State %s for %s completed", repr(final_state), test_case) continue try: if not self.enter_state_path(p): log_interactive.error( "[-] Error entering path %s", p) continue log_interactive.info( "[i] Execute %s for path %s", str(test_case), p) self.execute_test_case(test_case, kill_time) test_case_executed = True except (OSError, ValueError, Scapy_Exception) as e: log_interactive.critical("[-] Exception: %s", e)
38
468
scan
33
0
1
12
tests/api/test_auth.py
246,682
Replace assertEquals and friends with non-deprecated versions. (#12092)
synapse
11
Python
26
test_auth.py
def test_get_user_by_req_appservice_valid_token(self):
    app_service = Mock(
        token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None
    )
    self.store.get_app_service_by_token = Mock(return_value=app_service)
    self.store.get_user_by_access_token = simple_async_mock(None)

    request = Mock(args={})
    request.getClientIP.return_value = "127.0.0.1"
    request.args[b"access_token"] = [self.test_token]
    request.requestHeaders.getRawHeaders = mock_getRawHeaders()
    requester = self.get_success(self.auth.get_user_by_req(request))
    self.assertEqual(requester.user.to_string(), self.test_user)
02d708568b476f2f7716000b35c0adfa4cbd31b3
118
https://github.com/matrix-org/synapse.git
113
def test_get_user_by_req_appservice_valid_token(self): app_service = Mock( token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None ) se
28
191
test_get_user_by_req_appservice_valid_token
42
0
8
13
python/ray/air/config.py
141,215
Enable streaming ingest in AIR (#25428) This adds the following options to DatasetConfig, which can be used to enable streaming ingest. ``` # Whether the dataset should be streamed into memory using pipelined reads. # When enabled, get_dataset_shard() returns DatasetPipeline instead of Dataset. # The amount of memory to use is controlled by `stream_window_size`. # False by default for all datasets. use_stream_api: Optional[bool] = None # Configure the streaming window size in bytes. A typical value is something like # 20% of object store memory. If set to -1, then an infinite window size will be # used (similar to bulk ingest). This only has an effect if use_stream_api is set. # Set to 1.0 GiB by default. stream_window_size: Optional[float] = None # Whether to enable global shuffle (per pipeline window in streaming mode). Note # that this is an expensive all-to-all operation, and most likely you want to use # local shuffle instead. # False by default for all datasets. global_shuffle: Optional[bool] = None ```
ray
11
Python
27
config.py
def fill_defaults(self) -> "DatasetConfig":
    return DatasetConfig(
        fit=self.fit or False,
        split=self.split or False,
        required=self.required or False,
        use_stream_api=self.use_stream_api or False,
        stream_window_size=self.stream_window_size
        if self.stream_window_size is not None
        else 1024 * 1024 * 1024,
        global_shuffle=self.global_shuffle or False,
        transform=self.transform if self.transform is not None else True,
    )
78688a0903d01421b000eb37d11607571dd80dfa
86
https://github.com/ray-project/ray.git
162
def fill_defaults(self) -> "DatasetConfig": return DatasetConfig( fit=self.fit or False, split=self.split or False, required=self.required or False, use_stream_api=self.use_stream_api or False, stream_window_size=self.stream_window_size if self.stream_window_size is not None
10
126
fill_defaults
16
0
1
7
gamestonk_terminal/stocks/stocks_controller.py
281,655
Remember Contexts (#1187) * Refacotred classes * Handling for new instance desired * Added feature flag * Converted all menu calls
OpenBBTerminal
9
Python
13
stocks_controller.py
def call_sia(self, _):
    from gamestonk_terminal.stocks.sector_industry_analysis.sia_controller import (
        SectorIndustryAnalysisController,
    )

    self.queue = self.load_class(
        SectorIndustryAnalysisController, self.ticker, self.queue
    )
9e671aeba98dacc69ecbbfec1f087aca3b139ee7
39
https://github.com/OpenBB-finance/OpenBBTerminal.git
73
def call_sia(self, _): from game
11
57
call_sia
110
0
2
43
test/test_inputs.py
179,333
Format The Codebase - black formatting - isort formatting
gradio
15
Python
55
test_inputs.py
def test_in_interface(self): iface = gr.Interface(lambda x: x[::-1], "textbox", "textbox") self.assertEqual(iface.process(["Hello"])[0], ["olleH"]) iface = gr.Interface( lambda sentence: max([len(word) for word in sentence.split()]), gr.inputs.Textbox(), gr.outputs.Textbox(), interpretation="default", ) scores, alternative_outputs = iface.interpret( ["Return the length of the longest word in this sentence"] ) self.assertEqual( scores, [ [ ("Return", 0.0), (" ", 0), ("the", 0.0), (" ", 0), ("length", 0.0), (" ", 0), ("of", 0.0), (" ", 0), ("the", 0.0), (" ", 0), ("longest", 0.0), (" ", 0), ("word", 0.0), (" ", 0), ("in", 0.0), (" ", 0), ("this", 0.0), (" ", 0), ("sentence", 1.0), (" ", 0), ] ], ) self.assertEqual( alternative_outputs, [[["8"], ["8"], ["8"], ["8"], ["8"], ["8"], ["8"], ["8"], ["8"], ["7"]]], )
cc0cff893f9d7d472788adc2510c123967b384fe
308
https://github.com/gradio-app/gradio.git
699
def test_in_interface(self): iface = gr.Interface(lambda x: x[::-1], "textbox", "textbox") self.assertEqual(iface.process(["Hello"])[0], ["olleH"]) iface = gr.Interface( lambda sentence: max([len(word) for word in sentence.split()]), gr.inputs.Textbox(), gr.outputs.Textbox(), interpretation="default", ) scores, alternative_outputs = iface.interpret( ["Return the length of the longest word in this sentence"] ) self.assertEqual( scores, [ [ ("Return", 0.0), (" ", 0), ("the", 0.0), (" ", 0), ("length", 0.0), (" ", 0), ("of", 0.0), (" ", 0), ("the", 0.0), (" ", 0), ("longest", 0.0), (" ", 0), ("word", 0.0), (" ", 0), ("in", 0.0), (" ", 0), ("this", 0.0), (" ", 0), ("sentence", 1.0), (" ", 0), ] ], ) self.assertEqual( alternative_outputs, [[["8"], ["8"], ["8"], ["8"], ["8"], ["8"
20
459
test_in_interface
32
0
1
15
wagtail/documents/views/chooser.py
74,903
Reformat with black
wagtail
13
Python
32
chooser.py
def render_to_response(self):
    return render_modal_workflow(
        self.request,
        "wagtaildocs/chooser/chooser.html",
        None,
        self.get_context_data(),
        json_data={
            "step": "chooser",
            "error_label": _("Server Error"),
            "error_message": _(
                "Report this error to your website administrator with the following information:"
            ),
            "tag_autocomplete_url": reverse("wagtailadmin_tag_autocomplete"),
        },
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
53
https://github.com/wagtail/wagtail.git
205
def render_to_response(self): return render_modal_workflow( self.request, "wagtaildocs/chooser/chooser.html", None, self.get_context_data(), json_data={ "step": "chooser", "error_label": _("Server Error"), "error_messag
8
94
render_to_response
32
0
3
9
erpnext/setup/doctype/item_group/item_group.py
67,489
style: format code with black
erpnext
15
Python
29
item_group.py
def get_child_item_groups(item_group_name):
    item_group = frappe.get_cached_value("Item Group", item_group_name, ["lft", "rgt"], as_dict=1)

    child_item_groups = [
        d.name
        for d in frappe.get_all(
            "Item Group", filters={"lft": (">=", item_group.lft), "rgt": ("<=", item_group.rgt)}
        )
    ]

    return child_item_groups or {}
494bd9ef78313436f0424b918f200dab8fc7c20b
70
https://github.com/frappe/erpnext.git
23
def get_child_item_groups(item_group_name): item_group = frappe.get_cached_value("Item Group", item_group_name, ["lft", "rgt"], as_dict=1) child_item_groups = [ d.name for d in frappe.get_all( "Item Group", filters={"lft": (">=", item_group.lft), "rgt": ("<=", item_group.rgt)} ) ] return child_item_groups
13
116
get_child_item_groups
23
0
3
4
nni/nas/oneshot/pytorch/supermodule/base.py
113,432
One-shot sub state dict implementation (#5054)
nni
14
Python
21
base.py
def _save_module_to_state_dict(self, destination, prefix, keep_vars):
    for name, module in self._modules.items():
        if module is not None:
            sub_state_dict(module, destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
9e2a069d0f38da64d4c945b2c951fa64d19b9d94
51
https://github.com/microsoft/nni.git
63
def _save_module_to_state_dict(self, destination, prefix, keep_vars): for name, module in self._modules.items(): if module is not None: sub_state_dict(module, destination=destination, prefix=prefix + name + '.', keep_vars=keep_vars)
10
78
_save_module_to_state_dict
15
0
1
11
tests/providers/amazon/aws/sensors/test_redshift_cluster.py
43,727
Standardize AWS Redshift naming (#20374) * Standardize AWS Redshift naming
airflow
10
Python
15
test_redshift_cluster.py
def test_poke_cluster_not_found(self):
    self._create_cluster()
    op = RedshiftClusterSensor(
        task_id='test_cluster_sensor',
        poke_interval=1,
        timeout=5,
        aws_conn_id='aws_default',
        cluster_identifier='test_cluster_not_found',
        target_status='cluster_not_found',
    )
    assert op.poke({})
88ea1575079c0e94e1f62df38d6d592b8c827bbd
47
https://github.com/apache/airflow.git
108
def test_poke_cluster_not_found(self): self._create_cluster() op = RedshiftClusterSensor( task_id='test_cluster_sensor', poke_interval=1, timeout=5, aws_conn_id='aws_default',
12
78
test_poke_cluster_not_found
10
0
1
3
python3.10.4/Lib/distutils/tests/test_upload.py
223,352
add python 3.10.4 for windows
XX-Net
10
Python
9
test_upload.py
def _urlopen(self, url):
    self.last_open = FakeOpen(url, msg=self.next_msg, code=self.next_code)
    return self.last_open
8198943edd73a363c266633e1aa5b2a9e9c9f526
31
https://github.com/XX-net/XX-Net.git
23
def _urlopen(self, url): self.last_open = Fak
9
46
_urlopen
8
0
1
5
tests/acceptance/page_objects/issue_list.py
97,398
feat(ui): Remove issues from issue stream (#31910) * Revert "Revert "feat(ui): Remove issues from issue stream" (#31908)" This reverts commit 4bd69628c4ec1b5a4cd980a4fa8cd1291072d47f. * fix(acceptance): Fix delete_issues test (#31909) * add checks for issue-list-removal-action flag and check if realtimeActive to use feature * update tests Co-authored-by: Vu Luong <vuluongj20@gmail.com>
sentry
8
Python
8
issue_list.py
def delete_issues(self):
    self.browser.click('[aria-label="More issue actions"]')
    self.browser.wait_until('[data-test-id="delete"]')
    self.browser.click('[data-test-id="delete"]')
    self.browser.click('[data-test-id="confirm-button"]')
266dbc5a8172612679c6549a9ca240cfba9dab3d
37
https://github.com/getsentry/sentry.git
35
def delete_issues(self): self.browse
5
69
delete_issues
201
0
18
55
mindsdb/api/mysql/mysql_proxy/data_types/mysql_datum.py
117,328
ML handler supbrocess (#3377) * log -> logger dividing components: app initialize parse args set env.MINDSDB_CONFIG_PATH config requiers env.MINDSDB_CONFIG_PATH sets env.MINDSDB_DB_CON Config() - makes initialization log uses config initialize_log - makes initialization database uses env.MINDSDB_DB_CON have init() method file storage uses config * partial sync for model storage interfaces * ml handler in subprocess interface * fix delete model * fix: model with error in success status * fix: remove hf predictor * fix pg handler * MLHandlerPersistWrapper keeps wrapper process opened * predictor with error keeps 'success' status #3362 * lock for sending tasks to subprocess one by one * check target of predictor before run learn in subproccess * fix check target * fix: json_ai override and problem definition generation * fix None case * folder for ml handler tests * fix: add timeseries_settings key to learn_args * fixes in lw_handler * fix: del join_learn_process * tests for LW handler * finish unit test for LW * changes in tests: - set-project -> to base class - return of ml handler is dataframe - draft for project structure test * merge from staging * create_validation method to check learn params before send to subprocess fixes of HF fixed version of transformers in HF requirements Co-authored-by: Patricio Cerda Mardini <pcerdam@live.com>
mindsdb
14
Python
88
mysql_datum.py
def setFromBuff(self, buff): start = 0 if self.var_len == 'lenenc': start = 1 ln_enc = buff[0] if int(ln_enc) <= ONE_BYTE_ENC[0]: start = 0 end = 1 elif int(ln_enc) == TWO_BYTE_ENC[0]: end = 3 elif int(ln_enc) == THREE_BYTE_ENC[0]: end = 4 elif ln_enc: end = 9 num_str = buff[start:end] if end > 9: logger.error('Cant decode integer greater than 8 bytes') return buff[end - 1:] for j in range(8 - (end - start)): num_str += b'\0' if self.var_type == 'int': self.value = struct.unpack('i', num_str) return buff[end:] if self.var_type in ['byte', 'string']: length = struct.unpack('Q', num_str)[0] self.value = buff[end:(length + end)] return buff[(length + end):] if self.var_len == 'EOF': length = len(buff) self.var_len = str(length) self.value = buff return '' else: length = self.var_len if self.type == 'string<NUL>': for j, x in enumerate(buff): if int(x) == 0: length = j + 1 break length = int(length) if self.var_type in ['byte', 'string']: end = length self.value = buff[:end] else: # if its an integer end = length num_str = buff[:end] if end > 8: logger.error('cant decode integer greater than 8 bytes') return buff[end:] for j in range(8 - end): num_str += b'\0' self.value = struct.unpack('Q', num_str)[0] if str(self.var_len) == 'NUL': self.value = self.value[:-1] return buff[end:]
9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9
376
https://github.com/mindsdb/mindsdb.git
839
def setFromBuff(self, buff): start = 0 if self.var_len == 'lenenc': start = 1 ln_enc = buff[0] if int(ln_enc) <= ONE_BYTE_ENC[0]: start = 0 end = 1 elif int(ln_enc) == TWO_BYTE_ENC[0]: end = 3 elif int(ln_enc) == THREE_BYTE_ENC[0]: end = 4 elif ln_enc: end = 9 num_str = buff[start:end] if end > 9: logger.error('Cant decode integer greater than 8 bytes') return buff[end - 1:] for j in range(8 - (end - start)): num_str += b'\0' if self.var_type == 'int': self.value = struct.unpack('i', num_str) return buff[end:] if self.var_type in ['byte', 'string']: length = struct.unpack('Q', num_str)[0] self.value = buff[end:(length + end)] return buff[(length + end):] if self.var_len == 'EOF': length = len(buff) self.var_len = str(length) self.value = buff return '' else: length = self.var_len if self.type == 'string<NUL>': for j, x in enumerate(buff): if int(x) == 0: length = j + 1 break length = int(length) if self.var_type in ['byte', 'string']: end = length
26
618
setFromBuff
19
0
3
6
wagtail/api/v2/views.py
72,951
Reformat with black
wagtail
10
Python
18
views.py
def get_field_serializer_overrides(cls, model):
    return {
        field.name: field.serializer
        for field in cls.get_body_fields(model) + cls.get_meta_fields(model)
        if field.serializer is not None
    }
d10f15e55806c6944827d801cd9c2d53f5da4186
40
https://github.com/wagtail/wagtail.git
65
def get_field_serializer_overrides(cls, model): return { field.name: field.serializer for
8
60
get_field_serializer_overrides
174
0
4
20
rllib/algorithms/impala/impala.py
134,174
[RLlib] IMPALA: Move learner thread health-check into better place. (#29541)
ray
11
Python
122
impala.py
def training_step(self) -> ResultDict: # First, check, whether our learner thread is still healthy. if not self._learner_thread.is_alive(): raise RuntimeError("The learner thread died while training!") # Get references to sampled SampleBatches from our workers. unprocessed_sample_batches_refs = self.get_samples_from_workers() # Tag workers that actually produced ready sample batches this iteration. # Those workers will have to get updated at the end of the iteration. self.workers_that_need_updates |= unprocessed_sample_batches_refs.keys() # Send the collected batches (still object refs) to our aggregation workers. if self.config["num_aggregation_workers"] > 0: batches = self.process_experiences_tree_aggregation( unprocessed_sample_batches_refs ) # Resolve collected batches here on local process (using the mixin buffer). else: batches = self.process_experiences_directly(unprocessed_sample_batches_refs) # Increase sampling counters now that we have the actual SampleBatches on # the local process (and can measure their sizes). for batch in batches: self._counters[NUM_ENV_STEPS_SAMPLED] += batch.count self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Concatenate single batches into batches of size `train_batch_size`. self.concatenate_batches_and_pre_queue(batches) # Move train batches (of size `train_batch_size`) onto learner queue. self.place_processed_samples_on_learner_queue() # Extract most recent train results from learner thread. train_results = self.process_trained_results() # Sync worker weights. with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: self.update_workers_if_necessary() return train_results
305905f469b0c8c351670b276b54a0d3d77bc242
126
https://github.com/ray-project/ray.git
426
def training_step(self) -> ResultDict: # First, check, whether our learner thread is still healthy. if not self._learner_thread.is_alive(): raise RuntimeError("The learner thread died while training!") # Get references to sampled SampleBatches from our workers. unprocessed_sample_batches_refs = self.get_samples_from_workers() # Tag workers that actually produced ready sample batches this iteration. # Those workers will have to get updated at the end of the iteration. self.workers_that_need_updates |= unprocessed_sample_batches_refs.keys() # Send the collected batches (still object refs) to our aggregation workers. if self.config["num_aggregation_workers"] > 0: batches = self.process_experiences_tree_aggregation( unprocessed_sample_batches_refs ) # Re
27
223
training_step
36
0
1
13
zerver/tests/test_custom_profile_data.py
84,347
custom_profile: Apply ProfileDataElementUpdateDict. We explicitly annotate variables or parameters with `ProfileDataElementUpdateDict` as necessary. Signed-off-by: Zixuan James Li <p359101898@gmail.com>
zulip
10
Python
32
test_custom_profile_data.py
def test_delete_internals(self) -> None:
    user_profile = self.example_user("iago")
    realm = user_profile.realm
    field = CustomProfileField.objects.get(name="Phone number", realm=realm)
    data: List[ProfileDataElementUpdateDict] = [
        {"id": field.id, "value": "123456"},
    ]
    do_update_user_custom_profile_data_if_changed(user_profile, data)

    self.assertTrue(self.custom_field_exists_in_realm(field.id))
    self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count)

    do_remove_realm_custom_profile_field(realm, field)

    self.assertFalse(self.custom_field_exists_in_realm(field.id))
    self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count - 1)
52be020d0cbc5b8042ea89d7653c3b916badd433
129
https://github.com/zulip/zulip.git
123
def test_delete_internals(self) -> None: user_profile = self.example_user("iago") realm = user_profile.realm field = CustomProfileField.objects.get(name="Phone number", realm=realm)
23
210
test_delete_internals
16
0
1
9
tests/components/media_player/test_async_helpers.py
295,432
Add EntityFeature enum to Media Player (#69119)
core
14
Python
12
test_async_helpers.py
def supported_features(self):
    return (
        mp.const.MediaPlayerEntityFeature.VOLUME_SET
        | mp.const.MediaPlayerEntityFeature.VOLUME_STEP
        | mp.const.MediaPlayerEntityFeature.PLAY
        | mp.const.MediaPlayerEntityFeature.PAUSE
        | mp.const.MediaPlayerEntityFeature.TURN_OFF
        | mp.const.MediaPlayerEntityFeature.TURN_ON
    )
17403f930f625dd70bbd8ab44565bbc467db886a
56
https://github.com/home-assistant/core.git
103
def supported_features(self): return ( mp.const.MediaPlayerEntityFeature.VOLUME_SET | mp.const.MediaPlayerEntityFeature.VOLUME_STEP | mp.const.MediaPlayerEntityFeatu
11
88
supported_features
59
0
8
24
homeassistant/components/device_tracker/legacy.py
318,060
Add StrEnum for device_tracker `SourceType` (#75892) Add StrEnum for device_tracker SourceType
core
13
Python
38
legacy.py
async def async_update(self) -> None:
    if not self.last_seen:
        return
    if self.location_name:
        self._state = self.location_name
    elif self.gps is not None and self.source_type == SourceType.GPS:
        zone_state = zone.async_active_zone(
            self.hass, self.gps[0], self.gps[1], self.gps_accuracy
        )
        if zone_state is None:
            self._state = STATE_NOT_HOME
        elif zone_state.entity_id == zone.ENTITY_ID_HOME:
            self._state = STATE_HOME
        else:
            self._state = zone_state.name
    elif self.stale():
        self.mark_stale()
    else:
        self._state = STATE_HOME
        self.last_update_home = True
2b1e1365fdb3bfe72feb515fcf2e02331caa4088
128
https://github.com/home-assistant/core.git
271
async def async_update(self) -> None: if not self.last_seen: return if self.location_name: self._state = self.location_name elif self.gps is not None and self.source_type == SourceType.GPS: zone_state = zone.async_active_zone( self.hass, self.gps[0], self.gps[1], self.gps_accuracy ) if zone_state is None: self._state = STATE_NOT_HOME elif zone_state.entity_id == zone.ENTITY_ID_HOME: self._state = STATE_HOME else: self._state = zone
22
205
async_update
80
0
6
27
projects/image_chat/interactive.py
195,518
[image_chat] remove decode methods for handling HTTP requests, add torchvision requirements (#4867) * [image_chat] Support both bytes and string * update cache key * Update config.yml Co-authored-by: Kurt Shuster <kshuster@meta.com>
ParlAI
16
Python
43
interactive.py
def interactive_running(self, data): reply = {} if type(data["personality"][0]) is bytes: reply["text"] = data["personality"][0].decode("utf-8") else: reply["text"] = data["personality"][0] if type(data["text"][0]) is bytes: text = data["text"][0].decode("utf-8") else: text = data["text"][0] if text: reply["text"] = "\n".join(SHARED["dialog_history"] + [text, reply["text"]]) SHARED["dialog_history"].append(text) if SHARED["image_feats"] is None: if type(data["image"][0]) is bytes: img_data = data["image"][0].decode("utf-8") _, encoded = img_data.split(",", 1) encoded = encoded[2:-1] else: img_data = data["image"][0] _, encoded = img_data.split(",", 1) image = Image.open(io.BytesIO(b64decode(encoded))).convert("RGB") SHARED["image_feats"] = SHARED["image_loader"].extract(image) reply["image"] = SHARED["image_feats"] SHARED["agent"].observe(reply) model_res = SHARED["agent"].act() return model_res
0f15b897302e7a4a257e6b2edf3c1b811c95081c
276
https://github.com/facebookresearch/ParlAI.git
349
def interactive_running(self, data): reply = {} if type(data["personality"][0]) is bytes: reply["text"] = data["personality"][0].decode("utf-8") else: reply["text"] = data["personality"][0] if type(data["text"][0]) is bytes: text = data["text"][0].decode("utf-8") else: text = data["text"][0] if text: reply["text"]
26
481
interactive_running
96
0
4
27
freqtrade/freqai/data_kitchen.py
149,834
keep model accessible in memory to avoid loading objects from disk during live/dry
freqtrade
17
Python
65
data_kitchen.py
def load_data(self) -> Any:
    with open(self.model_path / str(self.model_filename + "_metadata.json"), "r") as fp:
        self.data = json.load(fp)
        self.training_features_list = self.data["training_features_list"]

    self.data_dictionary["train_features"] = pd.read_pickle(
        self.model_path / str(self.model_filename + "_trained_df.pkl")
    )

    self.model_path = Path(self.data["model_path"])
    self.model_filename = self.data["model_filename"]

    # try to access model in memory instead of loading object from disk to save time
    if self.live and self.model_filename in self.model_dictionary:
        model = self.model_dictionary[self.model_filename]
    else:
        model = load(self.model_path / str(self.model_filename + "_model.joblib"))

    assert model, (
        f"Unable to load model, ensure model exists at "
        f"{self.model_path} "
    )

    if self.config["freqai"]["feature_parameters"]["principal_component_analysis"]:
        self.pca = pk.load(
            open(self.model_path / str(self.model_filename + "_pca_object.pkl"), "rb")
        )

    return model
1fae6c9ef794a014c3e8f1a692bda8b66b46b960
189
https://github.com/freqtrade/freqtrade.git
337
def load_data(self) -> Any: with open(self.model_path / str(self.model_filename + "_metadata.json"), "r") as fp: self.data = json.load(fp) self.training_features_list = self.data["training_features_list"] self.data_dictionary["train_features"] = pd.read_pickle( self.model_path / str(self.model_filename + "_trained_df.pkl") ) self.model_path = Path(self.data["model_path"]) self.model_filename = self.data["model_filename"] # try to access model in memory instead of loading object from disk to save time if self.live and
22
331
load_data
21
0
1
8
wagtail/core/models/__init__.py
73,791
Reformat with black
wagtail
9
Python
20
__init__.py
def __str__(self):
    return "Group %d ('%s') has permission '%s' on page %d ('%s')" % (
        self.group.id,
        self.group,
        self.permission_type,
        self.page.id,
        self.page,
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
34
https://github.com/wagtail/wagtail.git
89
def __str__(self): return "Group %d ('%s') has permission '%s' on page %d ('%s')" % ( self.group.id, self.group, self.permission_type, self.page.id, self.page, )
6
50
__str__
20
0
1
12
pandas/io/json/_json.py
167,427
TYP: Return annotations for io/{formats,json} (#47516) * TYP: Return annotations for io/{formats,json} * flake8 * explicitly check whether width is None
pandas
9
Python
20
_json.py
def write(self) -> str:
    iso_dates = self.date_format == "iso"
    return dumps(
        self.obj_to_write,
        orient=self.orient,
        double_precision=self.double_precision,
        ensure_ascii=self.ensure_ascii,
        date_unit=self.date_unit,
        iso_dates=iso_dates,
        default_handler=self.default_handler,
        indent=self.indent,
    )
734db4f1fde2566a02b3c7ff661a479b0a71633c
62
https://github.com/pandas-dev/pandas.git
128
def write(self) -> str: iso_dates = self.date_format == "iso" return dumps( self.obj_to_write, orient=self.orient, double_precision=self.double_precision, ensure_ascii=self.ensure_a
13
90
write
116
0
1
29
wagtail/admin/tests/pages/test_edit_page.py
77,412
Replace `PageRevision` with generic `Revision` model (#8441)
wagtail
15
Python
85
test_edit_page.py
def test_edit_post_publish_scheduled_unpublished_page(self): # Unpublish the page self.child_page.live = False self.child_page.save() go_live_at = timezone.now() + datetime.timedelta(days=1) expire_at = timezone.now() + datetime.timedelta(days=2) post_data = { "title": "I've been edited!", "content": "Some content", "slug": "hello-world", "action-publish": "Publish", "go_live_at": submittable_timestamp(go_live_at), "expire_at": submittable_timestamp(expire_at), } response = self.client.post( reverse("wagtailadmin_pages:edit", args=(self.child_page.id,)), post_data ) # Should be redirected to explorer page self.assertEqual(response.status_code, 302) child_page_new = SimplePage.objects.get(id=self.child_page.id) # The page should not be live anymore self.assertFalse(child_page_new.live) # Instead a revision with approved_go_live_at should now exist self.assertTrue( Revision.page_revisions.filter(object_id=child_page_new.id) .exclude(approved_go_live_at__isnull=True) .exists() ) # The page SHOULD have the "has_unpublished_changes" flag set, # because the changes are not visible as a live page yet self.assertTrue( child_page_new.has_unpublished_changes, "A page scheduled for future publishing should have has_unpublished_changes=True", ) self.assertEqual(child_page_new.status_string, "scheduled")
52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c
194
https://github.com/wagtail/wagtail.git
401
def test_edit_post_publish_scheduled_unpublished_page(self): # Unpublish the page self.child_page.live = False self.child_page.save() go_
37
330
test_edit_post_publish_scheduled_unpublished_page
55
0
1
41
tests/sentry/api/serializers/test_organization.py
89,252
feat(discover-homepage): Default feature flag to True (#41969) This feature has been GA'd for a while now. Defaulting the feature flag to True for self hosted releases
sentry
9
Python
51
test_organization.py
def test_simple(self): user = self.create_user() organization = self.create_organization(owner=user) result = serialize(organization, user) assert result["id"] == str(organization.id) assert result["features"] == { "advanced-search", "change-alerts", "crash-rate-alerts", "custom-event-title", "custom-symbol-sources", "data-forwarding", "dashboards-basic", "dashboards-edit", "dashboards-top-level-filter", "discover-basic", "discover-query", "discover-query-builder-as-landing-page", "event-attachments", "integrations-alert-rule", "integrations-chat-unfurl", "integrations-deployment", "integrations-event-hooks", "integrations-incident-management", "integrations-issue-basic", "integrations-issue-sync", "integrations-ticket-rules", "invite-members", "invite-members-rate-limits", "minute-resolution-sessions", "open-membership", "relay", "shared-issues", "sso-basic", "sso-saml2", "symbol-sources", "team-insights", "discover-frontend-use-events-endpoint", "performance-frontend-use-events-endpoint", "performance-issues-ingest", }
d07b2f35a620fd8cec5f17a7a1605024b8b5afff
118
https://github.com/getsentry/sentry.git
470
def test_simple(self): user = self.create_user() organization = self.create_organization(owner=user) result = serialize(organization, user) assert result["id"] == str(organization.id) assert result["features"] == { "advanced-search", "change-alerts", "crash-rate-alerts", "custom-event-title", "custom-symbol-sources", "data-forwarding", "dashboards-basic", "dashboards-edit", "dashboards-top-level-filter", "discover-basic", "discover-query", "discover-query-builder-as-landing-page", "event-attachments", "integrations-alert-rule", "integrations-chat-unfurl", "integrations-deployment", "integrations-event-hooks", "integrations-incident-management", "integrations-issue-basic", "integrations-issue-sync", "integrations-ticket-rules", "invite-m
11
219
test_simple
27
0
2
16
nuitka/freezer/IncludedEntryPoints.py
178,389
UI: In case of PermissionError, allow uses to retry * Esp. on Windows it happens a lot that running programs cannot be updated by Nuitka, this avoids the cryptic error somewhere ranomly.
Nuitka
14
Python
23
IncludedEntryPoints.py
def addShlibEntryPoint(module): target_filename = os.path.join( getStandaloneDirectoryPath(), module.getFullName().asPath() ) target_filename += getSharedLibrarySuffix(preferred=False) target_dir = os.path.dirname(target_filename) if not os.path.isdir(target_dir): makePath(target_dir) copyFile(module.getFilename(), target_filename) standalone_entry_points.append( makeExtensionModuleEntryPoint( source_path=module.getFilename(), dest_path=target_filename, package_name=module.getFullName().getPackageName(), ) )
2c20b90946a8aa5ad4ee39ad365ff1b83f182770
102
https://github.com/Nuitka/Nuitka.git
111
def addShlibEntryPoint(module): target_filename = os.path.join( getStandaloneDirectoryPath(), module.getFullName().asPath()
24
165
addShlibEntryPoint
28
0
2
12
mitmproxy/utils/debug.py
253,223
more mypy (#5724) Co-authored-by: requires.io <support@requires.io> Co-authored-by: Maximilian Hils <git@maximilianhils.com>
mitmproxy
11
Python
24
debug.py
def dump_system_info(): mitmproxy_version = version.get_dev_version() openssl_version = SSL.SSLeay_version(SSL.SSLEAY_VERSION) if isinstance(openssl_version, bytes): openssl_version = openssl_version.decode() data = [ f"Mitmproxy: {mitmproxy_version}", f"Python: {platform.python_version()}", f"OpenSSL: {openssl_version}", f"Platform: {platform.platform()}", ] return "\n".join(data)
0bbb0215c16bbeaf3b048c023ed0ee55f57b0de8
59
https://github.com/mitmproxy/mitmproxy.git
86
def dump_system_info(): mitmproxy_version = version.get_dev_version() openssl_version = SSL.SSLeay_version(SSL.SSLEAY_VERSION) if isinstance(openssl_version, bytes): openssl_version = openssl_version.decode() data = [ f"Mitmproxy: {mitmproxy_version}", f"Python: {platform.python_version()}", f"OpenSSL: {openssl_version}", f"Platform: {platform.platform()}", ] retu
15
127
dump_system_info
280
1
11
67
erpnext/selling/doctype/sales_order/sales_order.py
69,262
fix: use default supplier currency if default supplier is enabled
erpnext
19
Python
186
sales_order.py
def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): from erpnext.setup.utils import get_exchange_rate if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.currency = frappe.db.get_value( "Supplier", filters={"name": supplier}, fieldname=["default_currency"] ) company_currency = frappe.db.get_value( "Company", filters={"name": target.company}, fieldname=["default_currency"] ) target.conversion_rate = get_exchange_rate(target.currency, company_currency, args="for_buying") target.apply_discount_on = "" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = "" target.shipping_rule = "" default_price_list = frappe.get_value("Supplier", supplier, "default_price_list") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.contact_display target.customer_contact_mobile = source.contact_mobile target.customer_contact_email = source.contact_email else: target.customer = "" target.customer_name = "" target.run_method("set_missing_values") target.run_method("calculate_taxes_and_totals") def update_item(source, target, source_parent): target.schedule_date = source.delivery_date target.qty = flt(source.qty) - (flt(source.ordered_qty) / flt(source.conversion_factor)) target.stock_qty = flt(source.stock_qty) - flt(source.ordered_qty) target.project = source_parent.project suppliers = [item.get("supplier") for item in selected_items if item.get("supplier")] suppliers = list(dict.fromkeys(suppliers)) # remove duplicates while preserving order items_to_map = [item.get("item_code") for item in selected_items if item.get("item_code")] items_to_map = list(set(items_to_map)) if not suppliers: frappe.throw( _("Please set a Supplier against the Items to be considered in the Purchase Order.") ) purchase_orders = [] for supplier in suppliers: doc = get_mapped_doc( "Sales Order", source_name, { "Sales Order": { "doctype": "Purchase Order", "field_no_map": [ "address_display", "contact_display", "contact_mobile", "contact_email", "contact_person", "taxes_and_charges", "shipping_address", "terms", ], "validation": {"docstatus": ["=", 1]}, }, "Sales Order Item": { "doctype": "Purchase Order Item", "field_map": [ ["name", "sales_order_item"], ["parent", "sales_order"], ["stock_uom", "stock_uom"], ["uom", "uom"], ["conversion_factor", "conversion_factor"], ["delivery_date", "schedule_date"], ], "field_no_map": [ "rate", "price_list_rate", "item_tax_template", "discount_percentage", "discount_amount", "pricing_rules", ], "postprocess": update_item, "condition": lambda doc: doc.ordered_qty < doc.stock_qty and doc.supplier == supplier and doc.item_code in items_to_map, }, }, target_doc, set_missing_values, ) doc.insert() frappe.db.commit() purchase_orders.append(doc) return purchase_orders @frappe.whitelist()
77fdc37cb75d465a7a5297fc89bba31b8193ebeb
@frappe.whitelist()
305
https://github.com/frappe/erpnext.git
177
def make_purchase_order_for_default_supplier(source_name, selected_items=None, target_doc=None): from erpnext.setup.utils import get_exchange_rate if not selected_items: return if isinstance(selected_items, str): selected_items = json.loads(selected_items) def set_missing_values(source, target): target.supplier = supplier target.currency = frappe.db.get_value( "Supplier", filters={"name": supplier}, fieldname=["default_currency"] ) company_currency = frappe.db.get_value( "Company", filters={"name": target.company}, fieldname=["default_currency"] ) target.conversion_rate = get_exchange_rate(target.currency, company_currency, args="for_buying") target.apply_discount_on = "" target.additional_discount_percentage = 0.0 target.discount_amount = 0.0 target.inter_company_order_reference = "" target.shipping_rule = "" default_price_list = frappe.get_value("Supplier", supplier, "default_price_list") if default_price_list: target.buying_price_list = default_price_list if any(item.delivered_by_supplier == 1 for item in source.items): if source.shipping_address_name: target.shipping_address = source.shipping_address_name target.shipping_address_display = source.shipping_address else: target.shipping_address = source.customer_address target.shipping_address_display = source.address_display target.customer_contact_person = source.contact_person target.customer_contact_display = source.
80
1,020
make_purchase_order_for_default_supplier
138
0
9
51
erpnext/accounts/report/tds_payable_monthly/tds_payable_monthly.py
69,635
fix: Tax withholding net total for PI in reports
erpnext
13
Python
95
tds_payable_monthly.py
def get_tds_docs(filters): tds_documents = [] purchase_invoices = [] payment_entries = [] journal_entries = [] tax_category_map = frappe._dict() invoice_net_total_map = frappe._dict() or_filters = frappe._dict() journal_entry_party_map = frappe._dict() bank_accounts = frappe.get_all("Account", {"is_group": 0, "account_type": "Bank"}, pluck="name") tds_accounts = frappe.get_all( "Tax Withholding Account", {"company": filters.get("company")}, pluck="account" ) query_filters = { "account": ("in", tds_accounts), "posting_date": ("between", [filters.get("from_date"), filters.get("to_date")]), "is_cancelled": 0, "against": ("not in", bank_accounts), } if filters.get("supplier"): del query_filters["account"] del query_filters["against"] or_filters = {"against": filters.get("supplier"), "party": filters.get("supplier")} tds_docs = frappe.get_all( "GL Entry", filters=query_filters, or_filters=or_filters, fields=["voucher_no", "voucher_type", "against", "party"], ) for d in tds_docs: if d.voucher_type == "Purchase Invoice": purchase_invoices.append(d.voucher_no) elif d.voucher_type == "Payment Entry": payment_entries.append(d.voucher_no) elif d.voucher_type == "Journal Entry": journal_entries.append(d.voucher_no) tds_documents.append(d.voucher_no) if purchase_invoices: get_doc_info(purchase_invoices, "Purchase Invoice", tax_category_map, invoice_net_total_map) if payment_entries: get_doc_info(payment_entries, "Payment Entry", tax_category_map) if journal_entries: journal_entry_party_map = get_journal_entry_party_map(journal_entries) get_doc_info(journal_entries, "Journal Entry", tax_category_map) return ( tds_documents, tds_accounts, tax_category_map, journal_entry_party_map, invoice_net_total_map, )
3eb1ed19a19a0e26e9814d70267530769bf8b274
320
https://github.com/frappe/erpnext.git
87
def get_tds_docs(filters): tds_documents = [] purchase_invoices = []
26
549
get_tds_docs
11
0
1
14
saleor/graphql/order/tests/test_order_invoices.py
26,349
Migrate order id from int to UUID (#9324) * Add migration to change order id from int to UUID (#9281) * Change order token to uuid * Migrate order id to uuid * Fix failing tests * Apply code review suggestions * Fix payment migration dependencies * Fix typo in order migration name * Handle old order ids for order queries * Hanlde old order ids for order mutations * Add order relation to GiftCardEvent model * Deprecate order token related queries and fields (#9295) * Deprecate order.token field * Update description of orderByToken query * Update prepare_order_search_document_value method * Update changelog * Update schema file
saleor
11
Python
10
test_order_invoices.py
def test_order_query_invoices_customer_user_by_token(api_client, fulfilled_order): query = response = api_client.post_graphql(query, {"token": fulfilled_order.id}) assert_no_permission(response)
41b87559118f560c223f83d405efe9b406701d17
30
https://github.com/saleor/saleor.git
20
def test_order_query_invoices_customer_user_by_token(api_client, fulfilled_order): query = response = api_client
8
51
test_order_query_invoices_customer_user_by_token
20
0
1
9
lib/training/preview_tk.py
101,989
GUI - Preview updates - Training preview. Embed preview pop-out window - Bugfix - convert/extract previews
faceswap
13
Python
19
preview_tk.py
def _add_save_button(self) -> None: logger.debug("Adding save button") button = tk.Button(self._frame, text="Save", cursor="hand2", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug("Added save button: '%s'", button)
2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650
63
https://github.com/deepfakes/faceswap.git
133
def _add_save_button(self) -> None: logger.debug("Adding save button") button = tk.Button(self._frame, text="Save", cursor="hand2", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug("Added save button: '%s'", button)
16
107
_add_save_button
49
0
3
15
haystack/modeling/model/prediction_head.py
256,244
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
haystack
16
Python
40
prediction_head.py
def resize_input(self, input_dim): if "feed_forward" not in dir(self): return else: old_dims = self.feed_forward.layer_dims if input_dim == old_dims[0]: return new_dims = [input_dim] + old_dims[1:] logger.info( f"Resizing input dimensions of {type(self).__name__} ({self.task_name}) " f"from {old_dims} to {new_dims} to match language model" ) self.feed_forward = FeedForwardBlock(new_dims) self.layer_dims[0] = input_dim self.feed_forward.layer_dims[0] = input_dim
a59bca366174d9c692fa19750c24d65f47660ef7
82
https://github.com/deepset-ai/haystack.git
214
def resize_input(self, input_dim): if "feed_forward" not in dir(self): return else: old_dims = self.feed_forward.layer_dims if input_dim == old_dims[0]: return new_dims = [input_dim] + old_dims[1:] logger.info( f"Resizing input dimensions of {type(self).__name__} ({self.task_name}) "
14
162
resize_input
9
0
2
4
homeassistant/components/apple_tv/media_player.py
304,127
Improve entity type hints [a] (#76986)
core
12
Python
9
media_player.py
async def async_media_stop(self) -> None: if self.atv: await self.atv.remote_control.stop()
65eb1584f765dcc2ec502bd8a9fa8d2f23d47cfd
23
https://github.com/home-assistant/core.git
34
async def async_media_stop(self) -> None: if self.atv: await self.atv.remote_control.stop()
5
42
async_media_stop
10
0
1
4
wagtail/admin/tests/api/test_pages.py
71,349
Reformat with black
wagtail
11
Python
10
test_pages.py
def get_response(self, page_id, **params): return self.client.get( reverse("wagtailadmin_api:pages:detail", args=(page_id,)), params )
d10f15e55806c6944827d801cd9c2d53f5da4186
31
https://github.com/wagtail/wagtail.git
34
def get_response(self, page_id, **params): return self.client.get( reverse("wagtailadmin_a
8
47
get_response
9
0
1
3
test/mitmproxy/contentviews/test_http3.py
252,741
add HTTP/3 content view
mitmproxy
9
Python
9
test_http3.py
def test_render_priority(): v = http3.ViewHttp3() assert not v.render_priority(b"random stuff")
f6ac5006982fd18dfe9f9a67c3534300c7ba8192
20
https://github.com/mitmproxy/mitmproxy.git
14
def test_render_priority(): v = http3.ViewHttp3() assert not v.render_priority(b"rando
5
35
test_render_priority
27
0
1
34
tests/helpers/test_event.py
300,624
Fail template functions when no default specified (#71687)
core
9
Python
21
test_event.py
async def test_track_template_result_none(hass): specific_runs = [] wildcard_runs = [] wildercard_runs = [] template_condition = Template("{{state_attr('sensor.test', 'battery')}}", hass) template_condition_var = Template( "{{(state_attr('sensor.test', 'battery')|int(default=0)) + test }}", hass )
4885331509eeffe50f42d76b234996467b06170f
205
https://github.com/home-assistant/core.git
55
async def test_track_template_result_none(hass): specific_runs = [] wildcard_runs = [] wildercard_runs = [] template_condition =
8
63
test_track_template_result_none
27
0
5
7
networkx/algorithms/clique.py
176,268
Fix functions appearing in variables `__all__` but not in docs for NX2.7 (#5289) * Adjust functions appearing in `__all__` but not in docs * clean up coloring: merge two modules make interchange private * fix duplicate name. Probably should be changed * fix "see also" doc of recursive_simple_cycles * Rm internal uses of deprecated . * Fixup warnings filters regex. * clean up a bit more, make Node & AdjList private classes Co-authored-by: Ross Barnowski <rossbar@berkeley.edu> Co-authored-by: Mridul Seth <seth.mridul@gmail.com>
networkx
11
Python
22
clique.py
def find_cliques_recursive(G): if len(G) == 0: return iter([]) adj = {u: {v for v in G[u] if v != u} for u in G} Q = []
17fa9942568bfca34d4a68f8d93c538014f69389
63
https://github.com/networkx/networkx.git
46
def find_cliques_recursive(G): if len(G) == 0: return iter([]) adj = {u: {v for v in G[u] if v != u}
8
77
find_cliques_recursive
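A small usage sketch for the networkx record above; the graph and the expected output are illustrative, and it assumes a networkx release where find_cliques_recursive is part of the public API:

    import networkx as nx

    # Two triangles sharing the edge (2, 3): the maximal cliques are {1, 2, 3} and {2, 3, 4}.
    G = nx.Graph([(1, 2), (1, 3), (2, 3), (2, 4), (3, 4)])

    cliques = sorted(sorted(c) for c in nx.find_cliques_recursive(G))
    print(cliques)  # [[1, 2, 3], [2, 3, 4]]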
11
0
2
5
synapse/server.py
249,846
Add a type hint for `get_device_handler()` and fix incorrect types. (#14055) This was the last untyped handler from the HomeServer object. Since it was being treated as Any (and thus unchecked) it was being used incorrectly in a few places.
synapse
10
Python
10
server.py
def get_device_handler(self) -> DeviceWorkerHandler: if self.config.worker.worker_app: return DeviceWorkerHandler(self) else: return DeviceHandler(self)
6d47b7e32589e816eb766446cc1ff19ea73fc7c1
28
https://github.com/matrix-org/synapse.git
46
def get_device_handler(self) -> DeviceWorkerHandler: if self.config.worker.worker_app: return DeviceWorkerHandler(self) else: return DeviceHandler(self)
7
46
get_device_handler
78
0
1
31
tests/utils/test_py27hash_fix.py
213,106
fix: Py27hash fix (#2182) * Add third party py27hash code * Add Py27UniStr and unit tests * Add py27hash_fix utils and tests * Add to_py27_compatible_template and tests * Apply py27hash fix to wherever it is needed * Apply py27hash fix, all tests pass except api_with_any_method_in_swagger * apply py27hash fix in openapi + run black * remove py27 testing * remove other py27 references * black fixes * fixes/typos * remove py27 from tox.ini * refactoring * third party notice * black * Fix py27hash fix to deal with null events * Fix Py27UniStr repr for unicode literals * black reformat * Update _template_has_api_resource to check data type more defensively * Apply py27Dict in _get_authorizers * Apply Py27Dict to authorizers and gateway responses which will go into swagger * Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class * Rename _convert_to_py27_dict to _convert_to_py27_type * Apply Py27UniStr to path param name * Handle HttpApi resource under to_py27_compatible_template * Fix InvalidDocumentException to not sort different exceptions * black reformat * Remove unnecessary test files Co-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>
serverless-application-model
21
Python
45
test_py27hash_fix.py
def test_resources_api(self): template = { "Resources": { "Api": {"Type": "AWS::Serverless::Api", "Properties": {"Name": "MyApi"}}, "HttpApi": {"Type": "AWS::Serverless::HttpApi"}, "Function": { "Type": "AWS::Serverless::Function", "Properties": { "FunctionName": {"Ref": "MyFunctionName"}, "Events": { "ApiEvent": {"Type": "Api", "Properties": {"Path": "/user", "Method": "GET"}}, "SecondApiEvent": {"Type": "Api", "Properties": {"Path": "/admin", "Method": "GET"}}, }, }, }, "StateMachine": { "Type": "AWS::Serverless::StateMachine", "Condition": "ShouldAddStateMachine", "Properties": { "Event": { "ApiEvent": {"Type": "Api", "Properties": {"Path": "/state-machine", "Method": "GET"}} } }, }, } } to_py27_compatible_template(template) self.assertIsInstance(template["Resources"], Py27Dict) self.assertNotIsInstance(template["Resources"]["Api"], Py27Dict) self.assertIsInstance(template["Resources"]["Api"]["Properties"], Py27Dict) self.assertIsInstance(template["Resources"]["Api"]["Properties"]["Name"], Py27UniStr)
a5db070f446b7cfebdaa6ad2e3dcf78f6105a272
211
https://github.com/aws/serverless-application-model.git
567
def test_resources_api(self): template = { "Resources": { "Api": {"Type": "AWS::Serverless::Api", "Properties": {"Name": "MyApi"}}, "HttpApi": {"Type": "AWS::Serverless::HttpApi"}, "Function": { "Type": "AWS::Serverless::Function", "Properties": { "FunctionName": {"Ref": "MyFunctionName"}, "Events": { "ApiEvent": {"Type": "Api", "Properties": {"Path": "/user", "Method": "GET"}}, "SecondApiEvent": {"Type": "Api", "Properties": {"Path": "/admin", "Method": "GET"}}, }, }, }, "StateMachine": { "Type": "AWS::Serverless::StateMachine", "Condition": "ShouldAddStateMachine", "Properties": { "Event": { "ApiEvent": {"Type": "Api", "Properties": {"Path": "/state-machine", "Method": "GET"}} } }, }, } } to_py27_compatible_template(template) self.assertIsInstance(template["Resources"], Py27Dict) self.as
8
415
test_resources_api
158
0
1
108
wagtail/contrib/forms/tests/test_forms.py
72,997
Reformat with black
wagtail
13
Python
79
test_forms.py
def setUp(self): # Create a form page home_page = Page.objects.get(url_path="/home/") self.form_page = home_page.add_child( instance=FormPage( title="Contact us", slug="contact-us", to_address="to@email.com", from_address="from@email.com", subject="The subject", ) ) FormField.objects.create( page=self.form_page, sort_order=1, label="Your name", field_type="singleline", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your message", field_type="multiline", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your birthday", field_type="date", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your birthtime :)", field_type="datetime", required=True, ) FormField.objects.create( page=self.form_page, sort_order=1, label="Your email", field_type="email", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your homepage", field_type="url", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your favourite number", field_type="number", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your favourite text editors", field_type="multiselect", required=True, choices="vim,nano,emacs", ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your favourite Python IDEs", field_type="dropdown", required=True, choices="PyCharm,vim,nano", ) FormField.objects.create( page=self.form_page, sort_order=2, label="Ὕour favourite Αython ÏÐÈ", # unicode example help_text="Choose one", field_type="radio", required=True, choices="PyCharm,vim,nano", ) FormField.objects.create( page=self.form_page, sort_order=3, label="Your choices", field_type="checkboxes", required=False, choices="foo,bar,baz", ) FormField.objects.create( page=self.form_page, sort_order=3, label="I agree to the Terms of Use", field_type="checkbox", required=True, ) FormField.objects.create( page=self.form_page, sort_order=1, label="A Hidden Field", field_type="hidden", required=False, ) # Create a form builder self.fb = FormBuilder(self.form_page.get_form_fields())
d10f15e55806c6944827d801cd9c2d53f5da4186
462
https://github.com/wagtail/wagtail.git
1,249
def setUp(self): # Create a form page home_page = Page.objects.get(url_path="/home/") self.form_page = home_page.add_child( instance=FormPage( title="Contact us", slug="contact-us", to_address="to@email.com", from_address="from@email.com", subject="The subject", ) ) FormField.objects.create( page=self.form_page, sort_order=1, label="Your name", field_type="singleline", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your message", field_type="multiline", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your birthday", field_type="date", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your birthtime :)", field_type="datetime", required=True, ) FormField.objects.create( page=self.form_page, sort_order=1, label="Your email", field_type="email", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your homepage", field_type="url", required=True, ) FormField.objects.create( page=self.form_page, sort_order=2, label="Your favourite number", field_type="number", require
28
721
setUp
28
0
2
9
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
63,399
upd; format
transferlearning
10
Python
23
pyparsing.py
def __setstate__(self, state): self.__toklist = state[0] self.__tokdict, par, inAccumNames, self.__name = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None: self.__parent = wkref(par) else: self.__parent = None
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
66
https://github.com/jindongwang/transferlearning.git
91
def __setstate__(self, state): self.__toklist = state[0] self.__tokdict, par, inAccumNames, self.__name = state[1] self.__accumNames = {} self.__accumNames.update(inAccumNames) if par is not None:
12
102
__setstate__
13
0
1
3
test/fx_acc/test_acc_tracer.py
102,559
[fx2trt] break down div (#71172) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/71172 Break down div to smaller ops to make those div ops look like all other elementwise ops. Use operator div ops instead of torch div if possible to avoid converting literal numbers to torch tensor (like in the following). ``` a = 1 b = 2 // `c` would be 0.5 c = a / b // `c` would be torch.tensor([0.5]) c = torch.div(a, b) ``` The problem we saw on shufflenet is that there's size op followed by a div op which results in int64 tensors in acc traced graph (acc tracer turns operator.div to acc_ops.div which uses torch.div). And trt splitter splits out the reshape op that consumes the div op because we have a rule to split out ops that takes in int64 tensors as inputs. Test Plan: Unit tests. Reviewed By: wushirong Differential Revision: D33482231 fbshipit-source-id: 508a171520c4e5b4188cfc5c30c1370ba9db1c55
pytorch
12
Python
10
test_acc_tracer.py
def test_trunc_div(self): self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode="trunc")) self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.floor_divide(x, 2))
54fe2741a1b16e36f714fa167f8f692886fd6640
49
https://github.com/pytorch/pytorch.git
26
def test_trunc_div(self): self._make_acc_op_function_test(acc_ops.trunc_div, lambda x: torch.div(x, 2, rounding_mode="tr
10
76
test_trunc_div
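The commit message above motivates splitting div into plain, truncating and flooring variants. A quick illustration of the three rounding behaviours the test exercises, assuming a PyTorch version where torch.div accepts rounding_mode and floor_divide performs floor rounding (roughly 1.8+):

    import torch

    x = torch.tensor([-3.0, -1.0, 1.0, 3.0])

    true_div = torch.div(x, 2)                           # plain division: -1.5, -0.5, 0.5, 1.5
    trunc_div = torch.div(x, 2, rounding_mode="trunc")   # rounds toward zero: -1, 0, 0, 1
    floor_div = torch.floor_divide(x, 2)                 # rounds toward -inf: -2, -1, 0, 1

    print(true_div, trunc_div, floor_div, sep="\n")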
82
0
8
32
homeassistant/components/intent_script/__init__.py
311,659
Add support to reprompt user (#65256)
core
14
Python
51
__init__.py
async def async_handle(self, intent_obj): speech = self.config.get(CONF_SPEECH) reprompt = self.config.get(CONF_REPROMPT) card = self.config.get(CONF_CARD) action = self.config.get(CONF_ACTION) is_async_action = self.config.get(CONF_ASYNC_ACTION) slots = {key: value["value"] for key, value in intent_obj.slots.items()} if action is not None: if is_async_action: intent_obj.hass.async_create_task( action.async_run(slots, intent_obj.context) ) else: await action.async_run(slots, intent_obj.context) response = intent_obj.create_response() if speech is not None: response.async_set_speech( speech[CONF_TEXT].async_render(slots, parse_result=False), speech[CONF_TYPE], ) if reprompt is not None and reprompt[CONF_TEXT].template: response.async_set_reprompt( reprompt[CONF_TEXT].async_render(slots, parse_result=False), reprompt[CONF_TYPE], ) if card is not None: response.async_set_card( card[CONF_TITLE].async_render(slots, parse_result=False), card[CONF_CONTENT].async_render(slots, parse_result=False), card[CONF_TYPE], ) return response
076faaa4a4f231eb5b7b7c72fa20c239c7cc391c
239
https://github.com/home-assistant/core.git
430
async def async_handle(self, intent_obj): speech = self.config.get(CONF_SPEECH) reprompt = self.config.get(CONF_REPROMPT) card = self.config.get(CONF_CARD) action = self.config.get(CONF_ACTION) is_async_action = self.config.get(CONF_ASYNC_ACTION) slots = {key: value["value"] for key, value in intent_obj.slots.items()} if action is not None: if is_async_action: intent_obj.hass.async_create_task( action.async_run(slots, intent_obj.context) ) else: await action.async_run(slots, intent_obj.context) response = intent_obj.create_response() if speech is not None: response.async_set_speech( speech[CONF_TEXT].async_render(slots, parse_result=False), speech[CONF_TYPE], ) if
35
365
async_handle
41
0
1
25
tests/storages/test_mixed_storage.py
191,024
Reformat to 80 chars and mypy.ini
thumbor
14
Python
29
test_mixed_storage.py
async def test_mixed_storage_put_includes_path(self): await self.storage.put("path1", "contents") await self.storage.put_crypto("path1") await self.storage.put_detector_data("path1", "detector") expect(self.storage.file_storage.storage["path1"]["path"]).to_equal( "path1" ) expect( self.storage.file_storage.storage["path1"]["contents"] ).to_equal("contents") contents = await self.storage.get("path1") expect(contents).to_equal("contents") expect(self.storage.file_storage.storage["path1"]).not_to_include( "crypto" ) expect(self.storage.crypto_storage.storage["path1"]).not_to_include( "contents" ) expect( self.storage.crypto_storage.storage["path1"]["crypto"] ).to_equal("security-key") contents = await self.storage.get_crypto("path1") expect(contents).to_equal("security-key") contents = await self.storage.get_detector_data("path1") expect(contents).to_equal("detector")
301124c5b377fa56b940d298900dbc5816dbc24e
195
https://github.com/thumbor/thumbor.git
228
async def test_mixed_storage_put_includes_path(self): await self.storage.put("path1", "contents") await self.storage.put_crypto("path1") await self.storage.put_detector_data("path1", "detector") exp
15
358
test_mixed_storage_put_includes_path
142
0
1
53
tests/components/media_player/test_browse_media.py
299,724
Skip signing URL that we know requires no auth (#71208)
core
11
Python
68
test_browse_media.py
async def test_process_play_media_url(hass, mock_sign_path): await async_process_ha_core_config( hass, {"internal_url": "http://example.local:8123"}, ) hass.config.api = Mock(use_ssl=False, port=8123, local_ip="192.168.123.123") # Not changing a url that is not a hass url assert ( async_process_play_media_url(hass, "https://not-hass.com/path") == "https://not-hass.com/path" ) # Not changing a url that is not http/https assert ( async_process_play_media_url(hass, "file:///tmp/test.mp3") == "file:///tmp/test.mp3" ) # Testing signing hass URLs assert ( async_process_play_media_url(hass, "/path") == "http://example.local:8123/path?authSig=bla" ) assert ( async_process_play_media_url(hass, "http://example.local:8123/path") == "http://example.local:8123/path?authSig=bla" ) assert ( async_process_play_media_url(hass, "http://192.168.123.123:8123/path") == "http://192.168.123.123:8123/path?authSig=bla" ) with pytest.raises(HomeAssistantError), patch( "homeassistant.components.media_player.browse_media.get_url", side_effect=NoURLAvailableError, ): async_process_play_media_url(hass, "/path") # Test skip signing URLs that have a query param assert ( async_process_play_media_url(hass, "/path?hello=world") == "http://example.local:8123/path?hello=world" ) assert ( async_process_play_media_url( hass, "http://192.168.123.123:8123/path?hello=world" ) == "http://192.168.123.123:8123/path?hello=world" ) # Test skip signing URLs if they are known to require no auth assert ( async_process_play_media_url(hass, "/api/tts_proxy/bla") == "http://example.local:8123/api/tts_proxy/bla" ) assert ( async_process_play_media_url( hass, "http://example.local:8123/api/tts_proxy/bla" ) == "http://example.local:8123/api/tts_proxy/bla" ) with pytest.raises(ValueError): async_process_play_media_url(hass, "hello")
0926470ef0f7bf7fd11da09a9d101ea17a4b4c00
177
https://github.com/home-assistant/core.git
436
async def test_process_play_media_url(hass, mock_sign_path): await async_process_ha_core_config( hass, {"internal_url": "http://example.local:8123"}, ) hass.config.api = Mock(use_ssl=False, port=8123, local_ip="192.168.123.123") # Not changing a url that is not a hass url assert ( async_process_play_media_url(hass, "https://not-hass.com/path") == "https://not-hass.com/path" ) # Not changing a url that is not http/https assert ( async_process_play_media_url(hass, "file:///tmp/test.mp3") == "file:///tmp/test.mp3" ) # Testing signing hass URLs assert ( async_process_play_media_url(hass, "/path") == "http://example.local:8123/path?authSig=bla" ) assert ( async_process_play_media_url(hass, "http://example.local:8123/path") == "http://example.local:8123/path?authSig=bla" ) assert ( async_process_play_media_url(hass, "http://192.168.123.123:8123/path") == "http://192.168.123.123:8123/path?authSig=bla" ) with pytest.raises(HomeAssistantError), patch( "homeassistant.components.media_player.browse_media.get_url", side_effect=NoURLAvailableError, ): async_process_play_media_url(hass, "/path") # Test skip signing URLs that have a query param assert ( async_process_play_media_url(hass, "/path?hello=world") == "http://example.local:8123/path?hello=world" ) assert ( async_process_play_media_url( hass, "http://192.168.123.123:8123/path?hello=world" ) == "http://192.168.123.123:8123/path?hel
18
322
test_process_play_media_url
43
0
2
17
keras/layers/rnn/cudnn_test.py
273,832
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
keras
12
Python
25
cudnn_test.py
def test_trainability(self): input_size = 10 units = 2 for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]: layer = layer_class(units) layer.build((None, None, input_size)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0) layer.trainable = False self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.non_trainable_weights), 3) self.assertEqual(len(layer.trainable_weights), 0) layer.trainable = True self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqual(len(layer.non_trainable_weights), 0)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
173
https://github.com/keras-team/keras.git
206
def test_trainability(self): input_size = 10 units = 2 for layer_class in [keras.layers.CuDNNGRU, keras.layers.CuDNNLSTM]: layer = layer_class(units) layer.build((None, None, input_size)) self.assertEqual(len(layer.weights), 3) self.assertEqual(len(layer.trainable_weights), 3) self.assertEqu
17
266
test_trainability
44
1
1
16
tests/infrastructure/test_process.py
60,007
Monitor process after kill and return early when possible (#7746)
prefect
11
Python
37
test_process.py
async def test_process_kill_sends_sigterm_then_sigkill(monkeypatch): os_kill = MagicMock() monkeypatch.setattr("os.kill", os_kill) infrastructure_pid = f"{socket.gethostname()}:12345" grace_seconds = 2 process = Process(command=["noop"]) await process.kill( infrastructure_pid=infrastructure_pid, grace_seconds=grace_seconds ) os_kill.assert_has_calls( [ call(12345, signal.SIGTERM), call(12345, 0), call(12345, signal.SIGKILL), ] ) @pytest.mark.skipif( sys.platform == "win32", reason="SIGTERM/SIGKILL are only used in non-Windows environments", )
b326ebbcca00b79c82ef92ad4d823044dab40e5f
@pytest.mark.skipif( sys.platform == "win32", reason="SIGTERM/SIGKILL are only used in non-Windows environments", )
80
https://github.com/PrefectHQ/prefect.git
128
async def test_process_kill_sends_sigterm_then_sigkill(monkeypatch): os_kill = MagicMock() monkeypatch.setattr("os.kill", os_kill) infrastructure_pid = f"{socket.gethostname()}:12345" grace_seconds = 2 process = Process(command=["noop"]) await process.kill( infrastructure_pid=infrastructure_pid, grace_seconds=grace_seconds ) os_kill.assert_has_calls( [
24
170
test_process_kill_sends_sigterm_then_sigkill
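A minimal sketch of the SIGTERM-then-SIGKILL pattern the test above asserts on; the helper name and grace period are illustrative rather than Prefect's actual implementation, and signal 0 / SIGKILL make this POSIX-only:

    import os
    import signal
    import time

    def terminate(pid: int, grace_seconds: float = 2.0) -> None:
        os.kill(pid, signal.SIGTERM)       # ask the process to shut down
        deadline = time.monotonic() + grace_seconds
        while time.monotonic() < deadline:
            try:
                os.kill(pid, 0)            # signal 0 only probes that the pid still exists
            except ProcessLookupError:
                return                     # it exited within the grace period
            time.sleep(0.1)
        os.kill(pid, signal.SIGKILL)       # force-kill once the grace period is over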
90
0
2
14
tests/unit/test_normalize_icon_type.py
262,801
Icon translation using PIL (#6697) Convert icons into the correct platform dependent format using PIL/Pillow if installed.
pyinstaller
14
Python
70
test_normalize_icon_type.py
def test_normalize_icon_pillow(tmp_path): data_dir = str(Path(PyInstaller.__file__).with_name("bootloader") / "images") workpath = str(tmp_path) pytest.importorskip("PIL", reason="Needs PIL / Pillow for this test") # Alternative image - output is a different file with the correct suffix icon = os.path.join(data_dir, 'github_logo.png') ret = normalize_icon_type(icon, ("ico",), "ico", workpath) _, ret_filetype = os.path.splitext(ret) if ret_filetype != ".ico": pytest.fail("icon validation didn't convert to the right format", False) # Some random non-image file: Raises an image conversion error icon = os.path.join(data_dir, 'pyi_icon.notanicon') with open(icon, "w") as f: f.write("this is in fact, not an icon") with pytest.raises(ValueError): normalize_icon_type(icon, ("ico",), "ico", workpath)
3aad9af18641aa2181dd86cececc2aeb8a0dba06
140
https://github.com/pyinstaller/pyinstaller.git
146
def test_normalize_icon_pillow(tmp_path): data_dir = str(Path(PyInstaller.__file__).with_name("bootloader") / "images") workpath = str(tmp_path) pytest.importorskip("PIL", reason="Needs PIL / Pillow for this test") # Alternative image - output is a different file with the correct suffix icon = os.path.join(data_dir, 'github_logo.png') ret = normalize_icon_type(icon, ("ico",), "ico", workpath) _, ret_filetype = os.path.splitext(ret) if ret_filetype != ".ico": pytest.fail("icon validation didn't convert to the right format", False) # Some random non-image file: Raises an image conversion error icon =
27
248
test_normalize_icon_pillow
18
0
1
4
wagtail/admin/tests/api/test_pages.py
71,326
Reformat with black
wagtail
11
Python
17
test_pages.py
def test_meta_parent_id_doesnt_show_root_page(self): # Root page is visible in the admin API response = self.get_response(2) content = json.loads(response.content.decode("UTF-8")) self.assertIsNotNone(content["meta"]["parent"])
d10f15e55806c6944827d801cd9c2d53f5da4186
40
https://github.com/wagtail/wagtail.git
45
def test_meta_parent_id_doesnt_show_root_page(self): # Root page is visible in the admin API response = self.get_response(2) content = json.loads(response.content.decode("UTF-8")) self.assertIsNotN
9
71
test_meta_parent_id_doesnt_show_root_page
10
0
1
8
src/prefect/orion/orchestration/core_policy.py
60,024
Fix scheduled time copy for flows (#7770)
prefect
6
Python
10
core_policy.py
def priority(): return [ HandleFlowTerminalStateTransitions, PreventRedundantTransitions, CopyScheduledTime, WaitForScheduledTime, RetryFailedFlows, ]
40201d8b8c49047cb897b8b6664635dcfffe9413
17
https://github.com/PrefectHQ/prefect.git
78
def priority(): return [ HandleFlowTerminalStateTransitions, PreventRedundantTransitions, CopyScheduledTime, WaitForScheduledTime, RetryFailedFl
6
23
priority
26
0
1
7
lib/mpl_toolkits/mplot3d/art3d.py
109,922
Improve mpl_toolkit documentation
matplotlib
10
Python
20
art3d.py
def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._verts3d = juggle_axes(xs, ys, zs, zdir) self.stale = True
df6f95703b60348e01603f98a439b133da2938a0
72
https://github.com/matplotlib/matplotlib.git
75
def set_3d_properties(self, zs=0, zdir='z'): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._ve
17
116
set_3d_properties
35
0
3
10
saleor/core/utils/random_data.py
27,490
Update sample products set (#9796) * Update products data set * Fix image filenames * Replave `default_variant` with `default: true` * Fix fake user creation and attribute-product assignment * Drop preorders creation * Optimize images Shout out to https://github.com/ImageOptim/ImageOptim team * Load menus from JSON * Reduce the number of pregenerated sales * Fix one of the images * Apply code review changes * Fix attr values when loading data from json dump * Don't test user existence twice * Add some product descriptions * Fix failing tests * Simplify channel query * Reduce files sizes * Use relative imports Co-authored-by: Patryk Zawadzki <patrys@room-303.com> Co-authored-by: Krzysztof Wolski <krzysztof.k.wolski@gmail.com> Co-authored-by: IKarbowiak <iga.karbowiak@mirumee.com>
saleor
12
Python
28
random_data.py
def create_product_variant_channel_listings(product_variant_channel_listings_data): channel_USD = Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG) channel_PLN = Channel.objects.get(slug="channel-pln") for variant_channel_listing in product_variant_channel_listings_data: pk = variant_channel_listing["pk"] defaults = dict(variant_channel_listing["fields"]) defaults["variant_id"] = defaults.pop("variant") channel = defaults.pop("channel") defaults["channel_id"] = channel_USD.pk if channel == 1 else channel_PLN.pk ProductVariantChannelListing.objects.update_or_create(pk=pk, defaults=defaults)
08aa724176be00d7aaf654f14e9ae99dd4327f97
100
https://github.com/saleor/saleor.git
85
def create_product_variant_channel_listings(product_variant_channel_listings_data): channel_USD = Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG) channel_PLN = Channel.objects.get(slug="channel-pln") for variant_channel_listing in product_variant_channel_listings_data: pk = variant_channel_listing["pk"] defaults = dict(variant_channel_listing["fields"])
18
168
create_product_variant_channel_listings
30
0
1
13
tests/strategy/test_strategy_loading.py
149,113
Add test for can_short strategy attribute
freqtrade
11
Python
21
test_strategy_loading.py
def test_strategy_can_short(caplog, default_conf): caplog.set_level(logging.INFO) default_conf.update({ 'strategy': CURRENT_TEST_STRATEGY, }) strat = StrategyResolver.load_strategy(default_conf) assert isinstance(strat, IStrategy) default_conf['strategy'] = 'StrategyTestV3Futures' with pytest.raises(ImportError, match=""): StrategyResolver.load_strategy(default_conf) default_conf['trading_mode'] = 'futures' strat = StrategyResolver.load_strategy(default_conf) assert isinstance(strat, IStrategy)
20fc9459f23979f57d7925175dee376cd69acef0
86
https://github.com/freqtrade/freqtrade.git
73
def test_strategy_can_short(caplog, default_conf): caplog.set_level(logging.INFO) default_conf.update({ 'strategy': CURRENT_TEST_STRATEGY, }) strat = StrategyResolver.load_strategy(default_conf) assert isinstance(strat, IStrategy) default_conf['strategy'] = 'StrategyTestV3Futures' with pytest.raises(ImportError, match=""): StrategyResolver.load_strategy(default_conf) default_conf['trading_mode'] = 'futures' strat = StrategyResolver.load_strategy(default_conf) as
17
147
test_strategy_can_short
50
0
5
14
python3.10.4/Lib/asyncio/subprocess.py
220,794
add python 3.10.4 for windows
XX-Net
13
Python
41
subprocess.py
async def _feed_stdin(self, input): debug = self._loop.get_debug() self.stdin.write(input) if debug: logger.debug( '%r communicate: feed stdin (%s bytes)', self, len(input)) try: await self.stdin.drain() except (BrokenPipeError, ConnectionResetError) as exc: # communicate() ignores BrokenPipeError and ConnectionResetError if debug: logger.debug('%r communicate: stdin got %r', self, exc) if debug: logger.debug('%r communicate: close stdin', self) self.stdin.close()
8198943edd73a363c266633e1aa5b2a9e9c9f526
90
https://github.com/XX-net/XX-Net.git
183
async def _feed_stdin(self, input): debug = self._loop.get_debug() self.stdin.write(input) if debug: logger.debug( '%r communicate: feed stdin
15
151
_feed_stdin
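A self-contained sketch of the code path _feed_stdin serves, using only the standard-library asyncio subprocess API; "cat" is an illustrative POSIX command:

    import asyncio

    async def main() -> None:
        proc = await asyncio.create_subprocess_exec(
            "cat",
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
        )
        # communicate() feeds the bytes to stdin, drains and closes the pipe,
        # then collects stdout until EOF.
        out, _ = await proc.communicate(input=b"hello\n")
        print(out)  # b'hello\n'

    asyncio.run(main())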
31
0
1
9
tests/snuba/api/endpoints/test_organization_events_v2.py
92,712
tests(discover): Improve stability of eventsv2 tests (#36641) Same motivation as #36619, this aims to improve the stability of the eventsv2 tests by moving the event timestamps further in the past.
sentry
12
Python
28
test_organization_events_v2.py
def test_performance_view_feature(self): self.store_event( data={"event_id": "a" * 32, "timestamp": self.ten_mins_ago, "fingerprint": ["group1"]}, project_id=self.project.id, ) query = {"field": ["id", "project.id"], "project": [self.project.id]} response = self.do_request(query) assert response.status_code == 200 assert len(response.data["data"]) == 1
ef5a739249de199b25d2cba7a2ee52820d9f34de
87
https://github.com/getsentry/sentry.git
94
def test_performance_view_feature(self): self.store_event( data={"event_id": "a" * 32, "timestamp": self.ten_mins_ago, "fingerprint": ["group1"]}, project_id=self.project.id, ) query = {"field": ["id", "project.id"], "project": [self.project.id]} response = self.do_request(query) assert response.status_code == 200 assert len(response.
13
149
test_performance_view_feature
87
0
5
23
ppocr/postprocess/drrg_postprocess.py
25,221
add drrg
PaddleOCR
15
Python
58
drrg_postprocess.py
def __call__(self, preds, shape_list): edges, scores, text_comps = preds if edges is not None: if isinstance(edges, paddle.Tensor): edges = edges.numpy() if isinstance(scores, paddle.Tensor): scores = scores.numpy() if isinstance(text_comps, paddle.Tensor): text_comps = text_comps.numpy() assert len(edges) == len(scores) assert text_comps.ndim == 2 assert text_comps.shape[1] == 9 vertices, score_dict = graph_propagation(edges, scores, text_comps) clusters = connected_components(vertices, score_dict, self.link_thr) pred_labels = clusters2labels(clusters, text_comps.shape[0]) text_comps, pred_labels = remove_single(text_comps, pred_labels) boundaries = comps2boundaries(text_comps, pred_labels) else: boundaries = [] boundaries, scores = self.resize_boundary( boundaries, (1 / shape_list[0, 2:]).tolist()[::-1]) boxes_batch = [dict(points=boundaries, scores=scores)] return boxes_batch
1f9400dd7374ce9cc47981372e324ff412e53ba3
207
https://github.com/PaddlePaddle/PaddleOCR.git
324
def __call__(self, preds, shape_list): edges, scores, text_comps = preds if edges is not None: if isinstance(edges, paddle.Tensor): edges = edges.numpy() if isinstance(scores, paddle.Tensor): scores = scores.numpy() if isinstance(text_comps, paddle.Tensor): text_comps = text_comps.numpy() assert len(edges) == len(scores) assert text_comps.ndim == 2 assert text_comps.shape[1] == 9 vertices, score_dict = graph_propagation(edges, scores, text_comps) clusters = connected_components(vertices, score_
30
318
__call__
49
0
1
5
jax/_src/lax/fft.py
120,172
[MHLO] Add MHLO lowerings for FFT ops. PiperOrigin-RevId: 441768017
jax
11
Python
39
fft.py
def _fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths): x, = batched_args bd, = batch_dims x = batching.moveaxis(x, bd, 0) return fft(x, fft_type, fft_lengths), 0 fft_p = Primitive('fft') fft_p.def_impl(_fft_impl) fft_p.def_abstract_eval(fft_abstract_eval) xla.register_translation(fft_p, _fft_translation_rule) mlir.register_lowering(fft_p, _fft_lowering) ad.deflinear2(fft_p, _fft_transpose_rule) batching.primitive_batchers[fft_p] = _fft_batching_rule if pocketfft: xla.register_translation(fft_p, _fft_translation_rule_cpu, platform='cpu') if jax._src.lib.version >= (0, 3, 6): mlir.register_lowering(fft_p, _fft_lowering_cpu, platform='cpu')
4806c29bf784c22ea10b7c87b1d03f2f42c662d4
42
https://github.com/google/jax.git
49
def _fft_batching_rule(batched_args, batch_dims, fft_type, fft_lengths): x, = batched_args bd, = batch_dims x = batching.moveaxis(x, bd, 0) return fft(x, fft_type, fft_lengths), 0 fft_p = Primitive('fft') fft_p.def_impl(_fft_impl) fft_p.def_abstract_eval(fft_abstract_eval) xla.register_translation(fft_p, _fft_translation_rule) mlir.register_lowering(fft_p, _fft_lowering) ad.deflinear2(fft_p, _fft_transpose_rule) batching.primitive_batchers[fft_p] = _fft_batching_rule if pocketfft: xla.register_translation(fft_p, _fft_translation_rule_cpu, platform='cpu') if j
34
208
_fft_batching_rule
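What the batching rule above enables, seen from user code: vmap over an FFT maps the transform across the leading batch axis. A hedged illustration with made-up shapes:

    import jax
    import jax.numpy as jnp

    x = jnp.ones((4, 8))                  # a batch of 4 signals of length 8

    batched = jax.vmap(jnp.fft.fft)(x)    # dispatches through the fft primitive's batching rule
    direct = jnp.fft.fft(x, axis=-1)      # same transform computed without vmap

    print(batched.shape)                  # (4, 8)
    print(jnp.allclose(batched, direct))  # True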
18
0
1
12
tests/events/test_utils.py
246,712
Replace assertEquals and friends with non-deprecated versions. (#12092)
synapse
15
Python
16
test_utils.py
def test_event_fields_works_with_nested_keys(self): self.assertEqual( self.serialize( MockEvent( sender="@alice:localhost", room_id="!foo:bar", content={"body": "A message"}, ), ["content.body"], ), {"content": {"body": "A message"}}, )
02d708568b476f2f7716000b35c0adfa4cbd31b3
50
https://github.com/matrix-org/synapse.git
166
def test_event_fields_works_with_nested_keys(self): self.assertEqual(
8
89
test_event_fields_works_with_nested_keys
194
1
2
33
test/nodes/test_table_reader.py
258,390
refactor: Remove duplicate code in TableReader (#3708) * Refactor table reader to use util functions to reduce code duplication. * Expanding the tests for the table reader * Adding types * Updating tests to work for RCIReader * Fix bug in RCIReader. Saving the wrong queries list. * Update _flatten_inputs to not change input variable * Remove duplicate code
haystack
14
Python
123
test_table_reader.py
def test_table_reader_batch_single_query_single_doc_list(table_reader_and_param, table1, table2): table_reader, param = table_reader_and_param query = "When was Di Caprio born?" prediction = table_reader.predict_batch( queries=[query], documents=[Document(content=table1, content_type="table"), Document(content=table2, content_type="table")], ) # Expected output: List of lists of answers assert isinstance(prediction["answers"], list) assert isinstance(prediction["answers"][0], list) assert isinstance(prediction["answers"][0][0], Answer) assert prediction["queries"] == ["When was Di Caprio born?", "When was Di Caprio born?"] # Check number of answers for each document num_ans_reference = { "tapas_small": {"num_answers": [1, 1]}, "rci": {"num_answers": [10, 10]}, "tapas_scored": {"num_answers": [3, 3]}, } assert len(prediction["answers"]) == 2 for i, ans_list in enumerate(prediction["answers"]): assert len(ans_list) == num_ans_reference[param]["num_answers"][i] # Check first answer from the 1ST document score_reference = {"tapas_small": {"score": 1.0}, "rci": {"score": -6.5301}, "tapas_scored": {"score": 0.50568}} assert prediction["answers"][0][0].score == pytest.approx(score_reference[param]["score"], rel=1e-3) assert prediction["answers"][0][0].answer == "11 november 1974" assert prediction["answers"][0][0].offsets_in_context[0].start == 7 assert prediction["answers"][0][0].offsets_in_context[0].end == 8 # Check first answer from the 2ND Document ans_reference = { "tapas_small": {"answer": "5 april 1980", "start": 7, "end": 8, "score": 0.86314}, "rci": {"answer": "15 september 1960", "start": 11, "end": 12, "score": -7.9429}, "tapas_scored": {"answer": "5", "start": 10, "end": 11, "score": 0.11485}, } assert prediction["answers"][1][0].score == pytest.approx(ans_reference[param]["score"], rel=1e-3) assert prediction["answers"][1][0].answer == ans_reference[param]["answer"] assert prediction["answers"][1][0].offsets_in_context[0].start == ans_reference[param]["start"] assert prediction["answers"][1][0].offsets_in_context[0].end == ans_reference[param]["end"] @pytest.mark.parametrize("table_reader_and_param", ["tapas_small", "rci", "tapas_scored"], indirect=True)
756e0114e661767e3c59bb24cbd66c41ad4a5903
@pytest.mark.parametrize("table_reader_and_param", ["tapas_small", "rci", "tapas_scored"], indirect=True)
482
https://github.com/deepset-ai/haystack.git
336
def test_table_reader_batch_single_query_single_doc_list(table_reader_and_param, table1, table2): table_reader, param = table_reader_and_param query = "When was Di Caprio born?" prediction = table_reader.predict_batch( queries=[query], documents=[Document(content=table1, content_type="table"), Document(content=table2, content_type="table")], ) # Expected output: List of lists of answers assert isinstance(prediction["answers"], list) assert isinstance(prediction["answers"][0], list) assert isinstance(prediction["answers"][0][0], Answer) assert prediction["queries"] == ["When was Di Caprio born?", "When was Di Caprio born?"] # Check number of answers for each document num_ans_reference = { "tapas_small": {"num_answers": [1, 1]}, "rci": {"num_answers": [10, 10]}, "tapas_scored": {"num_answers": [3, 3]}, } assert len(prediction["answers"]) == 2 for i, ans_list in enumerate(prediction["answers"]): assert len(ans_list) == num_ans_reference[param]["num_answers"][i] # Check first answer from the 1ST document score_reference = {"tapas_small": {"score": 1.0}, "rci": {"score": -6.5301}, "tapas_scored": {"score": 0.50568}} assert prediction["answers"][0][0].score == pytest.approx(score_reference[param]["score"], rel=1e-3) assert prediction["answers"][0][0].answer == "11 november 1974" assert prediction["answers"][0][0].offsets_in_context[0].start == 7 assert prediction["answers"][0][0].offsets_in_context[0].end == 8 # Check first answer from the 2ND Document ans_reference = { "tapas_small"
35
806
test_table_reader_batch_single_query_single_doc_list
23
0
1
10
tests/models/groupvit/test_modeling_groupvit.py
31,776
Adding GroupViT Models (#17313) * add group vit and fixed test (except slow) * passing slow test * addressed some comments * fixed test * fixed style * fixed copy * fixed segmentation output * fixed test * fixed relative path * fixed copy * add ignore non auto configured * fixed docstring, add doc * fixed copies * Apply suggestions from code review merge suggestions Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * resolve comment, renaming model * delete unused attr * use fix copies * resolve comments * fixed attn * remove unused vars * refactor tests * resolve final comments * add demo notebook * fixed inconsitent default * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * rename stage->stages * Create single GroupViTEncoderLayer class * Update conversion script * Simplify conversion script * Remove cross-attention class in favor of GroupViTAttention * Convert other model as well, add processor to conversion script * addressing final comment * fixed args * Update src/transformers/models/groupvit/modeling_groupvit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Niels Rogge <nielsrogge@Nielss-MacBook-Pro.local>
transformers
10
Python
21
test_modeling_groupvit.py
def create_and_check_model(self, config, pixel_values): model = GroupViTVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
6c8f4c9a938a09749ea1b19a5fa2a8dd27e99a29
93
https://github.com/huggingface/transformers.git
93
def create_and_check_model(self, config, pixel_values): model = GroupViTVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): resul
20
144
create_and_check_model
7
0
1
2
tests/components/fibaro/test_config_flow.py
290,682
Normalize url entered in fibaro integration setup dialog (#81996) * Normalize url entered in fibaro integration setup dialog * Improvements as suggested in code review * Fix spelling in comments
core
9
Python
7
test_config_flow.py
async def test_normalize_url_add_api(): assert _normalize_url("http://192.168.1.1/") == "http://192.168.1.1/api/"
ff1ec7a028f747de1f96521eb3df6f98d7426434
12
https://github.com/home-assistant/core.git
13
async def test_normalize_url_add_api():
    assert _normalize_url("http://192.168.1.1/") == "http://192.168.1.1/api/"
2
28
test_normalize_url_add_api
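As an illustrative aside, here is a minimal sketch of a URL normalizer that would satisfy the assertion in the record above; the function name comes from the test, but the body below is an assumption, not the fibaro integration's actual implementation.

def _normalize_url(url: str) -> str:
    # Hypothetical sketch: ensure a trailing slash, then append the "api/"
    # segment when it is missing, matching the behaviour the test asserts.
    if not url.endswith("/"):
        url += "/"
    if not url.endswith("api/"):
        url += "api/"
    return url


assert _normalize_url("http://192.168.1.1/") == "http://192.168.1.1/api/"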
36
0
4
11
tests/utils/test_ffmpeg.py
30,377
properly patch is file func
spotify-downloader
12
Python
24
test_ffmpeg.py
def test_get_local_ffmpeg(monkeypatch):
    monkeypatch.setattr(pathlib.Path, "isfile", lambda *_: True)
    platform_str = platform.system()
    local_ffmpeg = get_local_ffmpeg()
    assert local_ffmpeg is not None
    if platform_str == "Linux":
        assert str(local_ffmpeg).endswith("ffmpeg")
    elif platform_str == "Darwin":
        assert str(local_ffmpeg).endswith("ffmpeg")
    elif platform_str == "Windows":
        assert str(local_ffmpeg).endswith("ffmpeg.exe")
7595c08e122c43aa55eea9be9fabd2de2c1d7b9a
84
https://github.com/spotDL/spotify-downloader.git
81
def test_get_local_ffmpeg(monkeypatch):
    monkeypatch.setattr(pathlib.Path, "isfile", lambda *_: True)
    platform_str = platform.system()
    local_ffmpeg =
13
151
test_get_local_ffmpeg
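As a side note, the record above boils down to one platform check; the helper below is a hypothetical sketch of it, made up for illustration and not part of spotdl's API.

import platform

def expected_ffmpeg_name() -> str:
    # Windows binaries carry an .exe suffix; Linux and macOS do not,
    # which is exactly what the assertions in the test distinguish.
    return "ffmpeg.exe" if platform.system() == "Windows" else "ffmpeg"

print(expected_ffmpeg_name())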
70
0
4
13
references/classification/sampler.py
191,896
only set random generator if shuffle=true (#5135) Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
vision
16
Python
52
sampler.py
def __iter__(self):
        if self.shuffle:
            # Deterministically shuffle based on epoch
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # Add extra samples to make it evenly divisible
        indices = [ele for ele in indices for i in range(self.repetitions)]
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # Subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices[: self.num_selected_samples])
e65a857b5487a8493bc8a80a95d64d9f049de347
145
https://github.com/pytorch/vision.git
194
def __iter__(self):
        if self.shuffle:
            # Deterministically sh
26
230
__iter__
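For illustration, the following standalone sketch reproduces the repeated-augmentation index bookkeeping done by the __iter__ above, outside the DistributedSampler machinery; the concrete values and the total_size formula are assumptions chosen for the example, not taken from the record.

import torch

# Illustrative values (assumptions, not from the dataset record).
dataset_len, repetitions, num_replicas, rank, seed, epoch = 8, 3, 2, 0, 0, 0
num_samples = (dataset_len * repetitions + num_replicas - 1) // num_replicas
total_size = num_samples * num_replicas

g = torch.Generator()
g.manual_seed(seed + epoch)
indices = torch.randperm(dataset_len, generator=g).tolist()

# Repeat each index, pad to a length divisible by the number of replicas,
# then take this rank's strided shard - mirroring the method above.
indices = [ele for ele in indices for _ in range(repetitions)]
indices += indices[: (total_size - len(indices))]
shard = indices[rank:total_size:num_replicas]
print(len(shard), shard)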
32
0
5
14
jina/helper.py
12,267
fix: close loop from run_async (#4734) * fix: close loop from run_async * fix: check if the loop was just created * style: fix overload and cli autocomplete * fix: context manage loop * fix: style * fix(loop): don't create new loops * fix: create loop if not existing * fix: close asyncio loop after fork * fix: better loop closing Co-authored-by: Jina Dev Bot <dev-bot@jina.ai> Co-authored-by: Deepankar Mahapatro <deepankar.mahapatro@jina.ai>
jina
16
Python
31
helper.py
def _update_policy():
    if __windows__:
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    elif 'JINA_DISABLE_UVLOOP' in os.environ:
        return
    else:
        try:
            import uvloop
            if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        except ModuleNotFoundError:
            warnings.warn(
                'Install `uvloop` via `pip install "jina[uvloop]"` for better performance.'
            )
c1f0ae51ed4ef76ff9aaa976d234670a296eac07
65
https://github.com/jina-ai/jina.git
142
def _update_policy():
    if __windows__:
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    elif 'JINA_DISABLE_UVLOOP' in os.environ:
        return
    else:
        try:
            import uvloop
            if not isinstance(asyncio.get_event_loop_policy(
14
115
_update_policy
14
0
2
7
bypy/macos/__main__.py
102,898
Refactor: More f-string for bypy scripts
kitty
13
Python
14
__main__.py
def install_dylib(self, path, set_id=True):
        shutil.copy2(path, self.frameworks_dir)
        if set_id:
            self.set_id(
                join(self.frameworks_dir, basename(path)),
                f'{self.FID}/{basename(path)}')
        self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))
74e70d2548255b26983c34e81bf3c7f85caa778b
59
https://github.com/kovidgoyal/kitty.git
75
def install_dylib(self, path, set_id=True):
        shutil.copy2(path, self.frameworks_dir)
        if set_id:
            self.set_id(
                join(self.frameworks_dir, basename(path)),
                f'{self.FID}/{basename(path)}')
        self.fix_dependencies_in_lib(join(self.frameworks_dir, basename(path)))
11
106
install_dylib
10
0
1
6
src/prefect/orion/database/orm_models.py
53,233
fix logging override bug
prefect
13
Python
10
orm_models.py
def versions_dir(self) -> Path:
        return (
            Path(prefect.orion.database.__file__).parent
            / "migrations/versions/postgresql"
        )
b937ae2f19021dabcb8b2548b3c204d6eb34a3e8
25
https://github.com/PrefectHQ/prefect.git
53
def versions_dir(self) -> Path:
        return (
            Path(prefect.orion.database.__file__).parent
            / "migrations/version
8
44
versions_dir
168
1
1
34
sklearn/tree/tests/test_tree.py
258,952
MNT Update black to stable version (#22474)
scikit-learn
12
Python
97
test_tree.py
def check_class_weights(name):
    TreeClassifier = CLF_TREES[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight="balanced", random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(
        class_weight=[
            {0: 2.0, 1: 2.0, 2: 1.0},
            {0: 2.0, 1: 1.0, 2: 2.0},
            {0: 1.0, 1: 2.0, 2: 2.0},
        ],
        random_state=0,
    )
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should also have no effect
    clf4 = TreeClassifier(class_weight="balanced", random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1.0, 1: 100.0, 2: 1.0}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight**2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

@pytest.mark.parametrize("name", CLF_TREES)
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
@pytest.mark.parametrize("name", CLF_TREES)
374
https://github.com/scikit-learn/scikit-learn.git
323
def check_class_weights(name):
    TreeClassifier = CLF_TREES[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight="balanced", random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(
        class_weight=[
            {0: 2.0, 1: 2.0, 2: 1.0},
            {0: 2.0, 1: 1.0, 2: 2.0},
            {0: 1.0, 1: 2.0, 2: 2.0},
        ],
        random_state=0,
    )
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should
26
546
check_class_weights
24
0
1
8
modules/deepbooru.py
152,833
refactored the deepbooru module to improve speed on running multiple interogations in a row. Added the option to generate deepbooru tags for textual inversion preproccessing.
stable-diffusion-webui
8
Python
18
deepbooru.py
def release_process():
    from modules import shared # prevents circular reference
    shared.deepbooru_process_queue.put("QUIT")
    shared.deepbooru_process.join()
    shared.deepbooru_process_queue = None
    shared.deepbooru_process = None
    shared.deepbooru_process_return = None
    shared.deepbooru_process_manager = None
1f92336be768d235c18a82acb2195b7135101ae7
44
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
49
def release_process():
9
79
release_process
42
0
1
10
pandas/tests/frame/constructors/test_from_records.py
168,545
Revert Interval/IntervalIndex/interval_range.inclusive deprecation (#48116) * Revert "Cln tests interval wrt inclusive (#47775)" This reverts commit 2d6e0b251955d3a2c0c88f7e6ddb57b335ed09b7. * Revert "CLN: Rename private variables to inclusive (#47655)" This reverts commit 102b3ca2119df822e2b0f346fa936d0fe9f17501. * Revert "TYP: Improve typing interval inclusive (#47646)" This reverts commit 55064763e8ba55f6ff5370a8dd083767a189d7a4. * Revert "DEPR: Deprecate set_closed and add set_incluive (#47636)" This reverts commit bd4ff395cbbf4cbde1fc8f1f746cae064a401638. * Revert "DEPR: Remove deprecation from private class IntervalTree (#47637)" This reverts commit f6658ef9fdef5972214fdc338e2c6b5ee308dbf4. * Revert "Revert inclusive default change of IntervalDtype (#47367)" This reverts commit d9dd1289e07d86928d144e53beb3d5b8ab3c2215. * Revert "ENH: consistency of input args for boundaries - Interval (#46522)" This reverts commit 7e23a37e1c5bda81234801a6584563e2880769eb. * Revert "ENH: consistency of input args for boundaries - pd.interval_range (#46355)" This reverts commit 073b3535d7a5171102e5915c38b57c21d13795ae. * Fix ArrowIntervalType manually * Remove unused import * Fix doctest and leftover usage * Fix remaining tests * Fix wording in doctoring Co-authored-by: Patrick Hoefler <61934744+phofl@users.noreply.github.com>
pandas
12
Python
36
test_from_records.py
def test_from_records_series_categorical_index(self):
        # GH#32805
        index = CategoricalIndex(
            [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
        )
        series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
        frame = DataFrame.from_records(series_of_dicts, index=index)
        expected = DataFrame(
            {"a": [1, 2, np.NaN], "b": [np.NaN, np.NaN, 3]}, index=index
        )
        tm.assert_frame_equal(frame, expected)
252ae0555abf488522f947107dcdee684be6ac8a
119
https://github.com/pandas-dev/pandas.git
119
def test_from_records_series_categorical_index(self):
        # GH#32805
        index = CategoricalIndex(
            [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
        )
        series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
        frame = DataFrame.from_records(series_of_dicts, index=index)
        expe
15
183
test_from_records_series_categorical_index
37
0
5
9
awx/main/models/ha.py
80,599
Consume control capacity (#11665) * Select control node before start task Consume capacity on control nodes for controlling tasks and consider remainging capacity on control nodes before selecting them. This depends on the requirement that control and hybrid nodes should all be in the instance group named 'controlplane'. Many tests do not satisfy that requirement. I'll update the tests in another commit. * update tests to use controlplane We don't start any tasks if we don't have a controlplane instance group Due to updates to fixtures, update tests to set node type and capacity explicitly so they get expected result. * Fixes for accounting of control capacity consumed Update method is used to account for currently consumed capacity for instance groups in the in-memory capacity tracking data structure we initialize in after_lock_init and then update via calculate_capacity_consumed (both in task_manager.py) Also update fit_task_to_instance to consider control impact on instances Trust that these functions do the right thing looking for a node with capacity, and cut out redundant check for the whole group's capacity per Alan's reccomendation. * Refactor now redundant code Deal with control type tasks before we loop over the preferred instance groups, which cuts out the need for some redundant logic. Also, fix a bug where I was missing assigning the execution node in one case! * set job explanation on tasks that need capacity move the job explanation for jobs that need capacity to a function so we can re-use it in the three places we need it. * project updates always run on the controlplane Instance group ordering makes no sense on project updates because they always need to run on the control plane. Also, since hybrid nodes should always run the control processes for the jobs running on them as execution nodes, account for this when looking for a execution node. * fix misleading message the variables and wording were both misleading, fix to be more accurate description in the two different cases where this log may be emitted. * use settings correctly use settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME instead of a hardcoded name cache the controlplane_ig object during the after lock init to avoid an uneccesary query eliminate mistakenly duplicated AWX_CONTROL_PLANE_TASK_IMPACT and use only AWX_CONTROL_NODE_TASK_IMPACT * add test for control capacity consumption add test to verify that when there are 2 jobs and only capacity for one that one will move into waiting and the other stays in pending * add test for hybrid node capacity consumption assert that the hybrid node is used for both control and execution and capacity is deducted correctly * add test for task.capacity_type = control Test that control type tasks have the right capacity consumed and get assigned to the right instance group Also fix lint in the tests * jobs_running not accurate for control nodes We can either NOT use "idle instances" for control nodes, or we need to update the jobs_running property on the Instance model to count jobs where the node is the controller_node. I didn't do that because it may be an expensive query, and it would be hard to make it match with jobs_running on the InstanceGroup which filters on tasks assigned to the instance group. This change chooses to stop considering "idle" control nodes an option, since we can't acurrately identify them. 
The way things are without any change, is we are continuing to over consume capacity on control nodes because this method sees all control nodes as "idle" at the beginning of the task manager run, and then only counts jobs started in that run in the in-memory tracking. So jobs which last over a number of task manager runs build up consuming capacity, which is accurately reported via Instance.consumed_capacity * Reduce default task impact for control nodes This is something we can experiment with as far as what users want at install time, but start with just 1 for now. * update capacity docs Describe usage of the new setting and the concept of control impact. Co-authored-by: Alan Rominger <arominge@redhat.com> Co-authored-by: Rebeccah <rhunter@redhat.com>
awx
16
Python
24
ha.py
def consumed_capacity(self):
        capacity_consumed = 0
        if self.node_type in ('hybrid', 'execution'):
            capacity_consumed += sum(x.task_impact for x in UnifiedJob.objects.filter(execution_node=self.hostname, status__in=('running', 'waiting')))
        if self.node_type in ('hybrid', 'control'):
            capacity_consumed += sum(
                settings.AWX_CONTROL_NODE_TASK_IMPACT for x in UnifiedJob.objects.filter(controller_node=self.hostname, status__in=('running', 'waiting'))
            )
        return capacity_consumed
604cbc17376620dc67df35386421835d43732a4e
94
https://github.com/ansible/awx.git
112
def consumed_capacity(self):
        capacity_consumed = 0
        if self.node_type in ('hybrid', 'execution'):
16
155
consumed_capacity
12
0
1
5
erpnext/regional/report/e_invoice_summary/e_invoice_summary.py
64,116
revert: "refactor!: drop e-invoicing integration from erpnext (#26940)" This reverts commit c335962827e4927f7ada084e9ba4ab2db15e3eb6.
erpnext
8
Python
10
e_invoice_summary.py
def execute(filters=None):
    validate_filters(filters)
    columns = get_columns()
    data = get_data(filters)
    return columns, data
c5782b0e7107ae9ccabc923d480d49d602bafb39
26
https://github.com/frappe/erpnext.git
7
def execute(filters=None):
    validate_filters(filters)
    columns = get_columns()
    data = get_data(filters)
7
44
execute
236
0
2
22
tests/test_modeling_utils.py
334,400
check with other device
diffusers
17
Python
144
test_modeling_utils.py
def test_sample(self):
        generator = torch.manual_seed(0)
        # 1. Load models
        scheduler = GaussianDDPMScheduler.from_config("fusing/ddpm-lsun-church")
        model = UNetModel.from_pretrained("fusing/ddpm-lsun-church").to(torch_device)
        # 2. Sample gaussian noise
        image = scheduler.sample_noise((1, model.in_channels, model.resolution, model.resolution), device=torch_device, generator=generator)
        # 3. Denoise
        for t in reversed(range(len(scheduler))):
            # i) define coefficients for time step t
            clipped_image_coeff = 1 / torch.sqrt(scheduler.get_alpha_prod(t))
            clipped_noise_coeff = torch.sqrt(1 / scheduler.get_alpha_prod(t) - 1)
            image_coeff = (1 - scheduler.get_alpha_prod(t - 1)) * torch.sqrt(scheduler.get_alpha(t)) / (1 - scheduler.get_alpha_prod(t))
            clipped_coeff = torch.sqrt(scheduler.get_alpha_prod(t - 1)) * scheduler.get_beta(t) / (1 - scheduler.get_alpha_prod(t))
            # ii) predict noise residual
            with torch.no_grad():
                noise_residual = model(image, t)
            # iii) compute predicted image from residual
            # See 2nd formula at https://github.com/hojonathanho/diffusion/issues/5#issue-896554416 for comparison
            pred_mean = clipped_image_coeff * image - clipped_noise_coeff * noise_residual
            pred_mean = torch.clamp(pred_mean, -1, 1)
            prev_image = clipped_coeff * pred_mean + image_coeff * image
            # iv) sample variance
            prev_variance = scheduler.sample_variance(t, prev_image.shape, device=torch_device, generator=generator)
            # v) sample x_{t-1} ~ N(prev_image, prev_variance)
            sampled_prev_image = prev_image + prev_variance
            image = sampled_prev_image
        # Note: The better test is to simply check with the following lines of code that the image is sensible
        # import PIL
        # import numpy as np
        # image_processed = image.cpu().permute(0, 2, 3, 1)
        # image_processed = (image_processed + 1.0) * 127.5
        # image_processed = image_processed.numpy().astype(np.uint8)
        # image_pil = PIL.Image.fromarray(image_processed[0])
        # image_pil.save("test.png")
        assert image.shape == (1, 3, 256, 256)
        image_slice = image[0, -1, -3:, -3:].cpu()
        import ipdb; ipdb.set_trace()
        assert (image_slice - torch.tensor([[-0.0598, -0.0611, -0.0506], [-0.0726, 0.0220, 0.0103], [-0.0723, -0.1310, -0.2458]])).abs().sum() < 1e-3
b76eea041234a75ebb5451a7c2aba3eb7f844f9f
369
https://github.com/huggingface/diffusers.git
578
def test_sample(self):
        generator = torch.manual_seed(0)
        # 1. Load models
        scheduler = GaussianDDPMScheduler.from_config("fusing/ddpm-lsun-church")
        model = UNetModel.from_pretrained("fusing/ddpm-lsun-church").to(torch_device)
        # 2. Sample gaussian noise
        image = scheduler.sample_noise((1, model.in_channels, model.resolution, model.resolution), device=torch_device, generator=generator)
        # 3. Denoise
        for t in reversed(range(len(scheduler))):
            # i) define coefficients for time step t
            clipped_image_coeff = 1 / torch.sqrt(scheduler.get_alpha_prod(t))
            clipped_noise_coeff = torch.sqrt(1 / scheduler.get_alpha_prod(t) - 1)
            image_coeff = (1 - scheduler.get_alpha_prod(t - 1)) * torch.sqrt(scheduler.get_alpha(t)) / (1 - scheduler.get_alpha_prod(t))
            clipped_coeff = torch.sqrt(scheduler.get_alpha_prod(t - 1)) * scheduler.get_beta(t) / (1 - scheduler.get_alpha_prod(t))
            # ii) predict noise residual
            with torch.no_grad():
                noise_residual = model(image, t)
            # iii) compute predicted image from residual
            # See 2nd formula at https://github.com/hojonathanho/diffusion/issues/5#issue-896554416 for comparison
            pred_mean = clipped_image_coeff * image - clipped_noise_coeff * noise_residual
            pred_mean = torch.clamp(pred_mean, -1, 1)
            prev_image = cl
46
555
test_sample
325
0
30
77
mindsdb/api/mysql/mysql_proxy/executor/executor_commands.py
114,976
del datastore
mindsdb
17
Python
168
executor_commands.py
def answer_create_predictor(self, statement):
        integration_name = None
        struct = {
            'predictor_name': statement.name.parts[-1],
            'select': statement.query_str,
            'predict': [x.parts[-1] for x in statement.targets]
        }
        if len(struct['predict']) > 1:
            raise SqlApiException("Only one field can be in 'PREDICT'")
        if isinstance(statement.integration_name, Identifier):
            struct['integration_name'] = statement.integration_name.parts[-1]
        if statement.using is not None:
            struct['using'] = statement.using
        if statement.datasource_name is not None:
            struct['datasource_name'] = statement.datasource_name.parts[-1]
        if statement.order_by is not None:
            struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]
            if len(struct['order_by']) > 1:
                raise SqlApiException("Only one field can be in 'OPRDER BY'")
        if statement.group_by is not None:
            struct['group_by'] = [x.parts[-1] for x in statement.group_by]
        if statement.window is not None:
            struct['window'] = statement.window
        if statement.horizon is not None:
            struct['horizon'] = statement.horizon
        model_interface = self.session.model_interface
        models = model_interface.get_models()
        model_names = [x['name'] for x in models]
        if struct['predictor_name'] in model_names:
            raise SqlApiException(f"Predictor with name '{struct['predictor_name']}' already exists. Each predictor must have unique name.")
        predictor_name = struct['predictor_name']
        integration_name = struct.get('integration_name')
        if integration_name is not None:
            handler = self.session.integration_controller.get_handler(integration_name)
            # TODO
            # raise ErBadDbError(f"Unknown datasource: {integration_name}")
            result = handler.native_query(struct['select'])
            if result.get('type') != RESPONSE_TYPE.TABLE:
                raise Exception(f'Error during query: {result.get("error_message")}')
            ds_data_df = result['data_frame']
            ds_column_names = list(ds_data_df.columns)
            predict = self._check_predict_columns(struct['predict'], ds_column_names)
            for i, p in enumerate(predict):
                predict[i] = get_column_in_case(ds_column_names, p)
        else:
            predict = struct['predict']
        timeseries_settings = {}
        for w in ['order_by', 'group_by', 'window', 'horizon']:
            if w in struct:
                timeseries_settings[w] = struct.get(w)
        kwargs = struct.get('using', {})
        if len(timeseries_settings) > 0:
            if 'timeseries_settings' not in kwargs:
                kwargs['timeseries_settings'] = timeseries_settings
            else:
                if isinstance(kwargs.get('timeseries_settings'), str):
                    kwargs['timeseries_settings'] = json.loads(kwargs['timeseries_settings'])
                kwargs['timeseries_settings'].update(timeseries_settings)
        # Cast all column names to same case
        if isinstance(kwargs.get('timeseries_settings'), dict):
            order_by = kwargs['timeseries_settings'].get('order_by')
            if order_by is not None:
                for i, col in enumerate(order_by):
                    new_name = get_column_in_case(ds_column_names, col)
                    if new_name is None:
                        raise Exception(
                            f'Cant get appropriate cast column case. Columns: {ds_column_names}, column: {col}'
                        )
                    kwargs['timeseries_settings']['order_by'][i] = new_name
            group_by = kwargs['timeseries_settings'].get('group_by')
            if group_by is not None:
                for i, col in enumerate(group_by):
                    new_name = get_column_in_case(ds_column_names, col)
                    kwargs['timeseries_settings']['group_by'][i] = new_name
                    if new_name is None:
                        raise Exception(
                            f'Cant get appropriate cast column case. Columns: {ds_column_names}, column: {col}'
                        )
        model_interface.learn(predictor_name, ds_data_df, predict, kwargs=kwargs)
        return ExecuteAnswer(ANSWER_TYPE.OK)
18134ada88727d64eb2b3f04f303e3b66e43a1ec
642
https://github.com/mindsdb/mindsdb.git
1,257
def answer_create_predictor(self, statement):
        integration_name = None
        struct = {
            'predictor_name': statement.name.parts[-1],
            'select': statement.query_str,
            'predict': [x.parts[-1] for x in statement.targets]
        }
        if len(struct['predict']) > 1:
            raise SqlApiException("Only one field can be in 'PREDICT'")
        if isinstance(statement.integration_name, Identifier):
            struct['integration_name'] = statement.integration_name.parts[-1]
        if statement.using is not None:
            struct['using'] = statement.using
        if statement.datasource_name is not None:
            struct['datasource_name'] = statement.datasource_name.parts[-1]
        if statement.order_by is not None:
            struct['order_by'] = [x.field.parts[-1] for x in statement.order_by]
            if len(struct['order_by']) > 1:
                raise SqlApiException("Only one field can be in 'OPRDER BY'")
        if statement.group_by is not None:
            struct['group_by'] = [x.parts[-1] for x in statement.group_by]
        if statement.window is not None:
            struct['window'] = statement.window
        if statement.horizon is not None:
            struct['horizon'] = statement.horizon
        m
60
1,101
answer_create_predictor
10
0
1
7
test/mitmproxy/data/addonscripts/configure.py
251,781
make it black!
mitmproxy
9
Python
10
configure.py
def load(self, loader):
        loader.add_option(
            name="optionaddon",
            typespec=Optional[int],
            default=None,
            help="Option Addon",
        )
b3587b52b25077f68116b9852b041d33e7fc6601
31
https://github.com/mitmproxy/mitmproxy.git
67
def load(self, loader):
        loader.add_option(
            name="optionaddon",
            typespec=Optional[int],
            default=None,
            help="Option Addon",
        )
10
48
load
25
0
1
10
tests/unit/types/request/test_request.py
12,707
feat: allow to access parameters of data request wo loading data (#4991)
jina
10
Python
18
test_request.py
def test_status():
    r = DataRequest()
    r.docs.extend([Document()])
    r.add_exception(ValueError('intentional_error'))
    byte_array = DataRequestProto.SerializeToString(r)
    deserialized_request = DataRequestProto.FromString(byte_array)
    assert not deserialized_request.is_decompressed_with_data
    assert deserialized_request.status.code == jina_pb2.StatusProto.ERROR
    assert deserialized_request.is_decompressed_wo_data
    assert not deserialized_request.is_decompressed_with_data
c3849c6fee4a65a77a82b2cfda9670d727ff0f53
72
https://github.com/jina-ai/jina.git
51
def test_status():
    r = DataRequest()
    r.docs.extend([Document()])
    r.add_exception(ValueError('intentional_
20
120
test_status
16
0
3
6
sympy/combinatorics/fp_groups.py
197,297
Remove abbreviations in documentation
sympy
11
Python
14
fp_groups.py
def equals(self, word1, word2):
        if self.reduce(word1*word2**-1) == self.identity:
            return True
        elif self._rewriting_system.is_confluent:
            return False
        return None
65be461082dda54c8748922f9c29a19af1279fe1
40
https://github.com/sympy/sympy.git
66
def equals(self, word1, word2):
        if self.reduce(word1*word2**-1) == self.identity:
            re
8
64
equals
11
0
1
5
tests/auth_tests/test_management.py
201,378
Refs #33476 -- Reformatted code with Black.
django
10
Python
11
test_management.py
def test_existing(self):
        User.objects.create(username="joe")
        management.get_system_username = lambda: "joe"
        self.assertEqual(management.get_default_username(), "")
        self.assertEqual(management.get_default_username(check_db=False), "joe")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
49
https://github.com/django/django.git
38
def test_existing(self):
        User.objects.create(username="joe")
        management.get_system_username = lambda: "joe"
        self.assertEqual(management.get_default_username(), "")
        self.assertEqual(management.get_default
11
86
test_existing
10
0
3
4
django/db/models/sql/query.py
205,843
Refs #33476 -- Reformatted code with Black.
django
10
Python
9
query.py
def has_select_fields(self):
        return bool(
            self.select or self.annotation_select_mask or self.extra_select_mask
        )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
20
https://github.com/django/django.git
34
def has_select_fields(self):
        return bool(
            self.select or self.annotation_select_mask or self.extra_select_mask
        )
6
32
has_select_fields
66
0
5
25
ppstructure/kie/predict_kie_token_ser_re.py
25,283
add ser to ppstructure system
PaddleOCR
11
Python
50
predict_kie_token_ser_re.py
def __call__(self, img):
        starttime = time.time()
        ser_results, ser_inputs, ser_elapse = self.ser_engine(img)
        if self.predictor is None:
            return ser_results, ser_elapse
        re_input, entity_idx_dict_batch = make_input(ser_inputs, ser_results)
        if self.use_visual_backbone == False:
            re_input.pop(4)
        for idx in range(len(self.input_tensor)):
            self.input_tensor[idx].copy_from_cpu(re_input[idx])
        self.predictor.run()
        outputs = []
        for output_tensor in self.output_tensors:
            output = output_tensor.copy_to_cpu()
            outputs.append(output)
        preds = dict(
            loss=outputs[1],
            pred_relations=outputs[2],
            hidden_states=outputs[0],
        )
        post_result = self.postprocess_op(
            preds,
            ser_results=ser_results,
            entity_idx_dict_batch=entity_idx_dict_batch)
        elapse = time.time() - starttime
        return post_result, elapse
d4a4c07c561421832f3207e41a0eba3460d431d7
173
https://github.com/PaddlePaddle/PaddleOCR.git
277
def __call__(self, img):
        starttime = time.time()
        ser_results, ser_inputs, ser_elapse = self.ser_engine(img)
        if self.predictor is None:
            return ser_results, ser_elapse
        re_input, entity_idx_dict_batch = make_input(ser_inputs, ser_results)
        if self.use_visual_backbone == False:
35
266
__call__
70
0
7
21
datasets/exams/exams.py
105,478
Fix bug and checksums in exams dataset (#4853) * Fix KeyError in exams dataset * Update metadata JSON * Fix dataset card
datasets
20
Python
50
exams.py
def _generate_examples(self, filepath, files=None):
        if self.config.name == "alignments":
            with open(filepath, encoding="utf-8") as f:
                for id_, line in enumerate(f):
                    line_dict = json.loads(line.strip())
                    in_id, out_list = list(line_dict.items())[0]
                    yield id_, {"source_id": in_id, "target_id_list": out_list}
        else:
            for path, f in files:
                if path == filepath:
                    for id_, line in enumerate(f):
                        line_dict = json.loads(line.strip())
                        for choice in line_dict["question"]["choices"]:
                            choice["para"] = choice.get("para", "")
                        yield id_, {
                            "id": line_dict["id"],
                            "question": line_dict["question"],
                            "answerKey": line_dict["answerKey"],
                            "info": line_dict["info"],
                        }
                    break
b88a656cf94c4ad972154371c83c1af759fde522
175
https://github.com/huggingface/datasets.git
457
def _generate_examples(self, filepath, files=None):
        if self.config.name == "alignments":
            with open(filepath, encoding="utf-8") as f:
                for id_, line in enumerate(f):
                    line_dict = json.loads(line.strip())
                    in_id, out_list = list(line_dict.items())[0]
                    yield id_, {"source_id": in_id, "target_id_list": out_list}
        else:
            for path, f in files:
                if path == filepath:
                    for id_, line in enumerate(f):
                        line_dict = json.loads(line.strip())
                        for choice in line_dict["question"]["choices"]:
                            choice["para"] = choice.get("para", "")
                        yield id_, {
                            "id": line_dict["id"],
23
300
_generate_examples
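For illustration, the JSON-lines parsing pattern used by the "alignments" branch above can be reduced to a standalone generator; the file layout is assumed for the sketch and is not taken from the real EXAMS release.

import json

def iter_alignments(path):
    # Each line is assumed to be a JSON object holding a single
    # source-id -> target-id-list pair, as in the branch above.
    with open(path, encoding="utf-8") as f:
        for id_, line in enumerate(f):
            record = json.loads(line.strip())
            source_id, target_ids = list(record.items())[0]
            yield id_, {"source_id": source_id, "target_id_list": target_ids}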
187
0
2
60
python/ccxt/coinbasepro.py
17,935
1.72.67 [ci skip]
ccxt
20
Python
110
coinbasepro.py
def fetch_markets(self, params={}):
        response = self.publicGetProducts(params)
        #
        #     [
        #         {
        #             "id":"ZEC-BTC",
        #             "base_currency":"ZEC",
        #             "quote_currency":"BTC",
        #             "base_min_size":"0.01000000",
        #             "base_max_size":"1500.00000000",
        #             "quote_increment":"0.00000100",
        #             "base_increment":"0.00010000",
        #             "display_name":"ZEC/BTC",
        #             "min_market_funds":"0.001",
        #             "max_market_funds":"30",
        #             "margin_enabled":false,
        #             "post_only":false,
        #             "limit_only":false,
        #             "cancel_only":false,
        #             "trading_disabled":false,
        #             "status":"online",
        #             "status_message":""
        #         }
        #     ]
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'id')
            baseId = self.safe_string(market, 'base_currency')
            quoteId = self.safe_string(market, 'quote_currency')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            status = self.safe_string(market, 'status')
            result.append(self.extend(self.fees['trading'], {
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': self.safe_value(market, 'margin_enabled'),
                'swap': False,
                'future': False,
                'option': False,
                'active': (status == 'online'),
                'contract': False,
                'linear': None,
                'inverse': None,
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': self.safe_number(market, 'base_increment'),
                    'price': self.safe_number(market, 'quote_increment'),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': self.safe_number(market, 'base_min_size'),
                        'max': self.safe_number(market, 'base_max_size'),
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'min_market_funds'),
                        'max': self.safe_number(market, 'max_market_funds'),
                    },
                },
                'info': market,
            }))
        return result
a485e7e15458907349ee510684112af2430e39e4
341
https://github.com/ccxt/ccxt.git
1,496
def fetch_markets(self, params={}):
        response = self.publicGetProducts(params)
        #
        #     [
        #         {
        #             "id":"ZEC-BTC",
        #             "base_currency":"ZEC",
        #             "quote_currency":"BTC",
        #             "base_min_size":"0.01000000",
        #
23
609
fetch_markets