Each record below is a sample from a code-commit dataset and lists the following fields in order, separated by `|`:

| column | dtype | range |
|---|---|---|
| n_words | int64 | 3–1.95k |
| n_ast_errors | int64 | 0–2 |
| complexity | int64 | 1–151 |
| nloc | int64 | 2–546 |
| path | stringlengths | 8–125 |
| id | int64 | 280–339k |
| commit_message | stringlengths | 3–18.1k |
| repo | stringlengths | 3–28 |
| ast_levels | int64 | 4–28 |
| language | stringclasses | 1 class (Python in every record) |
| vocab_size | int64 | 3–677 |
| file_name | stringlengths | 5–67 |
| code | stringlengths | 101–24k |
| commit_id | stringlengths | 40–40 |
| ast_errors | stringlengths | 0–2.76k |
| token_counts | int64 | 7–3.77k |
| url | stringlengths | 31–61 |
| n_whitespaces | int64 | 4–13.9k |
| random_cut | stringlengths | 21–13.9k |
| n_identifiers | int64 | 1–157 |
| n_ast_nodes | int64 | 10–3.6k |
| fun_name | stringlengths | 3–72 |
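If this sample comes from a dataset hosted on the Hugging Face Hub (the column summary above matches that viewer's format), it can be loaded and filtered with the `datasets` library. A minimal sketch follows; the identifier `org/code-commits` and the split name are placeholders, not this dataset's real ID:

```python
# Minimal sketch of loading a dataset with the schema above.
# "org/code-commits" is a hypothetical placeholder, not this dataset's real ID.
from datasets import load_dataset

ds = load_dataset("org/code-commits", split="train")

# Each record pairs a Python function with its commit metadata.
row = ds[0]
print(row["repo"], row["fun_name"], row["complexity"])

# The integer columns make simple filtering cheap, e.g. keep small,
# error-free functions only.
simple = ds.filter(lambda r: r["complexity"] <= 5 and r["n_ast_errors"] == 0)
print(len(simple))
```

The sample records follow.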
73 | 0 | 6 | 22 | apps/applications/models/application.py | 188,691 | fix: hide the mongodb node in the application tree | jumpserver | 12 | Python | 53 | application.py | def create_types_tree_nodes(cls, pid, counts, show_empty=True, show_count=True):
nodes = []
temp_pid = pid
type_category_mapper = const.AppType.type_category_mapper()
types = const.AppType.type_category_mapper().keys()
for tp in types:
# TODO: Temporary exclude mongodb
if tp == const.AppType.mongodb:
continue
if not settings.XPACK_ENABLED and const.AppType.is_xpack(tp):
continue
category = type_category_mapper.get(tp)
pid = cls.create_tree_id(pid, 'category', category.value)
i = cls.create_tree_id(pid, 'type', tp.value)
node = cls.create_choice_node(
tp, i, pid, tp='type', counts=counts, opened=False,
show_empty=show_empty, show_count=show_count
)
pid = temp_pid
if not node:
continue
nodes.append(node)
return nodes
| 60edbb36a19188570113f9b5b2b60d01412a280d | 161 | https://github.com/jumpserver/jumpserver.git | 310 | def create_types_tree_nodes(cls, pid, counts, show_empty=True, show_count=True):
nodes = []
temp_pid = pid
type_category_mapper = const.AppType.type_category_mapper()
types = const.AppType.type_category_mapper().keys()
for tp in types:
# TODO: Temporary exclude mongodb
if tp == const.AppType.mongodb:
continue
if not settings.XPACK_ENABLED and const.AppType.is_xpack(tp):
continue
category = type_category_mapper.get(tp)
pid = cls.create_tree_id(pid, 'category', category.value)
i = cls.create_tree_id(pid, 'type', tp.value)
node = cls.create_choice_node(
tp, i, pid, tp='type', counts=counts, opened=False,
show_empty=show_empty, show_count=show_count
)
pid = temp_pid
i | 27 | 250 | create_types_tree_nodes |
|
31 | 0 | 2 | 9 | test/test_prototype_transforms.py | 194,367 | rename features._Feature to datapoints._Datapoint (#7002)
* rename features._Feature to datapoints.Datapoint
* _Datapoint to Datapoint
* move is_simple_tensor to transforms.utils
* fix CI
* move Datapoint out of public namespace | vision | 10 | Python | 27 | test_prototype_transforms.py | def test__transform(self, p, transform_cls, func_op_name, kwargs, mocker):
transform = transform_cls(p=p, **kwargs)
fn = mocker.patch(f"torchvision.prototype.transforms.functional.{func_op_name}")
inpt = mocker.MagicMock(spec=datapoints.Image)
_ = transform(inpt)
if p > 0.0:
fn.assert_called_once_with(inpt, **kwargs)
else:
assert fn.call_count == 0
| a8007dcdfb5159a711fa343d2ac4bb7df826975f | 77 | https://github.com/pytorch/vision.git | 94 | def test__transform(self, p, transform_cls, func_op_name, kwargs, mocker):
transform = transform_cls(p=p, **kwargs)
fn = mocker.patch(f"torchvision.prototype.transforms.functional.{func_op_name}")
inpt = mocker.MagicMock( | 18 | 119 | test__transform |
|
17 | 0 | 1 | 5 | python/ray/data/tests/test_context_propagation.py | 130,622 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | ray | 13 | Python | 14 | test_context_propagation.py | def test_map_batches(ray_start_regular_shared):
context = DatasetContext.get_current()
context.foo = 70003
ds = ray.data.range(1).map_batches(lambda x: [DatasetContext.get_current().foo])
assert ds.take_all()[0] == 70003
| 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | 54 | https://github.com/ray-project/ray.git | 28 | def test_map_batches(ray_start_regular_shared):
context = DatasetContext.get_current()
context.foo = 70003
ds = ray.data.range(1).map_batches(lambda x: [DatasetContext.get_current().foo])
assert ds.take_all()[0] == 70003
| 13 | 88 | test_map_batches |
|
15 | 0 | 2 | 7 | homeassistant/components/zha/core/channels/lighting.py | 317,506 | ZHA light entity cleanup (#75573)
* use base class attributes
* initial hue and saturation support
* spec is 65536 not 65535
* fixes
* enhanced current hue
* fix comparison
* clean up
* fix channel test
* oops
* report enhanced current hue | core | 11 | Python | 14 | lighting.py | def hs_supported(self) -> bool:
return (
self.zcl_color_capabilities is not None
and lighting.Color.ColorCapabilities.Hue_and_saturation
in self.zcl_color_capabilities
)
| 04c6b9c51963418ffebddc7753939700fbea7e42 | 29 | https://github.com/home-assistant/core.git | 69 | def hs_supported(self) -> bool:
return (
self.zcl_color_capabilities is not None
and lighting.Color.ColorCapabilities.Hue_and_saturation
in self.zcl_color_capabilities
| 8 | 47 | hs_supported |
|
59 | 1 | 2 | 9 | python/ray/tune/insufficient_resources_manager.py | 132,214 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | ray | 13 | Python | 50 | insufficient_resources_manager.py | def _get_insufficient_resources_warning_threshold() -> float:
if is_ray_cluster():
return float(
os.environ.get(
"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S_AUTOSCALER", "60"
)
)
else:
# Set the default to 10s so that we don't prematurely determine that
# a cluster cannot fulfill the resources requirements.
# TODO(xwjiang): Change it back once #18608 is resolved.
return float(os.environ.get("TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S", "60"))
# TODO(xwjiang): Consider having a help page with more detailed instructions.
@lru_cache() | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | @lru_cache() | 41 | https://github.com/ray-project/ray.git | 141 | def _get_insufficient_resources_warning_threshold() -> float:
if is_ray_cluster():
return float(
os.environ.get(
"TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S_AUTOSCALER", "60"
)
)
else:
# Set the default to 10s so that we don't prematurely determine that
# a cluster cannot fulfill the resources requirements.
# TODO(xwjiang): Change it back once #18608 is resolved.
return float(os.environ.get("TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S", "60"))
# TODO(x | 7 | 86 | _get_insufficient_resources_warning_threshold |
24 | 0 | 3 | 7 | homeassistant/components/ecobee/notify.py | 289,622 | Load ecobee notify platform via discovery (#78558)
* Fix ecobee notify platform KeyError
* set up notify platform via discovery
* address comments
* fix isort
* Apply suggestions from code review
Co-authored-by: Martin Hjelmare <marhje52@gmail.com> | core | 10 | Python | 22 | notify.py | def send_message(self, message="", **kwargs):
targets = kwargs.get(ATTR_TARGET)
if not targets:
raise ValueError("Missing required argument: target")
for target in targets:
thermostat_index = int(target)
self.ecobee.send_message(thermostat_index, message)
| da099532fe837604383d5e195be4a0320941a87c | 51 | https://github.com/home-assistant/core.git | 85 | def send_message(self, message="", **kwargs):
targets = kwargs.get(ATTR_TARGET)
if not targets:
raise ValueError("Missing required argument: target")
for target in targets:
thermostat_index = int(target)
self.ecobee.send_message(thermostat_index, mess | 12 | 86 | send_message |
|
59 | 1 | 1 | 19 | sklearn/neighbors/tests/test_neighbors.py | 259,533 | TST use global_dtype in sklearn/neighbors/tests/test_neighbors.py (#22663)
Co-authored-by: Jérémie du Boisberranger
Co-authored-by: Olivier Grisel <olivier.grisel@ensta.org> | scikit-learn | 12 | Python | 44 | test_neighbors.py | def test_same_knn_parallel(algorithm):
X, y = datasets.make_classification(
n_samples=30, n_features=5, n_redundant=0, random_state=0
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode="distance").toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = clf.kneighbors_graph(X_test, mode="distance").toarray()
assert_array_equal(y, y_parallel)
assert_allclose(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_allclose(graph, graph_parallel)
@pytest.mark.parametrize("algorithm", ALGORITHMS) | 7931262d737d89b795d1ea5790c44003c13c741c | @pytest.mark.parametrize("algorithm", ALGORITHMS) | 173 | https://github.com/scikit-learn/scikit-learn.git | 115 | def test_same_knn_parallel(algorithm):
X, y = datasets.make_classification(
n_samples=3 | 40 | 287 | test_same_knn_parallel |
124 | 0 | 2 | 51 | tests/snuba/api/endpoints/test_organization_events.py | 94,821 | fix(tests): Fix dnd backend test flakes (#37916)
This PR fixes 3 major flakes:
Fixes SENTRY-TESTS-3J5: Just sort the project id order
Fixes SENTRY-TESTS-3HQ: Flakes because we calculate the retention
in the test once and the value returned in the response is calculated
a little while after. We don't need to test for seconds granularity
so replacing seconds to 0.
Fixes SENTRY-TESTS-3J0: Successively calling before_now results in some flakes
particularly in tests that are calculating aggregates
on transaction.duration. Introduced a load_data method
that takes a datetime object timestamp and a timedelta duration
calculates the offset based on timestamp to get start_timestamp. | sentry | 16 | Python | 75 | test_organization_events.py | def test_count_miserable_new_alias_field(self):
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=400,
metric=TransactionMetric.DURATION.value,
)
events = [
("one", 400),
("one", 400),
("two", 3000),
("two", 3000),
("three", 300),
("three", 3000),
]
for idx, event in enumerate(events):
data = self.load_data(
timestamp=before_now(minutes=(10 + idx)),
duration=timedelta(milliseconds=event[1]),
)
data["event_id"] = f"{idx}" * 32
data["transaction"] = f"/count_miserable/horribilis/{event[0]}"
data["user"] = {"email": f"{idx}@example.com"}
self.store_event(data, project_id=self.project.id)
query = {
"field": [
"transaction",
"count_miserable(user)",
],
"query": "event.type:transaction",
"project": [self.project.id],
"sort": "count_miserable_user",
}
response = self.do_request(
query,
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 3
data = response.data["data"]
assert data[0]["count_miserable(user)"] == 0
assert data[1]["count_miserable(user)"] == 1
assert data[2]["count_miserable(user)"] == 2
query["query"] = "event.type:transaction count_miserable(user):>0"
response = self.do_request(
query,
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 2
data = response.data["data"]
assert abs(data[0]["count_miserable(user)"]) == 1
assert abs(data[1]["count_miserable(user)"]) == 2
| ab993b32614bb83d17d10e1041817e43dd6f5980 | 332 | https://github.com/getsentry/sentry.git | 597 | def test_count_miserable_new_alias_field(self):
ProjectTransactionThreshold.objects.create(
project=self.project,
organization=self.project.organization,
threshold=400,
metric=TransactionMetric.DURATION.value,
)
events = [
("one", 400),
("one", 400),
("two", 3000),
("two", 3000),
("three", 300),
("three", 3000),
]
for idx, event in enumerate(events):
data = self.load_data(
timestamp=before_now(minutes=(10 + idx)),
duration=timedelta(milliseconds=event[1]),
)
data["event_id"] = f"{idx}" * 32
data["transaction"] = f"/count_miserable/horribilis/{event[0]}"
data["user"] = {"email": f"{idx}@example.com"}
self.store_event(data, project_id=self.project.id)
query = {
"field": [
"transaction",
"count_miserable(user)",
],
"query": "event.type:transaction",
"project": [self.project.id],
"sort": "count_miserable_user",
}
response = self.do_request(
query,
)
assert response.status_code == 200, response.content
assert len(response.data["data"]) == 3
data = response.data["data"]
assert data[0]["count_miserable(user)"] == 0
assert data[1]["count_miserable(user)"] == 1
assert data[2]["count_miserable(user)"] == 2
query["query"] = "event.type:trans | 34 | 552 | test_count_miserable_new_alias_field |
|
1,014 | 0 | 12 | 161 | openbb_terminal/terminal_controller.py | 286,162 | Add intro command + improve plot style + internationalisation consistency (#2899)
* improve style of plots
* remove the blend which doesnt look nice in some machines
* remove translation for arguments to be coherent
* Add intro command
* add check to understand if its first time user to run intro by default
* intro doesnt require docs
* silly catch
* fix line length
* anoter lengthy line
* unused import
* unused import
* words
* allow to quit earlier
* style
* fix thing for u didi
* actual fix
* last try
Co-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>
Co-authored-by: james <jmaslek11@gmail.com> | OpenBBTerminal | 10 | Python | 465 | terminal_controller.py | def call_intro(self, _):
console.print(panel.Panel("[purple]Welcome to the OpenBB Terminal.[/purple]"))
console.print(
"\nThe following walkthrough will guide you towards making the most out of the OpenBB Terminal.\n\n"
"Press Enter to continue or 'q' followed by Enter to exit."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#1 - Commands vs menu.[/purple]"))
console.print(
"\nMenus are a collection of 'commands' and 'sub-menus'.\n"
"You can identify them through their distinct color and a '>' at the beginning of the line\n\n"
"For instance:\n"
"[menu]> stocks access historical pricing data, options, sector [/menu]"
"[menu]and industry, and overall due diligence [/menu]\n\n\n"
"Commands are expected to return data either as a chart or table.\n"
"You can identify them through their distinct color\n\n"
"For instance:\n"
"[cmds]> news display news articles based on term and data sources [/cmds]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#2 - Using commands[/purple]"))
console.print(
"\nCommands throughout the terminal can have additional arguments.\n\n"
"Let's say that in the current menu, you want to have more information about the command 'news'. \n\n"
"You can either see the available arguments in the terminal, using: [param]news -h[/param]\n\n",
"or you can find out more about it with an output example on the browser, using: [param]about news[/param]",
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#3 - Setting API Keys[/purple]"))
console.print(
"\nThe OpenBB Terminal does not own any of the data you have access to.\n\n"
"Instead, we provide the infrastructure to access over 100 different data sources from a single location.\n\n"
"Thus, it is necessary for each user to set their own API keys for the various third party sources\n\n"
"You can find more about this on the '[param]keys[/param]' menu.\n\n"
"For many commands, there are multiple data sources that can be selected.\n\n"
"The help menu shows the data sources supported by each command.\n\n"
"For instance:\n"
"[cmds] load load a specific stock ticker and additional info for analysis [/cmds]"
"[src][YahooFinance, IEXCloud, AlphaVantage, Polygon, EODHD] [/src]\n\n"
"The user can go into the '[param]sources[/param]' menu and select their preferred default data source."
)
if input("") == "q":
return
console.print("\n")
console.print(
panel.Panel("[purple]#4 - Symbol dependent menus and commands[/purple]")
)
console.print(
"\nThroughout the terminal, you will see commands and menus greyed out.\n\n"
"These menus or commands cannot be accessed until an object is loaded.\n\n"
"Let's take as an example the '[param]stocks[/param]' menu.\n\n"
"You will see that the command '[param]disc[/param]' is available as its goal is to discover new tickers:\n"
"[menu]> stocks access historical pricing data, options, sector [/menu]\n\n"
"On the other hand, '[param]fa[/param]' menu (fundamental analysis) requires a ticker to be loaded.\n\n"
"And therefore, appears as:\n"
"[dim]> fa fundamental analysis of loaded ticker [/dim]\n\n"
"Once a ticker is loaded with: [param]load TSLA[/param]\n\n"
"The '[param]fa[/param]' menu will be available as:\n"
"[menu]> fa fundamental analysis of loaded ticker [/menu]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#5 - Terminal Navigation[/purple]"))
console.print(
"\nThe terminal has a tree like structure, where menus branch off into new menus.\n\n"
"The users current location is displayed before the text prompt.\n\n"
"For instance, if the user is inside the menu disc which is inside stocks, the following prompt "
"will appear: \n2022 Oct 18, 21:53 (🦋) [param]/stocks/disc/[/param] $\n\n"
"If the user wants to go back to the menu above, all they need to do is type '[param]q[/param]'.\n\n"
"If the user wants to go back to the home of the terminal, they can type '[param]/[/param]' instead.\n\n"
"Note: Always type '[param]h[/param]' to know what commands are available in each menu"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#6 - Command Pipeline[/purple]"))
console.print(
"\nThe terminal offers the capability of allowing users to speed up their navigation and command execution."
"\n\nTherefore, typing the following prompt is valid:\n"
"2022 Oct 18, 21:53 (🦋) / $ [param]stocks/load TSLA/dd/pt[/param]\n\n"
"In this example, the terminal - in a single action - will go into '[param]stocks[/param]' menu, "
"run command '[param]load[/param]' with '[param]TSLA[/param]' as input, \n"
"go into sub-menu '[param]dd[/param]' (due diligence) and run the command '[param]pt[/param]' (price target)."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#6 - OpenBB Scripts[/purple]"))
console.print(
"\nThe command pipeline capability is great, but the user experience wasn't great copy-pasting large "
"lists of commands.\n\n"
"We allow the user to create a text file of the form:\n\n"
"[param]FOLDER_PATH/my_script.openbb[/param]\n"
"stocks\nload TSLA\ndd\npt\n\n"
"which can be run through the '[param]exe[/param]' command in the home menu, with:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script.openbb[/param]\n\n"
)
if input("") == "q":
return
console.print("\n")
console.print(
panel.Panel("[purple]#7 - OpenBB Scripts with Arguments[/purple]")
)
console.print(
"\nThe user can create a script that includes arguments for the commands.\n\n"
"Example:\n\n"
"[param]FOLDER_PATH/my_script_with_variable_input.openbb[/param]\n"
"stocks\n# this is a comment\nload $ARGV[0]\ndd\npt\nq\nload $ARGV[1]\ncandle\n\n"
"and then, if this script is run with:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script_with_variable_input.openbb "
"-i AAPL,MSFT[/param]\n\n"
"This means that the [param]pt[/param] will run on [param]AAPL[/param] while "
"[param]candle[/param] on [param]MSFT[/param]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#8 - OpenBB Script Generation/purple]"))
console.print(
"\n"
"To make it easier for users to create scripts, we have created a "
"command that 'records' user commands "
"directly into a script.\n\n"
"From the home menu, the user can run:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]record[/param]\n\n"
"and then perform your typical investment research workflow before entering\n\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]stop[/param]\n\n"
"After stopping, the script will be saved to the 'scripts' folder."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#9 - Terminal Customization[/purple]"))
console.print(
"\nUsers should explore the [param]settings[/param] and [param]featflags[/param] menus "
"to configure their terminal.\n\n"
"The fact that our terminal is fully open source allows users to be able to customize "
"anything they want.\n\n"
"If you are interested in contributing to the project, please check:\n"
"[param]https://github.com/OpenBB-finance/OpenBBTerminal[/param]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#10 - Support[/purple]"))
console.print(
"\n"
"We are nothing without our community, hence we put a lot of effort in being here for you.\n\n"
"If you find any bug that you wish to report to improve the terminal you can do so with:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]support CMD[/param]\n\n"
"which should open a form in your browser where you can report the bug in said 'CMD'.\n\n"
"If you want to know more, or have any further question. Please join us on Discord:\n"
"[param]https://openbb.co/discord[/param]"
)
| 3762693df7a1f8cdfeba5e14c4438f993a2eead0 | 454 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2,640 | def call_intro(self, _):
console.print(panel.Panel("[purple]Welcome to the OpenBB Terminal.[/purple]"))
console.print(
"\nThe following walkthrough will guide you towards making the most out of the OpenBB Terminal.\n\n"
"Press Enter to continue or 'q' followed by Enter to exit."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#1 - Commands vs menu.[/purple]"))
console.print(
"\nMenus are a collection of 'commands' and 'sub-menus'.\n"
"You can identify them through their distinct color and a '>' at the beginning of the line\n\n"
"For instance:\n"
"[menu]> stocks access historical pricing data, options, sector [/menu]"
"[menu]and industry, and overall due diligence [/menu]\n\n\n"
"Commands are expected to return data either as a chart or table.\n"
"You can identify them through their distinct color\n\n"
"For instance:\n"
"[cmds]> news display news articles based on term and data sources [/cmds]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#2 - Using commands[/purple]"))
console.print(
"\nCommands throughout the terminal can have additional arguments.\n\n"
"Let's say that in the current menu, you want to have more information about the command 'news'. \n\n"
"You can either see the available arguments in the terminal, using: [param]news -h[/param]\n\n",
"or you can find out more about it with an output example on the browser, using: [param]about news[/param]",
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#3 - Setting API Keys[/purple]"))
console.print(
"\nThe OpenBB Terminal does not own any of the data you have access to.\n\n"
"Instead, we provide the infrastructure to access over 100 different data sources from a single location.\n\n"
"Thus, it is necessary for each user to set their own API keys for the various third party sources\n\n"
"You can find more about this on the '[param]keys[/param]' menu.\n\n"
"For many commands, there are multiple data sources that can be selected.\n\n"
"The help menu shows the data sources supported by each command.\n\n"
"For instance:\n"
"[cmds] load load a specific stock ticker and additional info for analysis [/cmds]"
"[src][YahooFinance, IEXCloud, AlphaVantage, Polygon, EODHD] [/src]\n\n"
"The user can go into the '[param]sources[/param]' menu and select their preferred default data source."
)
if input("") == "q":
return
console.print("\n")
console.print(
panel.Panel("[purple]#4 - Symbol dependent menus and commands[/purple]")
)
console.print(
"\nThroughout the terminal, you will see commands and menus greyed out.\n\n"
"These menus or commands cannot be accessed until an object is loaded.\n\n"
"Let's take as an example the '[param]stocks[/param]' menu.\n\n"
"You will see that the command '[param]disc[/param]' is available as its goal is to discover new tickers:\n"
"[menu]> stocks access historical pricing data, options, sector [/menu]\n\n"
"On the other hand, '[param]fa[/param]' menu (fundamental analysis) requires a ticker to be loaded.\n\n"
"And therefore, appears as:\n"
"[dim]> fa fundamental analysis of loaded ticker [/dim]\n\n"
"Once a ticker is loaded with: [param]load TSLA[/param]\n\n"
"The '[param]fa[/param]' menu will be available as:\n"
"[menu]> fa fundamental analysis of loaded ticker [/menu]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#5 - Terminal Navigation[/purple]"))
console.print(
"\nThe terminal has a tree like structure, where menus branch off into new menus.\n\n"
"The users current location is displayed before the text prompt.\n\n"
"For instance, if the user is inside the menu disc which is inside stocks, the following prompt "
"will appear: \n2022 Oct 18, 21:53 (🦋) [param]/stocks/disc/[/param] $\n\n"
"If the user wants to go back to the menu above, all they need to do is type '[param]q[/param]'.\n\n"
"If the user wants to go back to the home of the terminal, they can type '[param]/[/param]' instead.\n\n"
"Note: Always type '[param]h[/param]' to know what commands are available in each menu"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#6 - Command Pipeline[/purple]"))
console.print(
"\nThe terminal offers the capability of allowing users to speed up their navigation and command execution."
"\n\nTherefore, typing the following prompt is valid:\n"
"2022 Oct 18, 21:53 (🦋) / $ [param]stocks/load TSLA/dd/pt[/param]\n\n"
"In this example, the terminal - in a single action - will go into '[param]stocks[/param]' menu, "
"run command '[param]load[/param]' with '[param]TSLA[/param]' as input, \n"
"go into sub-menu '[param]dd[/param]' (due diligence) and run the command '[param]pt[/param]' (price target)."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#6 - OpenBB Scripts[/purple]"))
console.print(
"\nThe command pipeline capability is great, but the user experience wasn't great copy-pasting large "
"lists of commands.\n\n"
"We allow the user to create a text file of the form:\n\n"
"[param]FOLDER_PATH/my_script.openbb[/param]\n"
"stocks\nload TSLA\ndd\npt\n\n"
"which can be run through the '[param]exe[/param]' command in the home menu, with:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script.openbb[/param]\n\n"
)
if input("") == "q":
return
console.print("\n")
console.print(
panel.Panel("[purple]#7 - OpenBB Scripts with Arguments[/purple]")
)
console.print(
"\nThe user can create a script that includes arguments for the commands.\n\n"
"Example:\n\n"
"[param]FOLDER_PATH/my_script_with_variable_input.openbb[/param]\n"
"stocks\n# this is a comment\nload $ARGV[0]\ndd\npt\nq\nload $ARGV[1]\ncandle\n\n"
"and then, if this script is run with:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]exe FOLDER_PATH/my_script_with_variable_input.openbb "
"-i AAPL,MSFT[/param]\n\n"
"This means that the [param]pt[/param] will run on [param]AAPL[/param] while "
"[param]candle[/param] on [param]MSFT[/param]"
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#8 - OpenBB Script Generation/purple]"))
console.print(
"\n"
"To make it easier for users to create scripts, we have created a "
"command that 'records' user commands "
"directly into a script.\n\n"
"From the home menu, the user can run:\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]record[/param]\n\n"
"and then perform your typical investment research workflow before entering\n\n"
"2022 Oct 18, 22:33 (🦋) / $ [param]stop[/param]\n\n"
"After stopping, the script will be saved to the 'scripts' folder."
)
if input("") == "q":
return
console.print("\n")
console.print(panel.Panel("[purple]#9 - Terminal Customization[/purple]"))
console.print(
"\nUsers should explore the [param]settings[/param] and [param]featflags[/param] menus "
"to configure their terminal.\n\n"
"The fact that our terminal is fully open source allows users to be able to customize "
"anything they want.\n\n"
"If you are interested in contributing to the project, please check:\n"
"[param]https://github.com/OpenBB-finance/Open | 8 | 1,122 | call_intro |
|
25 | 0 | 1 | 8 | mindsdb/integrations/mlflow_handler/mlflow/mlflow_integration.py | 114,378 | fix tests and reformat | mindsdb | 11 | Python | 22 | mlflow_integration.py | def connect(self, **kwargs) -> Dict[str, int]:
# noqa
print(kwargs)
self.mlflow_server_url = kwargs['mlflow_server_url']
self.mlflow_server_path = kwargs['model_registry_path']
self.connection = MlflowClient(self.mlflow_server_url, self.mlflow_server_path)
self.storage = SqliteStorageHandler(context=self.name, config=kwargs['config'])
return self.check_status()
| 0fd3b436c38f38bcae6fed9e14dc4d2a12e90793 | 75 | https://github.com/mindsdb/mindsdb.git | 75 | def connect(self, **kwargs) -> Dict[str, int]:
# noqa
print(kwargs)
self.mlflow_server_url = kwargs['mlflow_server_url']
self.mlflow_server_path = kwargs['model_registry_path']
self.connection = MlflowClient(self.mlflow_server_url, self.mlflow_server_path)
self.storage = SqliteStorageHandler(context=self.name, config=kwarg | 17 | 123 | connect |
|
10 | 0 | 1 | 6 | python/ray/serve/handle.py | 136,534 | Revert "[all_tests][python] Remove calling of get_event_loop from pyt… (#30382)
This reverts commit 784e66b. | ray | 10 | Python | 10 | handle.py | def _make_router(self) -> Router:
return Router(
self.controller_handle,
self.deployment_name,
event_loop=asyncio.get_event_loop(),
)
| c7115135ea131b29bd6ff3d32e4f90297e5e770e | 27 | https://github.com/ray-project/ray.git | 56 | def _make_router(self) -> Router:
ret | 8 | 41 | _make_router |
|
154 | 0 | 10 | 54 | src/sentry/models/deploy.py | 92,854 | feat(release-activity): backend support for tracking release activity (#36608)
Alpha Workflow 2.0 feature for tracking activity when an issue occurs in an active release-deployment time-window.
This adds the backend components needed to support surfacing the notification activity in the frontend. | sentry | 19 | Python | 119 | deploy.py | def notify_if_ready(cls, deploy_id, fetch_complete=False):
from sentry.models import (
Activity,
Environment,
Organization,
ReleaseActivity,
ReleaseCommit,
ReleaseHeadCommit,
)
lock_key = cls.get_lock_key(deploy_id)
lock = locks.get(lock_key, duration=30, name="deploy_notify")
with TimedRetryPolicy(10)(lock.acquire):
deploy = cls.objects.filter(id=deploy_id).select_related("release").get()
if deploy.notified:
return
release = deploy.release
environment = Environment.objects.get(
organization_id=deploy.organization_id, id=deploy.environment_id
)
if not fetch_complete:
release_has_commits = ReleaseCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists()
if not release_has_commits:
# check if we have head commits, which
# would indicate that we're waiting for
# fetch_commits to complete
if ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists():
return
activity = None
for project in deploy.release.projects.all():
activity = Activity.objects.create(
type=ActivityType.DEPLOY.value,
project=project,
ident=Activity.get_version_ident(release.version),
data={
"version": release.version,
"deploy_id": deploy.id,
"environment": environment.name,
},
datetime=deploy.date_finished,
)
# Somewhat hacky, only send notification for one
# Deploy Activity record because it will cover all projects
if activity is not None:
activity.send_notification()
deploy.update(notified=True)
# XXX(workflow): delete this after WF 2.0 experiment over
try:
org = Organization.objects.get_from_cache(id=deploy.organization_id)
except Organization.DoesNotExist:
org = None
if org and features.has("organizations:active-release-monitor-alpha", org):
ReleaseActivity.objects.create(
type=ReleaseActivityType.DEPLOYED.value,
release=release,
data={"environment": str(environment.name)},
)
| 15e21309086e5e97fa66f0d5234cb3110bcf34f1 | 335 | https://github.com/getsentry/sentry.git | 1,066 | def notify_if_ready(cls, deploy_id, fetch_complete=False):
from sentry.models import (
Activity,
Environment,
Organization,
ReleaseActivity,
ReleaseCommit,
ReleaseHeadCommit,
)
lock_key = cls.get_lock_key(deploy_id)
lock = locks.get(lock_key, duration=30, name="deploy_notify")
with TimedRetryPolicy(10)(lock.acquire):
deploy = cls.objects.filter(id=deploy_id).select_related("release").get()
if deploy.notified:
return
release = deploy.release
environment = Environment.objects.get(
organization_id=deploy.organization_id, id=deploy.environment_id
)
if not fetch_complete:
release_has_commits = ReleaseCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists()
if not release_has_commits:
# check if we have head commits, which
# would indicate that we're waiting for
# fetch_commits to complete
if ReleaseHeadCommit.objects.filter(
organization_id=release.organization_id, release=release
).exists():
return
activity = None
for project in deploy.release.projects.all():
activity = Activity.objects.create(
type=ActivityType.DEPLOY.value,
project=project,
ident=Activity.get_version_ident(release.version),
data={
"version": release.version,
"deploy_id": deploy.id,
"environment": environment.name,
},
datetime=deploy.date_finished,
)
# Somewhat hacky, only send notification for one
# Deploy Activity record because it will cover all projects
if activity is not None:
activity.send_notification()
deploy.update(notified=True)
# XXX(workflow): delete this after WF 2.0 experiment over
try:
org = Organization.objects.get_from_cache(id=deploy.organization_id)
except Organization.DoesNotExist:
org = None
if org and features.has("organizations:active-release-monitor-alpha", org):
| 58 | 535 | notify_if_ready |
|
21 | 0 | 2 | 7 | tests/unit/keyinput/test_basekeyparser.py | 321,359 | Run scripts/dev/rewrite_enums.py | qutebrowser | 11 | Python | 21 | test_basekeyparser.py | def test_binding_with_shift(self, prompt_keyparser):
for key, modifiers in [(Qt.Key.Key_Y, Qt.KeyboardModifier.NoModifier),
(Qt.Key.Key_Shift, Qt.KeyboardModifier.ShiftModifier),
(Qt.Key.Key_Y, Qt.KeyboardModifier.ShiftModifier)]:
info = keyutils.KeyInfo(key, modifiers)
prompt_keyparser.handle(info.to_event())
prompt_keyparser.execute.assert_called_once_with('yank -s', None)
| 0877fb0d78635692e481c8bde224fac5ad0dd430 | 87 | https://github.com/qutebrowser/qutebrowser.git | 124 | def test_binding_with_shift(self, prompt_keyparser):
for key, modifiers in [(Qt.Key.Key_Y, Qt.KeyboardModifier.NoModifier),
(Qt.Key.Key_Shift, Qt.KeyboardModifier.ShiftModifier),
(Qt.Key.Key_Y, Qt.KeyboardModifier.ShiftModifier)]:
info = keyutils.KeyInfo(key | 19 | 132 | test_binding_with_shift |
|
36 | 0 | 1 | 21 | examples/dev_sandbox.py | 182,308 | Displaying tabs with underline | textual | 13 | Python | 34 | dev_sandbox.py | def on_mount(self):
self.tabs = Tabs(
[
Tab("One", name="one"),
Tab("Two", name="two"),
Tab("Three", name="three"),
Tab("Four", name="four"),
Tab("Five", name="five"),
Tab("Six", name="six"),
Tab("Seven", name="seven"),
Tab("Eight", name="eight"),
],
)
self.tabs.active_tab_name = "one"
self.mount(
header=self.tabs,
content=PanelWidget(),
footer=Widget(),
sidebar=Widget(),
)
BasicApp.run(css_file="dev_sandbox.scss", watch_css=True, log="textual.log")
| b2f7c2ac850ab43020706c3e5b6660db1f25507a | 124 | https://github.com/Textualize/textual.git | 263 | def on_mount(self):
self.tabs = Tabs(
[
Tab("One", name="one"),
Tab("Two", name="two"),
Tab("Three", name="three"),
Tab("Four", name="four"),
Tab("Five", name="five"),
Tab("Six", name="six"),
Tab("Seven", name="seven"),
Tab("Eight", name="eight"),
],
)
self.tabs.active_tab_name = "one"
sel | 19 | 241 | on_mount |
|
28 | 0 | 3 | 8 | python3.10.4/Lib/email/message.py | 223,810 | add python 3.10.4 for windows | XX-Net | 11 | Python | 18 | message.py | def get_filename(self, failobj=None):
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 62 | https://github.com/XX-net/XX-Net.git | 92 | def get_filename(self, failobj=None):
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.coll | 10 | 106 | get_filename |
|
46 | 0 | 1 | 10 | packages/syft/src/syft/core/tensor/tensor.py | 965 | working ndept pointer | PySyft | 10 | Python | 39 | tensor.py | def _object2bytes(self) -> bytes:
schema = get_capnp_schema(schema_file="tensor.capnp")
tensor_struct: CapnpModule = schema.Tensor # type: ignore
tensor_msg = tensor_struct.new_message()
# this is how we dispatch correct deserialization of bytes
tensor_msg.magicHeader = serde_magic_header(type(self))
chunk_bytes(sy.serialize(self.child, to_bytes=True), "child", tensor_msg)
tensor_msg.publicShape = sy.serialize(self.public_shape, to_bytes=True)
tensor_msg.publicDtype = self.public_dtype
tensor_msg.tagName = self.tag_name
return tensor_msg.to_bytes_packed()
| 0805df03a6a8e068bfbe039e0664a842f50ad5de | 95 | https://github.com/OpenMined/PySyft.git | 116 | def _object2bytes(self) -> bytes:
schema = get_capnp_schema(schema_file="tensor.capnp")
tensor_struct: CapnpModule = schema.Tensor # type: ignore
tensor_msg = tensor_struct.new_message()
# this is how we dispatch correct deserialization of bytes
tensor_msg.magicHeader = serde_magic_header(type(self))
chunk_bytes(sy.serialize(self.child, to_bytes=True), "child", tensor_msg)
tensor_msg.publicShape = sy.serialize(self.pub | 26 | 155 | _object2bytes |
|
22 | 0 | 1 | 2 | numpy/polynomial/_polybase.py | 160,706 | MAINT: limit the number of decimals in Polynomial representation (#21654)
* limit the number of decimals in Polynomial representation
* tests pass
* parenthesize exponential notation in polynomials
* fixed a long line warning
* added polynomial printoptions tests
* polynomial printoptions typo fixed
* made switch to exp notation in polynomial display more natural
* added a test on switching polynomials to exp notation
* fixed linter errors/warnings
* support for nanstr and infstr printoptions in polynomials
* 10^8 threshold for switching to exp notation when displaying polynomials
* merged in PR #21696 fixing issue #21695
* made linter happy
* made some docstring tests pass
* fixed the docs
Co-authored-by: Lev Maximov <lev.maximov@gmail.com> | numpy | 10 | Python | 21 | _polybase.py | def _repr_latex_scalar(x, parens=False):
# TODO: we're stuck with disabling math formatting until we handle
# exponents in this function
return r'\text{{{}}}'.format(pu.format_float(x, parens=parens))
| a5535dc6242b0decae1e65a3d4feb220fefedc49 | 26 | https://github.com/numpy/numpy.git | 42 | def _repr_latex_scalar(x, parens=False):
# TODO: we're stuck with disabling math formatting until we handle
# exponents in this function
return r'\text{{{}}}'.format(pu.format_float(x, parens= | 6 | 42 | _repr_latex_scalar |
|
33 | 0 | 3 | 12 | pipenv/environment.py | 19,664 | Issue 4993 Add standard pre commit hooks and apply linting. (#4994)
* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else. | pipenv | 13 | Python | 26 | environment.py | def reverse_dependency(cls, node):
new_node = {
"package_name": node["package_name"],
"installed_version": node["installed_version"],
"required_version": node["required_version"],
}
for dependency in node.get("dependencies", []):
for dep in cls.reverse_dependency(dependency):
new_dep = dep.copy()
new_dep["parent"] = (node["package_name"], node["installed_version"])
yield new_dep
yield new_node
| 9a3b3ce70621af6f9adaa9eeac9cf83fa149319c | 82 | https://github.com/pypa/pipenv.git | 149 | def reverse_dependency(cls, node):
new_node = {
"package_name": node["package_name"],
"installed_version": node["installed_version"],
"required_version": node["required_version"],
}
for dependency in node.get("dependencies", []):
for dep in cls.reverse_dependency(dependency):
new_dep = dep.copy()
new_dep["parent | 9 | 141 | reverse_dependency |
|
24 | 0 | 2 | 12 | tests/sentry/api/endpoints/test_organization_metric_data.py | 91,937 | fix(tests) - use _indexer_record instead of indexer.record (#35908)
fix a test that was added after I branched off and changed this file structure for my feature | sentry | 11 | Python | 24 | test_organization_metric_data.py | def test_validate_include_meta_not_enabled_by_default(self):
self.create_release(version="foo", project=self.project)
for tag in ("release", "environment"):
_indexer_record(self.project.organization_id, tag)
response = self.get_success_response(
self.project.organization.slug,
project=self.project.id,
field="sum(sentry.sessions.session)",
groupBy="environment",
query="",
)
assert response.data["meta"] == []
| a8264817cfe5a4c03515869c010c78d04a710f1e | 83 | https://github.com/getsentry/sentry.git | 124 | def test_validate_include_meta_not_enabled_by_default(self):
self.create_release(version="foo", project=self.project)
for tag in ("release", "environment"):
_indexer_record(self.project.organization_id, tag)
response = self.get_success_response(
self.project.organiz | 17 | 135 | test_validate_include_meta_not_enabled_by_default |
|
90 | 0 | 2 | 16 | ppdet/modeling/rbox_utils.py | 211,348 | Refactor rbox (#6704)
* refactor rbox
* modify the code of save results
* fix some problem
* add .gitignore in dataset/dota
* fix test anno path | PaddleDetection | 15 | Python | 61 | rbox_utils.py | def rbox2poly_np(rboxes):
polys = []
for i in range(len(rboxes)):
x_ctr, y_ctr, width, height, angle = rboxes[i][:5]
tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
R = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
poly = R.dot(rect)
x0, x1, x2, x3 = poly[0, :4] + x_ctr
y0, y1, y2, y3 = poly[1, :4] + y_ctr
poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float32)
poly = get_best_begin_point_single(poly)
polys.append(poly)
polys = np.array(polys)
return polys
| e55e41945d42db787a0f7c557d53d06a6b24536b | 227 | https://github.com/PaddlePaddle/PaddleDetection.git | 196 | def rbox2poly_np(rboxes):
polys = []
for i in range(len(rboxes)):
x_ctr, y_ctr, width, height, angle = rboxes[i][:5]
tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
R = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
poly = R.dot(rect)
x0, x1, x2, x3 = poly[0, :4] + x_ctr
y0, y1, y | 35 | 327 | rbox2poly_np |
|
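To make the rotation step in `rbox2poly_np` above concrete, here is a self-contained check of the same math; it omits the repo's `get_best_begin_point_single` corner reordering, and the box values are illustrative:

```python
# Self-contained check of the rotation math used in rbox2poly_np above;
# the corner-reordering step (get_best_begin_point_single) is omitted.
import numpy as np

x_ctr, y_ctr, width, height, angle = 1.0, 2.0, 4.0, 2.0, np.pi / 2

# Corners of the axis-aligned box, relative to its center.
rect = np.array([[-width / 2, width / 2, width / 2, -width / 2],
                 [-height / 2, -height / 2, height / 2, height / 2]])
# 2x2 rotation matrix for the given angle.
R = np.array([[np.cos(angle), -np.sin(angle)],
              [np.sin(angle), np.cos(angle)]])
# Rotate the corners, then translate back to the center.
poly = R.dot(rect) + np.array([[x_ctr], [y_ctr]])
print(poly.T)  # a 90-degree rotation swaps the roles of width and height
```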
20 | 1 | 2 | 6 | tests/io/test_sql.py | 105,777 | Add ability to read-write to SQL databases. (#4928)
* Add ability to read-write to SQL databases.
* Fix issue where pandas<1.4.0 doesn't return the number of rows
* Fix issue where connections were not closed properly
* Apply suggestions from code review
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Change according to reviews
* Change according to reviews
* Inherit from AbstractDatasetInputStream in SqlDatasetReader
* Revert typing in SQLDatasetReader as we do not support Connexion
* Align API with Pandas/Daskk
* Update tests
* Update docs
* Update some more tests
* Missing comma
* Small docs fix
* Style
* Update src/datasets/arrow_dataset.py
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Update src/datasets/packaged_modules/sql/sql.py
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Address some comments
* Address the rest
* Improve tests
* sqlalchemy required tip
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
Co-authored-by: mariosasko <mariosasko777@gmail.com> | datasets | 12 | Python | 19 | test_sql.py | def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
@require_sqlalchemy | d7dfbc83d68e87ba002c5eb2555f7a932e59038a | @require_sqlalchemy | 40 | https://github.com/huggingface/datasets.git | 53 | def iter_sql_file(sqlite_path):
with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
cur = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
| 12 | 75 | iter_sql_file |
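The `datasets` commit in the record above adds SQL read/write with an API aligned to pandas. A minimal sketch of the intended round trip, assuming the `Dataset.to_sql` / `Dataset.from_sql` methods it introduces (the table and database names are illustrative):

```python
# Sketch of the SQL round trip added in the datasets commit above; assumes
# a datasets release with Dataset.to_sql / Dataset.from_sql.
import sqlite3

from datasets import Dataset

con = sqlite3.connect("example.db")  # illustrative database file
ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})

ds.to_sql("dataset", con)  # write the dataset to a SQL table
ds2 = Dataset.from_sql("SELECT * FROM dataset", con)  # read it back
print(ds2.column_names)
```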
18 | 0 | 1 | 7 | src/sentry/integrations/vercel/webhook.py | 92,226 | chore(vercel): Remove deprecated project webhooks (#36260)
This PR fulfills a plan from August 2021 in which Vercel had deprecated project webhooks and we held onto compatability for a fair bit longer than expected.
We will continue to support the /delete route since existing integrations for self hosted users should still work, but we can now update the self hosted docs to use /webhook as is the case for most of our integrations. | sentry | 14 | Python | 16 | webhook.py | def verify_signature(request):
signature = request.META.get("HTTP_X_VERCEL_SIGNATURE")
secret = options.get("vercel.client-secret")
expected = hmac.new(
key=secret.encode("utf-8"), msg=bytes(request.body), digestmod=hashlib.sha1
).hexdigest()
return constant_time_compare(expected, signature)
| fe544b17a269e5ff6f86208dcf38c492f904dbb7 | 64 | https://github.com/getsentry/sentry.git | 39 | def verify_signature(request):
signature = request.META.get("HTTP_X_VERCEL_SIGNATURE")
secret = options.get("vercel.client-secret")
expected = hmac.new(
key=secret.encode("utf-8"), msg=bytes(request.body), digestmod=hashlib.sha1
).hexdigest()
return constant_time_compare(expected, sig | 20 | 106 | verify_signature |
|
107 | 0 | 3 | 43 | keras/preprocessing/image_test.py | 279,541 | Add f-string format and lint with flynt on the whole codebase | keras | 14 | Python | 67 | image_test.py | def test_dataframe_iterator_class_mode_raw(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
# case for 1D output
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)),
)
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col="output_0",
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3,))
self.assertAllEqual(batch_y, df["output_0"].values[:3])
# case with a 2D output
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3, 2))
self.assertAllEqual(batch_y, df[["output_0", "output_1"]].values[:3])
| be73ac1a1e25d9abd4d793cba9707098d7adf231 | 330 | https://github.com/keras-team/keras.git | 513 | def test_dataframe_iterator_class_mode_raw(self):
tmpdir = self.create_tempdir()
all_test_images = _generate_test_images(include_rgba=True)
# save the images in the paths
filenames = []
count = 0
for test_images in all_test_images:
for im in test_images:
filename = f"image-{count}.png"
im.save(os.path.join(tmpdir.full_path, filename))
filenames.append(filename)
count += 1
# case for 1D output
df = pd.DataFrame({"filename": filenames}).assign(
output_0=np.random.uniform(size=len(filenames)),
output_1=np.random.uniform(size=len(filenames)),
)
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col="output_0",
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3,))
self.assertAllEqual(batch_y, df["output_0"].values[:3])
# case with a 2D output
df_iterator = image.ImageDataGenerator().flow_from_dataframe(
df,
y_col=["output_0", "output_1"],
directory=tmpdir.full_path,
batch_size=3,
shuffle=False,
class_mode="raw",
)
batch_x, batch_y = next(df_iterator)
self.assertIsInstance(batch_x, np.ndarray)
self.assertLen(batch_x.shape, 4)
self.assertIsInstance(batch_y, np.ndarray)
self.assertEqual(batch_y.shape, (3, 2))
self.assertAllEqual(batch_y, df[["output_0", "output_1"]].value | 48 | 520 | test_dataframe_iterator_class_mode_raw |
|
178 | 0 | 1 | 70 | tools/eval.py | 211,362 | [smalldet] fix slice_infer (#6744)
* fix slice_infer
* fix doc, test=document_fix | PaddleDetection | 9 | Python | 121 | eval.py | def parse_args():
parser = ArgsParser()
parser.add_argument(
"--output_eval",
default=None,
type=str,
help="Evaluation directory, default is current directory.")
parser.add_argument(
'--json_eval',
action='store_true',
default=False,
help='Whether to re eval with already exists bbox.json or mask.json')
parser.add_argument(
"--slim_config",
default=None,
type=str,
help="Configuration file of slim method.")
# TODO: bias should be unified
parser.add_argument(
"--bias",
action="store_true",
help="whether add bias or not while getting w and h")
parser.add_argument(
"--classwise",
action="store_true",
help="whether per-category AP and draw P-R Curve or not.")
parser.add_argument(
'--save_prediction_only',
action='store_true',
default=False,
help='Whether to save the evaluation results only')
parser.add_argument(
"--amp",
action='store_true',
default=False,
help="Enable auto mixed precision eval.")
# for smalldet slice_infer
parser.add_argument(
"--slice_infer",
action='store_true',
help="Whether to slice the image and merge the inference results for small object detection."
)
parser.add_argument(
'--slice_size',
nargs='+',
type=int,
default=[640, 640],
help="Height of the sliced image.")
parser.add_argument(
"--overlap_ratio",
nargs='+',
type=float,
default=[0.25, 0.25],
help="Overlap height ratio of the sliced image.")
parser.add_argument(
"--combine_method",
type=str,
default='nms',
help="Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat']."
)
parser.add_argument(
"--match_threshold",
type=float,
default=0.6,
help="Combine method matching threshold.")
parser.add_argument(
"--match_metric",
type=str,
default='ios',
help="Combine method matching metric, choose in ['iou', 'ios'].")
args = parser.parse_args()
return args
| 486121eaa4ad142dde25ff7a77a2070f5a4571d4 | 262 | https://github.com/PaddlePaddle/PaddleDetection.git | 594 | def parse_args():
parser = ArgsParser()
parser.add_argument(
"--output_eval",
default=None,
type=str,
help="Evaluation directory, default is current directory.")
parser.add_argument(
'--json_eval',
action='store_true',
default=False,
help='Whether to re eval with already exists bbox.json or mask.json')
parser.add_argument(
"--slim_config",
default=None,
type=str,
help="Configuration file of slim method.")
# TODO: bias should be unified
parser.add_argument(
"--bias",
action="store_true",
help="whether add bias or not while getting w and h")
parser.add_argument(
| 13 | 436 | parse_args |
|
99 | 0 | 5 | 18 | ludwig/data/preprocessing.py | 7,938 | Update missing value strategy to only allow bfill and ffill (#2457)
* push changes
* working missing value strategy
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Add type hints to backward compatibility transformations
* Update test to test both missing value strategy updates
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | ludwig | 13 | Python | 73 | preprocessing.py | def handle_missing_values(dataset_cols, feature, preprocessing_parameters):
missing_value_strategy = preprocessing_parameters["missing_value_strategy"]
# Check for the precomputed fill value in the metadata
computed_fill_value = preprocessing_parameters.get("computed_fill_value")
if (
missing_value_strategy in {FILL_WITH_CONST, FILL_WITH_MODE, FILL_WITH_MEAN, FILL_WITH_FALSE}
and computed_fill_value is not None
):
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(
computed_fill_value,
)
elif missing_value_strategy in {BFILL, FFILL}:
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].fillna(
method=missing_value_strategy,
)
elif missing_value_strategy == DROP_ROW:
# Here we only drop from this series, but after preprocessing we'll do a second
# round of dropping NA values from the entire output dataframe, which will
# result in the removal of the rows.
dataset_cols[feature[COLUMN]] = dataset_cols[feature[COLUMN]].dropna()
else:
raise ValueError(f"Invalid missing value strategy {missing_value_strategy}")
| 0ab41a299cc690940b750a79b704d69544315702 | 128 | https://github.com/ludwig-ai/ludwig.git | 221 | def handle_missing_values(dataset_cols, feature, preprocessing_parameters):
missing_value_strategy = preprocessing_parameters["missing_value_strategy"]
# Check for the precomputed fill value in the metadata
computed_fill_value = preprocessing_parameters.get("computed_fill_value")
if (
missing_value_strategy in {FILL_WITH_CONST, FILL_WITH_MODE, FILL_WITH_MEAN, FILL_WITH_FAL | 19 | 200 | handle_missing_values |
|
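The ludwig commit in the record above narrows the missing-value strategies to pandas-style forward/backward fill. A minimal illustration of those two strategies (newer pandas deprecates `fillna(method=...)` in favor of `Series.ffill()` / `Series.bfill()`, used here):

```python
# Minimal illustration of the ffill/bfill strategies referenced above.
import pandas as pd

s = pd.Series([1.0, None, None, 4.0])
print(s.ffill().tolist())  # forward fill:  [1.0, 1.0, 1.0, 4.0]
print(s.bfill().tolist())  # backward fill: [1.0, 4.0, 4.0, 4.0]
```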
57 | 0 | 1 | 27 | tests/sentry/integrations/slack/notifications/test_new_processing_issues.py | 99,630 | Revert "fix(notifications): Use `metrics_key` (#34572)"
This reverts commit 1730c481f1a8a71446326fa1ff72e10663016385.
Co-authored-by: marcos.gaeta via Slack <marcos.gaeta@sentry.io> | sentry | 15 | Python | 45 | test_new_processing_issues.py | def test_new_processing_issue(self, mock_func):
notification = NewProcessingIssuesActivityNotification(
Activity(
project=self.project,
user=self.user,
type=ActivityType.NEW_PROCESSING_ISSUES,
data={
"issues": get_issues_data(),
"reprocessing_active": True,
},
)
)
with self.tasks():
notification.send()
attachment, text = get_attachment()
assert (
text
== f"Processing issues on <{self.project.slug}|http://testserver/settings/{self.organization.slug}/projects/{self.project.slug}/processing-issues/"
)
assert (
attachment["text"]
== f"Some events failed to process in your project {self.project.slug}"
)
assert (
attachment["footer"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=new-processing-issues-activity-slack-user|Notification Settings>"
)
| 04f013dd203f286aaf27b1c887bb72a2e24a498e | 95 | https://github.com/getsentry/sentry.git | 346 | def test_new_processing_issue(self, mock_func):
notification = NewProcessingIssuesActivityNotification(
Activity(
project=self.project,
user=self.user,
type=ActivityType.NEW_PROCESSING_ISSUES,
data={
"issues": get_issues_data(),
"reprocessing_active": True,
},
)
)
with self.tasks():
notification.send()
attachment, text = get_attachment()
assert (
text
== f"Processing issues on <{self.project.slug}|http://testserver/settings/{self.organization.slug}/projects/{self.project.slug}/processing-issues/"
)
assert (
attachment["text"]
== f"Some events failed to process in your project {self.project.slug}"
)
assert (
attachment["footer"]
== f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=new | 20 | 208 | test_new_processing_issue |
|
18 | 0 | 1 | 11 | tests/test_doctor.py | 191,184 | Remove snapshottest to reduce number of dependencies (#1433)
Having an extra package that can be replaced with something already
included makes packaging easier. For instance, in Debian, one would have
to either be fortunate to find an existing package or go over the
trouble of creating such package and all its dependencies.
I believe this CL is a good small compromise considering the benefit it
brings. | thumbor | 10 | Python | 18 | test_doctor.py | def test_get_doctor_output(capsys, doctor_output):
run_doctor(
{
"nocolor": True,
"config": "./tests/invalid-thumbor.conf",
},
print_version=False,
exit_with_error=False,
check_pyexiv=False,
)
assert capsys.readouterr().out == doctor_output
| 0e845259cd3d49b39889ae15df19922af0ef7269 | 43 | https://github.com/thumbor/thumbor.git | 83 | def test_get_doctor_output(capsys, doctor_output):
run_doctor(
{
"nocolor": True,
"config": "./tests/invalid-thumbor.conf",
},
print_version=False,
exit_with_error=False,
che | 9 | 68 | test_get_doctor_output |
|
5 | 0 | 1 | 2 | python3.10.4/Lib/importlib/metadata/__init__.py | 218,241 | add python 3.10.4 for windows | XX-Net | 9 | Python | 5 | __init__.py | def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 19 | https://github.com/XX-net/XX-Net.git | 11 | def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_r | 6 | 30 | _deps_from_requires_text |
|
312 | 0 | 1 | 55 | tests/strategy/test_interface.py | 149,116 | Fix typo causing an implicit bug | freqtrade | 9 | Python | 89 | test_interface.py | def test_returns_latest_signal(ohlcv_history):
ohlcv_history.loc[1, 'date'] = arrow.utcnow()
# Take a copy to correctly modify the call
mocked_history = ohlcv_history.copy()
mocked_history['enter_long'] = 0
mocked_history['exit_long'] = 0
mocked_history['enter_short'] = 0
mocked_history['exit_short'] = 0
# Set tags in lines that don't matter to test nan in the sell line
mocked_history.loc[0, 'enter_tag'] = 'wrong_line'
mocked_history.loc[0, 'exit_tag'] = 'wrong_line'
mocked_history.loc[1, 'exit_long'] = 1
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, True, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 1
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 0
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 1
mocked_history.loc[1, 'enter_tag'] = 'buy_signal_01'
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, 'buy_signal_01')
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 0
mocked_history.loc[1, 'enter_short'] = 1
mocked_history.loc[1, 'exit_short'] = 0
mocked_history.loc[1, 'enter_tag'] = 'sell_signal_01'
# Don't provide short signal while in spot mode
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
_STRATEGY.config['trading_mode'] = 'futures'
    # Short signal gets ignored as can_short is not set.
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
_STRATEGY.can_short = True
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.SHORT, 'sell_signal_01')
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (True, False, None)
mocked_history.loc[1, 'enter_short'] = 0
mocked_history.loc[1, 'exit_short'] = 1
mocked_history.loc[1, 'exit_tag'] = 'sell_signal_02'
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal(
'ETH/BTC', '5m', mocked_history) == (False, False, 'sell_signal_02')
assert _STRATEGY.get_exit_signal(
'ETH/BTC', '5m', mocked_history, True) == (False, True, 'sell_signal_02')
_STRATEGY.can_short = False
_STRATEGY.config['trading_mode'] = 'spot'
| b9b5d749bb36cfb8820756e3b4d3bb35534eab56 | 638 | https://github.com/freqtrade/freqtrade.git | 509 | def test_returns_latest_signal(ohlcv_history):
ohlcv_history.loc[1, 'date'] = arrow.utcnow()
# Take a copy to correctly modify the call
mocked_history = ohlcv_history.copy()
mocked_history['enter_long'] = 0
mocked_history['exit_long'] = 0
mocked_history['enter_short'] = 0
mocked_history['exit_short'] = 0
# Set tags in lines that don't matter to test nan in the sell line
mocked_history.loc[0, 'enter_tag'] = 'wrong_line'
mocked_history.loc[0, 'exit_tag'] = 'wrong_line'
mocked_history.loc[1, 'exit_long'] = 1
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, True, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 1
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 0
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 1
mocked_history.loc[1, 'enter_tag'] = 'buy_signal_01'
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, 'buy_signal_01')
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
mocked_history.loc[1, 'exit_long'] = 0
mocked_history.loc[1, 'enter_long'] = 0
mocked_history.loc[1, 'enter_short'] = 1
mocked_history.loc[1, 'exit_short'] = 0
mocked_history.loc[1, 'enter_tag'] = 'sell_signal_01'
# Don't provide short signal while in spot mode
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
_STRATEGY.config['trading_mode'] = 'futures'
# Short signal get's ignored as can_short is not set.
assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
_STRATEGY.can_short = True
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (SignalDirection.SHORT, 'sell_signal_01')
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (True, False, None)
mocked_history.loc[1, 'enter_short'] = 0
mocked_history.loc[1, 'exit_short'] = 1
mocked_history.loc[1, 'exit_tag'] = 'sell_signal_02'
assert _STRATEGY.get_entry_signal(
'ETH/BTC', '5m', mocked_history) == (None, None)
assert _STRATEGY.get_exit_signal(
'ETH/BTC', '5m', mocked_history) == (False, False, 'sell_signal_02')
assert _STRATEGY.get_exit_signal(
'ETH/B | 15 | 1,031 | test_returns_latest_signal |
|
187 | 0 | 9 | 57 | tools/box_distribution.py | 211,671 | fix sm_use for ppyoloe_head and output for box_dis (#7288)
* fix sm_use for ppyoloe_head and output for box_dis
* fix pos_embed of training | PaddleDetection | 14 | Python | 97 | box_distribution.py | def get_ratio_infos(jsonfile, out_img, eval_size, small_stride):
coco = COCO(annotation_file=jsonfile)
allannjson = json.load(open(jsonfile, 'r'))
be_im_id = allannjson['annotations'][0]['image_id']
be_im_w = []
be_im_h = []
ratio_w = []
ratio_h = []
for ann in tqdm(allannjson['annotations']):
if ann['iscrowd']:
continue
x0, y0, w, h = ann['bbox'][:]
if be_im_id == ann['image_id']:
be_im_w.append(w)
be_im_h.append(h)
else:
im_w = coco.imgs[be_im_id]['width']
im_h = coco.imgs[be_im_id]['height']
im_m_w = np.mean(be_im_w)
im_m_h = np.mean(be_im_h)
dis_w = im_m_w / im_w
dis_h = im_m_h / im_h
ratio_w.append(dis_w)
ratio_h.append(dis_h)
be_im_id = ann['image_id']
be_im_w = [w]
be_im_h = [h]
im_w = coco.imgs[be_im_id]['width']
im_h = coco.imgs[be_im_id]['height']
im_m_w = np.mean(be_im_w)
im_m_h = np.mean(be_im_h)
dis_w = im_m_w / im_w
dis_h = im_m_h / im_h
ratio_w.append(dis_w)
ratio_h.append(dis_h)
mid_w = median(ratio_w)
mid_h = median(ratio_h)
reg_ratio = []
ratio_all = ratio_h + ratio_w
for r in ratio_all:
if r < 0.2:
reg_ratio.append(r)
elif r < 0.4:
reg_ratio.append(r/2)
else:
reg_ratio.append(r/4)
reg_ratio = sorted(reg_ratio)
max_ratio = reg_ratio[int(0.95*len(reg_ratio))]
reg_max = round(max_ratio*eval_size/small_stride)
ratio_w = [i * 1000 for i in ratio_w]
ratio_h = [i * 1000 for i in ratio_h]
    print(f'Suggested reg_range[1] is {reg_max+1}')
print(f'Median of ratio_w is {mid_w}')
print(f'Median of ratio_h is {mid_h}')
print('all_img with box: ', len(ratio_h))
print('all_ann: ', len(allannjson['annotations']))
draw_distribution(ratio_w, ratio_h, out_img)
| 64b51f095305d5c5b477b177b35de6fec174c8b8 | 413 | https://github.com/PaddlePaddle/PaddleDetection.git | 523 | def get_ratio_infos(jsonfile, out_img, eval_size, small_stride):
coco = COCO(annotation_file=jsonfile)
allannjson = json. | 48 | 683 | get_ratio_infos |
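
The reg_range suggestion printed above reduces to one line of arithmetic. A minimal sketch with hypothetical inputs (eval_size=640, small_stride=8, and a 95th-percentile regularized ratio of 0.15; none of these values come from the record):

```python
# Hypothetical values, chosen only to illustrate the reg_max formula above.
eval_size, small_stride = 640, 8
max_ratio = 0.15  # 95th percentile of the bucketed w/h ratios
reg_max = round(max_ratio * eval_size / small_stride)  # round(12.0) == 12
print(f'Suggested reg_range[1] is {reg_max + 1}')  # -> 13
```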
|
55 | 0 | 4 | 18 | python/ray/air/config.py | 124,632 | [AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307)
This PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available.
In order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF.
Please note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. That will require a deprecation cycle. | ray | 12 | Python | 45 | config.py | def as_placement_group_factory(self) -> "PlacementGroupFactory":
from ray.tune.execution.placement_groups import PlacementGroupFactory
trainer_resources = self._trainer_resources_not_none
trainer_bundle = [trainer_resources]
worker_resources = {
"CPU": self.num_cpus_per_worker,
"GPU": self.num_gpus_per_worker,
}
worker_resources_extra = (
{} if self.resources_per_worker is None else self.resources_per_worker
)
worker_bundles = [
{**worker_resources, **worker_resources_extra}
for _ in range(self.num_workers if self.num_workers else 0)
]
bundles = trainer_bundle + worker_bundles
return PlacementGroupFactory(bundles, strategy=self.placement_strategy)
| b3878e26d765e28dd7c69abadbd856181037db97 | 102 | https://github.com/ray-project/ray.git | 194 | def as_placement_group_factory(self) -> "PlacementGroupFactory":
from ray.tune.execution.placement_groups import PlacementGroupFactory
| 22 | 160 | as_placement_group_factory |
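
Per the commit message, the point of this conversion is that a scaling config and a placement group factory describe the same resources and can be compared through the PGF. The sketch below is a hedged usage illustration; the ScalingConfigDataClass import path and constructor arguments are assumptions, not taken from the record:

```python
# A hedged sketch only: the class name, import path, and kwargs are assumptions.
from ray.air.config import ScalingConfigDataClass

sc = ScalingConfigDataClass(num_workers=2, num_cpus_per_worker=1,
                            num_gpus_per_worker=0)
pgf = sc.as_placement_group_factory()
# One trainer bundle followed by one {"CPU": 1, "GPU": 0} bundle per worker.
print(pgf.bundles)
```

If equality is implemented via the underlying PGF, two configs that produce the same bundles compare equal even when they were constructed differently.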
|
71 | 0 | 3 | 10 | wagtail/core/blocks/list_block.py | 70,345 | Implement a ListValue type for ListBlocks | wagtail | 15 | Python | 53 | list_block.py | def bulk_to_python(self, values):
# 'values' is a list of lists of child block values; concatenate them into one list so that
# we can make a single call to child_block.bulk_to_python
lengths = [len(val) for val in values]
raw_values = list(itertools.chain.from_iterable(values))
converted_values = self.child_block.bulk_to_python(raw_values)
# split converted_values back into sub-lists of the original lengths
result = []
offset = 0
for sublist_len in lengths:
result.append(ListValue(values=converted_values[offset:offset + sublist_len]))
offset += sublist_len
return result
| 4a848bfb4e3ec1a84a3d36fda577c1ed784de498 | 77 | https://github.com/wagtail/wagtail.git | 162 | def bulk_to_python(self, values):
# 'values' is a list of lists of child block values; concatenate them into one list so that
# we can make a single call to child_block.bulk_to_python
lengths = [len(val) for val in values]
raw_values = list(itertools.chain.from_iterable(values))
converted_values = self.child_block.bulk_to_python(raw_values)
# split converted_values back into sub-lists of the original lengths
result = []
offset = 0
for sublist_len in lengths:
result.append(ListValue(values=converted_values[offset:offset + sublist_len]))
offset += sublist_len
return | 18 | 124 | bulk_to_python |
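
The offset bookkeeping in bulk_to_python is easiest to see on plain lists; in this sketch a trivial transformation stands in for child_block.bulk_to_python:

```python
import itertools

values = [[1, 2], [3], [4, 5, 6]]
lengths = [len(v) for v in values]                   # [2, 1, 3]
flat = list(itertools.chain.from_iterable(values))   # single bulk pass
converted = [x * 10 for x in flat]                   # stand-in conversion
result, offset = [], 0
for n in lengths:
    result.append(converted[offset:offset + n])
    offset += n
assert result == [[10, 20], [30], [40, 50, 60]]
```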
|
34 | 0 | 1 | 8 | tests/unit/bokeh/models/test_glyphs.py | 212,239 | Fix passing of kwargs in Image glyph's constructor (#12081) | bokeh | 9 | Python | 23 | test_glyphs.py | def test_Image_kwargs() -> None:
glyph = Image(x=0, y=0, dw=10, dh=10)
assert glyph.image == field("image")
assert glyph.x == 0
assert glyph.y == 0
assert glyph.dw == 10
assert glyph.dh == 10
assert glyph.dilate is False
| 14ea3e941229c5069232bf29b48a57f4fb44394a | 65 | https://github.com/bokeh/bokeh.git | 54 | def test_Image_kwargs() -> None:
glyph | 10 | 100 | test_Image_kwargs |
|
8 | 0 | 1 | 2 | ivy/core/general.py | 213,228 | moved all inplace methods from gradients submodule to general submodule, as inplace ops are also relevant for non-Variable tensors. | ivy | 10 | Python | 8 | general.py | def inplace_increment(x, val, f=None):
return _cur_framework(x, f=f).inplace_increment(x, val)
| ec8341197ccdd240a346a95c2a434e5ef9f9ef72 | 28 | https://github.com/unifyai/ivy.git | 14 | def inplace_increment(x, val, f=None):
return _cur_fra | 5 | 43 | inplace_increment |
|
76 | 0 | 6 | 25 | erpnext/loan_management/doctype/loan/loan.py | 64,083 | feat: Refund entry against loans | erpnext | 14 | Python | 63 | loan.py | def make_refund_jv(loan, amount=0, reference_number=None, reference_date=None, submit=0):
loan_details = frappe.db.get_value('Loan', loan, ['applicant_type', 'applicant',
'loan_account', 'payment_account', 'posting_date', 'company', 'name',
'total_payment', 'total_principal_paid'], as_dict=1)
loan_details.doctype = 'Loan'
loan_details[loan_details.applicant_type.lower()] = loan_details.applicant
if not amount:
amount = flt(loan_details.total_principal_paid - loan_details.total_payment)
if amount < 0:
frappe.throw(_('No excess amount pending for refund'))
refund_jv = get_payment_entry(loan_details, {
"party_type": loan_details.applicant_type,
"party_account": loan_details.loan_account,
"amount_field_party": 'debit_in_account_currency',
"amount_field_bank": 'credit_in_account_currency',
"amount": amount,
"bank_account": loan_details.payment_account
})
if reference_number:
refund_jv.cheque_no = reference_number
if reference_date:
refund_jv.cheque_date = reference_date
if submit:
refund_jv.submit()
return refund_jv | c68c70f8bc88d9b05d64774ba070a34c059b7d30 | 170 | https://github.com/frappe/erpnext.git | 51 | def make_refund_jv(loan, amount=0, reference_number=None, reference_date=None, submit=0):
loan_details = frappe.db.get_value('Loan', loan, ['applicant_type', 'applicant',
'loan_account', 'payment_account', 'posting_date', 'company', 'name',
'total_payment', 'total_principal_paid'], as_dict=1)
loan_details.doctype = 'Loan'
loan_details[loan_details.applicant_type.lower()] = loan_details.applicant
if not amount:
amount = flt(loan_details.total_principal_paid - loan_details.total_payment)
if amount < 0:
frappe.throw(_('No excess amount pending for refund'))
refund_jv = get_payment_entry(loan_details, {
"party_type": loan_details.applicant_type,
"party_account": loan_details.loan_account,
"amount_field_party": 'debit_in_account_currency',
"amount_field_bank": 'credit_in_account_currency',
"amount": amount,
"bank_account": loan_details.payment_account
})
if reference_number:
r | 26 | 287 | make_refund_jv |
|
62 | 0 | 1 | 13 | tests/test_modeling_utils.py | 336,306 | [Half precision] Make sure half-precision is correct (#182)
* [Half precision] Make sure half-precision is correct
* Update src/diffusers/models/unet_2d.py
* Update src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py
* correct some tests
* Apply suggestions from code review
Co-authored-by: Suraj Patil <surajp815@gmail.com>
* finalize
* finish
Co-authored-by: Suraj Patil <surajp815@gmail.com> | diffusers | 13 | Python | 52 | test_modeling_utils.py | def test_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1-diffusers")
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=torch_device).manual_seed(0)
with torch.autocast("cuda"):
output = sd_pipe(
[prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np"
)
image = output["sample"]
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.898, 0.9194, 0.91, 0.8955, 0.915, 0.919, 0.9233, 0.9307, 0.8887])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 051b34635fda2fc310898a6a602c89be8663b77f | 167 | https://github.com/huggingface/diffusers.git | 161 | def test_stable_diffusion(self):
sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1-diffusers")
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=torc | 26 | 224 | test_stable_diffusion |
|
24 | 0 | 3 | 10 | pandas/tests/extension/test_arrow.py | 167,324 | ENH/TST: Add BaseInterfaceTests tests for ArrowExtensionArray PT2 (#47468) | pandas | 13 | Python | 23 | test_arrow.py | def test_fillna_limit_backfill(self, data_missing, using_array_manager, request):
if using_array_manager and pa.types.is_duration(
data_missing.dtype.pyarrow_dtype
):
request.node.add_marker(
pytest.mark.xfail(
reason="Checking ndim when using arraymanager with duration type"
)
)
super().test_fillna_limit_backfill(data_missing)
| dc36ce1b3f6578833ca44cc7a5e49a75ddb02ec7 | 52 | https://github.com/pandas-dev/pandas.git | 126 | def test_fillna_limit_backfill(self, data_missing, using_array_manager, request):
if using_array_manager and pa.types.is_duration(
data_missing.dtype.pyarrow_dtype
):
request.node.add_marker(
pytest.mark.xfail(
reason=" | 17 | 83 | test_fillna_limit_backfill |
|
14 | 0 | 2 | 5 | homeassistant/components/landisgyr_heat_meter/sensor.py | 304,169 | Add Landis+Gyr Heat Meter integration (#73363)
* Add Landis+Gyr Heat Meter integration
* Add constant for better sensor config
* Add test for init
* Refactor some of the PR suggestions in config_flow
* Apply small fix
* Correct total_increasing to total
* Add test for restore state
* Add MWh entity that can be added as gas on the energy dashboard
* Remove GJ as unit
* Round MWh to 5 instead of 3 digits
* Update homeassistant/components/landisgyr_heat_meter/const.py
* Update CODEOWNERS
Co-authored-by: Erik Montnemery <erik@montnemery.com> | core | 10 | Python | 12 | sensor.py | async def async_added_to_hass(self):
await super().async_added_to_hass()
state = await self.async_get_last_sensor_data()
if state:
self._attr_native_value = state.native_value
| 7a497c1e6e5a0d44b9418a754470ca9dd35e9719 | 32 | https://github.com/home-assistant/core.git | 53 | async def async_added_to_hass(self):
await super().async_added_to_hass()
state = await | 7 | 59 | async_added_to_hass |
|
21 | 0 | 1 | 3 | src/datasets/formatting/formatting.py | 106,072 | Support for decoding Image/Audio types in map when format type is not default one (#5252)
* Add iter to arrow dataset and iterable dataset
* Remove decoded from formatters
* First attempt
* Minor fix in np formatter
* Fixes
* Add tests for iter method
* Minor test refactor
* Add (and update) tests
* Code fixes
* Fix iter for drop_last_batch=True and pyarrow<=8
* Make `supports_lazy_decoding` class attribute of formatters
* Update docs
* Update lazy dict
* Test lazy formatting
* Lazily extract columns
* Update tests
* Update iter benchmark in docs
* Fix CI
* Add select to docs
* Add array_concat
* CI fix
* Apply suggestions from code review
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Style
* Add comments from code review
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
* Add test with sliced arrays
* Use array_concat only for extension arrays
* fix None -> empty array warning
* fix map with mix of lazy dict and regular dict
* fix benchmarks
* fix tests
* fix tests
Co-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>
Co-authored-by: Quentin Lhoest <lhoest.q@gmail.com> | datasets | 8 | Python | 21 | formatting.py | def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
| a528472ad7e164588f568b75b3b3e36ff71996d2 | 17 | https://github.com/huggingface/datasets.git | 44 | def __repr__(self):
self._format_all()
return repr(self.d | 9 | 51 | __repr__ |
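
The union operator referenced in that comment merges two dicts, with the right-hand operand winning on key collisions, and is the reason for the 3.9 version gate:

```python
defaults = {"a": 1, "b": 2}
overrides = {"b": 3}
merged = defaults | overrides   # right-hand side wins on collisions
assert merged == {"a": 1, "b": 3}
```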
|
175 | 0 | 9 | 46 | ppocr/modeling/backbones/__init__.py | 23,569 | add ppocrv3 rec (#6033)
* add ppocrv3 rec | PaddleOCR | 11 | Python | 100 | __init__.py | def build_backbone(config, model_type):
if model_type == "det" or model_type == "table":
from .det_mobilenet_v3 import MobileNetV3
from .det_resnet_vd import ResNet
from .det_resnet_vd_sast import ResNet_SAST
support_dict = ["MobileNetV3", "ResNet", "ResNet_SAST"]
elif model_type == "rec" or model_type == "cls":
from .rec_mobilenet_v3 import MobileNetV3
from .rec_resnet_vd import ResNet
from .rec_resnet_fpn import ResNetFPN
from .rec_mv1_enhance import MobileNetV1Enhance
from .rec_nrtr_mtb import MTB
from .rec_resnet_31 import ResNet31
from .rec_resnet_aster import ResNet_ASTER
from .rec_micronet import MicroNet
from .rec_efficientb3_pren import EfficientNetb3_PREN
from .rec_svtrnet import SVTRNet
support_dict = [
'MobileNetV1Enhance', 'MobileNetV3', 'ResNet', 'ResNetFPN', 'MTB',
"ResNet31", "ResNet_ASTER", 'MicroNet', 'EfficientNetb3_PREN',
'SVTRNet'
]
elif model_type == "e2e":
from .e2e_resnet_vd_pg import ResNet
support_dict = ['ResNet']
elif model_type == 'kie':
from .kie_unet_sdmgr import Kie_backbone
support_dict = ['Kie_backbone']
elif model_type == "table":
from .table_resnet_vd import ResNet
from .table_mobilenet_v3 import MobileNetV3
support_dict = ["ResNet", "MobileNetV3"]
elif model_type == 'vqa':
from .vqa_layoutlm import LayoutLMForSer, LayoutLMv2ForSer, LayoutLMv2ForRe, LayoutXLMForSer, LayoutXLMForRe
support_dict = [
"LayoutLMForSer", "LayoutLMv2ForSer", 'LayoutLMv2ForRe',
"LayoutXLMForSer", 'LayoutXLMForRe'
]
else:
raise NotImplementedError
module_name = config.pop("name")
assert module_name in support_dict, Exception(
"when model typs is {}, backbone only support {}".format(model_type,
support_dict))
module_class = eval(module_name)(**config)
return module_class
| f6532a0e51222c4385dd41a0f9de169f188ac29a | 245 | https://github.com/PaddlePaddle/PaddleOCR.git | 522 | def build_backbone(config, model_type):
if model_type == "det" or model_type == "table":
from .det_mobilenet_v3 import MobileNetV3
from .det_resnet_vd import ResNet
from .det_resnet_vd_sast import ResNet_SAST
support_dict = ["MobileNetV3", "ResNet", "ResNet_SAST"]
elif model_type == "rec" or model_type == "cls":
from .rec_mobilenet_v3 import MobileNetV3
from .rec_resnet_vd import ResNet
from .rec_resnet_fpn import ResNetFPN
from .rec_mv1_enhance import Mobil | 46 | 470 | build_backbone |
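
build_backbone acts as an eval-based registry: it pops "name" from the config, checks it against the whitelist for the given model_type, and instantiates the matching class with the remaining keys as kwargs. A hedged usage sketch (the "scale" kwarg is an assumption about MobileNetV3's constructor, not taken from the record):

```python
from ppocr.modeling.backbones import build_backbone  # path from the record above

# "name" is popped and resolved via eval(); the remaining keys become kwargs.
config = {"name": "MobileNetV3", "scale": 0.5}  # "scale" is an assumed kwarg
backbone = build_backbone(config, model_type="det")
```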
|
46 | 0 | 1 | 14 | tests/components/recorder/test_websocket_api.py | 317,621 | Support non-live database migration (#72433)
* Support non-live database migration
* Tweak startup order, add test
* Address review comments
* Fix typo
* Clarify comment about promoting dependencies
* Tweak
* Fix merge mistake
* Fix some tests
* Fix additional test
* Fix additional test
* Adjust tests
* Improve test coverage | core | 11 | Python | 39 | test_websocket_api.py | async def test_recorder_info(hass, hass_ws_client, recorder_mock):
client = await hass_ws_client()
# Ensure there are no queued events
await async_wait_recording_done(hass)
await client.send_json({"id": 1, "type": "recorder/info"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == {
"backlog": 0,
"max_backlog": 40000,
"migration_in_progress": False,
"migration_is_live": False,
"recording": True,
"thread_running": True,
}
| fd6ffef52f337df71542b48565a95300c0ab2766 | 81 | https://github.com/home-assistant/core.git | 115 | async def test_recorder_info(hass, hass_ws_client, recorder_mock):
client = await hass_ws_client()
# Ensure there are no queued events
await async_wait_recording_done(hass)
await client.send_json({"id": 1, "type": "recorder | 9 | 147 | test_recorder_info |
|
30 | 0 | 1 | 26 | tests/contenttypes_tests/test_operations.py | 202,341 | Refs #33476 -- Reformatted code with Black. | django | 13 | Python | 21 | test_operations.py | def test_existing_content_type_rename_other_database(self):
ContentType.objects.using("other").create(
app_label="contenttypes_tests", model="foo"
)
other_content_types = ContentType.objects.using("other").filter(
app_label="contenttypes_tests"
)
call_command(
"migrate",
"contenttypes_tests",
database="other",
interactive=False,
verbosity=0,
)
self.assertFalse(other_content_types.filter(model="foo").exists())
self.assertTrue(other_content_types.filter(model="renamedfoo").exists())
call_command(
"migrate",
"contenttypes_tests",
"zero",
database="other",
interactive=False,
verbosity=0,
)
self.assertTrue(other_content_types.filter(model="foo").exists())
self.assertFalse(other_content_types.filter(model="renamedfoo").exists())
| 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | 149 | https://github.com/django/django.git | 256 | def test_existing_content_type_rename_other_database(self):
ContentType.objects.using("other").create(
app_label="contenttypes_tests", model="foo"
)
other_content_types = ContentType.objects.using("other").filter(
app_label="contenttypes_tests"
)
call_command(
"migrate",
"contenttypes_tests",
database="other",
interactive=False,
verbosity=0,
)
self.assertFalse(other_content_types.filter(model="foo").exists())
self.assertTrue(other_content_types.filter(model="renamedfoo").exists())
call_command(
"migrate",
"contenttypes_tests",
"zero",
database="other",
interactive=False,
verbosity=0,
)
self.assertTrue(other_content_types.filter(model="foo").exists())
self.assertFalse(other_content_types.filter(model="renamedfoo").exists())
| 17 | 261 | test_existing_content_type_rename_other_database |
|
283 | 0 | 1 | 68 | tests/test_integration.py | 149,669 | Add DCA order adjust test | freqtrade | 12 | Python | 109 | test_integration.py | def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
default_conf_usdt['position_adjustment_enable'] = True
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
'freqtrade.exchange.Exchange',
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
patch_get_signal(freqtrade)
freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96
freqtrade.enter_positions()
assert len(Trade.get_trades().all()) == 1
trade = Trade.get_trades().first()
assert len(trade.orders) == 1
assert trade.open_order_id is not None
assert pytest.approx(trade.stake_amount) == 60
assert trade.open_rate == 1.96
# No adjustment
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 1
assert trade.open_order_id is not None
assert pytest.approx(trade.stake_amount) == 60
# Cancel order and place new one
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.99)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 2
assert trade.open_order_id is not None
# Open rate is not adjusted yet
assert trade.open_rate == 1.96
# Fill order
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 2
assert trade.open_order_id is None
# Open rate is not adjusted yet
assert trade.open_rate == 1.99
# 2nd order - not filling
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=120)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 3
assert trade.open_order_id is not None
assert trade.open_rate == 1.99
assert trade.orders[-1].price == 1.96
assert trade.orders[-1].cost == 120
# Replace new order with diff. order at a lower price
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 4
assert trade.open_order_id is not None
assert trade.open_rate == 1.99
assert trade.orders[-1].price == 1.95
assert pytest.approx(trade.orders[-1].cost) == 120
# Fill DCA order
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)
freqtrade.strategy.adjust_entry_price = MagicMock(side_effect=ValueError)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 4
assert trade.open_order_id is None
assert pytest.approx(trade.open_rate) == 1.963153456
assert trade.orders[-1].price == 1.95
assert pytest.approx(trade.orders[-1].cost) == 120
assert trade.orders[-1].status == 'closed'
assert pytest.approx(trade.amount) == 91.689215
# Check the 2 filled orders equal the above amount
assert pytest.approx(trade.orders[1].amount) == 30.150753768
assert pytest.approx(trade.orders[-1].amount) == 61.538461232
| 108903f7f0c968f88a3b2520a8cc8e7753c4c2e1 | 654 | https://github.com/freqtrade/freqtrade.git | 530 | def test_dca_order_adjust(default_conf_usdt, ticker_usdt, fee, mocker) -> None:
default_conf_usdt['position_adjustment_enable'] = True
freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
mocker.patch.multiple(
'freqtrade.exchange.Exchange',
fetch_ticker=ticker_usdt,
get_fee=fee,
amount_to_precision=lambda s, x, y: y,
price_to_precision=lambda s, x, y: y,
)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
patch_get_signal(freqtrade)
freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96
freqtrade.enter_positions()
assert len(Trade.get_trades().all()) == 1
trade = Trade.get_trades().first()
assert len(trade.orders) == 1
assert trade.open_order_id is not None
assert pytest.approx(trade.stake_amount) == 60
assert trade.open_rate == 1.96
# No adjustment
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 1
assert trade.open_order_id is not None
assert pytest.approx(trade.stake_amount) == 60
# Cancel order and place new one
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.99)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 2
assert trade.open_order_id is not None
# Open rate is not adjusted yet
assert trade.open_rate == 1.96
# Fill order
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 2
assert trade.open_order_id is None
# Open rate is not adjusted yet
assert trade.open_rate == 1.99
# 2nd order - not filling
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=120)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=False)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 3
assert trade.open_order_id is not None
assert trade.open_rate == 1.99
assert trade.orders[-1].price == 1.96
assert trade.orders[-1].cost == 120
# Replace new order with diff. order at a lower price
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 4
assert trade.open_order_id is not None
assert trade.open_rate == 1.99
assert trade.orders[-1].price == 1.95
assert pytest.approx(trade.orders[-1].cost) == 120
# Fill DCA order
freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None)
mocker.patch('freqtrade.exchange.Exchange._is_dry_limit_order_filled', return_value=True)
freqtrade.strategy.adjust_entry_price = MagicMock(side_effect=ValueError)
freqtrade.process()
trade = Trade.get_trades().first()
assert len(trade.orders) == 4
assert trade.open_order_id is None
assert pytest.approx(trade.open_rate) == 1.963153456
assert trade.orders[-1].price == 1.95
assert pytest.approx(trade.orders[-1].cost) == 120
a | 44 | 995 | test_dca_order_adjust |
|
191 | 0 | 5 | 53 | tests/models/test_hooks.py | 241,752 | Add `LightningModule.lr_scheduler_step` (#10249)
Co-authored-by: Carlos Mocholi <carlossmocholi@gmail.com> | lightning | 18 | Python | 118 | test_hooks.py | def _auto_train_batch(trainer, model, batches, device=torch.device("cpu"), current_epoch=0, **kwargs):
using_native_amp = kwargs.get("amp_backend") == "native"
using_deepspeed = kwargs.get("strategy") == "deepspeed"
out = []
for i in range(batches):
out.extend(
[
dict(name="on_before_batch_transfer", args=(ANY, 0)),
dict(name="transfer_batch_to_device", args=(ANY, device, 0)),
dict(name="on_after_batch_transfer", args=(ANY, 0)),
# TODO: `on_batch_{start,end}`
dict(name="Callback.on_batch_start", args=(trainer, model)),
dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)),
dict(name="on_train_batch_start", args=(ANY, i)),
dict(name="forward", args=(ANY,)),
dict(name="training_step", args=(ANY, i)),
dict(name="training_step_end", args=(dict(loss=ANY),)),
dict(name="Callback.on_before_zero_grad", args=(trainer, model, ANY)),
dict(name="on_before_zero_grad", args=(ANY,)),
dict(name="optimizer_zero_grad", args=(current_epoch, i, ANY, 0)),
dict(name="Callback.on_before_backward", args=(trainer, model, ANY)),
dict(name="on_before_backward", args=(ANY,)),
# DeepSpeed handles backward internally
*([dict(name="backward", args=(ANY, ANY, 0))] if not using_deepspeed else []),
dict(name="Callback.on_after_backward", args=(trainer, model)),
dict(name="on_after_backward"),
# note: unscaling happens here in the case of AMP
dict(name="Callback.on_before_optimizer_step", args=(trainer, model, ANY, 0)),
dict(name="on_before_optimizer_step", args=(ANY, 0)),
*([dict(name="log_grad_norm", args=ANY)] if not using_deepspeed else []),
dict(
name="clip_gradients",
args=(ANY,),
kwargs=dict(gradient_clip_val=None, gradient_clip_algorithm=None),
),
dict(
name="configure_gradient_clipping",
args=(ANY, 0),
kwargs=dict(gradient_clip_val=None, gradient_clip_algorithm=None),
),
                # this comes after because it refers to the `LightningModule.optimizer_step` hook, which encapsulates
# the actual call to `PrecisionPlugin.optimizer_step`
dict(
name="optimizer_step",
args=(current_epoch, i, ANY, 0, ANY),
kwargs=dict(on_tpu=False, using_lbfgs=False, using_native_amp=using_native_amp),
),
*(
[dict(name="lr_scheduler_step", args=(ANY, 0, None))]
if i == (trainer.num_training_batches - 1)
else []
),
dict(name="Callback.on_train_batch_end", args=(trainer, model, dict(loss=ANY), ANY, i)),
dict(name="on_train_batch_end", args=(dict(loss=ANY), ANY, i)),
dict(name="Callback.on_batch_end", args=(trainer, model)),
]
)
return out
| 82c8875f33addb0becd7761c95e9674ccc98c7ee | 591 | https://github.com/Lightning-AI/lightning.git | 1,237 | def _auto_train_batch(trainer, model, batches, device=torch.device("cpu"), current_epoch=0, **kwargs):
using_native_amp = kwargs.get("amp_backend") == "native"
using_deepspeed = kwargs.get("strategy") == "deepspeed"
out = []
for i in range(batches):
out.extend(
[
dict(name="on_before_batch_transfer", args=(ANY, 0)),
dict(name="transfer_batch_to_device", args=(ANY, device, 0)),
dict(name="on_after_batch_transfer", args=(ANY, 0)),
# TODO: `on_batch_{start,end}`
dict(name="Callback.on_batch_start", args=(trainer, model)),
dict(name="Callback.on_train_batch_start", args=(trainer, model, ANY, i)),
dict(name="on_train_batch_start", args=(ANY, i)),
dict(name="forward", args=(ANY,)),
dict(name="training_step", args=(ANY, i)),
dict(name="training_step_end", args=(dict(loss=ANY),)),
dict(name="Callback.on_before_zero_grad", args=(trainer, model, ANY)),
dict(name="on_before_zero_grad", args=(ANY,)),
dict(name="optimizer_zero_grad", args=(current_epoch, i, ANY, 0)),
dict(name="Callback.on_before_backward", args=(trainer, | 25 | 873 | _auto_train_batch |
|
338 | 0 | 14 | 50 | ldm/modules/image_degradation/bsrgan.py | 157,508 | release more models | stablediffusion | 21 | Python | 165 | bsrgan.py | def degradation_bsrgan_variant(image, sf=4, isp_model=None):
image = util.uint2single(image)
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = image.shape[:2]
image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = image.shape[:2]
hq = image.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
image = util.imresize_np(image, 1 / 2, True)
image = np.clip(image, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
for i in shuffle_order:
if i == 0:
image = add_blur(image, sf=sf)
elif i == 1:
image = add_blur(image, sf=sf)
elif i == 2:
a, b = image.shape[1], image.shape[0]
# downsample2
if random.random() < 0.75:
sf1 = random.uniform(1, 2 * sf)
image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
image = image[0::sf, 0::sf, ...] # nearest downsampling
image = np.clip(image, 0.0, 1.0)
elif i == 3:
# downsample3
image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
image = np.clip(image, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
image = add_JPEG_noise(image)
# elif i == 6:
# # add processed camera sensor noise
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
image = add_JPEG_noise(image)
image = util.single2uint(image)
example = {"image":image}
return example
# TODO: in case there is a pickle error, one needs to replace a += x with a = a + x in add_speckle_noise etc... | ca86da3a30c4e080d4db8c25fca73de843663cb4 | 619 | https://github.com/Stability-AI/stablediffusion.git | 896 | def degradation_bsrgan_variant(image, sf=4, isp_model=None):
image = util.uint2single(image)
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = image.shape[:2]
image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = image.shape[:2]
hq = image.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
image = util.imresize_np(image, 1 / 2, True)
image = np.clip(image, 0.0, 1.0)
sf = 2
shu | 56 | 915 | degradation_bsrgan_variant |
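
The "blur with shifted kernel, then nearest downsampling" branch above can be exercised in isolation. The sketch below substitutes a flat box kernel for the fspecial Gaussian and uses the non-deprecated scipy.ndimage.convolve spelling; the sizes are illustrative:

```python
import numpy as np
from scipy import ndimage

sf = 2
image = np.random.rand(64, 64, 3).astype(np.float32)
k = np.ones((5, 5), dtype=np.float32) / 25.0       # stand-in for the Gaussian
blurred = ndimage.convolve(image, np.expand_dims(k, axis=2), mode='mirror')
small = blurred[0::sf, 0::sf, ...]                 # nearest downsampling
assert small.shape == (32, 32, 3)
```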
|
68 | 0 | 1 | 27 | src/sentry/integrations/msteams/integration.py | 94,119 | feat(msteams): Allow personal installation for Sentry in MS Teams (#36749)
- Allow personal installation of Sentry for Microsoft Teams. This would allow MS Teams users to receive personal notifications for Sentry events, as they can with Slack. Currently, it is only possible to receive issue and incident notifications in `teams`.
- The installation message/card is modified to allow the user to set up an integration from the personal chat as well. Upon successful installation, a card will be presented with an option to configure notification settings.
- Create an integration with `tenant_id` as the `external_id`, as we don't know the user's team from the personal chat context. All users with the same `tenant_id` would be scoped under this integration. | sentry | 12 | Python | 56 | integration.py | def build_integration(self, state):
data = state[self.key]
external_id = data["external_id"]
external_name = data["external_name"]
service_url = data["service_url"]
user_id = data["user_id"]
conversation_id = data["conversation_id"]
# TODO: add try/except for request errors
token_data = get_token_data()
integration = {
"name": external_name,
"external_id": external_id,
"metadata": {
"access_token": token_data["access_token"],
"expires_at": token_data["expires_at"],
"service_url": service_url,
"installation_type": data["installation_type"],
"tenant_id": data["tenant_id"],
},
"user_identity": {
"type": "msteams",
"external_id": user_id,
"scopes": [],
"data": {},
},
"post_install_data": {"conversation_id": conversation_id},
}
return integration
| be379770e90f6b7f97109f1cbdffd1e4749402ba | 132 | https://github.com/getsentry/sentry.git | 356 | def build_integration(self, state):
data = state[self.key]
external_id = data["external_id"]
external_name = data["external_name"]
service_url = data["service_url"]
| 13 | 238 | build_integration |
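
Reconstructed from the keys build_integration reads, the state it expects looks roughly like the dict below; every value is a placeholder and the top-level key is assumed to be the provider key:

```python
state = {
    "msteams": {                      # assumed value of self.key
        "external_id": "tenant-uuid",
        "external_name": "Contoso",
        "service_url": "https://smba.trafficmanager.net/amer/",
        "user_id": "29:user-id",
        "conversation_id": "a:conversation-id",
        "installation_type": "tenant",
        "tenant_id": "tenant-uuid",
    }
}
```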
|
88 | 0 | 6 | 14 | mitmproxy/proxy/layers/quic.py | 252,168 | [quic] bugfixes and simplified connection opening | mitmproxy | 14 | Python | 58 | quic.py | def transmit(self) -> layer.CommandGenerator[None]:
assert self.quic is not None
# send all queued datagrams
for data, addr in self.quic.datagrams_to_send(now=self._loop.time()):
yield commands.SendData(self.conn, data, addr)
        # mark an existing wakeup command as obsolete if it no longer matches the time
timer = self.quic.get_timer()
if self._request_wakeup_command_and_timer is not None:
command, existing_timer = self._request_wakeup_command_and_timer
if existing_timer != timer:
self._obsolete_wakeup_commands.add(command)
self._request_wakeup_command_and_timer = None
# request a new wakeup if necessary
if timer is not None and self._request_wakeup_command_and_timer is None:
command = commands.RequestWakeup(timer - self._loop.time())
self._request_wakeup_command_and_timer = (command, timer)
yield command
_handle_event = state_start
| 2426d3d03847e0273707436268d79c24616b3e74 | 135 | https://github.com/mitmproxy/mitmproxy.git | 242 | def transmit(self) -> layer.CommandGenerator[None]:
assert self.quic is not None
# send all queued datagrams
for data, addr in self.quic.datagrams_to_send(now=self._loop.time()):
yield commands.SendData(self.conn, data, addr)
        # mark an existing wakeup command as obsolete if it no longer matches the time
timer = self.quic.get_timer()
if self._request_wakeup_command_and_timer is not None:
command, existing_ti | 24 | 218 | transmit |
|
14 | 0 | 1 | 4 | salt/utils/event.py | 215,506 | Address docs and hard coded strings | salt | 8 | Python | 13 | event.py | def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False):
return MasterEvent(
sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors
)
| 25c2ae356bcf684cbe20f776e1ffcab0f8aeb80c | 39 | https://github.com/saltstack/salt.git | 30 | def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False):
return MasterEvent(
sock_dir, opts, listen=listen, io_loop=io_l | 7 | 55 | get_master_event |
|
19 | 0 | 1 | 8 | jina/parsers/orchestrate/runtimes/remote.py | 13,920 | fix: list-like args passed as string (#5464)
Co-authored-by: Alaeddine Abdessalem <alaeddine-13@live.fr> | jina | 10 | Python | 19 | remote.py | def _add_host(arg_group):
arg_group.add_argument(
'--host',
'--host-in',
type=str,
default=__default_host__,
help=f'The host address of the runtime, by default it is {__default_host__}.',
)
| 87912a37ce7ab3c3b63c12b48d6cdfe31f81742c | 27 | https://github.com/jina-ai/jina.git | 59 | def _add_host(arg_group):
arg_group.add_argument(
'--host',
'--host-in',
type=str,
default=__default_host__,
help=f'The host address of the runtime, by default it is {__default_host__}.',
| 8 | 47 | _add_host |
|
24 | 1 | 3 | 7 | apps/users/signal_handlers.py | 188,681 | perf: Set default roles and add permissions to the system user role (#7898)
* perf: Modify the role handler
* perf: Set default roles and add permissions to the system user role
* perf: Keep authentication in the system scope after all
Co-authored-by: ibuler <ibuler@qq.com>
Co-authored-by: Jiangjie.Bai <32935519+BaiJiangJie@users.noreply.github.com> | jumpserver | 11 | Python | 22 | signal_handlers.py | def on_user_create_set_default_system_role(sender, instance, created, **kwargs):
if not created:
return
has_system_role = instance.system_roles.all().exists()
if not has_system_role:
logger.debug("Receive user create signal, set default role")
instance.set_default_system_role()
@receiver(post_user_create) | 34e75099a3bc8b32d6e823660f5162094d17e511 | @receiver(post_user_create) | 45 | https://github.com/jumpserver/jumpserver.git | 52 | def on_user_create_set_default_system_role(sender, instance, created, **kwargs):
if not created:
| 14 | 85 | on_user_create_set_default_system_role |
41 | 0 | 1 | 5 | vocoder/fregan/stft_loss.py | 161,280 | Added missing files for Fre-GAN (#579)
* The new vocoder Fre-GAN is now supported
* Improved some fregan details
* Fixed the problem that the existing model could not be loaded to continue training when training GAN
* Updated reference papers
* GAN training now supports DistributedDataParallel (DDP)
* Added requirements.txt
* GAN training uses single card training by default
* Added note about GAN vocoder training with multiple GPUs
* Added missing files for Fre-GAN | MockingBird | 13 | Python | 33 | stft_loss.py | def stft(x, fft_size, hop_size, win_length, window):
x_stft = torch.stft(x, fft_size, hop_size, win_length, window)
real = x_stft[..., 0]
imag = x_stft[..., 1]
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
| 6a793cea8488ad40fcad6ab30f9d82bc920ac114 | 77 | https://github.com/babysor/MockingBird.git | 59 | def stft(x, fft_size, hop_size, win_length, window):
x_stft = t | 14 | 108 | stft |
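
A usage sketch for the stft helper above; it assumes an older PyTorch in which torch.stft returns a real tensor with a trailing (real, imag) dimension, whereas newer releases require return_complex:

```python
import torch

window = torch.hann_window(1024)   # window length must match win_length
x = torch.randn(4, 16000)          # a batch of 1-second waveforms at 16 kHz
mag = stft(x, fft_size=1024, hop_size=256, win_length=1024, window=window)
# mag shape: (batch, frames, fft_size // 2 + 1); the clamp keeps the
# magnitude safe as input to a log-magnitude loss.
```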
|
35 | 0 | 3 | 16 | homeassistant/components/mqtt/cover.py | 308,401 | Add mqtt encoding support for publishing (#62739)
* encoding support for mqtt publishing - todo tests
* signature allows None values for qos and retain
* common test for mqtt publishing encoding
* better test with command templates
* more tests
* fix tests alarm control panel+tests light basic
* tests light json and template
* add tests vacuum and fix tests light_template | core | 14 | Python | 32 | cover.py | async def async_close_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_CLOSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = STATE_CLOSED
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config[CONF_POSITION_CLOSED], COVER_PAYLOAD
)
self.async_write_ha_state()
| d0c4f0fec4216e4193da716001b5e13e1e3f2106 | 98 | https://github.com/home-assistant/core.git | 222 | async def async_close_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_CLOSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that cover has changed state.
se | 22 | 150 | async_close_cover |
|
51 | 0 | 1 | 12 | code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py | 60,304 | Balanced joint maximum mean discrepancy for deep transfer learning | transferlearning | 10 | Python | 45 | test_coord_map.py | def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
| cc4d0564756ca067516f71718a3d135996525909 | 168 | https://github.com/jindongwang/transferlearning.git | 135 | def test_rect(self):
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
| 23 | 245 | test_rect |
|
42 | 0 | 1 | 5 | PPOCRLabel/libs/autoDialog.py | 23,088 | Add [ time left ] while predicting the image for user to know the situation | PaddleOCR | 15 | Python | 36 | autoDialog.py | def handleProgressBarSingal(self, i):
self.pb.setValue(i)
        # estimate the time left for auto labeling
        avg_time = (time.time() - self.time_start) / i  # average per-image time smooths out fluctuations
time_left = str(datetime.timedelta(seconds=avg_time * (self.lender - i)))
self.setWindowTitle("PPOCRLabel -- " + f"Time Left: {time_left}") # show
| c267107926bbbd977fc9d67d3c39fb5e1b77028b | 60 | https://github.com/PaddlePaddle/PaddleOCR.git | 80 | def handleProgressBarSingal(self, i):
self.pb.setValue(i)
| 15 | 105 | handleProgressBarSingal |
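
The running time-left estimate above, worked through with illustrative numbers: 20 of 100 images finished after 40 s gives 2 s per image, so the 80 remaining images map to 160 s:

```python
import datetime

elapsed, i, total = 40.0, 20, 100
avg_time = elapsed / i                                    # 2.0 s per image
time_left = str(datetime.timedelta(seconds=avg_time * (total - i)))
assert time_left == "0:02:40"
```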
|
232 | 0 | 1 | 126 | python/ccxt/async_support/zaif.py | 17,268 | 1.71.83
[ci skip] | ccxt | 16 | Python | 142 | zaif.py | def describe(self):
return self.deep_extend(super(zaif, self).describe(), {
'id': 'zaif',
'name': 'Zaif',
'countries': ['JP'],
'rateLimit': 2000,
'version': '1',
'has': {
'CORS': None,
'spot': True,
'margin': None, # has but unimplemented
'swap': False,
'future': False,
'option': False,
'cancelOrder': True,
'createMarketOrder': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766927-39ca2ada-5eeb-11e7-972f-1b4199518ca6.jpg',
'api': 'https://api.zaif.jp',
'www': 'https://zaif.jp',
'doc': [
'https://techbureau-api-document.readthedocs.io/ja/latest/index.html',
'https://corp.zaif.jp/api-docs',
'https://corp.zaif.jp/api-docs/api_links',
'https://www.npmjs.com/package/zaif.jp',
'https://github.com/you21979/node-zaif',
],
'fees': 'https://zaif.jp/fee?lang=en',
},
'fees': {
'trading': {
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0'),
},
},
'api': {
'public': {
'get': [
'depth/{pair}',
'currencies/{pair}',
'currencies/all',
'currency_pairs/{pair}',
'currency_pairs/all',
'last_price/{pair}',
'ticker/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'active_orders',
'cancel_order',
'deposit_history',
'get_id_info',
'get_info',
'get_info2',
'get_personal_info',
'trade',
'trade_history',
'withdraw',
'withdraw_history',
],
},
'ecapi': {
'post': [
'createInvoice',
'getInvoice',
'getInvoiceIdsByOrderNumber',
'cancelInvoice',
],
},
'tlapi': {
'post': [
'get_positions',
'position_history',
'active_positions',
'create_position',
'change_position',
'cancel_position',
],
},
'fapi': {
'get': [
'groups/{group_id}',
'last_price/{group_id}/{pair}',
'ticker/{group_id}/{pair}',
'trades/{group_id}/{pair}',
'depth/{group_id}/{pair}',
],
},
},
'options': {
# zaif schedule defines several market-specific fees
'fees': {
'BTC/JPY': {'maker': 0, 'taker': 0},
'BCH/JPY': {'maker': 0, 'taker': 0.3 / 100},
'BCH/BTC': {'maker': 0, 'taker': 0.3 / 100},
'PEPECASH/JPY': {'maker': 0, 'taker': 0.01 / 100},
                'PEPECASH/BTC': {'maker': 0, 'taker': 0.01 / 100},
},
},
'exceptions': {
'exact': {
'unsupported currency_pair': BadRequest, # {"error": "unsupported currency_pair"}
},
'broad': {
},
},
})
| ff158ebe7e1ed14772139737d13bb5edfd6d9430 | 443 | https://github.com/ccxt/ccxt.git | 2,407 | def describe(self):
return self.deep_extend(super(zaif, self).describe(), {
'id': 'zaif',
'name': 'Zaif',
'countries': ['JP'],
'rateLimit': 2000,
'version': '1',
'has': {
'CORS': None,
'spot': True,
'margin': None, # has but unimplemented
'swap': False,
'future': False,
'option': False,
'cancelOrder': True,
'createMarketOrder': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOpenOrders': True,
'fetchOrderBook': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766927-39ca2ada-5eeb-11e7-972f-1b4199518ca6.jpg',
'api': 'https://api.zaif.jp',
'www': 'https://zaif.jp',
'doc': [
'https://techbureau-api-document.readthedocs.io/ja/latest/index.html',
'https://corp.zaif.jp/api-docs',
'https://corp.zaif.jp/api-docs/api_links',
'https://www.npmjs.com/package/zaif.jp',
'https://github.com/you21979/node-zaif',
],
'fees': 'https://zaif.jp/fee?lang=en',
},
'fees': {
'trading': {
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0'),
},
},
'api': {
'public': {
'get': [
'depth/{pair}',
'currencies/{pair}',
'currencies/all',
'currency_pairs/{pair}',
'currency_pairs/all',
'last_price/{pair}',
'ticker/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'active_orders',
'cancel_order',
'deposit_history',
'get_id_info',
'get_info',
'get_info2',
'get_personal_info',
'trade',
'trade_history',
'withdraw',
'withdraw_history',
],
},
'ecapi': {
'post': [
'createInvoice',
'getInvoice',
'getInvoiceIdsByOrderNumber',
'cancelInvoice',
],
},
'tlapi': {
'post': [
'get_positions',
'position_history',
'active_positions',
'create_position',
'change_position',
'cancel_position',
],
},
'fapi': {
'get': [
'groups/{group_id}',
'last_price/{group_id}/{pair}',
'ticker/{group_id}/{pair}',
'trades/{group_id}/{pair}',
'depth/{group_id}/{pair}',
],
},
},
'options': {
# zaif schedule defines several market-specific fees
'fees': {
'BTC/JPY': {'maker': 0, 'taker': 0},
'BCH/JPY': {'maker': 0, 'taker': 0.3 / 100},
'BCH/BTC': {'maker': 0, 'taker': 0.3 / 100},
'PEPECASH/JPY': {'maker': | 7 | 806 | describe |
|
32 | 0 | 1 | 8 | python/ray/tests/test_failure_2.py | 131,486 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | ray | 12 | Python | 29 | test_failure_2.py | def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub):
cluster = ray_start_cluster_head
redis_port = int(cluster.address.split(":")[1])
# Reuse redis port to make node manager grpc server fail to start.
with pytest.raises(Exception):
cluster.add_node(wait=False, node_manager_port=redis_port)
# wait for max 10 seconds. | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | 71 | https://github.com/ray-project/ray.git | 53 | def test_raylet_node_manager_server_failure(ray_start_cluster_head, log_pubsub):
cluster = ray_start_cluster_head
redis_port = int(cluster.address.split(":")[1])
# Reuse redis port to make node manager grpc server fail to start.
with pytest.raises(Exception):
cluster.add_node(w | 14 | 78 | test_raylet_node_manager_server_failure |
|
33 | 1 | 1 | 13 | src/sentry/runner/commands/repair.py | 90,853 | ref(models): `ActivityType` (#34978)
## Objective:
We want to separate enum logic from Model logic. This breaks a lot of circular dependencies. | sentry | 10 | Python | 32 | repair.py | def fix_group_counters():
from django.db import connection
click.echo("Correcting Group.num_comments counter")
cursor = connection.cursor()
cursor.execute(
,
[ActivityType.NOTE.value],
)
@click.command()
@click.option(
"--with-docs/--without-docs",
default=False,
help="Synchronize and repair embedded documentation. This " "is disabled by default.",
)
@configuration | b9f5a910dc841b85f58d46266ec049ae5a7fd305 | @click.command()
@click.option(
"--with-docs/--without-docs",
default=False,
help="Synchronize and repair embedded documentation. This " "is disabled by default.",
)
@configuration | 38 | https://github.com/getsentry/sentry.git | 66 | def fix_group_counters():
from django.db import connection
click.echo("Correcting Group.num_comments counter")
cursor = connection.cursor()
cursor.execute(
,
[ActivityType.NOTE.value],
)
@click.command()
@click.option(
"--with-do | 16 | 109 | fix_group_counters |
88 | 0 | 1 | 30 | tests/test_config.py | 289,512 | Rename IMPERIAL_SYSTEM to US_CUSTOMARY_SYSTEM (#80253)
* Rename IMPERIAL_SYSTEM
* Deprecate is_metric property and adjust tests
* Adjust unit_system config validation
* Add yaml tests
* Add tests for private name
* Fix incorrect rebase
* Adjust docstring
* Add store migration
* Update unit_system.py
* Minimise test tweaks
* Fix tests
* Add conversion to migration
* Rename new key and adjust tests
* Adjust websocket_detect_config
* Move original_unit_system tracking to subclass | core | 11 | Python | 70 | test_config.py | async def test_igration_and_updating_configuration(hass, hass_storage):
core_data = {
"data": {
"elevation": 10,
"latitude": 55,
"location_name": "Home",
"longitude": 13,
"time_zone": "Europe/Copenhagen",
"unit_system": "imperial",
"external_url": "https://www.example.com",
"internal_url": "http://example.local",
"currency": "BTC",
},
"key": "core.config",
"version": 1,
"minor_version": 1,
}
hass_storage["core.config"] = dict(core_data)
await config_util.async_process_ha_core_config(
hass, {"allowlist_external_dirs": "/etc"}
)
await hass.config.async_update(latitude=50, currency="USD")
expected_new_core_data = copy.deepcopy(core_data)
# From async_update above
expected_new_core_data["data"]["latitude"] = 50
expected_new_core_data["data"]["currency"] = "USD"
# 1.1 -> 1.2 store migration with migrated unit system
expected_new_core_data["data"]["unit_system_v2"] = "us_customary"
expected_new_core_data["minor_version"] = 2
assert hass_storage["core.config"] == expected_new_core_data
assert hass.config.latitude == 50
assert hass.config.currency == "USD"
| 67d1dde69fbacf33f2c39ea14d89f2afa425ed18 | 166 | https://github.com/home-assistant/core.git | 280 | async def test_igration_and_updating_configuration(hass, hass_storage):
core_data = {
"data": {
"elevation": 10,
"latitude": 55,
"location_name": "Home",
"longitude": 13,
"time_zone": "Europe/Copenhagen",
"unit_system": "imperial",
"external_url": "https://www.example.com",
"internal_url": "http://example.local",
"currency": "BTC",
},
"key": "core.config",
"version": 1,
"minor_version": 1,
}
hass_storage["core.config"] = dict(core_data)
await config_util.async_process_ha_core_config(
hass, {"allowlist_external_dirs": "/etc"}
)
await hass.config.async_update(latitude=50, currency="USD")
expected_new_core_data = copy.deepcopy(core_data)
# From async_update above
expected_new_core_data["data"]["latitude"] = 50
expected_new_core_data["data"]["currency"] = "USD"
# 1.1 -> 1.2 store migration with migrated unit system
expected_new_core_data["data"]["unit_system_v2"] = "us_customary"
expected_new_core_data["minor_version"] = 2
assert hass_storage["core.config"] == expected_new_core_data
assert hass.config.latitude == 50
assert hass.config.currency == "USD"
| 14 | 314 | test_igration_and_updating_configuration |
|
9 | 0 | 2 | 3 | mmdet/models/detectors/base.py | 244,636 | Refactor interface of two-stage detector | mmdetection | 9 | Python | 9 | base.py | def with_shared_head(self) -> bool:
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
| 46430db4f965a2d7e2853ce52cad828344e84ec7 | 21 | https://github.com/open-mmlab/mmdetection.git | 23 | def with_shared_head(self) -> bool:
return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
| 5 | 37 | with_shared_head |
|
49 | 0 | 2 | 13 | mitmproxy/proxy/layers/quic.py | 253,443 | [autofix.ci] apply automated fixes | mitmproxy | 11 | Python | 46 | quic.py | def receive_close(self) -> layer.CommandGenerator[None]:
assert self.quic
# if `_close_event` is not set, the underlying connection has been closed
# we turn this into a QUIC close event as well
close_event = self.quic._close_event or quic_events.ConnectionTerminated(
QuicErrorCode.NO_ERROR, None, "Connection closed."
)
yield from self.event_to_child(
QuicConnectionClosed(
self.conn,
close_event.error_code,
close_event.frame_type,
close_event.reason_phrase,
)
)
| 8c2428c9d355ca5fbc3dd90e9820ceb1cc795837 | 62 | https://github.com/mitmproxy/mitmproxy.git | 190 | def receive_close(self) -> layer.CommandGenerator[None]:
assert self.quic
# if `_close_event` is not set, the underlying connection has been closed
# we turn this into a QUIC close event as well
close_event = self.quic._close_event or quic_events.ConnectionTerminated(
QuicErrorCod | 17 | 95 | receive_close |
|
63 | 1 | 1 | 6 | tests/gamestonk_terminal/economy/test_economy_controller.py | 282,023 | Tests : Economy + Conftest (#1260)
* Updating tests : economy
* Updating tests : removing breaklines
* Updating tests : economy
* Updating tests : conftest
* Updating tests : economy | OpenBBTerminal | 10 | Python | 39 | test_economy_controller.py | def test_call_cls(mocker):
mocker.patch("os.system")
controller = economy_controller.EconomyController(queue=None)
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
["quit", "quit"],
),
("call_exit", ["help"], ["quit", "quit", "help"]),
("call_home", [], ["quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
[
"quit",
"reset",
"economy",
],
),
(
"call_reset",
["help"],
[
"quit",
"reset",
"economy",
"help",
],
),
],
) | 683a8bdd83c1b931df111a5b2b8b19350930b73a | @pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
["quit", "quit"],
),
("call_exit", ["help"], ["quit", "quit", "help"]),
("call_home", [], ["quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
[
"quit",
"reset",
"economy",
],
),
(
"call_reset",
["help"],
[
"quit",
"reset",
"economy",
"help",
],
),
],
) | 43 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 386 | def test_call_cls(mocker):
mocker.patch("os.system")
controller = economy_controller.EconomyController(queue=None)
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pyt | 16 | 307 | test_call_cls |
25 | 0 | 1 | 10 | pandas/tests/io/xml/test_xml.py | 165,699 | ENH: Add large file support for read_xml (#45724)
* ENH: Add large file support for read_xml
* Combine tests, slightly fix docs
* Adjust pytest decorator on URL test; fix doc strings
* Adjust tests for helper function
* Add iterparse feature to some tests
* Add IO docs link in docstring | pandas | 14 | Python | 25 | test_xml.py | def test_url_path_error(parser):
url = "https://www.w3schools.com/xml/books.xml"
with pytest.raises(
ParserError, match=("iterparse is designed for large XML files")
):
read_xml(
url,
parser=parser,
iterparse={"row": ["shape", "degrees", "sides", "date"]},
)
| fa7e31b8b19eb03dceebd09d03798363daae07d9 | 47 | https://github.com/pandas-dev/pandas.git | 87 | def test_url_path_error(parser):
url = "https://www.w3schools.com/xml/books.xml"
with pytest.raises(
ParserError, match=("iterparse is designed for large XML fi | 9 | 84 | test_url_path_error |
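For reference, the `iterparse` mode this test guards applies only to local files: `read_xml` then streams the document element-by-element instead of parsing the whole tree, which is why the URL input above is rejected. A minimal usage sketch (the file path is a hypothetical placeholder, not from the test):

import pandas as pd

# Stream only the listed descendants of each repeating <row> element.
# "books.xml" is a placeholder local path; URLs are rejected in this mode.
df = pd.read_xml(
    "books.xml",
    iterparse={"row": ["shape", "degrees", "sides", "date"]},
)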
|
75 | 0 | 1 | 17 | tests/components/forked_daapd/test_media_player.py | 308,240 | Support announce and enqueue in forked-daapd (#77744) | core | 9 | Python | 45 | test_media_player.py | def test_master_state(hass, mock_api_object):
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == STATE_PAUSED
assert state.attributes[ATTR_FRIENDLY_NAME] == "forked-daapd server"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES
assert not state.attributes[ATTR_MEDIA_VOLUME_MUTED]
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.2
assert state.attributes[ATTR_MEDIA_CONTENT_ID] == 12322
assert state.attributes[ATTR_MEDIA_CONTENT_TYPE] == MediaType.MUSIC
assert state.attributes[ATTR_MEDIA_DURATION] == 0.05
assert state.attributes[ATTR_MEDIA_POSITION] == 0.005
assert state.attributes[ATTR_MEDIA_TITLE] == "No album" # reversed for url
assert state.attributes[ATTR_MEDIA_ARTIST] == "Some artist"
assert state.attributes[ATTR_MEDIA_ALBUM_NAME] == "Some song" # reversed
assert state.attributes[ATTR_MEDIA_ALBUM_ARTIST] == "The xx"
assert state.attributes[ATTR_MEDIA_TRACK] == 1
assert not state.attributes[ATTR_MEDIA_SHUFFLE]
| 420285f7ef1e170f599cf22c031987e2ceefa353 | 156 | https://github.com/home-assistant/core.git | 128 | def test_master_state(hass, mock_api_object):
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == STATE_PAUSED
assert state.attributes[ATTR_FRIENDLY_NAME] | 26 | 235 | test_master_state |
|
32 | 1 | 1 | 3 | modules/image/text_to_image/disco_diffusion_cnclip_vitb16/resize_right/interp_methods.py | 49,756 | add disco_diffusion_cnclip_vitb16 module | PaddleHub | 16 | Python | 24 | interp_methods.py | def lanczos2(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) / ((pi**2 * x**2 / 2) + eps)) * to_dtype(abs(x) < 2))
@support_sz(6) | f4d6e64cdc132ae868699a0ba442f4ab1d304a14 | @support_sz(6) | 69 | https://github.com/PaddlePaddle/PaddleHub.git | 36 | def lanczos2(x):
fw, to_dtype, eps = set_framework_dependencies(x)
return (((fw.sin(pi * x) * fw.sin(pi * x / 2) + eps) / ((pi**2 * x**2 / 2 | 10 | 116 | lanczos2 |
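Written out, the return expression is an eps-stabilized form of the Lanczos-2 kernel: sinc(x) * sinc(x/2) on |x| < 2 and 0 elsewhere, with eps added to numerator and denominator to avoid the 0/0 indeterminacy at x = 0. In LaTeX:

\[ L_2(x) = \operatorname{sinc}(x)\,\operatorname{sinc}\!\left(\tfrac{x}{2}\right)\,\mathbf{1}_{\{|x|<2\}} = \frac{2\,\sin(\pi x)\,\sin(\pi x/2)}{\pi^2 x^2}\,\mathbf{1}_{\{|x|<2\}} \]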
37 | 0 | 1 | 16 | wagtail/core/tests/test_page_model.py | 74,321 | Reformat with black | wagtail | 14 | Python | 34 | test_page_model.py | def test_copy_page_copies_recursively_to_the_same_tree(self):
events_index = EventIndex.objects.get(url_path="/home/events/")
old_christmas_event = (
events_index.get_children().filter(slug="christmas").first().specific
)
old_christmas_event.save_revision()
with self.assertRaises(Exception) as exception:
events_index.copy(
recursive=True,
update_attrs={"title": "New events index", "slug": "new-events-index"},
to=events_index,
)
self.assertEqual(
str(exception.exception),
"You cannot copy a tree branch recursively into itself",
)
| d10f15e55806c6944827d801cd9c2d53f5da4186 | 93 | https://github.com/wagtail/wagtail.git | 185 | def test_copy_page_copies_recursively_to_the_same_tree(self):
events_index = EventIndex.objects.get(url_path="/home/events/")
old_christmas_event = (
events_index.get_children().filter(slug="christmas").first().specific
)
old_christmas_event.save_revision()
with self.assertRaises(Exception) as exception:
events_index.copy(
recursive=True,
update_attrs={"title": "New events index", "slug": "new-events-index"},
to=events_index,
)
self.assertEqual(
str(exception.exception),
"You cannot copy a tree branch recursively into itself",
)
| 23 | 162 | test_copy_page_copies_recursively_to_the_same_tree |
|
20 | 0 | 3 | 7 | awscli/utils.py | 189,211 | Improved readability and simplified logic to find first quote character.
* Updated "_find_quote_char_in_part" function: previously it scanned the input
  string multiple times, whereas the proposed logic does the same thing in a
  single iteration.
* Code branching is also reduced; only one if block is required. | aws-cli | 10 | Python | 14 | utils.py | def _find_quote_char_in_part(part):
quote_char = None
for ch in part:
if ch in ('"', "'"):
quote_char = ch
break
return quote_char
| 3c7e82860c6bd219f67d4373c715efb805500074 | 29 | https://github.com/aws/aws-cli.git | 61 | def _find_quote_char_in_part(part):
quote_char = None
for ch in part:
if ch in ('"', "'"):
quote_char | 4 | 52 | _find_quote_char_in_part |
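Quick usage sketch for the helper above (hypothetical inputs, not taken from the aws-cli test suite):

_find_quote_char_in_part('--query "a b"')    # returns '"'
_find_quote_char_in_part("it's a \"test\"")   # returns "'" (first quote wins)
_find_quote_char_in_part("no quotes here")   # returns None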
|
282 | 0 | 16 | 36 | pandas/core/arrays/numeric.py | 163,800 | BUG: NumericArray * td64_array (#45622) | pandas | 16 | Python | 141 | numeric.py | def _arith_method(self, other, op):
op_name = op.__name__
omask = None
if isinstance(other, BaseMaskedArray):
other, omask = other._data, other._mask
elif is_list_like(other):
if not isinstance(other, ExtensionArray):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
# We wrap the non-masked arithmetic logic used for numpy dtypes
# in Series/Index arithmetic ops.
other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
pd_op = ops.get_array_op(op)
other = ensure_wrapped_if_datetimelike(other)
mask = self._propagate_mask(omask, other)
if other is libmissing.NA:
result = np.ones_like(self._data)
if "truediv" in op_name and self.dtype.kind != "f":
# The actual data here doesn't matter since the mask
# will be all-True, but since this is division, we want
# to end up with floating dtype.
result = result.astype(np.float64)
else:
# Make sure we do this before the "pow" mask checks
# to get an expected exception message on shape mismatch.
if self.dtype.kind in ["i", "u"] and op_name in ["floordiv", "mod"]:
# ATM we don't match the behavior of non-masked types with
# respect to floordiv-by-zero
pd_op = op
with np.errstate(all="ignore"):
result = pd_op(self._data, other)
if op_name == "pow":
# 1 ** x is 1.
mask = np.where((self._data == 1) & ~self._mask, False, mask)
# x ** 0 is 1.
if omask is not None:
mask = np.where((other == 0) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 0, False, mask)
elif op_name == "rpow":
# 1 ** x is 1.
if omask is not None:
mask = np.where((other == 1) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 1, False, mask)
# x ** 0 is 1.
mask = np.where((self._data == 0) & ~self._mask, False, mask)
return self._maybe_mask_result(result, mask, other, op_name)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
| 57d7768c205d30cc50ba9b42d60d24d1e32eb249 | 360 | https://github.com/pandas-dev/pandas.git | 813 | def _arith_method(self, other, op):
op_name = op.__name__
omask = None
if isinstance(other, BaseMaskedArray):
other, omask = other._data, other._mask
elif is_list_like(other):
if not isinstance(other, ExtensionArray):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
# We wrap the non-masked arithmetic logic used for numpy dtypes
# in Series/Index arithmetic ops.
other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
pd_op = ops.get_array_op(op)
other | 41 | 596 | _arith_method |
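The pow/rpow special cases above exist so that mathematically determined results are not masked out: x ** 0 is 1 and 1 ** x is 1 even when the other operand is missing. A small demonstration of the resulting behavior (expected values shown as comments):

import pandas as pd

a = pd.array([0, 2, None], dtype="Int64")
print(a ** 0)   # [1, 1, 1] -- x ** 0 is 1 even for NA
print(1 ** a)   # [1, 1, 1] -- 1 ** x is 1 even for NA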
|
16 | 0 | 1 | 6 | lib/matplotlib/backends/backend_gtk3.py | 110,632 | Separately track modifier keys for mouse events.
Whether the event modifiers are directly available on enter/leave events
depends on the backend, but all are handled here (except possibly for
macos, which I haven't checked). | matplotlib | 13 | Python | 16 | backend_gtk3.py | def button_press_event(self, widget, event):
MouseEvent("button_press_event", self,
*self._mpl_coords(event), event.button,
modifiers=self._mpl_modifiers(event.state),
guiEvent=event)._process()
return False # finish event propagation?
| b4e9e3131cdd7f1ad33ea06e21e7d3e51762af91 | 48 | https://github.com/matplotlib/matplotlib.git | 84 | def button_press_event(self, widget, event):
MouseEvent("button_press_event", self,
| 12 | 73 | button_press_event |
|
45 | 0 | 3 | 5 | python/ray/util/inspect.py | 133,157 | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | ray | 6 | Python | 38 | inspect.py | def is_cython(obj):
# TODO(suo): We could split these into two functions, one for Cython
# functions and another for Cython methods.
# TODO(suo): There doesn't appear to be a Cython function 'type' we can
    # check against via isinstance. Please correct me if I'm wrong.
    def check_cython(x):  # reconstructed; the stored snippet was truncated here
        return type(x).__name__ == "cython_function_or_method"
    return check_cython(obj) or (
        hasattr(obj, "__func__") and check_cython(obj.__func__)
    )
 | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 |  | 29 | https://github.com/ray-project/ray.git | 60 | def is_cython(obj):
# TODO(suo): We could split these into two functions, one for Cython
# functions and another for Cyth | 2 | 17 | is_cython |
|
155 | 0 | 1 | 29 | tests/models/test_cleartasks.py | 47,547 | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | airflow | 16 | Python | 111 | test_cleartasks.py | def test_clear_task_instances_without_task(self, dag_maker):
with dag_maker(
'test_clear_task_instances_without_task',
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
) as dag:
task0 = EmptyOperator(task_id='task0')
task1 = EmptyOperator(task_id='task1', retries=2)
dr = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
ti0.refresh_from_task(task0)
ti1.refresh_from_task(task1)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
assert not dag.has_task(task0.task_id)
assert not dag.has_task(task1.task_id)
with create_session() as session:
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
assert ti0.try_number == 2
assert ti0.max_tries == 1
assert ti1.try_number == 2
assert ti1.max_tries == 2
| 49e336ae0302b386a2f47269a6d13988382d975f | 216 | https://github.com/apache/airflow.git | 451 | def test_clear_task_instances_without_task(self, dag_maker):
with dag_maker(
'test_clear_task_instances_without_task',
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10),
) as dag:
task0 = EmptyOperator(task_id='task0')
task1 = EmptyOperator(task_id='task1', retries=2)
dr = dag_maker.create_dagrun(
state=State.RUNNING,
run_type=DagRunType.SCHEDULED,
)
ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)
ti0.refresh_from_task(task0)
ti1.refresh_from_task(task1)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
assert not dag.has_task(task0.task_id)
assert not dag.has_task(task1.task_id)
with create_session() as session:
# we use order_by(task_id) here because for the test DAG structure of ours
# this is equivalent to topological sort. It would not work in general case
# but it works for our case because we specifically constructed test DAGS
# in the way that those two sort methods are equivalent
qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()
clear_task_instances(qry, session)
# When dag is None | 46 | 359 | test_clear_task_instances_without_task |
|
38 | 0 | 4 | 10 | src/PIL/ImageFile.py | 242,742 | Remove redundant parentheses | Pillow | 10 | Python | 30 | ImageFile.py | def set_as_raw(self, data, rawmode=None):
if not rawmode:
rawmode = self.mode
d = Image._getdecoder(self.mode, "raw", rawmode)
d.setimage(self.im, self.state.extents())
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
| ee85e387bab535e2339b9d3cd1ab87c61d23af15 | 85 | https://github.com/python-pillow/Pillow.git | 120 | def set_as_raw(self, data, rawmode=None):
| 15 | 139 | set_as_raw |
|
27 | 0 | 3 | 14 | setup.py | 11,965 | refactor: remove jinad (#4550) | jina | 10 | Python | 22 | setup.py | def register_ac():
import os
import re
from pathlib import Path
home = str(Path.home())
resource_path = 'jina/resources/completions/jina.%s'
regex = r'#\sJINA_CLI_BEGIN(.*)#\sJINA_CLI_END'
_check = {'zsh': '.zshrc', 'bash': '.bashrc', 'fish': '.fish'}
| 2ce767517532ebbf85ade4b84cfba0f7bb69c4f9 | 69 | https://github.com/jina-ai/jina.git | 47 | def register_ac():
import os
import re
from pathlib import Path
home = str(Path.home())
resource_path = 'jina/resources/completions/jina.%s'
regex | 10 | 87 | register_ac |
|
260 | 0 | 3 | 66 | python/ccxt/async_support/ndax.py | 16,851 | 1.70.82
[ci skip] | ccxt | 18 | Python | 151 | ndax.py | async def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = await self.publicGetGetInstruments(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "InstrumentId":3,
# "Symbol":"LTCBTC",
# "Product1":3,
# "Product1Symbol":"LTC",
# "Product2":1,
# "Product2Symbol":"BTC",
# "InstrumentType":"Standard",
# "VenueInstrumentId":3,
# "VenueId":1,
# "SortIndex":0,
# "SessionStatus":"Running",
# "PreviousSessionStatus":"Stopped",
# "SessionStatusDateTime":"2020-11-25T19:42:15.245Z",
# "SelfTradePrevention":true,
# "QuantityIncrement":0.0000000100000000000000000000,
# "PriceIncrement":0.0000000100000000000000000000,
# "MinimumQuantity":0.0100000000000000000000000000,
# "MinimumPrice":0.0000010000000000000000000000,
# "VenueSymbol":"LTCBTC",
# "IsDisable":false,
# "MasterDataId":0,
# "PriceCollarThreshold":0.0000000000000000000000000000,
# "PriceCollarPercent":0.0000000000000000000000000000,
# "PriceCollarEnabled":false,
# "PriceFloorLimit":0.0000000000000000000000000000,
# "PriceFloorLimitEnabled":false,
# "PriceCeilingLimit":0.0000000000000000000000000000,
# "PriceCeilingLimitEnabled":false,
# "CreateWithMarketRunning":true,
# "AllowOnlyMarketMakerCounterParty":false,
# "PriceCollarIndexDifference":0.0000000000000000000000000000,
# "PriceCollarConvertToOtcEnabled":false,
# "PriceCollarConvertToOtcClientUserId":0,
# "PriceCollarConvertToOtcAccountId":0,
# "PriceCollarConvertToOtcThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeEnabled":false,
# "OtcTradesPublic":true,
# "PriceTier":0
# },
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'InstrumentId')
# lowercaseId = self.safe_string_lower(market, 'symbol')
baseId = self.safe_string(market, 'Product1')
quoteId = self.safe_string(market, 'Product2')
base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))
quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))
sessionStatus = self.safe_string(market, 'SessionStatus')
isDisable = self.safe_value(market, 'IsDisable')
sessionRunning = (sessionStatus == 'Running')
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (sessionRunning and not isDisable),
'contract': False,
'linear': None,
'inverse': None,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'QuantityIncrement'),
'price': self.safe_number(market, 'PriceIncrement'),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'MinimumQuantity'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'MinimumPrice'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
| 1cfeeec8240c4c7e93e632cd9e42be2a1be33b16 | 370 | https://github.com/ccxt/ccxt.git | 2,071 | async def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = await self.publicGetGetInstruments(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "InstrumentId":3,
# "Symbol":"LTCBTC",
# "Product1":3,
# "Product1Symbol":"LTC",
# "Product2":1,
# "Product2Symbol":"BTC",
# "InstrumentType":"Standard",
# "VenueInstrumentId":3,
# "VenueId":1,
# "SortIndex":0,
# "SessionStatus":"Running",
# "PreviousSessionStatus":"Stopped",
# "SessionStatusDateTime":"2020-11-25T19:42:15.245Z",
# "SelfTradePrevention":true,
# "QuantityIncrement":0.0000000100000000000000000000,
# "PriceIncrement":0.0000000100000000000000000000,
# "MinimumQuantity":0.0100000000000000000000000000,
# "MinimumPrice":0.0000010000000000000000000000,
# "VenueSymbol":"LTCBTC",
# "IsDisable":false,
# "MasterDataId":0,
# "PriceCollarThreshold":0.0000000000000000000000000000,
# "PriceCollarPercent":0.0000000000000000000000000000,
# "PriceCollarEnabled":false,
# "PriceFloorLimit":0.0000000000000000000000000000,
# "PriceFloorLimitEnabled":false,
# "PriceCeilingLimit":0.0000000000000000000000000000,
# "PriceCeilingLimitEnabled":false,
# "CreateWithMarketRunning":true,
# "AllowOnlyMarketMakerCounterParty":false,
# "PriceCollarIndexDifference":0.0000000000000000000000000000,
# "PriceCollarConvertToOtcEnabled":false,
# "PriceCollarCon | 28 | 681 | fetch_markets |
|
20 | 0 | 2 | 7 | python3.10.4/Lib/inspect.py | 218,385 | add python 3.10.4 for windows | XX-Net | 12 | Python | 17 | inspect.py | def getinnerframes(tb, context=1):
framelist = []
while tb:
frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)
framelist.append(FrameInfo(*frameinfo))
tb = tb.tb_next
return framelist
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 49 | https://github.com/XX-net/XX-Net.git | 53 | def getinnerframes(tb, context=1):
framelist = []
while tb:
frameinfo = (tb.tb_frame,) + g | 10 | 80 | getinnerframes |
|
36 | 0 | 1 | 9 | erpnext/regional/india/e_invoice/utils.py | 67,091 | style: format code with black | erpnext | 12 | Python | 34 | utils.py | def show_link_to_error_log(invoice, einvoice):
err_log = log_error(einvoice)
link_to_error_log = get_link_to_form("Error Log", err_log.name, "Error Log")
frappe.throw(
_(
"An error occurred while creating e-invoice for {}. Please check {} for more information."
).format(invoice.name, link_to_error_log),
title=_("E Invoice Creation Failed"),
)
| 494bd9ef78313436f0424b918f200dab8fc7c20b | 51 | https://github.com/frappe/erpnext.git | 27 | def show_link_to_error_log(invoice, einvoice):
err_log = log_error(einvoice)
link_to_error_log = get_link_to_form("Error Log", err_ | 13 | 85 | show_link_to_error_log |
|
7 | 0 | 1 | 3 | tests/sentry/api/endpoints/test_user_notification_details.py | 100,128 | ref(tests): Remove `get_valid_response()` (#34822) | sentry | 10 | Python | 7 | test_user_notification_details.py | def test_lookup_other_user(self):
user_b = self.create_user(email="b@example.com")
self.get_error_response(user_b.id, status_code=403)
| 096b5511e244eecd8799b2a0324655207ce8985e | 27 | https://github.com/getsentry/sentry.git | 20 | def test_lookup_other_user(self):
user_b = s | 8 | 45 | test_lookup_other_user |
|
8 | 1 | 1 | 4 | saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py | 27,171 | Improve subscription payload tests and structure (#9719)
* Refactor and update tests for subscriptions
* Refactor and update tests for subscriptions
* fixes after rebasing
* fix category payload genertor
* small fixes
* fixes after rebase
* fix linters errors
* add sorting to payload generators, fixes after review
* fix linters
* remove commented | saleor | 8 | Python | 8 | fixtures.py | def subscription_page_created_webhook(subscription_webhook):
return subscription_webhook(
subscription_queries.PAGE_CREATED, WebhookEventAsyncType.PAGE_CREATED
)
@pytest.fixture | 107cfb229e75f14efc75a20c3d1f421ccb50f244 | @pytest.fixture | 16 | https://github.com/saleor/saleor.git | 19 | def subscription_page_created_webhook(subscription_webhook):
return subscription_webhook(
subscription_queries.PAGE_CREATED, WebhookEventAsyncType.PAGE_CREATED
)
@pytest.fi | 7 | 32 | subscription_page_created_webhook |
43 | 0 | 1 | 13 | onnx/backend/test/case/node/dropout.py | 254,738 | Use Python type annotations rather than comments (#3962)
* These have been supported since Python 3.5.
ONNX doesn't support Python < 3.6, so we can use the annotations.
Diffs generated by https://pypi.org/project/com2ann/.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Remove MYPY conditional logic in gen_proto.py
It breaks the type annotations and shouldn't be needed.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Get rid of MYPY bool from more scripts
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* move Descriptors class above where its referenced in type annotation
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fixes
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* remove extra blank line
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotations
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotation in gen_docs
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix Operators.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix TestCoverage.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix protoc-gen-mypy.py
Signed-off-by: Gary Miguel <garymiguel@microsoft.com> | onnx | 11 | Python | 37 | dropout.py | def export_training_default_ratio_mask() -> None:
seed = np.int64(0)
node = onnx.helper.make_node(
'Dropout',
inputs=['x', 'r', 't'],
outputs=['y', 'z'],
seed=seed
)
x = np.random.randn(3, 4, 5).astype(np.float32)
r = np.float32(0.5)
t = np.bool_(True)
y, z = dropout(x, r, training_mode=t, return_mask=True)
expect(node, inputs=[x, r, t], outputs=[y, z], name='test_training_dropout_default_mask')
| 83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd | 129 | https://github.com/onnx/onnx.git | 142 | def export_training_default_ratio_mask() -> None:
seed = np.int64(0)
node = onnx.helper.make_node(
'Dropout',
inputs=['x', 'r', 't'],
outputs=['y', 'z'],
seed=seed
)
x = np.random.randn(3, 4, 5).astype(np.float32)
r = np.float32(0.5)
t = np.bool_(True)
y, z = dropout(x, r, training_mode=t, return_mask=True)
expect(node, inputs=[x, r, t], | 25 | 197 | export_training_default_ratio_mask |
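The `dropout` helper used by these export cases is ONNX's NumPy reference: in training mode it keeps each element with probability 1 - ratio and rescales survivors by 1 / (1 - ratio) (inverted dropout). A sketch of that reference behavior, with illustrative names rather than the exact test-utility source:

import numpy as np

def dropout_ref(x, ratio=0.5, training_mode=False, return_mask=False, seed=0):
    if not training_mode:  # inference: identity function
        return (x, np.ones(x.shape, dtype=bool)) if return_mask else x
    np.random.seed(seed)
    mask = np.random.uniform(0.0, 1.0, x.shape) >= ratio
    y = (mask * x / (1.0 - ratio)).astype(x.dtype)  # rescale kept values
    return (y, mask) if return_mask else y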
|
42 | 0 | 1 | 17 | pandas/tests/io/xml/test_xml_dtypes.py | 165,707 | ENH: Add large file support for read_xml (#45724)
* ENH: Add large file support for read_xml
* Combine tests, slightly fix docs
* Adjust pytest decorator on URL test; fix doc strings
* Adjust tests for helper function
* Add iterparse feature to some tests
* Add IO docs link in docstring | pandas | 18 | Python | 36 | test_xml_dtypes.py | def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"sides": "Int64"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": Series([4.0, float("nan"), 3.0]).astype("Int64"),
}
)
tm.assert_frame_equal(df_result, df_expected)
tm.assert_frame_equal(df_iter, df_expected)
| fa7e31b8b19eb03dceebd09d03798363daae07d9 | 124 | https://github.com/pandas-dev/pandas.git | 137 | def test_dtype_nullable_int(parser):
df_result = read_xml(xml_types, dtype={"sides": "Int64"}, parser=parser)
df_iter = read_xml_iterparse(
xml_types,
parser=parser,
dtype={"sides": "Int64"},
iterparse={"row": ["shape", "degrees", "sides"]},
)
df_expected = DataFrame(
{
"shape": ["square", "circle", "triangle"],
"degrees": [360, 360, 180],
"sides": Series([4.0, float("nan"), 3.0]).astype("Int64"),
| 16 | 202 | test_dtype_nullable_int |
|
75 | 0 | 1 | 16 | code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py | 60,294 | Balanced joint maximum mean discrepancy for deep transfer learning | transferlearning | 13 | Python | 60 | test_coord_map.py | def test_nd_conv(self):
n = caffe.NetSpec()
# define data with 3 spatial dimensions, otherwise the same net
n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
pad=[0, 1, 2])
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertTrue(len(a) == len(b))
self.assertTrue(np.all(a == 1))
self.assertEquals(b[0] - 1, b[1])
self.assertEquals(b[1] - 1, b[2])
| cc4d0564756ca067516f71718a3d135996525909 | 229 | https://github.com/jindongwang/transferlearning.git | 210 | def test_nd_conv(self):
n = caffe.NetSpec()
# define data with 3 spatial dimensions, otherwise the same net
n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
n.conv = L.Convolution(
n.data, | 32 | 333 | test_nd_conv |
|
13 | 0 | 1 | 5 | tests/components/logbook/test_init.py | 299,623 | Add json decode caching to logbook (#71080) | core | 10 | Python | 12 | test_init.py | def test_unsupported_attributes_in_cache_throws(hass):
entity_attr_cache = logbook.EntityAttributeCache(hass)
event = MockLazyEventPartialState(EVENT_STATE_CHANGED)
with pytest.raises(ValueError):
entity_attr_cache.get("sensor.xyz", "not_supported", event)
| b8442d9340b569d10e0593bb0576bdcdb9ea55e3 | 38 | https://github.com/home-assistant/core.git | 32 | def test_unsupported_attributes_in_cache_throws(hass):
entity_attr_cache = logbook.EntityAttributeCache(hass)
event = MockLazyEventPartialState(EVENT_STATE_CHANGED)
with pytest.raises(ValueError):
ent | 12 | 69 | test_unsupported_attributes_in_cache_throws |
|
132 | 0 | 3 | 20 | scapy/contrib/http2.py | 209,511 | E275 - Missing whitespace after keyword (#3711)
Co-authored-by: Alexander Aring <alex.aring@gmail.com>
Co-authored-by: Anmol Sarma <me@anmolsarma.in>
Co-authored-by: antoine.torre <torreantoine1@gmail.com>
Co-authored-by: Antoine Vacher <devel@tigre-bleu.net>
Co-authored-by: Arnaud Ebalard <arno@natisbad.org>
Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>
Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au>
Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Co-authored-by: CQ <cq674350529@163.com>
Co-authored-by: Daniel Collins <kinap@users.noreply.github.com>
Co-authored-by: Federico Maggi <federico.maggi@gmail.com>
Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr>
Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com>
Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>
Co-authored-by: gpotter2 <gabriel@potter.fr>
Co-authored-by: Guillaume Valadon <guillaume@valadon.net>
Co-authored-by: Hao Zheng <haozheng10@gmail.com>
Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com>
Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi>
Co-authored-by: hecke <hecke@naberius.de>
Co-authored-by: Jan Romann <jan.romann@gmail.com>
Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com>
Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>
Co-authored-by: jockque <38525640+jockque@users.noreply.github.com>
Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>
Co-authored-by: Keith Scott <kscott@mitre.org>
Co-authored-by: Kfir Gollan <kfir@drivenets.com>
Co-authored-by: Lars Munch <lars@segv.dk>
Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>
Co-authored-by: Leonard Crestez <cdleonard@gmail.com>
Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com>
Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com>
Co-authored-by: Martine Lenders <authmillenon@gmail.com>
Co-authored-by: Michael Farrell <micolous+git@gmail.com>
Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Co-authored-by: mkaliszan <mkaliszan@benocs.com>
Co-authored-by: mtury <maxence.tury@ssi.gouv.fr>
Co-authored-by: Neale Ranns <nranns@cisco.com>
Co-authored-by: Octavian Toader <Octavian.Toader@belden.com>
Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org>
Co-authored-by: Phil <phil@secdev.org>
Co-authored-by: Pierre Lalet <pierre@droids-corp.org>
Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>
Co-authored-by: plorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: pvinci <pvinci@users.noreply.github.com>
Co-authored-by: Rahul Jadhav <nyrahul@gmail.com>
Co-authored-by: Robin Jarry <robin.jarry@6wind.com>
Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <sd@queasysnail.net>
Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de>
Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr>
Co-authored-by: smehner1 <smehner1@gmail.com>
Co-authored-by: speakinghedge <hecke@naberius.de>
Co-authored-by: Steven Van Acker <steven@singularity.be>
Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com>
Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com>
Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com>
Co-authored-by: waeva <74464394+waeva@users.noreply.github.com>
Co-authored-by: Alexander Aring <alex.aring@gmail.com>
Co-authored-by: Anmol Sarma <me@anmolsarma.in>
Co-authored-by: antoine.torre <torreantoine1@gmail.com>
Co-authored-by: Antoine Vacher <devel@tigre-bleu.net>
Co-authored-by: Arnaud Ebalard <arno@natisbad.org>
Co-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>
Co-authored-by: Brian Bienvenu <brian@bienvenu.id.au>
Co-authored-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Co-authored-by: CQ <cq674350529@163.com>
Co-authored-by: Daniel Collins <kinap@users.noreply.github.com>
Co-authored-by: Federico Maggi <federico.maggi@gmail.com>
Co-authored-by: Florian Maury <florian.maury@ssi.gouv.fr>
Co-authored-by: _Frky <3105926+Frky@users.noreply.github.com>
Co-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>
Co-authored-by: gpotter2 <gabriel@potter.fr>
Co-authored-by: Guillaume Valadon <guillaume@valadon.net>
Co-authored-by: Hao Zheng <haozheng10@gmail.com>
Co-authored-by: Haresh Khandelwal <hareshkhandelwal@gmail.com>
Co-authored-by: Harri Hämäläinen <hhamalai@iki.fi>
Co-authored-by: hecke <hecke@naberius.de>
Co-authored-by: Jan Romann <jan.romann@gmail.com>
Co-authored-by: Jan Sebechlebsky <sebechlebskyjan@gmail.com>
Co-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>
Co-authored-by: jockque <38525640+jockque@users.noreply.github.com>
Co-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>
Co-authored-by: Keith Scott <kscott@mitre.org>
Co-authored-by: Kfir Gollan <kfir@drivenets.com>
Co-authored-by: Lars Munch <lars@segv.dk>
Co-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>
Co-authored-by: Leonard Crestez <cdleonard@gmail.com>
Co-authored-by: Marcel Patzlaff <mpatzlaff@benocs.com>
Co-authored-by: Martijn Thé <martijnthe@users.noreply.github.com>
Co-authored-by: Martine Lenders <authmillenon@gmail.com>
Co-authored-by: Michael Farrell <micolous+git@gmail.com>
Co-authored-by: Michał Mirosław <mirq-linux@rere.qmqm.pl>
Co-authored-by: mkaliszan <mkaliszan@benocs.com>
Co-authored-by: mtury <maxence.tury@ssi.gouv.fr>
Co-authored-by: Neale Ranns <nranns@cisco.com>
Co-authored-by: Octavian Toader <Octavian.Toader@belden.com>
Co-authored-by: Peter Eisenlohr <peter@eisenlohr.org>
Co-authored-by: Phil <phil@secdev.org>
Co-authored-by: Pierre Lalet <pierre@droids-corp.org>
Co-authored-by: Pierre Lorinquer <pierre.lorinquer@ssi.gouv.fr>
Co-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>
Co-authored-by: pvinci <pvinci@users.noreply.github.com>
Co-authored-by: Rahul Jadhav <nyrahul@gmail.com>
Co-authored-by: Robin Jarry <robin.jarry@6wind.com>
Co-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>
Co-authored-by: rperez <rperez@debian>
Co-authored-by: Sabrina Dubroca <sd@queasysnail.net>
Co-authored-by: Sebastian Baar <sebastian.baar@gmx.de>
Co-authored-by: sebastien mainand <sebastien.mainand@ssi.gouv.fr>
Co-authored-by: smehner1 <smehner1@gmail.com>
Co-authored-by: Steven Van Acker <steven@singularity.be>
Co-authored-by: Thomas Faivre <thomas.faivre@6wind.com>
Co-authored-by: Tran Tien Dat <peter.trantiendat@gmail.com>
Co-authored-by: Wael Mahlous <wael.mahlous@gmail.com>
Co-authored-by: waeva <74464394+waeva@users.noreply.github.com> | scapy | 14 | Python | 83 | http2.py | def _parse_multi_byte(self, s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
value = 0
i = 1
byte = orb(s[i])
# For CPU sake, stops at an arbitrary large number!
max_value = 1 << 64
        # As long as the MSB is set, another byte must be read
while byte & 0x80:
value += (byte ^ 0x80) << (7 * (i - 1))
if value > max_value:
raise error.Scapy_Exception(
'out-of-bound value: the string encodes a value that is too large (>2^{{64}}): {}'.format(value) # noqa: E501
)
i += 1
assert i < tmp_len, 'EINVAL: x: out-of-bound read: the string ends before the AbstractUVarIntField!' # noqa: E501
byte = orb(s[i])
value += byte << (7 * (i - 1))
value += self._max_value
assert value >= 0
return value
| 08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf | 125 | https://github.com/secdev/scapy.git | 343 | def _parse_multi_byte(self, s):
# type: (str) -> int
assert len(s) >= 2
tmp_len = len(s)
value = 0
i = 1
byte = orb(s[i])
# For CPU sake, stops at an arbitrary large numb | 14 | 202 | _parse_multi_byte |
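The loop is the multi-byte integer continuation from HPACK (RFC 7541, section 5.1): each continuation byte contributes its low 7 bits, least-significant group first, and `self._max_value` (presumably 2**N - 1 for an N-bit prefix) is added back at the end. Worked example from the RFC, decoding 1337 with a 5-bit prefix:

# Wire bytes after the saturated prefix (max_value = 2**5 - 1 = 31): 0x9A, 0x0A
value = (0x9A ^ 0x80) << 0   # 26: low 7 bits; MSB set means "continue"
value += 0x0A << 7           # + 1280 -> 1306; MSB clear, so the loop stops
value += 31                  # + max_value -> 1337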
|
24 | 0 | 3 | 5 | python3.10.4/Lib/email/iterators.py | 223,770 | add python 3.10.4 for windows | XX-Net | 12 | Python | 23 | iterators.py | def walk(self):
yield self
if self.is_multipart():
for subpart in self.get_payload():
yield from subpart.walk()
# These two functions are imported into the Iterators.py interface module. | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 31 | https://github.com/XX-net/XX-Net.git | 50 | def walk(self):
yield self
if self.is_multipart():
for subpart in self.get_payload():
yield from subpart.walk()
# These two functions are | 5 | 56 | walk |
|
24 | 0 | 1 | 10 | onnx/backend/test/case/node/reduce_log_sum.py | 254,930 | Use Python type annotations rather than comments (#3962)
* These have been supported since Python 3.5.
ONNX doesn't support Python < 3.6, so we can use the annotations.
Diffs generated by https://pypi.org/project/com2ann/.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Remove MYPY conditional logic in gen_proto.py
It breaks the type annotations and shouldn't be needed.
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* Get rid of MYPY bool from more scripts
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* move Descriptors class above where its referenced in type annotation
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fixes
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* remove extra blank line
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotations
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix type annotation in gen_docs
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix Operators.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix TestCoverage.md
Signed-off-by: Gary Miguel <garymiguel@microsoft.com>
* fix protoc-gen-mypy.py
Signed-off-by: Gary Miguel <garymiguel@microsoft.com> | onnx | 11 | Python | 22 | reduce_log_sum.py | def export_keepdims() -> None:
node = onnx.helper.make_node(
'ReduceLogSum',
inputs=['data'],
outputs=["reduced"]
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, keepdims=True))
expect(node, inputs=[data], outputs=[reduced],
name='test_reduce_log_sum_default')
| 83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd | 88 | https://github.com/onnx/onnx.git | 105 | def export_keepdims() -> None:
node = onnx.helper.make_node(
'ReduceLogSum',
| 19 | 139 | export_keepdims |
|
38 | 0 | 2 | 13 | homeassistant/components/upnp/coordinator.py | 289,917 | Move upnp derived sensors to library, be more robust about failing getting some data (#79955) | core | 14 | Python | 35 | coordinator.py | async def _async_update_data(self) -> Mapping[str, Any]:
try:
return await self.device.async_get_data()
except UpnpCommunicationError as exception:
LOGGER.debug(
"Caught exception when updating device: %s, exception: %s",
self.device,
exception,
)
raise UpdateFailed(
f"Unable to communicate with IGD at: {self.device.device_url}"
) from exception
| d50795af2b861e28e717f0479ad6e800b7030620 | 50 | https://github.com/home-assistant/core.git | 174 | async def _async_update_data(self) -> Mapping[str, Any]:
try:
return await self.device.async_get_data()
except UpnpCommunicationError as exception:
LOGGER.debug(
"Caught exception when updating device: %s, exception: %s",
self.device,
exception,
)
raise UpdateFailed(
f"Unable to communicate with IGD at: {self.de | 13 | 93 | _async_update_data |
|
68 | 0 | 4 | 31 | src/documents/consumer.py | 320,217 | Adds further testing to cover scripts with non-zero exit codes | paperless-ngx | 13 | Python | 55 | consumer.py | def run_pre_consume_script(self):
if not settings.PRE_CONSUME_SCRIPT:
return
if not os.path.isfile(settings.PRE_CONSUME_SCRIPT):
self._fail(
MESSAGE_PRE_CONSUME_SCRIPT_NOT_FOUND,
f"Configured pre-consume script "
f"{settings.PRE_CONSUME_SCRIPT} does not exist.",
)
self.log("info", f"Executing pre-consume script {settings.PRE_CONSUME_SCRIPT}")
filepath_arg = os.path.normpath(self.path)
script_env = os.environ.copy()
script_env["DOCUMENT_SOURCE_PATH"] = filepath_arg
try:
completed_proc = run(
args=[
settings.PRE_CONSUME_SCRIPT,
filepath_arg,
],
env=script_env,
capture_output=True,
)
self._log_script_outputs(completed_proc)
# Raises exception on non-zero output
completed_proc.check_returncode()
except Exception as e:
self._fail(
MESSAGE_PRE_CONSUME_SCRIPT_ERROR,
f"Error while executing pre-consume script: {e}",
exc_info=True,
exception=e,
)
| 057f6016cc92f6d21b04b9a16dc6f0b255c8b401 | 133 | https://github.com/paperless-ngx/paperless-ngx.git | 436 | def run_pre_consume_script(self):
if not settings.PRE_CONSUME_SCRIPT:
return
if not os.path.isfile(settings.PRE_CONSUME_SCRIPT):
self._fail(
MESSAGE_PRE_CONSUME_SCRIPT_NOT_FOUND,
f"Configured pre-consume script "
f"{settings.PRE_CONSUME_SCRIPT} does not exist.",
)
self.log("info", f"Executing pre-consume script {settings.PRE_CONSUME_SCRIPT}")
filepath_arg = os.path.normpath(self.path)
script_env = os.environ.copy()
script_env["DOCUMENT_SOURCE_PATH"] = filepath_arg
try:
completed_proc = run(
args=[
settings.PRE_CONSUME_SCRIPT,
filepath_arg,
],
env=script_env,
capture_output=True,
)
self._log_script_outputs(completed_proc)
# Raises exception on non-zero output
completed_proc.check_returncode()
except Exception as e:
self._fail(
MESSAGE_PRE_CONSUME_SCRIPT_ERROR,
f"Error while executing pre-consume script: {e}",
exc_info=True,
exception=e,
| 27 | 231 | run_pre_consume_script |
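A note on the failure path exercised above: `subprocess.run(...).check_returncode()` raises `CalledProcessError` whenever the script exits non-zero, which is what funnels bad exit codes into the except branch. Minimal stdlib illustration:

import subprocess

proc = subprocess.run(["sh", "-c", "exit 3"], capture_output=True)
try:
    proc.check_returncode()  # raises because returncode == 3
except subprocess.CalledProcessError as e:
    print("script failed with exit code", e.returncode)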
|
26 | 0 | 3 | 31 | keras/distribute/mirrored_strategy_test.py | 270,468 | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | keras | 11 | Python | 21 | mirrored_strategy_test.py | def testTrainAndServeWithKPL(self, distribution):
use_adapt = False
test_utils_obj = kpl_test_utils.DistributeKplTestUtils()
with distribution.scope():
(
feature_mapper,
label_mapper,
) = test_utils_obj.define_kpls_for_training(use_adapt)
model = test_utils_obj.define_model()
optimizer = rmsprop.RMSprop(learning_rate=0.1)
accuracy = keras.metrics.Accuracy()
| 84afc5193d38057e2e2badf9c889ea87d80d8fbf | 168 | https://github.com/keras-team/keras.git | 131 | def testTrainAndServeWithKPL(self, distribution):
use_adapt = False
test_utils_obj = kpl_test_utils.DistributeKplTestUtils( | 21 | 104 | testTrainAndServeWithKPL |
|
146 | 0 | 10 | 30 | setup_ts.py | 113,550 | Bump node.js to 18 (#5206) | nni | 15 | Python | 112 | setup_ts.py | def copy_nni_node(version):
_print('Copying files')
if sys.version_info >= (3, 8):
shutil.copytree('ts/nni_manager/dist', 'nni_node', dirs_exist_ok=True)
else:
for item in os.listdir('ts/nni_manager/dist'):
subsrc = os.path.join('ts/nni_manager/dist', item)
subdst = os.path.join('nni_node', item)
if os.path.isdir(subsrc):
shutil.copytree(subsrc, subdst)
else:
shutil.copy2(subsrc, subdst)
shutil.copyfile('ts/nni_manager/package-lock.json', 'nni_node/package-lock.lock')
Path('nni_node/nni_manager.tsbuildinfo').unlink()
package_json = json.load(open('ts/nni_manager/package.json'))
if version:
while len(version.split('.')) < 3: # node.js semver requires at least three parts
version = version + '.0'
package_json['version'] = version
json.dump(package_json, open('nni_node/package.json', 'w'), indent=2)
if sys.platform == 'win32':
# On Windows, manually install node-gyp for sqlite3.
_npm('ts/nni_manager', 'install', '--global', 'node-gyp')
# reinstall without development dependencies
prod_path = Path('nni_node').resolve()
_yarn(str(prod_path), 'install', '--production')
shutil.copytree('ts/webui/build', 'nni_node/static')
if jupyter_lab_major_version == '2':
shutil.copytree('ts/jupyter_extension/build', 'nni_node/jupyter-extension/build')
shutil.copytree(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions')
elif version or Path('ts/jupyter_extension/dist').exists():
shutil.copytree('ts/jupyter_extension/dist', 'nni_node/jupyter-extension')
_yarn_env = dict(os.environ)
    # `Path('nni_node').resolve()` does not work on Windows if the directory does not exist
_yarn_env['PATH'] = str(Path().resolve() / 'nni_node') + path_env_seperator + os.environ['PATH']
_yarn_path = Path().resolve() / 'toolchain/yarn/bin' / yarn_executable
_npm_path = Path().resolve() / 'toolchain/node' / npm_executable
| 071dfb2dcff2f2a0b66a60f5faf51a97dc135328 | 266 | https://github.com/microsoft/nni.git | 338 | def copy_nni_node(version):
_print('Copying files')
if sys.version_info >= (3, 8):
shutil.copytree('ts/nni_manager/dist', 'nni_node', dirs_exist_ok=True)
else:
for item in os.listdir('ts/nni_manager/dist'):
subsrc = os.path.join('ts/nni_manager/dist', item)
subdst = os.path.join('nni_node', item)
if os.path.isdir(subsrc):
shutil.copytree(subsrc, subdst)
else:
shutil.copy2(subsrc, subdst)
shutil.copyfile('ts/nni_manager/package-lock.json', 'nni_node/package-lock.lock')
Path('nni_node/nni_manager.tsbuildinfo').unlink()
package_json = json.load(open('ts/nni_manager/package.json'))
if version:
while len(version.split('.')) < 3: # node.js semver requires at least three parts
version = version + '.0'
package_json['version'] = version
json.dump(package_json, open('nni_node/package.json', 'w'), indent=2)
if sys.platform == 'win32':
# On Windows, manually install node-gyp for sqlite3.
_npm('ts/nni_manager', 'install', '--global', 'node-gyp')
# reinstall without development dependencies
prod_path = Path('nni_node').resolve()
_yarn(str(prod_path), 'install', '--production')
shutil.copytree('ts/webui/build', 'nni_node/static')
if jupyter_lab_major_version == '2':
shutil.copytree('ts/jupyter_extension/build', 'nni_node/jupyter-extension/build')
shutil.copytree(os.path.join(sys.exec_prefix, 'share/jupyter/lab/extensions'), 'nni_node/jupyter-extension/extensions')
elif version or Path('ts/jupyter_extension/dist').exists():
sh | 45 | 590 | copy_nni_node |
|
76 | 0 | 2 | 23 | pandas/tests/io/excel/test_readers.py | 170,454 | ENH: Add use nullable dtypes to read_excel (#49091) | pandas | 17 | Python | 65 | test_readers.py | def test_use_nullable_dtypes(self, read_ext):
# GH#36712
if read_ext == ".xlsb":
pytest.skip("No engine for filetype: 'xlsb'")
df = DataFrame(
{
"a": Series([1, 3], dtype="Int64"),
"b": Series([2.5, 4.5], dtype="Float64"),
"c": Series([True, False], dtype="boolean"),
"d": Series(["a", "b"], dtype="string"),
"e": Series([pd.NA, 6], dtype="Int64"),
"f": Series([pd.NA, 7.5], dtype="Float64"),
"g": Series([pd.NA, True], dtype="boolean"),
"h": Series([pd.NA, "a"], dtype="string"),
"i": Series([pd.Timestamp("2019-12-31")] * 2),
"j": Series([pd.NA, pd.NA], dtype="Int64"),
}
)
with tm.ensure_clean(read_ext) as file_path:
df.to_excel(file_path, "test", index=False)
result = pd.read_excel(
file_path, sheet_name="test", use_nullable_dtypes=True
)
tm.assert_frame_equal(result, df)
| 22e591f2d142b20ba294c40236954d377c7b22ed | 240 | https://github.com/pandas-dev/pandas.git | 348 | def test_use_nullable_dtypes(self, read_ext):
# GH#36712
if read_ext == ".xlsb":
pytest.skip("No engine for filetype: 'xlsb'")
df = DataFrame(
{
"a": Seri | 22 | 389 | test_use_nullable_dtypes |
|
84 | 0 | 3 | 22 | ppdet/modeling/heads/pico_head.py | 210,477 | Simplify picodet postprocess (#5650) | PaddleDetection | 16 | Python | 62 | pico_head.py | def _generate_anchors(self, feats=None):
# just use in eval time
anchor_points = []
stride_tensor = []
for i, stride in enumerate(self.fpn_stride):
if feats is not None:
_, _, h, w = feats[i].shape
else:
h = math.ceil(self.eval_size[0] / stride)
w = math.ceil(self.eval_size[1] / stride)
shift_x = paddle.arange(end=w) + self.cell_offset
shift_y = paddle.arange(end=h) + self.cell_offset
shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
anchor_point = paddle.cast(
paddle.stack(
[shift_x, shift_y], axis=-1), dtype='float32')
anchor_points.append(anchor_point.reshape([-1, 2]))
stride_tensor.append(
paddle.full(
[h * w, 1], stride, dtype='float32'))
anchor_points = paddle.concat(anchor_points)
stride_tensor = paddle.concat(stride_tensor)
return anchor_points, stride_tensor
| c612935d8d7431f3a730cf5e213159f6b20938d1 | 206 | https://github.com/PaddlePaddle/PaddleDetection.git | 333 | def _generate_anchors(self, feats=None):
# just use in eval time
anchor_points = []
stride_tensor = []
for i, stride in enumerate(self.fpn_stride):
if feats is not None:
_, _, h, w = feats[i].shape
else:
h = math.ceil(self.eval_size[0] / stride)
w = math.ceil(self.eval_size[1] / stride)
shift_x = paddle.arange(end=w) + self.cell_offset
shift_y = paddle.arange(end=h) + self.cell_offset
shift_y, shift_x = paddle.meshgrid(shift_y, shift_x)
anchor_point = paddle.cast(
paddle.stack(
[shift_x, shift_y], axis=-1), dtype='float32')
anchor_points.append(anchor_point.reshape([-1, 2]))
stride_tensor.append(
paddle.full(
[h * w, 1], stride, dtype='float32'))
ancho | 32 | 319 | _generate_anchors |
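Concretely, per FPN level the function lays one anchor point at each cell center. A NumPy sketch of the eval-time branch for a hypothetical level with eval_size 16x16, stride 8 and cell_offset 0.5 (so h = w = 2):

import numpy as np

xs = np.arange(2) + 0.5
ys = np.arange(2) + 0.5
points = np.stack(np.meshgrid(xs, ys), axis=-1).reshape(-1, 2)
# points: [[0.5, 0.5], [1.5, 0.5], [0.5, 1.5], [1.5, 1.5]],
# each paired with a stride entry of 8 in stride_tensor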
|
23 | 0 | 1 | 7 | test/test_pipeline.py | 257,263 | Validate YAML files without loading the nodes (#2438)
* Remove BasePipeline and make a module for RayPipeline
* Can load pipelines from yaml, plenty of issues left
* Extract graph validation logic into _add_node_to_pipeline_graph & refactor load_from_config and add_node to use it
* Fix pipeline tests
* Move some tests out of test_pipeline.py and create MockDenseRetriever
* mypy and pylint (silencing too-many-public-methods)
* Fix issue found in some yaml files and in schema files
* Fix paths to YAML and fix some typos in Ray
* Fix eval tests
* Simplify MockDenseRetriever
* Fix Ray test
* Accidentally pushed merge conflict, fixed
* Typo in schemas
* Typo in _json_schema.py
* Slightly reduce noisiness of version validation warnings
* Fix version logs tests
* Fix version logs tests again
* remove seemingly unused file
* Add check and test to avoid adding the same node to the pipeline twice
* Update Documentation & Code Style
* Revert config to pipeline_config
* Remove unused import
* Complete reverting to pipeline_config
* Some more stray config=
* Update Documentation & Code Style
* Feedback
* Move back other_nodes tests into pipeline tests temporarily
* Update Documentation & Code Style
* Fixing tests
* Update Documentation & Code Style
* Fixing ray and standard pipeline tests
* Rename colliding load() methods in dense retrievers and faiss
* Update Documentation & Code Style
* Fix mypy on ray.py as well
* Add check for no root node
* Fix tests to use load_from_directory and load_index
* Try to workaround the disabled add_node of RayPipeline
* Update Documentation & Code Style
* Fix Ray test
* Fix FAISS tests
* Relax class check in _add_node_to_pipeline_graph
* Update Documentation & Code Style
* Try to fix mypy in ray.py
* unused import
* Try another fix for Ray
* Fix connector tests
* Update Documentation & Code Style
* Fix ray
* Update Documentation & Code Style
* use BaseComponent.load() in pipelines/base.py
* another round of feedback
* stray BaseComponent.load()
* Update Documentation & Code Style
* Fix FAISS tests too
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: tstadel <60758086+tstadel@users.noreply.github.com> | haystack | 12 | Python | 21 | test_pipeline.py | def test_graph_validation_invalid_node():
docstore = MockDocumentStore()
retriever = DummyRetriever(document_store=docstore)
pipeline = Pipeline()
pipeline.add_node(name="DocStore", component=docstore, inputs=["Query"])
with pytest.raises(PipelineConfigError, match="Cannot find node 'InvalidNode'"):
pipeline.add_node(name="Retriever", component=retriever, inputs=["InvalidNode"])
| f8e02310bf0dfbd1ab79a1c3c73434e0aeba4f4b | 70 | https://github.com/deepset-ai/haystack.git | 44 | def test_graph_validation_invalid_node():
docstore = MockDocumentStore()
retriever = DummyRetriever(document_store=docstore)
pipeline = Pipeline()
pipeline.add_node(name="DocStore" | 16 | 121 | test_graph_validation_invalid_node |
|
57 | 1 | 1 | 22 | tests/ludwig/features/test_image_feature.py | 7,944 | Update missing value strategy to only allow bfill and ffill (#2457)
* push changes
* working missing value strategy
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* Add type hints to backward compatibility transformations
* Update test to test both missing value strategy updates
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> | ludwig | 11 | Python | 50 | test_image_feature.py | def test_image_preproc_module_bad_num_channels():
    metadata = {
        "preprocessing": {
            "missing_value_strategy": BFILL,
            "in_memory": True,
            "resize_method": "interpolate",
            "scaling": "pixel_normalization",
            "num_processes": 1,
            "infer_image_num_channels": True,
            "infer_image_dimensions": True,
            "infer_image_max_height": 256,
            "infer_image_max_width": 256,
            "infer_image_sample_size": 100,
            "height": 12,
            "width": 12,
            "num_channels": 2,
        },
        "reshape": (2, 12, 12),
    }
    module = _ImagePreprocessing(metadata)
    with pytest.raises(ValueError):
        module(torch.rand(2, 3, 10, 10))
@pytest.mark.parametrize("resize_method", [INTERPOLATE, CROP_OR_PAD])
@pytest.mark.parametrize(["num_channels", "num_channels_expected"], [(1, 3), (3, 1)]) | 0ab41a299cc690940b750a79b704d69544315702 | @pytest.mark.parametrize("resize_method", [INTERPOLATE, CROP_OR_PAD])
@pytest.mark.parametrize(["num_channels", "num_channels_expected"], [(1, 3), (3, 1)]) | 104 | https://github.com/ludwig-ai/ludwig.git | 237 | def test_image_preproc_module_bad_num_channels():
    metadata = {
        "preprocessing": {
            "missing_value_strategy": BFILL,
            "in_memory": True,
            "resize_method": "interpolate",
            "scaling": "pixel_normalization",
            "num_processes": 1,
            "infer_image_num_channels": True,
            "infer_image_dimensions": True,
            "infer_image_max_height": 256,
            "infer_image_max_width": 256,
            "infer_image_sample_size": 100,
            "height": 12,
            "width": 12,
            "num_channels": 2,
        },
        "reshape": (2, 12, 12),
    }
    module = _ImagePreprocessing(metadata)
    with pytest.raises(ValueError):
        module(torch.rand(2, 3, 10, 10))
@pytest.mark.parametrize("resize_method", [INTERPOLATE, | 14 | 243 | test_image_preproc_module_bad_num_channels |
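The test encodes the contract that an explicit `num_channels: 2` cannot be satisfied by 3-channel inputs, so preprocessing must raise rather than silently convert. A minimal sketch of that validation — illustrative, not Ludwig's `_ImagePreprocessing`:

import torch

def check_num_channels(images: torch.Tensor, expected: int) -> None:
    # images is (batch, channels, height, width); only 1 <-> 3 channel
    # conversions are generally inferable, anything else should fail loudly.
    actual = images.shape[1]
    if actual != expected and {actual, expected} != {1, 3}:
        raise ValueError(f"Expected {expected} channels, got {actual}")

check_num_channels(torch.rand(2, 3, 10, 10), expected=2)  # raises ValueError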
9 | 0 | 2 | 5 | python3.10.4/Lib/ast.py | 220,253 | add python 3.10.4 for windows | XX-Net | 10 | Python | 9 | ast.py | def visit_arg(self, node):
    self.write(node.arg)
    if node.annotation:
        self.write(": ")
        self.traverse(node.annotation)
| 8198943edd73a363c266633e1aa5b2a9e9c9f526 | 34 | https://github.com/XX-net/XX-Net.git | 44 | def visit_arg(self, node):
    self.write( | 7 | 57 | visit_arg
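`visit_arg` belongs to the `_Unparser` in CPython's `ast` module: it writes the argument name, then `: <annotation>` only when an annotation exists. Its effect is observable through the public `ast.unparse` API:

import ast

tree = ast.parse("def f(x: int, y): pass")
# The unparser's visit_arg emits "x: int" for annotated args, plain "y" otherwise.
print(ast.unparse(tree))  # -> def f(x: int, y):\n    pass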
|
10 | 0 | 1 | 5 | tests/openbb_terminal/portfolio/test_portfolio_model.py | 285,020 | Adding new portfolio metrics (#2029)
* Making pa active + pa minor features
Makes pa active and adds country to the df. The groupby command also gets percentages of holding allocation. It also fixes warnings and prepares for a later PR that I'm currently working on.
* Fix linting
* black linter
* Fixes
Should fix everything
* Linting
* Making pa controller to base class standard
* Fix linting
* first metrics
* Adding calmar ratio
* Adding metrics to controller
* Linting
* Linting
* Test fixes
* Test fixes
* Adding tests
* Linting
* Restructuring added tests
* Linting
* Updating hugo website and fixing help
* Fixing commands
* Fix kelly criterion command
* Fixing tests
* Linting
* More linting
Co-authored-by: DidierRLopes <dro.lopes@campus.fct.unl.pt>
Co-authored-by: Jeroen Bouma <jer.bouma@gmail.com> | OpenBBTerminal | 8 | Python | 10 | test_portfolio_model.py | def test_tracking_error(recorder):
    result_df, _ = portfolio_model.get_tracking_error(
        portfolio_returns, benchmark_returns
    )
    recorder.capture(result_df)
| 89297fadc4b5f9381b259f16314e44f9ba9be7bd | 23 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 25 | def test_tracking_error(recorder):
    result_df, _ = portfolio_model.get_tracking_error(
        portfolio_return | 9 | 37 | test_tracking_error
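Tracking error, the metric exercised above, is conventionally the (often annualized) standard deviation of active returns, i.e. portfolio returns minus benchmark returns. A minimal pandas sketch of the calculation — an illustration of the metric, not OpenBB's `get_tracking_error`:

import numpy as np
import pandas as pd

def tracking_error(portfolio: pd.Series, benchmark: pd.Series, periods_per_year: int = 252) -> float:
    # Active return per period, then its annualized standard deviation.
    active = portfolio - benchmark
    return float(active.std() * np.sqrt(periods_per_year))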
|
86 | 0 | 3 | 14 | pandas/tests/arithmetic/test_datetime64.py | 172,322 | API: don't do type inference on arithmetic results (#49714)
* API: don't do type inference on arithmetic results
* mypy fixup
* use concat_compat
* don't infer in TimedeltaArray
* update addsub
* avoid messing with box_expected | pandas | 14 | Python | 64 | test_datetime64.py | def test_dt64arr_addsub_object_dtype_2d():
    # block-wise DataFrame operations will require operating on 2D
    # DatetimeArray/TimedeltaArray, so check that specifically.
    dti = date_range("1994-02-13", freq="2W", periods=4)
    dta = dti._data.reshape((4, 1))
    other = np.array([[pd.offsets.Day(n)] for n in range(4)])
    assert other.shape == dta.shape
    with tm.assert_produces_warning(PerformanceWarning):
        result = dta + other
    with tm.assert_produces_warning(PerformanceWarning):
        expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1)
    tm.assert_numpy_array_equal(result, expected)
    with tm.assert_produces_warning(PerformanceWarning):
        # Case where we expect to get a TimedeltaArray back
        result2 = dta - dta.astype(object)
    assert result2.shape == (4, 1)
    assert all(td.value == 0 for td in result2.ravel())
| 35a7f807ac9f02128333c1b5df0f03c897d13445 | 165 | https://github.com/pandas-dev/pandas.git | 150 | def test_dt64arr_addsub_object_dtype_2d():
    # block-wise DataFrame operations will require operating on 2D
    # DatetimeArray/TimedeltaArray, so check that specifically.
    dti = date_range("1994-02-13", freq="2W", periods=4)
    dta = dti._data.reshape((4, 1))
    other = np.array([[pd.offsets.Day(n)] for n in range(4)])
    assert other.shape == dta.shape
    with tm.assert_produces_warning(PerformanceWarning): | 30 | 266 | test_dt64arr_addsub_object_dtype_2d
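The test adds a 2D `DatetimeArray` to an object-dtype array of `DateOffset`s; pandas has to apply the offsets in a Python-level loop, which is exactly why every operation is wrapped in `assert_produces_warning(PerformanceWarning)`. The 1D analogue is easy to reproduce:

import warnings
import numpy as np
import pandas as pd

dti = pd.date_range("1994-02-13", freq="2W", periods=4)
offsets = np.array([pd.offsets.Day(n) for n in range(4)], dtype=object)

# Object-dtype offset arithmetic falls back to an element-wise Python loop,
# so pandas emits a PerformanceWarning (silenced here for brevity).
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    shifted = dti + offsets
print(shifted)  # each timestamp shifted by 0, 1, 2 and 3 days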
|
26 | 0 | 2 | 5 | lib/ansible/galaxy/collection/concrete_artifact_manager.py | 267,044 | ansible-galaxy collection - ensure dependencies are a dict (#77561)
* fix traceback when installing collection with dependencies set to None | ansible | 9 | Python | 20 | concrete_artifact_manager.py | def get_direct_collection_dependencies(self, collection):
    # type: (Candidate | Requirement) -> dict[str, str]
    collection_dependencies = self.get_direct_collection_meta(collection)['dependencies']
    if collection_dependencies is None:
        collection_dependencies = {}
    return collection_dependencies  # type: ignore[return-value]
| 4d69c09695c8f78b95edf51314999be3c19b62eb | 30 | https://github.com/ansible/ansible.git | 73 | def get_direct_collection_dependencies(self, collection):
    # | 5 | 54 | get_direct_collection_dependencies
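The fix guards against `galaxy.yml` declaring `dependencies:` with no value — YAML parses an empty value as null, which loads as Python `None`, while callers expect a dict. The general normalization pattern, shown here as an assumed standalone snippet outside ansible's classes:

import yaml

manifest = yaml.safe_load("namespace: acme\nname: demo\ndependencies:\n")

# An empty YAML value becomes None; normalize to {} so dependency
# resolution can iterate without a traceback.
dependencies = manifest.get("dependencies") or {}
assert dependencies == {}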
|
50 | 0 | 2 | 13 | rllib/offline/tests/test_dataset_reader.py | 125,005 | [RLlib] improved unittests for dataset_reader and fixed bugs (#26458) | ray | 17 | Python | 45 | test_dataset_reader.py | def test_absolute_zip(self):
    # this should work regardless of where the current working directory is.
    with tempfile.TemporaryDirectory() as tmp_dir:
        cwdir = os.getcwd()
        os.chdir(tmp_dir)
        unzipped_paths = _unzip_if_needed(
            [str(Path(self.absolute_path) / "enormous.zip")], "json"
        )
        self.assertEqual(
            str(Path(unzipped_paths[0]).absolute()),
            str(Path("./").absolute() / "enormous.json"),
        )
        assert all(Path(fpath).exists() for fpath in unzipped_paths)
        os.chdir(cwdir)
# @TODO: unskip when this is fixed | 569fe0109629048d08e1d9e023f7769f10bd2244 | 106 | https://github.com/ray-project/ray.git | 207 | def test_absolute_zip(self):
    # this should work regardless of where the current working directory is.
    with tempfile.TemporaryDirectory() as tmp_dir:
        cwdir = os.getcwd()
        os.chdir(tmp_dir)
        unzipped_paths = _unzip_if_needed(
            [str(Path(self.absolute_pat | 19 | 186 | test_absolute_zip
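The helper under test takes a list of data paths, extracts any zip archives into the current working directory, and returns paths to files of the requested format. A hedged sketch of that pattern — names and behavior are inferred from the test, not Ray's actual `_unzip_if_needed`:

import zipfile
from pathlib import Path

def unzip_if_needed(paths, extension="json"):
    # Pass non-archive paths through; extract .zip archives into the CWD
    # and return the member files matching the requested extension.
    resolved = []
    for p in map(Path, paths):
        if p.suffix == ".zip":
            with zipfile.ZipFile(p) as zf:
                zf.extractall(".")
                resolved += [n for n in zf.namelist() if n.endswith(f".{extension}")]
        else:
            resolved.append(str(p))
    return resolved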
|
29 | 0 | 1 | 6 | VoiceAssistant/Project_Basic_struct/speechtotext.py | 22,861 | VoiceAssistant
This is a Voice Assistant coded in Python which can do the following:
1. Speak Text entered by User.
2. Search anything on Google.
3. Search anything on Wikipedia.
4. Read an MS Word(docx) document.
5. Read a book(PDF).
6. Can be used as a Dictator. | Python | 11 | Python | 25 | speechtotext.py | def stt():
    with sr.Microphone() as source:
        # read the audio data from the default microphone
        audio_data = r.record(source, duration=5)
        print("Recognizing...")
        # convert speech to text
        text = r.recognize_google(audio_data)
print(text) | 39c49e07066b2a53e176d555af6a7bf8aabb8a9c | 41 | https://github.com/geekcomputers/Python.git | 73 | def stt():
with sr.Microphone() as source:
# read the audio data from the default microphone
audio_data = r.record(source, duration=5)
| 11 | 75 | stt |
|
92 | 0 | 5 | 15 | modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py | 155,185 | FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)
Signed-off-by: Igoshev, Iaroslav <iaroslav.igoshev@intel.com> | modin | 15 | Python | 71 | partition.py | def _apply_list_of_funcs(call_queue, partition): # pragma: no cover
    for func, f_args, f_kwargs in call_queue:
        func = deserialize(func)
        args = deserialize(f_args)
        kwargs = deserialize(f_kwargs)
        try:
            partition = func(partition, *args, **kwargs)
        # Sometimes Arrow forces us to make a copy of an object before we operate on it. We
        # don't want the error to propagate to the user, and we want to avoid copying unless
        # we absolutely have to.
        except ValueError:
            partition = func(partition.copy(), *args, **kwargs)
    return (
        partition,
        len(partition) if hasattr(partition, "__len__") else 0,
        len(partition.columns) if hasattr(partition, "columns") else 0,
        unidist.get_ip(),
    )
| 193505fdf0c984743397ba3df56262f30aee13a8 | 109 | https://github.com/modin-project/modin.git | 211 | def _apply_list_of_funcs(call_queue, partition): # pragma: no cover
    for func, f_args, f_kwargs in call_queue:
        func = deserialize(func)
        args = deserialize(f_args)
        kwargs = deserialize(f_kwargs)
        try:
            partition = func(partition, *args, | 16 | 174 | _apply_list_of_funcs
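`_apply_list_of_funcs` drains a partition's deferred call queue — each entry a possibly-serialized (func, args, kwargs) triple — and returns the result plus metadata (length, width, worker IP) that the scheduler caches. The core deferred-execution pattern, stripped of unidist and serialization:

import pandas as pd

def drain_call_queue(call_queue, partition):
    # Apply queued (func, args, kwargs) triples in order; each call sees
    # the partition produced by the previous one.
    for func, args, kwargs in call_queue:
        partition = func(partition, *args, **kwargs)
    return partition

queue = [
    (lambda p: p * 2, (), {}),
    (lambda p, col: p.rename(columns={"a": col}), ("b",), {}),
]
print(drain_call_queue(queue, pd.DataFrame({"a": [1, 2, 3]})))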
|
20 | 0 | 2 | 5 | wagtail/embeds/blocks.py | 74,930 | Reformat with black | wagtail | 9 | Python | 18 | blocks.py | def get_prep_value(self, value):
    # serialisable value should be a URL string
    if value is None:
        return ""
    else:
        return value.url
| d10f15e55806c6944827d801cd9c2d53f5da4186 | 20 | https://github.com/wagtail/wagtail.git | 62 | def get_prep_value(self, value):
    # serialisable value should be a URL string
    if value is None:
        return ""
    else:
        return value.url
| 4 | 35 | get_prep_value |
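`get_prep_value` is the StreamField hook that turns a block's native value into something JSON-serializable; here the embed object collapses to its URL, with `None` mapping to the empty string. A sketch of the contract using a stand-in class rather than Wagtail's embed type:

class FakeEmbed:
    # Stand-in for the embed object Wagtail hands to EmbedBlock.
    def __init__(self, url):
        self.url = url

def get_prep_value(value):
    # Serialize to a URL string; None becomes "" so stored JSON has no nulls.
    return "" if value is None else value.url

assert get_prep_value(None) == ""
assert get_prep_value(FakeEmbed("https://example.com/v")) == "https://example.com/v"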