Dataset columns: hash (string, 40 chars) · repo (string, 9–36 chars) · date (0 distinct values; null throughout) · license (0 distinct values; null throughout) · message (string, 74–349 chars) · mods (list, 1–16 entries)
c157a38240ef590ff522d5929cfb940274556aad
lagolunatic/wwrando
null
null
Fix how starting gear is handled when making useless progress items nonprogress There was a bug where if you started with 1 bottle/wallet/picto box/bomb bag/quiver, but not all of them, the ones you didn't start with would be considered progress items even when they shouldn't be.
[ { "change_type": "MODIFY", "diff": "@@ -277,7 +277,7 @@ class Logic:\n \n self.currently_owned_items.remove(cleaned_item_name)\n \n- if item_name in self.all_progress_items:\n+ if self.all_progress_items.count(item_name) > self.unplaced_progress_items.count(item_name):\n self.unplaced_progress_items.append(item_name)\n elif item_name in self.all_nonprogress_items:\n self.unplaced_nonprogress_items.append(item_name)\n@@ -709,11 +709,17 @@ class Logic:\n filter_sunken_treasure=filter_sunken_treasure\n )\n \n- useful_items = []\n+ items_needed = OrderedDict()\n for location_name in progress_locations:\n requirement_expression = self.item_locations[location_name][\"Need\"]\n- useful_items += self.get_item_names_from_logical_expression_req(requirement_expression)\n- useful_items += self.get_item_names_by_req_name(\"Can Reach and Defeat Ganondorf\")\n+ sub_items_needed = self.get_items_needed_from_logical_expression_req(requirement_expression)\n+ for item_name, num_required in sub_items_needed.items():\n+ items_needed[item_name] = max(num_required, items_needed.setdefault(item_name, 0))\n+ sub_items_needed = self.get_items_needed_by_req_name(\"Can Reach and Defeat Ganondorf\")\n+ for item_name, num_required in sub_items_needed.items():\n+ items_needed[item_name] = max(num_required, items_needed.setdefault(item_name, 0))\n+ \n+ useful_items = self.flatten_items_needed_to_item_names(items_needed)\n \n all_progress_items_filtered = []\n for item_name in useful_items:\n@@ -724,20 +730,25 @@ class Logic:\n if item_name not in self.all_progress_items:\n if not (item_name.startswith(\"Triforce Chart \") or item_name.startswith(\"Treasure Chart\")):\n raise Exception(\"Item %s opens up progress locations but is not in the list of all progress items.\" % item_name)\n- if item_name in all_progress_items_filtered:\n- # Avoid duplicates\n- continue\n all_progress_items_filtered.append(item_name)\n \n- items_to_make_nonprogress = [\n- item_name for item_name in self.all_progress_items\n- if item_name not in all_progress_items_filtered\n- and item_name not in self.currently_owned_items\n- ]\n- for item_name in items_to_make_nonprogress:\n+ all_items_to_make_nonprogress = self.all_progress_items.copy()\n+ starting_items_to_remove = self.rando.starting_items.copy()\n+ for item_name in all_progress_items_filtered:\n+ all_items_to_make_nonprogress.remove(item_name)\n+ if item_name in starting_items_to_remove:\n+ starting_items_to_remove.remove(item_name)\n+ unplaced_items_to_make_nonprogress = all_items_to_make_nonprogress.copy()\n+ for item_name in starting_items_to_remove:\n+ if item_name not in self.all_progress_items:\n+ continue\n+ unplaced_items_to_make_nonprogress.remove(item_name)\n+ \n+ for item_name in all_items_to_make_nonprogress:\n #print(item_name)\n self.all_progress_items.remove(item_name)\n self.all_nonprogress_items.append(item_name)\n+ for item_name in unplaced_items_to_make_nonprogress:\n self.unplaced_progress_items.remove(item_name)\n self.unplaced_nonprogress_items.append(item_name)\n \n", "new_path": "logic/logic.py", "old_path": "logic/logic.py" } ]
f5b5f69bae7543e688b771887f43aa93d50d8875
lagolunatic/wwrando
null
null
Fix styling of instance variables SCREAMING_SNAKE_CASE is usually used for constants that don't change across runs.
[ { "change_type": "MODIFY", "diff": "@@ -104,15 +104,15 @@ class Hints:\n self.logic = rando.logic\n self.options = rando.options\n \n- # Define constants for hint distribution.\n- self.MAX_PATH_HINTS = int(self.options.get(\"num_path_hints\", 0))\n- self.MAX_BARREN_HINTS = int(self.options.get(\"num_barren_hints\", 0))\n- self.MAX_LOCATION_HINTS = int(self.options.get(\"num_location_hints\", 0))\n- self.MAX_ITEM_HINTS = int(self.options.get(\"num_item_hints\", 0))\n- self.TOTAL_NUM_HINTS = self.MAX_PATH_HINTS + self.MAX_BARREN_HINTS + self.MAX_LOCATION_HINTS + self.MAX_ITEM_HINTS\n+ # Define instance variable shortcuts for hint distribution options.\n+ self.max_path_hints = int(self.options.get(\"num_path_hints\", 0))\n+ self.max_barren_hints = int(self.options.get(\"num_barren_hints\", 0))\n+ self.max_location_hints = int(self.options.get(\"num_location_hints\", 0))\n+ self.max_item_hints = int(self.options.get(\"num_item_hints\", 0))\n+ self.total_num_hints = self.max_path_hints + self.max_barren_hints + self.max_location_hints + self.max_item_hints\n \n- self.CLEARER_HINTS = self.options.get(\"clearer_hints\")\n- self.USE_ALWAYS_HINTS = self.options.get(\"use_always_hints\")\n+ self.clearer_hints = self.options.get(\"clearer_hints\")\n+ self.use_always_hints = self.options.get(\"use_always_hints\")\n \n # Import dictionaries used to build hints from files.\n with open(os.path.join(DATA_PATH, \"progress_item_hints.txt\"), \"r\") as f:\n@@ -482,7 +482,7 @@ class Hints:\n # effectively the same hint. However, if there's a Blue Chu Jelly hint, a Windfall barren hint would still be\n # helpful since Green Chu Jelly is at least another check on Windfall.\n always_hint_locations = []\n- if self.USE_ALWAYS_HINTS:\n+ if self.use_always_hints:\n always_hint_locations = list(filter(\n lambda location_name: location_name in self.location_hints\n and self.location_hints[location_name][\"Type\"] == \"Always\",\n@@ -680,7 +680,7 @@ class Hints:\n hintable_locations = list(filter(lambda loc: self.location_hints[loc][\"Type\"] == \"Sometimes\", hintable_locations))\n \n # If we're not using always hints, consider them as sometimes hints instead.\n- if not self.USE_ALWAYS_HINTS:\n+ if not self.use_always_hints:\n hintable_locations += always_hintable_locations\n always_hintable_locations = []\n \n@@ -726,7 +726,7 @@ class Hints:\n \n # Apply cryptic text to the location name, unless the clearer hints option is selected.\n item_name = self.logic.done_item_locations[location_name]\n- if not self.CLEARER_HINTS:\n+ if not self.clearer_hints:\n location_name = self.location_hints[location_name][\"Text\"]\n \n return Hint(HintType.LOCATION, location_name, item_name)\n@@ -801,11 +801,11 @@ class Hints:\n # small keys). Basically, we remove the item from that location and see if the path goal is still achievable. If\n # not, then we consider the item as required.\n required_locations_for_paths = {}\n- if self.MAX_PATH_HINTS > 0:\n+ if self.max_path_hints > 0:\n required_locations_for_paths = self.get_required_locations_for_paths()\n \n # Generate path hints.\n- # We hint at max `self.MAX_PATH_HINTS` zones at random. We start by hinted each of the race mode dungeons once.\n+ # We hint at max `self.max_path_hints` zones at random. We start by hinted each of the race mode dungeons once.\n # After that, we repeatedly select a path goal at random and use that to form another hint. Zones are weighted by\n # the number of required locations at that zone. 
The more required locations, the more likely that zone will be\n # chosen.\n@@ -814,7 +814,7 @@ class Hints:\n \n # If race mode is on, then remove items that are hinted on the path to a race mode dungeon from paths to Hyrule and\n # Ganondorf. This way, the path to the race mode dungeon takes hint priority for that item.\n- if self.MAX_PATH_HINTS > 0:\n+ if self.max_path_hints > 0:\n for dungeon_name in dungeon_paths:\n for item_tuple in required_locations_for_paths[dungeon_name]:\n if item_tuple in required_locations_for_paths[\"Hyrule\"]:\n@@ -824,7 +824,7 @@ class Hints:\n \n # Likewise, remove items that are hinted on the path to Hyrule from the path to Ganondorf. This way, the path to\n # Hyrule takes hint priority over the path to Ganondorf for that item.\n- if self.MAX_PATH_HINTS > 0:\n+ if self.max_path_hints > 0:\n for item_tuple in required_locations_for_paths[\"Hyrule\"]:\n if item_tuple in required_locations_for_paths[\"Ganon's Tower\"]:\n required_locations_for_paths[\"Ganon's Tower\"].remove(item_tuple)\n@@ -838,7 +838,7 @@ class Hints:\n if len(required_locations_for_paths) == 0:\n break\n \n- if len(hinted_path_zones) < self.MAX_PATH_HINTS:\n+ if len(hinted_path_zones) < self.max_path_hints:\n path_hint, location_name = self.get_path_hint(required_locations_for_paths[dungeon_name], previously_hinted_locations, dungeon_name)\n \n # Unable to generate a path hint for the dungeon, so remove path goal and move on to the next.\n@@ -847,11 +847,11 @@ class Hints:\n continue\n \n # Remove locations that are hinted in always hints from being hinted path.\n- if not self.USE_ALWAYS_HINTS or (location_name not in self.location_hints or self.location_hints[location_name][\"Type\"] != \"Always\"):\n+ if not self.use_always_hints or (location_name not in self.location_hints or self.location_hints[location_name][\"Type\"] != \"Always\"):\n hinted_path_zones.append(path_hint)\n previously_hinted_locations.append(location_name)\n \n- while len(required_locations_for_paths) > 0 and len(hinted_path_zones) < self.MAX_PATH_HINTS:\n+ while len(required_locations_for_paths) > 0 and len(hinted_path_zones) < self.max_path_hints:\n path_name = self.rando.rng.choice(list(required_locations_for_paths.keys()))\n path_hint, location_name = self.get_path_hint(required_locations_for_paths[path_name], previously_hinted_locations, path_name)\n \n@@ -860,16 +860,16 @@ class Hints:\n del required_locations_for_paths[path_name]\n else:\n # Remove locations that are hinted in always hints from being hinted path.\n- if not self.USE_ALWAYS_HINTS or (location_name not in self.location_hints or self.location_hints[location_name][\"Type\"] != \"Always\"):\n+ if not self.use_always_hints or (location_name not in self.location_hints or self.location_hints[location_name][\"Type\"] != \"Always\"):\n hinted_path_zones.append(path_hint)\n previously_hinted_locations.append(location_name)\n \n # Generate barren hints.\n- # We select at most `self.MAX_BARREN_HINTS` zones at random to hint as barren. Barren zones are weighted by the\n+ # We select at most `self.max_barren_hints` zones at random to hint as barren. 
Barren zones are weighted by the\n # square root of the number of locations at that zone.\n unhinted_barren_zones = self.get_barren_zones(progress_locations)\n hinted_barren_zones = []\n- while len(unhinted_barren_zones) > 0 and len(hinted_barren_zones) < self.MAX_BARREN_HINTS:\n+ while len(unhinted_barren_zones) > 0 and len(hinted_barren_zones) < self.max_barren_hints:\n # Weigh each barren zone by the square root of the number of locations there.\n zone_weights = [sqrt(location_counter[zone]) for zone in unhinted_barren_zones]\n \n@@ -878,16 +878,16 @@ class Hints:\n hinted_barren_zones.append(barren_hint)\n \n # Generate item hints.\n- # We select at most `self.MAX_ITEM_HINTS` items at random to hint at. We do not want to hint at items already\n+ # We select at most `self.max_item_hints` items at random to hint at. We do not want to hint at items already\n # covered by the path hints, nor do we want to hint at items in barren-hinted locations.\n hintable_locations = self.get_legal_item_hints(progress_locations, hinted_barren_zones, previously_hinted_locations)\n \n hinted_item_locations = []\n- while len(hintable_locations) > 0 and len(hinted_item_locations) < self.MAX_ITEM_HINTS:\n+ while len(hintable_locations) > 0 and len(hinted_item_locations) < self.max_item_hints:\n item_hint, location_name = self.get_item_hint(hintable_locations)\n \n # Apply cryptic text, unless the clearer hints option is selected.\n- if not self.CLEARER_HINTS:\n+ if not self.clearer_hints:\n item_hint.info1 = self.progress_item_hints[Hints.get_hint_item_name_static(item_hint.info1)]\n item_hint.info2 = self.island_name_hints[item_hint.info2]\n \n@@ -895,11 +895,11 @@ class Hints:\n previously_hinted_locations.append(location_name)\n \n # Generate location hints.\n- # We try to generate location hints until we get to `self.TOTAL_NUM_HINTS` total hints, but if there are not enough\n+ # We try to generate location hints until we get to `self.total_num_hints` total hints, but if there are not enough\n # valid hintable locations, then we have no choice but to return less than the desired amount of hints.\n always_hintable_locations, sometimes_hintable_locations = self.get_legal_location_hints(progress_locations, hinted_barren_zones, previously_hinted_locations)\n hinted_locations = []\n- remaining_hints_desired = self.TOTAL_NUM_HINTS - len(hinted_path_zones) - len(hinted_barren_zones) - len(hinted_item_locations)\n+ remaining_hints_desired = self.total_num_hints - len(hinted_path_zones) - len(hinted_barren_zones) - len(hinted_item_locations)\n \n # Start by exhausting the list of always hints.\n while len(always_hintable_locations) > 0 and remaining_hints_desired > 0:\n", "new_path": "hints.py", "old_path": "hints.py" }, { "change_type": "MODIFY", "diff": "@@ -898,7 +898,7 @@ def randomize_and_update_hints(self):\n hints_per_placement[option] = []\n \n hint_placement_options = list(hints_per_placement.keys())\n- if hints_manager.TOTAL_NUM_HINTS == 0 or len(hint_placement_options) == 0:\n+ if hints_manager.total_num_hints == 0 or len(hint_placement_options) == 0:\n return\n \n # Generate the hints that will be distributed over the hint placement options\n", "new_path": "tweaks.py", "old_path": "tweaks.py" } ]
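The convention the commit applies, sketched with simplified stand-ins for the real Hints class and its options: values identical in every run keep SCREAMING_SNAKE_CASE, while values derived from per-run options become ordinary snake_case instance attributes.

```python
MAX_HINT_TYPES = 4  # a genuine constant: the same in every run

class Hints:
    def __init__(self, options):
        # Derived from per-run options, so styled as plain instance variables.
        self.max_path_hints = int(options.get("num_path_hints", 0))
        self.clearer_hints = bool(options.get("clearer_hints", False))

hints = Hints({"num_path_hints": 3, "clearer_hints": True})
print(hints.max_path_hints, hints.clearer_hints)  # 3 True
```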
3a4a347844e4a0e0aeaeeb3cb000965d969928d8
lagolunatic/wwrando
null
null
Replace filter calls with list comprehensions List comprehensions are more readable since they don't require writing `list(filter(lambda`, and they allow naming individual tuple elements instead of indexing them with `x[0]`.
[ { "change_type": "MODIFY", "diff": "@@ -498,11 +498,11 @@ class Hints:\n # helpful since Green Chu Jelly is at least another check on Windfall.\n remote_locations = []\n if self.prioritize_remote_hints:\n- remote_locations = list(filter(\n- lambda location_name: location_name in self.location_hints\n- and self.location_hints[location_name][\"Type\"] == \"Remote\",\n- progress_locations,\n- ))\n+ remote_locations = [\n+ loc for loc in progress_locations\n+ if loc in self.location_hints\n+ and self.location_hints[loc][\"Type\"] == \"Remote\"\n+ ]\n \n # Initialize possibly-required zones to all logical zones in this seed.\n # `possibly_required_zones` contains a mapping of zones -> possibly-required items.\n@@ -560,19 +560,19 @@ class Hints:\n # The zones with zero possibly-required items makes up our initial set of barren zones.\n # We also check whether there are any non-remote hinted locations there. If not, then \n # don't hint at that zone since it'd be covered already by the remote hint(s).\n- barren_zones = list(filter(\n- lambda x: x[0] in possibly_required_zones_no_remote and len(x[1]) == 0,\n- possibly_required_zones.items()\n- ))\n- barren_zones = set(zone_name for zone_name, empty_set in barren_zones)\n+ barren_zones = set(\n+ zone_name for zone_name, item_names in possibly_required_zones.items()\n+ if zone_name in possibly_required_zones_no_remote\n+ and len(item_names) == 0\n+ )\n \n # Prevent the entrances of possibly-required dungeons from being hinted at as barren.\n- possibly_required_dungeons = list(filter(\n- lambda x: len(x[1]) != 0\n- and x[0] in self.logic.DUNGEON_NAMES.values(),\n- possibly_required_zones.items(),\n- ))\n- for dungeon_name, items_set in possibly_required_dungeons:\n+ possibly_required_dungeons = [\n+ zone_name for zone_name, item_names in possibly_required_zones.items()\n+ if zone_name in self.logic.DUNGEON_NAMES.values()\n+ and len(item_names) != 0\n+ ]\n+ for dungeon_name in possibly_required_dungeons:\n if dungeon_name == \"Dragon Roost Cavern\":\n entrance_zone = self.get_entrance_zone(\"Dragon Roost Cavern - Gohma Heart Container\")\n barren_zones.discard(entrance_zone)\n@@ -639,8 +639,10 @@ class Hints:\n # Helper function to build a list of locations which may be hinted as item hints in this seed.\n \n # Filter out locations which are invalid to be hinted at for item hints.\n- hintable_locations = list(filter(lambda location_name: self.filter_legal_item_hint(\n- location_name, progress_locations, previously_hinted_locations), self.logic.done_item_locations.keys()))\n+ hintable_locations = [\n+ loc for loc in self.logic.done_item_locations\n+ if self.filter_legal_item_hint(loc, progress_locations, previously_hinted_locations)\n+ ]\n \n # Remove locations in hinted barren areas.\n new_hintable_locations = []\n@@ -694,12 +696,12 @@ class Hints:\n def get_legal_location_hints(self, progress_locations, hinted_barren_zones, previously_hinted_locations):\n # Helper function to build a list of locations which may be hinted as location hints in this seed.\n \n- hintable_locations = list(filter(lambda loc: loc in self.location_hints.keys(), progress_locations))\n+ hintable_locations = [loc for loc in progress_locations if loc in self.location_hints]\n \n # Identify valid remote hints for this seed.\n- remote_hintable_locations = list(filter(lambda loc: self.location_hints[loc][\"Type\"] == \"Remote\", hintable_locations))\n+ remote_hintable_locations = [loc for loc in hintable_locations if self.location_hints[loc][\"Type\"] == \"Remote\"]\n # The 
remaining locations are potential standard location hints.\n- hintable_locations = list(filter(lambda loc: self.location_hints[loc][\"Type\"] == \"Standard\", hintable_locations))\n+ hintable_locations = [loc for loc in hintable_locations if self.location_hints[loc][\"Type\"] == \"Standard\"]\n \n # If we're not prioritizing remote hints, consider them as standard location hints instead.\n if not self.prioritize_remote_hints:\n@@ -707,10 +709,10 @@ class Hints:\n remote_hintable_locations = []\n \n # Remove locations in race-mode banned dungeons.\n- hintable_locations = list(filter(lambda location_name: location_name not in self.rando.race_mode_banned_locations, hintable_locations))\n+ hintable_locations = [loc for loc in hintable_locations if loc not in self.rando.race_mode_banned_locations]\n \n # Remove locations for items that were previously hinted.\n- hintable_locations = list(filter(lambda loc: loc not in previously_hinted_locations, hintable_locations))\n+ hintable_locations = [loc for loc in hintable_locations if loc not in previously_hinted_locations]\n \n # Remove locations in hinted barren areas.\n standard_hintable_locations = []\n", "new_path": "hints.py", "old_path": "hints.py" } ]
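A before/after miniature of the rewrite pattern, using a made-up hints dictionary; the comprehension names both tuple elements where the lambda had to index into the pair.

```python
location_hints = {
    "Windfall - Jail": {"Type": "Remote"},
    "Dragon Roost - Chest": {"Type": "Standard"},
}

# Before: a list(filter(lambda ...)) wrapper with positional indexing.
pairs = list(filter(lambda x: x[1]["Type"] == "Remote", location_hints.items()))
print([x[0] for x in pairs])  # ['Windfall - Jail']

# After: the comprehension unpacks each (name, hint) pair by name.
remote = [name for name, hint in location_hints.items() if hint["Type"] == "Remote"]
print(remote)  # ['Windfall - Jail']
```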
cf0d18e6334193e198d9eb105eb775635198129b
genericmappingtools/pygmt
null
null
Create and destroy C sessions inside call_module Removes the need to always do this when using the C API. It's what the command-line app does anyway, so it's not wasteful.
[ { "change_type": "MODIFY", "diff": "@@ -63,20 +63,21 @@ def destroy_session(session):\n assert status == 0, 'Failed with status code {}.'.format(status)\n \n \n-def call_module(session, module, args):\n+def call_module(module, args):\n \"\"\"\n Call a GMT module with the given arguments.\n \n Makes a call to ``GMT_Call_Module`` from the C API using mode\n- \"GMT_MODULE_CMD\" (arguments passed as a single string).\n+ ``GMT_MODULE_CMD`` (arguments passed as a single string).\n \n Most interactions with the C API are done through this function.\n \n+ Creates a new C API session (:func:`gmt.clib.create_session`) to pass to\n+ ``GMT_Call_Module`` and destroys it (:func:`gmt.clib.destroy_session`)\n+ after it is used. This is what the command-line interface of GMT does.\n+\n Parameters\n ----------\n- session : ctypes.c_void_p\n- A void pointer to a GMTAPI_CTRL structure created by\n- :func:`gmt.clib.create_session`.\n module : str\n Module name (``'pscoast'``, ``'psbasemap'``, etc).\n args : str\n@@ -90,6 +91,8 @@ def call_module(session, module, args):\n c_call_module.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int,\n ctypes.c_void_p]\n c_call_module.restype = ctypes.c_int\n+ session = create_session()\n status = c_call_module(session, module.encode(), mode, args.encode())\n+ destroy_session(session)\n assert status is not None, 'Failed returning None.'\n assert status == 0, 'Failed with status code {}.'.format(status)\n", "new_path": "gmt/clib/functions.py", "old_path": "gmt/clib/functions.py" }, { "change_type": "MODIFY", "diff": "@@ -27,8 +27,7 @@ def begin(prefix='gmtsession', fmt='pdf'):\n * ps: PostScript.\n * tif: Tagged Image Format File.\n \"\"\"\n- session = clib.create_session()\n- clib.call_module(session, 'begin', '{} {}'.format(prefix, fmt))\n+ clib.call_module('begin', '{} {}'.format(prefix, fmt))\n \n \n def end():\n@@ -41,8 +40,7 @@ def end():\n ``gmt.begin``), and bring the figures to the working directory.\n \n \"\"\"\n- session = clib.create_session()\n- clib.call_module(session, 'end', '')\n+ clib.call_module('end', '')\n \n \n # Not working yet (perhaps bug in GMT).\n@@ -77,6 +75,5 @@ def figure(prefix, formats='pdf', convertoptions='A,P'):\n ``'A[<args>],C<args>,D<dir>,E<dpi>,P,Q<args>,S'``.\n \n \"\"\"\n- session = clib.create_session()\n args = '{} {} {}'.format(prefix, formats, convertoptions)\n- clib.call_module(session, 'figure', args)\n+ clib.call_module('figure', args)\n", "new_path": "gmt/session_management.py", "old_path": "gmt/session_management.py" }, { "change_type": "MODIFY", "diff": "@@ -27,9 +27,7 @@ def test_call_module():\n \"Run a psbasemap call to see if the module works\"\n module = 'psbasemap'\n args = '-R10/70/-3/8 -JX4i/3i -Ba -P ->tmp.ps'\n- session = create_session()\n- call_module(session, module, args)\n- destroy_session(session)\n+ call_module(module, args)\n assert os.path.exists('tmp.ps')\n os.remove('tmp.ps')\n # Not the most ideal test. Just check if no segfaults or exceptions occur.\n", "new_path": "gmt/tests/test_clib.py", "old_path": "gmt/tests/test_clib.py" }, { "change_type": "MODIFY", "diff": "@@ -4,7 +4,7 @@ Test the session management modules.\n import os\n \n from .. 
import begin, end, figure\n-from ..clib import call_module, create_session\n+from ..clib import call_module\n from .utils import figure_comparison_test\n \n \n@@ -15,8 +15,7 @@ TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\n def test_session(prefix, fmt):\n \"Run a command inside a begin-end modern mode block.\"\n begin(prefix=prefix, fmt=fmt)\n- session = create_session()\n- call_module(session, 'psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n+ call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n \n \n@@ -25,9 +24,8 @@ def test_session_figure(prefix, fmt):\n \"Run a figure command inside a begin-end modern mode block.\"\n begin()\n figure(prefix=prefix, formats=fmt)\n- session = create_session()\n- call_module(session, 'psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n+ call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n # Plot some points with red circles\n data_file = os.path.join(TEST_DATA_DIR, 'points.txt')\n- call_module(session, 'psxy', '-<{} -Sc -Gred'.format(data_file))\n+ call_module('psxy', '-<{} -Sc -Gred'.format(data_file))\n end()\n", "new_path": "gmt/tests/test_session_management.py", "old_path": "gmt/tests/test_session_management.py" } ]
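A self-contained sketch of the new control flow, with stub functions standing in for the real ctypes bindings to libgmt: the session now lives entirely inside call_module, so callers never touch session pointers.

```python
def create_session():
    return object()  # stands in for the void pointer from GMT_Create_Session

def destroy_session(session):
    pass             # stands in for GMT_Destroy_Session

def run_module(session, module, args):
    return 0         # stands in for GMT_Call_Module; 0 means success

def call_module(module, args):
    session = create_session()
    status = run_module(session, module, args)
    destroy_session(session)
    assert status == 0, 'Failed with status code {}.'.format(status)

call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')
```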
15eeaa5a0080ca063d421feb0d64e94947c478b6
genericmappingtools/pygmt
null
null
Hide away session management Implement a GMTSession class to handle calling begin and end. Use it as a global (package level) session. This way, each script/notebook uses a single session. begin and end are no longer public API functions.
[ { "change_type": "MODIFY", "diff": "@@ -6,15 +6,13 @@ API Reference\n High-level functions for GMT modules\n ------------------------------------\n \n-Each GMT module (``gmt pscoas``, ``gmt grdgradient``, etc.) is wrapped by a\n+Each GMT module (``gmt pscoast``, ``gmt psbasemap``, etc.) is wrapped by a\n function in the ``gmt`` top-level module.\n \n .. autosummary::\n :toctree: api/\n :template: function.rst\n \n- gmt.begin\n- gmt.end\n gmt.figure\n gmt.pscoast\n \n", "new_path": "doc/api.rst", "old_path": "doc/api.rst" }, { "change_type": "MODIFY", "diff": "@@ -5,12 +5,20 @@ from ._version import get_versions\n \n # Import modules to make the high-level GMT Python API\n from .ps_modules import pscoast\n-from .session_management import begin, end, figure\n+from .session_management import figure, GMTSession\n \n \n+# Get the version number through versioneer\n __version__ = get_versions()['version']\n+# Delete the function so that it doesn't appear in the public API\n del get_versions\n \n+# Start our global modern mode session. It calls \"gmt.begin\" when started and\n+# \"gmt.end\" when deleted.\n+_GLOBAL_SESSION = GMTSession()\n+# Delete the class so that it doesn't appear in the public API\n+del GMTSession\n+\n \n def test(doctest=True, verbose=True, coverage=False, figures=True):\n \"\"\"\n", "new_path": "gmt/__init__.py", "old_path": "gmt/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -46,3 +46,51 @@ def figure():\n # Passing format '-' tells gmt.end to not produce any files.\n fmt = '-'\n clib.call_module('figure', '{} {}'.format(prefix, fmt))\n+\n+\n+class GMTSession():\n+ \"\"\"\n+ Placeholder for an active modern mode session.\n+\n+ Calls ``begin`` and ``figure`` when created. Calls ``end`` when destroyed\n+ so that the tmp files are cleaned.\n+\n+ The call to ``figure`` is necessary because the default behavior in Python\n+ is to not generate figure files unless explicitly commanded by\n+ ``psconvert`` or ``savefig``. The call starts a new figure with the format\n+ ``-`` which indicates that ``end`` should skip processing that figure.\n+\n+ \"\"\"\n+\n+ def __init__(self):\n+ self.is_active = False\n+ self.begin()\n+\n+ def begin(self):\n+ \"\"\"\n+ Starts a modern mode session by calling ``begin`` and ``figure``.\n+\n+ Sets the attribute ``_is_active`` to ``True`` to indicate that there\n+ is an active session.\n+ \"\"\"\n+ assert not self.is_active, \\\n+ \"Session is already active. 
Can't start two simultaneous sessions\"\n+ begin()\n+ figure()\n+ self.is_active = True\n+\n+ def end(self):\n+ \"\"\"\n+ End the current session.\n+ \"\"\"\n+ assert self.is_active, \"Can't end an inactive session.\"\n+ end()\n+ self.is_active = False\n+\n+ def restart(self):\n+ \"\"\"\n+ End the current session (if it's active) and start a new one.\n+ \"\"\"\n+ if self.is_active:\n+ self.end()\n+ self.begin()\n", "new_path": "gmt/session_management.py", "old_path": "gmt/session_management.py" }, { "change_type": "MODIFY", "diff": "@@ -6,6 +6,9 @@ import os\n from ..clib import create_session, destroy_session, call_module, load_libgmt\n \n \n+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\n+\n+\n def test_load_libgmt():\n \"Test that loading libgmt works and doesn't crash.\"\n libgmt = load_libgmt()\n@@ -25,9 +28,11 @@ def test_clib_session_management():\n \n def test_call_module():\n \"Run a psbasemap call to see if the module works\"\n- module = 'psbasemap'\n- args = '-R10/70/-3/8 -JX4i/3i -Ba -P ->tmp.ps'\n- call_module(module, args)\n- assert os.path.exists('tmp.ps')\n- os.remove('tmp.ps')\n- # Not the most ideal test. Just check if no segfaults or exceptions occur.\n+ data_fname = os.path.join(TEST_DATA_DIR, 'points.txt')\n+ out_fname = 'test_call_module.txt'\n+ call_module('gmtinfo', '{} -C ->{}'.format(data_fname, out_fname))\n+ assert os.path.exists(out_fname)\n+ with open(out_fname) as out_file:\n+ output = out_file.read().strip().replace('\\t', ' ')\n+ assert output == '11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338'\n+ os.remove(out_fname)\n", "new_path": "gmt/tests/test_clib.py", "old_path": "gmt/tests/test_clib.py" }, { "change_type": "MODIFY", "diff": "@@ -3,28 +3,85 @@ Test the session management modules.\n \"\"\"\n import os\n \n-from .. import begin, end, figure\n+from pytest import raises\n+\n+from .. import figure, _GLOBAL_SESSION\n+from ..session_management import begin, end, GMTSession\n from ..clib import call_module\n \n \n-def test_session():\n+def test_begin_end():\n \"\"\"\"\n Run a command inside a begin-end modern mode block.\n+ First, end the global session. 
When finished, restart it.\n \"\"\"\n+ _GLOBAL_SESSION.end()\n begin()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n+ _GLOBAL_SESSION.restart()\n assert os.path.exists('gmt-python-session.pdf')\n os.remove('gmt-python-session.pdf')\n \n \n def test_session_figure():\n \"\"\"\n- Run a figure command inside a begin-end modern mode block.\n- No file should be generated.\n+ Run a figure command and check that no file is be generated by gmt.end\n+\n+ Need to end the global session before doing this.\n \"\"\"\n+ _GLOBAL_SESSION.end()\n begin()\n figure()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n+ _GLOBAL_SESSION.restart()\n assert not os.path.exists('gmt-python-figure.pdf')\n+\n+\n+def test_gmtsession_begin_error():\n+ \"\"\"\n+ Check that an error is raised when trying to start session without ending\n+ it first.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ with raises(AssertionError):\n+ session.begin()\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n+\n+\n+def test_gmtsession_restart():\n+ \"\"\"\n+ Check that a session can be restarted without crashes.\n+ Restart should kill current session and begin a new one.\n+ There should be no way to begin a session without ending it first.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ assert session.is_active\n+ # Should work when session is active\n+ session.restart()\n+ assert session.is_active\n+ # And also when it isn't\n+ session.end()\n+ assert not session.is_active\n+ session.restart()\n+ assert session.is_active\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n+\n+\n+def test_gmtsession_error_end():\n+ \"\"\"\n+ Should raise an error when calling end twice in a row.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ assert session.is_active\n+ session.end()\n+ assert not session.is_active\n+ with raises(AssertionError):\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n", "new_path": "gmt/tests/test_session_management.py", "old_path": "gmt/tests/test_session_management.py" } ]
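A condensed sketch of the session state machine introduced above (gmt_begin/gmt_end are placeholders for the module-level begin/end wrappers): begin and end toggle is_active, restart composes them, and the assertions forbid double-starting or double-ending.

```python
def gmt_begin(): pass  # placeholder for session_management.begin + figure
def gmt_end(): pass    # placeholder for session_management.end

class GMTSession:
    def __init__(self):
        self.is_active = False
        self.begin()

    def begin(self):
        assert not self.is_active, "Session is already active."
        gmt_begin()
        self.is_active = True

    def end(self):
        assert self.is_active, "Can't end an inactive session."
        gmt_end()
        self.is_active = False

    def restart(self):
        if self.is_active:
            self.end()
        self.begin()

session = GMTSession()  # active immediately, like the package-level session
session.restart()
session.end()
```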
710501ab0ac79a8fb5d6798936de652d1a4e3d3d
genericmappingtools/pygmt
null
null
Replace the global GMTSession with 'atexit' No need for the class or the global instance. Register 'end' with 'atexit' to make sure end is called when exiting Python.
[ { "change_type": "MODIFY", "diff": "@@ -1,23 +1,22 @@\n \"\"\"\n GMT Python interface\n \"\"\"\n-from ._version import get_versions\n+import atexit as _atexit\n+\n+from ._version import get_versions as _get_versions\n \n # Import modules to make the high-level GMT Python API\n from .ps_modules import pscoast, psconvert, psbasemap\n-from .session_management import figure, GMTSession\n+from .session_management import figure, begin as _begin, end as _end\n \n \n # Get the version number through versioneer\n-__version__ = get_versions()['version']\n-# Delete the function so that it doesn't appear in the public API\n-del get_versions\n+__version__ = _get_versions()['version']\n \n-# Start our global modern mode session. It calls \"gmt.begin\" when started and\n-# \"gmt.end\" when deleted.\n-_GLOBAL_SESSION = GMTSession()\n-# Delete the class so that it doesn't appear in the public API\n-del GMTSession\n+# Start our global modern mode session\n+_begin()\n+# Tell Python to run _end when shutting down\n+_atexit.register(_end)\n \n \n def test(doctest=True, verbose=True, coverage=False, figures=True):\n", "new_path": "gmt/__init__.py", "old_path": "gmt/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -1,5 +1,5 @@\n \"\"\"\n-Session management modules: begin, end, figure, clean\n+Session management modules: begin, end, figure, etc\n \"\"\"\n from . import clib\n \n@@ -46,58 +46,3 @@ def figure():\n # Passing format '-' tells gmt.end to not produce any files.\n fmt = '-'\n clib.call_module('figure', '{} {}'.format(prefix, fmt))\n-\n-\n-class GMTSession():\n- \"\"\"\n- Placeholder for an active modern mode session.\n-\n- Calls ``begin`` and ``figure`` when created. Calls ``end`` when destroyed\n- so that the temporary files are cleaned.\n-\n- The call to ``figure`` is necessary because the default behavior in Python\n- is to not generate figure files unless explicitly commanded by\n- ``psconvert`` or ``savefig``. The call starts a new figure with the format\n- ``-`` which indicates that ``end`` should skip processing that figure.\n-\n- \"\"\"\n-\n- def __init__(self):\n- self.is_active = False\n- self.begin()\n-\n- def begin(self):\n- \"\"\"\n- Starts a modern mode session by calling ``begin`` and ``figure``.\n-\n- Sets the attribute ``_is_active`` to ``True`` to indicate that there\n- is an active session.\n- \"\"\"\n- assert not self.is_active, \\\n- \"Session is already active. Can't start two simultaneous sessions\"\n- begin()\n- figure()\n- self.is_active = True\n-\n- def end(self):\n- \"\"\"\n- End the current session.\n- \"\"\"\n- assert self.is_active, \"Can't end an inactive session.\"\n- end()\n- self.is_active = False\n-\n- def restart(self):\n- \"\"\"\n- End the current session (if it's active) and start a new one.\n- \"\"\"\n- if self.is_active:\n- self.end()\n- self.begin()\n-\n- def __del__(self):\n- \"\"\"\n- When the session is being garbage collected, call ``end`` to clean up\n- the session.\n- \"\"\"\n- self.end()\n", "new_path": "gmt/session_management.py", "old_path": "gmt/session_management.py" }, { "change_type": "MODIFY", "diff": "@@ -3,10 +3,8 @@ Test the session management modules.\n \"\"\"\n import os\n \n-from pytest import raises\n-\n-from .. import figure, _GLOBAL_SESSION\n-from ..session_management import begin, end, GMTSession\n+from .. import figure\n+from ..session_management import begin, end\n from ..clib import call_module\n \n \n@@ -15,11 +13,11 @@ def test_begin_end():\n Run a command inside a begin-end modern mode block.\n First, end the global session. 
When finished, restart it.\n \"\"\"\n- _GLOBAL_SESSION.end()\n+ end() # Kill the global session\n begin()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n- _GLOBAL_SESSION.restart()\n+ begin() # Restart the global session\n assert os.path.exists('gmt-python-session.pdf')\n os.remove('gmt-python-session.pdf')\n \n@@ -30,58 +28,10 @@ def test_session_figure():\n \n Need to end the global session before doing this.\n \"\"\"\n- _GLOBAL_SESSION.end()\n+ end() # Kill the global session\n begin()\n figure()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n- _GLOBAL_SESSION.restart()\n+ begin() # Restart the global session\n assert not os.path.exists('gmt-python-figure.pdf')\n-\n-\n-def test_gmtsession_begin_error():\n- \"\"\"\n- Check that an error is raised when trying to start session without ending\n- it first.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- with raises(AssertionError):\n- session.begin()\n- session.end()\n- _GLOBAL_SESSION.restart()\n-\n-\n-def test_gmtsession_restart():\n- \"\"\"\n- Check that a session can be restarted without crashes.\n- Restart should kill current session and begin a new one.\n- There should be no way to begin a session without ending it first.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- assert session.is_active\n- # Should work when session is active\n- session.restart()\n- assert session.is_active\n- # And also when it isn't\n- session.end()\n- assert not session.is_active\n- session.restart()\n- assert session.is_active\n- session.end()\n- _GLOBAL_SESSION.restart()\n-\n-\n-def test_gmtsession_error_end():\n- \"\"\"\n- Should raise an error when calling end twice in a row.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- assert session.is_active\n- session.end()\n- assert not session.is_active\n- with raises(AssertionError):\n- session.end()\n- _GLOBAL_SESSION.restart()\n", "new_path": "gmt/tests/test_session_management.py", "old_path": "gmt/tests/test_session_management.py" } ]
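The replacement pattern in miniature: instead of a session object whose destructor cleans up, the package starts the session at import time and registers the teardown with atexit, which Python runs once at interpreter shutdown. The print calls are placeholders for the real begin/end wrappers.

```python
import atexit

def _begin():
    print("modern mode session started")

def _end():
    print("modern mode session ended")

_begin()               # runs when the package is imported
atexit.register(_end)  # runs once, when the interpreter exits
```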
c3f6817a03d60af66b4cabb47d7ecbc642bf6376
sabeechen/hassio-google-drive-backup
null
null
Use alternate headers for HA auth requests When sending an "Authorization: Bearer" header to the supervisor's /auth endpoint, the header gets used to authorize the addon and then _also_ to authorize against HA, which fails. This changes requests made to the /auth endpoint so they use the "X-Supervisor-Token" header to avoid the conflict.
[ { "change_type": "MODIFY", "diff": "@@ -18,6 +18,8 @@ from yarl import URL\n \r\n logger = getLogger(__name__)\r\n \r\n+HEADER_TOKEN = \"X-Supervisor-Token\"\r\n+\r\n NOTIFICATION_ID = \"backup_broken\"\r\n EVENT_BACKUP_START = \"backup_started\"\r\n EVENT_BACKUP_END = \"backup_ended\"\r\n@@ -81,7 +83,7 @@ class HaRequests():\n \r\n @supervisor_call\r\n async def auth(self, user: str, password: str) -> None:\r\n- await self._postHassioData(self.getSupervisorURL().with_path(\"auth\"), {\"username\": user, \"password\": password})\r\n+ await self._postHassioData(self.getSupervisorURL().with_path(\"auth\"), {\"username\": user, \"password\": password}, headers=self._altAuthHeaders())\r\n \r\n @supervisor_call\r\n async def upload(self, stream):\r\n@@ -178,7 +180,7 @@ class HaRequests():\n async def download(self, slug) -> AsyncHttpGetter:\r\n url = self.getSupervisorURL().with_path(\"{1}/{0}/download\".format(slug, self._getBackupPath()))\r\n ret = AsyncHttpGetter(url,\r\n- self._getHassioHeaders(),\r\n+ self._getAuthHeaders(),\r\n self.session,\r\n timeoutFactory=SupervisorTimeoutError.factory,\r\n otherErrorFactory=SupervisorUnexpectedError.factory,\r\n@@ -190,14 +192,14 @@ class HaRequests():\n @supervisor_call\r\n async def getSuperLogs(self):\r\n url = self.getSupervisorURL().with_path(\"supervisor/logs\")\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return await resp.text()\r\n \r\n @supervisor_call\r\n async def getCoreLogs(self):\r\n url = self.getSupervisorURL().with_path(\"core/logs\")\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return await resp.text()\r\n \r\n@@ -219,7 +221,7 @@ class HaRequests():\n \r\n async def getAddonLogo(self, slug: str):\r\n url = self.getSupervisorURL().with_path(\"addons/{0}/icon\".format(slug))\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return (resp.headers['Content-Type'], await resp.read())\r\n \r\n@@ -232,30 +234,34 @@ class HaRequests():\n # Older versions of the supervisor use a different name for the token.\r\n return os.environ.get(\"HASSIO_TOKEN\")\r\n \r\n- def _getHassioHeaders(self):\r\n- return self._getHaHeaders()\r\n-\r\n- def _getHaHeaders(self):\r\n+ def _getAuthHeaders(self):\r\n return {\r\n 'Authorization': 'Bearer ' + self._getToken()\r\n }\r\n \r\n+ def _altAuthHeaders(self):\r\n+ return {\r\n+ HEADER_TOKEN: self._getToken()\r\n+ }\r\n+\r\n @supervisor_call\r\n async def _getHassioData(self, url: URL) -> Dict[str, Any]:\r\n logger.debug(\"Making Hassio request: \" + str(url))\r\n- return await self._validateHassioReply(await self.session.get(url, headers=self._getHassioHeaders()))\r\n+ return await self._validateHassioReply(await self.session.get(url, headers=self._getAuthHeaders()))\r\n \r\n- async def _postHassioData(self, url: URL, json=None, file=None, data=None, timeout=None) -> Dict[str, Any]:\r\n- return await self._sendHassioData(\"post\", url, json, file, data, timeout)\r\n+ async def _postHassioData(self, url: URL, json=None, file=None, data=None, timeout=None, headers=None) -> Dict[str, Any]:\r\n+ return await self._sendHassioData(\"post\", url, json, file, data, timeout, headers)\r\n 
\r\n @supervisor_call\r\n- async def _sendHassioData(self, method: str, url: URL, json=None, file=None, data=None, timeout=None) -> Dict[str, Any]:\r\n+ async def _sendHassioData(self, method: str, url: URL, json=None, file=None, data=None, timeout=None, headers=None) -> Dict[str, Any]:\r\n+ if headers is None:\r\n+ headers = self._getAuthHeaders()\r\n logger.debug(\"Making Hassio request: \" + str(url))\r\n- return await self._validateHassioReply(await self.session.request(method, url, headers=self._getHassioHeaders(), json=json, data=data, timeout=timeout))\r\n+ return await self._validateHassioReply(await self.session.request(method, url, headers=headers, json=json, data=data, timeout=timeout))\r\n \r\n async def _postHaData(self, path: str, data: Dict[str, Any]) -> None:\r\n url = self.getSupervisorURL().with_path(\"/core/api/\" + path)\r\n- async with self.session.post(url, headers=self._getHaHeaders(), json=data) as resp:\r\n+ async with self.session.post(url, headers=self._getAuthHeaders(), json=data) as resp:\r\n resp.raise_for_status()\r\n \r\n async def sendNotification(self, title: str, message: str) -> None:\r\n", "new_path": "hassio-google-drive-backup/backup/ha/harequests.py", "old_path": "hassio-google-drive-backup/backup/ha/harequests.py" }, { "change_type": "MODIFY", "diff": "@@ -147,6 +147,8 @@ class SimulatedSupervisor(BaseServer):\n async def _verifyHeader(self, request) -> bool:\n if request.headers.get(\"Authorization\", None) == \"Bearer \" + self._auth_token:\n return\n+ if request.headers.get(\"X-Supervisor-Token\", None) == self._auth_token:\n+ return\n raise HTTPUnauthorized()\n \n async def _getSnapshots(self, request: Request):\n", "new_path": "hassio-google-drive-backup/dev/simulated_supervisor.py", "old_path": "hassio-google-drive-backup/dev/simulated_supervisor.py" } ]
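The header split at a glance, in a standalone sketch (token value and URLs are made up): ordinary supervisor calls keep the Bearer header, while /auth uses X-Supervisor-Token so the bearer token is not also interpreted as a Home Assistant user credential.

```python
TOKEN = "example-supervisor-token"

def auth_headers():
    return {"Authorization": "Bearer " + TOKEN}

def alt_auth_headers():
    return {"X-Supervisor-Token": TOKEN}

def send(url, headers=None):
    if headers is None:
        headers = auth_headers()  # default for every other endpoint
    print(url, headers)

send("http://supervisor/backups")                           # Bearer header
send("http://supervisor/auth", headers=alt_auth_headers())  # alternate header
```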
b62db9ee2ab82514ab217a950dfe35829b20950a
miurahr/aqtinstall
null
null
Allow `MetadataFactory.fetch_http` to skip sha256 `MetadataFactory.fetch_http` must often download HTML pages, not Updates.xml files. download.qt.io does not store checksums for these files, so this particular function must be allowed to download these pages without using a checksum.
[ { "change_type": "MODIFY", "diff": "@@ -452,7 +452,7 @@ class MetadataFactory:\n \n def fetch_extensions(self, version: Version) -> List[str]:\n versions_extensions = MetadataFactory.get_versions_extensions(\n- self.fetch_http(self.archive_id.to_url()), self.archive_id.category\n+ self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category\n )\n filtered = filter(\n lambda ver_ext: ver_ext[0] == version and ver_ext[1],\n@@ -469,7 +469,7 @@ class MetadataFactory:\n return ver_ext[0]\n \n versions_extensions = MetadataFactory.get_versions_extensions(\n- self.fetch_http(self.archive_id.to_url()), self.archive_id.category\n+ self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category\n )\n versions = sorted(filter(None, map(get_version, filter(filter_by, versions_extensions))))\n iterables = itertools.groupby(versions, lambda version: version.minor)\n@@ -479,7 +479,7 @@ class MetadataFactory:\n return self.fetch_versions().latest()\n \n def fetch_tools(self) -> List[str]:\n- html_doc = self.fetch_http(self.archive_id.to_url())\n+ html_doc = self.fetch_http(self.archive_id.to_url(), False)\n return list(MetadataFactory.iterate_folders(html_doc, \"tools\"))\n \n def fetch_tool_modules(self, tool_name: str) -> List[str]:\n@@ -572,9 +572,9 @@ class MetadataFactory:\n return version\n \n @staticmethod\n- def fetch_http(rest_of_url: str) -> str:\n+ def fetch_http(rest_of_url: str, is_check_hash: bool = True) -> str:\n timeout = (Settings.connection_timeout, Settings.response_timeout)\n- expected_hash = binascii.unhexlify(get_hash(rest_of_url, \"sha256\", timeout))\n+ expected_hash = binascii.unhexlify(get_hash(rest_of_url, \"sha256\", timeout)) if is_check_hash else None\n base_urls = Settings.baseurl, random.choice(Settings.fallbacks)\n for i, base_url in enumerate(base_urls):\n try:\n", "new_path": "aqt/metadata.py", "old_path": "aqt/metadata.py" }, { "change_type": "MODIFY", "diff": "@@ -96,7 +96,7 @@ def test_cli_determine_qt_version(\n monkeypatch, host, target, arch, version_or_spec: str, expected_version: Version, is_bad_spec: bool\n ):\n _html = (Path(__file__).parent / \"data\" / f\"{host}-{target}.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n cli = Cli()\n cli._setup_settings()\n \n", "new_path": "tests/test_cli.py", "old_path": "tests/test_cli.py" }, { "change_type": "MODIFY", "diff": "@@ -132,7 +132,7 @@ def spec_regex():\n )\n def test_list_versions_tools(monkeypatch, spec_regex, os_name, target, in_file, expect_out_file):\n _html = (Path(__file__).parent / \"data\" / in_file).read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n \n expected = json.loads((Path(__file__).parent / \"data\" / expect_out_file).read_text(\"utf-8\"))\n \n@@ -434,7 +434,7 @@ def test_list_qt_cli(\n expect_set = expect\n assert isinstance(expect_set, set)\n \n- def _mock_fetch_http(_, rest_of_url: str) -> str:\n+ def _mock_fetch_http(_, rest_of_url, *args, **kwargs: str) -> str:\n htmltext = (Path(__file__).parent / \"data\" / htmlfile).read_text(\"utf-8\")\n if not rest_of_url.endswith(\"Updates.xml\"):\n return htmltext\n@@ -723,7 +723,7 @@ def test_list_describe_filters(meta: MetadataFactory, expect: str):\n )\n def test_list_to_version(monkeypatch, archive_id, spec, version_str, expect):\n _html 
= (Path(__file__).parent / \"data\" / \"mac-desktop.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n \n if isinstance(expect, Exception):\n with pytest.raises(CliInputError) as error:\n@@ -847,7 +847,7 @@ def test_show_list_versions(monkeypatch, capsys):\n \n def test_show_list_tools(monkeypatch, capsys):\n page = (Path(__file__).parent / \"data\" / \"mac-desktop.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: page)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: page)\n \n expect_file = Path(__file__).parent / \"data\" / \"mac-desktop-expect.json\"\n expect = \"\\n\".join(json.loads(expect_file.read_text(\"utf-8\"))[\"tools\"]) + \"\\n\"\n@@ -918,7 +918,7 @@ def test_list_tool_cli(monkeypatch, capsys, host: str, target: str, tool_name: s\n xml_data = json.loads(xmljson)\n expected_tool_modules = set(xml_data[\"modules\"])\n \n- def _mock_fetch_http(_, rest_of_url: str) -> str:\n+ def _mock_fetch_http(_, rest_of_url, *args, **kwargs: str) -> str:\n if not rest_of_url.endswith(\"Updates.xml\"):\n return htmltext\n folder = urlparse(rest_of_url).path.split(\"/\")[-2]\n", "new_path": "tests/test_list.py", "old_path": "tests/test_list.py" } ]
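A reduced model of the opt-out, with get_hash and download as stubs for the real network helpers: Updates.xml fetches verify a sha256 published on the mirror, while HTML directory listings, which have no published checksum, pass is_check_hash=False.

```python
def get_hash(url):
    return "0123abcd"  # stands in for fetching "<url>.sha256" from the mirror

def download(url):
    return "<html>directory listing</html>"

def fetch_http(rest_of_url, is_check_hash=True):
    expected_hash = get_hash(rest_of_url) if is_check_hash else None
    content = download(rest_of_url)
    if expected_hash is not None:
        pass  # compare the payload's sha256 against expected_hash here
    return content

fetch_http("linux_x64/desktop/qt6_630/Updates.xml")    # checksum enforced
fetch_http("linux_x64/desktop/", is_check_hash=False)  # HTML page, no checksum
```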
ed6cf7696871e01685c1c96d6d13deb6439ae490
miurahr/aqtinstall
null
null
Add CLI option to install desktop qt automatically This change also causes aqt to emit a warning when the option is not turned on and the expected desktop Qt is not found.
[ { "change_type": "MODIFY", "diff": "@@ -299,6 +299,7 @@ class Cli:\n if modules is not None and archives is not None:\n archives.append(modules)\n nopatch = args.noarchives or (archives is not None and \"qtbase\" not in archives) # type: bool\n+ warn_on_missing_desktop_qt: bool = not args.autodesktop\n if not self._check_qt_arg_versions(qt_version):\n self.logger.warning(\"Specified Qt version is unknown: {}.\".format(qt_version))\n if not self._check_qt_arg_combination(qt_version, os_name, target, arch):\n@@ -328,6 +329,7 @@ class Cli:\n with TemporaryDirectory() as temp_dir:\n _archive_dest = Cli.choose_archive_dest(archive_dest, keep, temp_dir)\n run_installer(qt_archives.get_packages(), base_dir, sevenzip, keep, _archive_dest)\n+ self._handle_missing_desktop_qt(os_name, target, Version(qt_version), Path(base_dir), warn_on_missing_desktop_qt)\n if not nopatch:\n Updater.update(target_config, base_dir)\n self.logger.info(\"Finished installation\")\n@@ -603,6 +605,12 @@ class Cli:\n action=\"store_true\",\n help=\"No base packages; allow mod amendment with --modules option.\",\n )\n+ install_qt_parser.add_argument(\n+ \"--autodesktop\",\n+ action=\"store_true\",\n+ help=\"For android/ios installations, a desktop Qt installation is required. \"\n+ \"When enabled, this option installs the required desktop version automatically.\",\n+ )\n \n def _set_install_tool_parser(self, install_tool_parser, *, is_legacy: bool):\n install_tool_parser.set_defaults(func=self.run_install_tool, is_legacy=is_legacy)\n@@ -640,6 +648,43 @@ class Cli:\n f\"In the future, please omit this parameter.\"\n )\n \n+ @staticmethod\n+ def _get_missing_desktop_arch(host: str, target: str, version: Version, base_dir: Path) -> Optional[str]:\n+ \"\"\"\n+ For mobile Qt installations, the desktop version of Qt is a dependency.\n+ If the desktop version is not installed, this function returns the architecture that should be installed.\n+ If no desktop Qt is required, or it is already installed, this function returns None.\n+ \"\"\"\n+ if target not in [\"ios\", \"android\"]:\n+ return None\n+ if host != \"windows\":\n+ arch = aqt.updater.default_desktop_arch_dir(host, version)\n+ expected_qmake = base_dir / format(version) / arch / \"bin/qmake\"\n+ return arch if not expected_qmake.is_file() else None\n+ else:\n+ existing_desktop_qt = QtRepoProperty.find_installed_qt_mingw_dir(base_dir / format(version))\n+ if existing_desktop_qt:\n+ return None\n+ return MetadataFactory(ArchiveId(\"qt\", host, \"desktop\")).fetch_default_desktop_arch(version)\n+\n+ def _handle_missing_desktop_qt(self, host: str, target: str, version: Version, base_dir: Path, should_warn: bool):\n+ missing_desktop_arch = Cli._get_missing_desktop_arch(host, target, version, base_dir)\n+ if not missing_desktop_arch:\n+ return\n+\n+ msg_prefix = (\n+ f\"You are installing the {target} version of Qt, which requires that the desktop version of Qt \"\n+ f\"is also installed.\"\n+ )\n+ if should_warn:\n+ self.logger.warning(\n+ f\"{msg_prefix} You can install it with the following command:\\n\"\n+ f\" `aqt install-qt {host} desktop {version} {missing_desktop_arch}`\"\n+ )\n+ else:\n+ self.logger.info(f\"{msg_prefix} Now installing Qt: desktop {version} {missing_desktop_arch}\")\n+ self.run([\"install-qt\", host, \"desktop\", format(version), missing_desktop_arch])\n+\n def _make_all_parsers(self, subparsers: argparse._SubParsersAction):\n deprecated_msg = \"This command is deprecated and marked for removal in a future version of aqt.\"\n \n", "new_path": 
"aqt/installer.py", "old_path": "aqt/installer.py" }, { "change_type": "MODIFY", "diff": "@@ -576,6 +576,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-windows-android_armv7.7z in .*\\n\"\n+ r\"WARNING : You are installing the android version of Qt, which requires that the desktop version of \"\n+ r\"Qt is also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt windows desktop 6.1.0 MINGW_MOCK_DEFAULT`\\n\"\n r\"INFO : Patching .*6\\.1\\.0[/\\\\]android_armv7[/\\\\]bin[/\\\\]qmake.bat\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -631,6 +634,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-linux-android_arm64_v8a.7z in .*\\n\"\n+ r\"WARNING : You are installing the android version of Qt, which requires that the desktop version of \"\n+ r\"Qt is also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt linux desktop 6\\.3\\.0 gcc_64`\\n\"\n r\"INFO : Patching .*6\\.3\\.0[/\\\\]android_arm64_v8a[/\\\\]bin[/\\\\]qmake\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -686,6 +692,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-mac-ios.7z in .*\\n\"\n+ r\"WARNING : You are installing the ios version of Qt, which requires that the desktop version of Qt is \"\n+ r\"also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt mac desktop 6\\.1\\.2 macos`\\n\"\n r\"INFO : Patching .*6\\.1\\.2[/\\\\]ios[/\\\\]bin[/\\\\]qmake\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -716,6 +725,7 @@ def test_install(\n monkeypatch.setattr(\"aqt.archives.getUrl\", mock_get_url)\n monkeypatch.setattr(\"aqt.helper.getUrl\", mock_get_url)\n monkeypatch.setattr(\"aqt.installer.downloadBinaryFile\", mock_download_archive)\n+ monkeypatch.setattr(\"aqt.metadata.MetadataFactory.fetch_default_desktop_arch\", lambda *args: \"MINGW_MOCK_DEFAULT\")\n \n with TemporaryDirectory() as output_dir:\n cli = Cli()\n", "new_path": "tests/test_install.py", "old_path": "tests/test_install.py" } ]
1eed101be9adc8f94036761099d512f26439a2c5
explosion/spacy
null
null
Fix Polish lemmatizer for deserialized models Restructure the Polish lemmatizer so it does not depend on lookups data in `__init__`, since the lemmatizer is initialized before the lookups data is loaded from a saved model. The lookup tables are instead accessed in `__call__`, once the data is available.
[ { "change_type": "MODIFY", "diff": "@@ -6,98 +6,73 @@ from ...parts_of_speech import NAMES\n \n \n class PolishLemmatizer(Lemmatizer):\n- # This lemmatizer implements lookup lemmatization based on\n- # the Morfeusz dictionary (morfeusz.sgjp.pl/en) by Institute of Computer Science PAS\n- # It utilizes some prefix based improvements for\n- # verb and adjectives lemmatization, as well as case-sensitive\n- # lemmatization for nouns\n- def __init__(self, lookups, *args, **kwargs):\n- # this lemmatizer is lookup based, so it does not require an index, exceptionlist, or rules\n- super(PolishLemmatizer, self).__init__(lookups)\n- self.lemma_lookups = {}\n- for tag in [\n- \"ADJ\",\n- \"ADP\",\n- \"ADV\",\n- \"AUX\",\n- \"NOUN\",\n- \"NUM\",\n- \"PART\",\n- \"PRON\",\n- \"VERB\",\n- \"X\",\n- ]:\n- self.lemma_lookups[tag] = self.lookups.get_table(\n- \"lemma_lookup_\" + tag.lower(), {}\n- )\n- self.lemma_lookups[\"DET\"] = self.lemma_lookups[\"X\"]\n- self.lemma_lookups[\"PROPN\"] = self.lemma_lookups[\"NOUN\"]\n-\n+ # This lemmatizer implements lookup lemmatization based on the Morfeusz\n+ # dictionary (morfeusz.sgjp.pl/en) by Institute of Computer Science PAS.\n+ # It utilizes some prefix based improvements for verb and adjectives\n+ # lemmatization, as well as case-sensitive lemmatization for nouns.\n def __call__(self, string, univ_pos, morphology=None):\n if isinstance(univ_pos, int):\n univ_pos = NAMES.get(univ_pos, \"X\")\n univ_pos = univ_pos.upper()\n \n+ lookup_pos = univ_pos.lower()\n+ if univ_pos == \"PROPN\":\n+ lookup_pos = \"noun\"\n+ lookup_table = self.lookups.get_table(\"lemma_lookup_\" + lookup_pos, {})\n+\n if univ_pos == \"NOUN\":\n- return self.lemmatize_noun(string, morphology)\n+ return self.lemmatize_noun(string, morphology, lookup_table)\n \n if univ_pos != \"PROPN\":\n string = string.lower()\n \n if univ_pos == \"ADJ\":\n- return self.lemmatize_adj(string, morphology)\n+ return self.lemmatize_adj(string, morphology, lookup_table)\n elif univ_pos == \"VERB\":\n- return self.lemmatize_verb(string, morphology)\n+ return self.lemmatize_verb(string, morphology, lookup_table)\n \n- lemma_dict = self.lemma_lookups.get(univ_pos, {})\n- return [lemma_dict.get(string, string.lower())]\n+ return [lookup_table.get(string, string.lower())]\n \n- def lemmatize_adj(self, string, morphology):\n+ def lemmatize_adj(self, string, morphology, lookup_table):\n # this method utilizes different procedures for adjectives\n # with 'nie' and 'naj' prefixes\n- lemma_dict = self.lemma_lookups[\"ADJ\"]\n-\n if string[:3] == \"nie\":\n search_string = string[3:]\n if search_string[:3] == \"naj\":\n naj_search_string = search_string[3:]\n- if naj_search_string in lemma_dict:\n- return [lemma_dict[naj_search_string]]\n- if search_string in lemma_dict:\n- return [lemma_dict[search_string]]\n+ if naj_search_string in lookup_table:\n+ return [lookup_table[naj_search_string]]\n+ if search_string in lookup_table:\n+ return [lookup_table[search_string]]\n \n if string[:3] == \"naj\":\n naj_search_string = string[3:]\n- if naj_search_string in lemma_dict:\n- return [lemma_dict[naj_search_string]]\n+ if naj_search_string in lookup_table:\n+ return [lookup_table[naj_search_string]]\n \n- return [lemma_dict.get(string, string)]\n+ return [lookup_table.get(string, string)]\n \n- def lemmatize_verb(self, string, morphology):\n+ def lemmatize_verb(self, string, morphology, lookup_table):\n # this method utilizes a different procedure for verbs\n # with 'nie' prefix\n- lemma_dict = 
self.lemma_lookups[\"VERB\"]\n-\n if string[:3] == \"nie\":\n search_string = string[3:]\n- if search_string in lemma_dict:\n- return [lemma_dict[search_string]]\n+ if search_string in lookup_table:\n+ return [lookup_table[search_string]]\n \n- return [lemma_dict.get(string, string)]\n+ return [lookup_table.get(string, string)]\n \n- def lemmatize_noun(self, string, morphology):\n+ def lemmatize_noun(self, string, morphology, lookup_table):\n # this method is case-sensitive, in order to work\n # for incorrectly tagged proper names\n- lemma_dict = self.lemma_lookups[\"NOUN\"]\n-\n if string != string.lower():\n- if string.lower() in lemma_dict:\n- return [lemma_dict[string.lower()]]\n- elif string in lemma_dict:\n- return [lemma_dict[string]]\n+ if string.lower() in lookup_table:\n+ return [lookup_table[string.lower()]]\n+ elif string in lookup_table:\n+ return [lookup_table[string]]\n return [string.lower()]\n \n- return [lemma_dict.get(string, string)]\n+ return [lookup_table.get(string, string)]\n \n def lookup(self, string, orth=None):\n return string.lower()\n", "new_path": "spacy/lang/pl/lemmatizer.py", "old_path": "spacy/lang/pl/lemmatizer.py" } ]
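A minimal sketch of the ordering problem this commit fixes, using hypothetical stand-ins rather than spaCy's actual Lookups/Lemmatizer classes: a table cached in __init__ stays empty when the lookups data arrives only after construction, while fetching the table inside __call__ always sees the loaded data.

class Lookups:
    # Simplified stand-in for spaCy's lookups container.
    def __init__(self):
        self._tables = {}
    def get_table(self, name, default=None):
        return self._tables.get(name, default)
    def add_table(self, name, data):
        self._tables[name] = data

class EagerLemmatizer:
    def __init__(self, lookups):
        # Bug pattern: table resolved before the saved model's data is loaded.
        self.table = lookups.get_table("lemma_lookup_noun", {})
    def __call__(self, word):
        return self.table.get(word, word)

class LazyLemmatizer:
    def __init__(self, lookups):
        self.lookups = lookups  # keep the reference; resolve tables on use
    def __call__(self, word):
        table = self.lookups.get_table("lemma_lookup_noun", {})
        return table.get(word, word)

lookups = Lookups()
eager, lazy = EagerLemmatizer(lookups), LazyLemmatizer(lookups)
lookups.add_table("lemma_lookup_noun", {"koty": "kot"})  # deserialization happens late
print(eager("koty"))  # 'koty' -- stale empty table cached in __init__
print(lazy("koty"))   # 'kot'  -- table fetched after the data loaded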
8a1fb887402a71849fc6d42f8ee7a8b15c7799cd
nolar/kopf
null
null
Split ResourceHandler -> specialised Resource[Watching|Changing]Handlers This mirrors the respective registries and causes, which are already specialised. Needed for extra typing, and to introduce additional kinds of handlers.
[ { "change_type": "MODIFY", "diff": "@@ -145,7 +145,7 @@ def resume( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else registries.get_default_registry()\n real_resource = resources.Resource(group, version, plural)\n real_id = registries.generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=None,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n@@ -177,7 +177,7 @@ def create( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else registries.get_default_registry()\n real_resource = resources.Resource(group, version, plural)\n real_id = registries.generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=None,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n@@ -209,7 +209,7 @@ def update( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else registries.get_default_registry()\n real_resource = resources.Resource(group, version, plural)\n real_id = registries.generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=None,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n@@ -242,7 +242,7 @@ def delete( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else registries.get_default_registry()\n real_resource = resources.Resource(group, version, plural)\n real_id = registries.generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=None,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n@@ -276,7 +276,7 @@ def field( # lgtm[py/similar-function]\n real_resource = resources.Resource(group, version, plural)\n real_field = dicts.parse_field(field) or None # to not store tuple() as a no-field case.\n real_id = registries.generate_id(fn=fn, id=id, suffix=\".\".join(real_field or []))\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=real_field,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n@@ -303,12 +303,10 @@ def event( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else registries.get_default_registry()\n real_resource = resources.Resource(group, version, plural)\n real_id = registries.generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n- fn=fn, id=real_id, field=None,\n+ handler = handlers.ResourceWatchingHandler(\n+ fn=fn, id=real_id,\n errors=None, timeout=None, retries=None, backoff=None, cooldown=None,\n labels=labels, annotations=annotations, when=when,\n- initial=None, deleted=None, requires_finalizer=None,\n- reason=None,\n )\n real_registry.resource_watching_handlers[real_resource].append(handler)\n return fn\n@@ -365,7 +363,7 @@ def this( # lgtm[py/similar-function]\n real_registry = registry if registry is not None else handling.subregistry_var.get()\n real_id = 
registries.generate_id(fn=fn, id=id,\n prefix=parent_handler.id if parent_handler else None)\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n fn=fn, id=real_id, field=None,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n labels=labels, annotations=annotations, when=when,\n", "new_path": "kopf/on.py", "old_path": "kopf/on.py" }, { "change_type": "MODIFY", "diff": "@@ -52,14 +52,23 @@ class ActivityHandler(BaseHandler):\n \n @dataclasses.dataclass\n class ResourceHandler(BaseHandler):\n- fn: Union[callbacks.ResourceWatchingFn, callbacks.ResourceChangingFn] # type clarification\n+ labels: Optional[filters.MetaFilter]\n+ annotations: Optional[filters.MetaFilter]\n+ when: Optional[callbacks.WhenFilterFn]\n+\n+\n+@dataclasses.dataclass\n+class ResourceWatchingHandler(ResourceHandler):\n+ fn: callbacks.ResourceWatchingFn # type clarification\n+\n+\n+@dataclasses.dataclass\n+class ResourceChangingHandler(ResourceHandler):\n+ fn: callbacks.ResourceChangingFn # type clarification\n reason: Optional[causation.Reason]\n field: Optional[dicts.FieldPath]\n initial: Optional[bool]\n deleted: Optional[bool] # used for mixed-in (initial==True) @on.resume handlers only.\n- labels: Optional[filters.MetaFilter]\n- annotations: Optional[filters.MetaFilter]\n- when: Optional[callbacks.WhenFilterFn]\n requires_finalizer: Optional[bool]\n \n @property\n", "new_path": "kopf/reactor/handlers.py", "old_path": "kopf/reactor/handlers.py" }, { "change_type": "MODIFY", "diff": "@@ -72,7 +72,7 @@ cause_var: ContextVar[causation.BaseCause] = ContextVar('cause_var')\n async def execute(\n *,\n fns: Optional[Iterable[invocation.Invokable]] = None,\n- handlers: Optional[Iterable[handlers_.ResourceHandler]] = None,\n+ handlers: Optional[Iterable[handlers_.ResourceChangingHandler]] = None,\n registry: Optional[registries.ResourceChangingRegistry] = None,\n lifecycle: Optional[lifecycles.LifeCycleFn] = None,\n cause: Optional[causation.BaseCause] = None,\n@@ -107,7 +107,7 @@ async def execute(\n subregistry = registries.ResourceChangingRegistry()\n for id, fn in fns.items():\n real_id = registries.generate_id(fn=fn, id=id, prefix=parent_prefix)\n- handler = handlers_.ResourceHandler(\n+ handler = handlers_.ResourceChangingHandler(\n fn=fn, id=real_id,\n errors=None, timeout=None, retries=None, backoff=None, cooldown=None,\n labels=None, annotations=None, when=None,\n@@ -120,7 +120,7 @@ async def execute(\n subregistry = registries.ResourceChangingRegistry()\n for fn in fns:\n real_id = registries.generate_id(fn=fn, id=None, prefix=parent_prefix)\n- handler = handlers_.ResourceHandler(\n+ handler = handlers_.ResourceChangingHandler(\n fn=fn, id=real_id,\n errors=None, timeout=None, retries=None, backoff=None, cooldown=None,\n labels=None, annotations=None, when=None,\n", "new_path": "kopf/reactor/handling.py", "old_path": "kopf/reactor/handling.py" }, { "change_type": "MODIFY", "diff": "@@ -30,16 +30,17 @@ from kopf.structs import resources as resources_\n from kopf.utilities import piggybacking\n \n # We only type-check for known classes of handlers/callbacks, and ignore any custom subclasses.\n+CauseT = TypeVar('CauseT', bound=causation.BaseCause)\n+HandlerT = TypeVar('HandlerT', bound=handlers.BaseHandler)\n+ResourceHandlerT = TypeVar('ResourceHandlerT', bound=handlers.ResourceHandler)\n HandlerFnT = TypeVar('HandlerFnT',\n callbacks.ActivityFn,\n callbacks.ResourceWatchingFn,\n callbacks.ResourceChangingFn,\n Union[callbacks.ResourceWatchingFn, 
callbacks.ResourceChangingFn]) # DEPRECATED: for legacy_registries\n-HandlerT = TypeVar('HandlerT', handlers.ActivityHandler, handlers.ResourceHandler)\n-CauseT = TypeVar('CauseT', bound=causation.BaseCause)\n \n \n-class GenericRegistry(Generic[HandlerT, HandlerFnT]):\n+class GenericRegistry(Generic[HandlerFnT, HandlerT]):\n \"\"\" A generic base class of a simple registry (with no handler getters). \"\"\"\n _handlers: List[HandlerT]\n \n@@ -55,8 +56,8 @@ class GenericRegistry(Generic[HandlerT, HandlerFnT]):\n \n \n class ActivityRegistry(GenericRegistry[\n- handlers.ActivityHandler,\n- callbacks.ActivityFn]):\n+ callbacks.ActivityFn,\n+ handlers.ActivityHandler]):\n \n def register(\n self,\n@@ -109,20 +110,20 @@ class ActivityRegistry(GenericRegistry[\n \n \n class ResourceRegistry(\n- Generic[CauseT, HandlerFnT],\n- GenericRegistry[handlers.ResourceHandler, HandlerFnT]):\n+ Generic[CauseT, HandlerFnT, ResourceHandlerT],\n+ GenericRegistry[HandlerFnT, ResourceHandlerT]):\n \n def get_handlers(\n self,\n cause: CauseT,\n- ) -> Sequence[handlers.ResourceHandler]:\n+ ) -> Sequence[ResourceHandlerT]:\n return list(_deduplicated(self.iter_handlers(cause=cause)))\n \n @abc.abstractmethod\n def iter_handlers(\n self,\n cause: CauseT,\n- ) -> Iterator[handlers.ResourceHandler]:\n+ ) -> Iterator[ResourceHandlerT]:\n raise NotImplementedError\n \n def get_extra_fields(\n@@ -154,7 +155,8 @@ class ResourceRegistry(\n \n class ResourceWatchingRegistry(ResourceRegistry[\n causation.ResourceWatchingCause,\n- callbacks.ResourceWatchingFn]):\n+ callbacks.ResourceWatchingFn,\n+ handlers.ResourceWatchingHandler]):\n \n def register(\n self,\n@@ -175,10 +177,9 @@ class ResourceWatchingRegistry(ResourceRegistry[\n DeprecationWarning)\n \n real_id = generate_id(fn=fn, id=id)\n- handler = handlers.ResourceHandler(\n- id=real_id, fn=fn, reason=None, field=None,\n+ handler = handlers.ResourceWatchingHandler(\n+ id=real_id, fn=fn,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n- initial=None, deleted=None, requires_finalizer=None,\n labels=labels, annotations=annotations, when=when,\n )\n self.append(handler)\n@@ -187,7 +188,7 @@ class ResourceWatchingRegistry(ResourceRegistry[\n def iter_handlers(\n self,\n cause: causation.ResourceWatchingCause,\n- ) -> Iterator[handlers.ResourceHandler]:\n+ ) -> Iterator[handlers.ResourceWatchingHandler]:\n for handler in self._handlers:\n if match(handler=handler, cause=cause, ignore_fields=True):\n yield handler\n@@ -195,7 +196,8 @@ class ResourceWatchingRegistry(ResourceRegistry[\n \n class ResourceChangingRegistry(ResourceRegistry[\n causation.ResourceChangingCause,\n- callbacks.ResourceChangingFn]):\n+ callbacks.ResourceChangingFn,\n+ handlers.ResourceChangingHandler]):\n \n def register(\n self,\n@@ -226,7 +228,7 @@ class ResourceChangingRegistry(ResourceRegistry[\n \n real_field = dicts.parse_field(field) or None # to not store tuple() as a no-field case.\n real_id = generate_id(fn=fn, id=id, suffix=\".\".join(real_field or []))\n- handler = handlers.ResourceHandler(\n+ handler = handlers.ResourceChangingHandler(\n id=real_id, fn=fn, reason=reason, field=real_field,\n errors=errors, timeout=timeout, retries=retries, backoff=backoff, cooldown=cooldown,\n initial=initial, deleted=deleted, requires_finalizer=requires_finalizer,\n@@ -239,7 +241,7 @@ class ResourceChangingRegistry(ResourceRegistry[\n def iter_handlers(\n self,\n cause: causation.ResourceChangingCause,\n- ) -> Iterator[handlers.ResourceHandler]:\n+ ) -> 
Iterator[handlers.ResourceChangingHandler]:\n changed_fields = frozenset(field for _, field, _, _ in cause.diff or [])\n for handler in self._handlers:\n if handler.reason is None or handler.reason == cause.reason:\n@@ -398,7 +400,7 @@ class OperatorRegistry:\n def get_resource_watching_handlers(\n self,\n cause: causation.ResourceWatchingCause,\n- ) -> Sequence[handlers.ResourceHandler]:\n+ ) -> Sequence[handlers.ResourceWatchingHandler]:\n warnings.warn(\"registry.get_resource_watching_handlers() is deprecated; \"\n \"use registry.resource_watching_handlers[resource].get_handlers().\",\n DeprecationWarning)\n@@ -407,7 +409,7 @@ class OperatorRegistry:\n def get_resource_changing_handlers(\n self,\n cause: causation.ResourceChangingCause,\n- ) -> Sequence[handlers.ResourceHandler]:\n+ ) -> Sequence[handlers.ResourceChangingHandler]:\n warnings.warn(\"registry.get_resource_changing_handlers() is deprecated; \"\n \"use registry.resource_changing_handlers[resource].get_handlers().\",\n DeprecationWarning)\n@@ -426,7 +428,7 @@ class OperatorRegistry:\n def iter_resource_watching_handlers(\n self,\n cause: causation.ResourceWatchingCause,\n- ) -> Iterator[handlers.ResourceHandler]:\n+ ) -> Iterator[handlers.ResourceWatchingHandler]:\n \"\"\"\n Iterate all handlers for the low-level events.\n \"\"\"\n@@ -438,7 +440,7 @@ class OperatorRegistry:\n def iter_resource_changing_handlers(\n self,\n cause: causation.ResourceChangingCause,\n- ) -> Iterator[handlers.ResourceHandler]:\n+ ) -> Iterator[handlers.ResourceChangingHandler]:\n \"\"\"\n Iterate all handlers that match this cause/event, in the order they were registered (even if mixed).\n \"\"\"\n@@ -599,6 +601,7 @@ def _matches_field(\n ignore_fields: bool = False,\n ) -> bool:\n return (ignore_fields or\n+ not isinstance(handler, handlers.ResourceChangingHandler) or\n not handler.field or\n any(field[:len(handler.field)] == handler.field for field in changed_fields))\n \n", "new_path": "kopf/reactor/registries.py", "old_path": "kopf/reactor/registries.py" }, { "change_type": "MODIFY", "diff": "@@ -60,7 +60,8 @@ class BaseRegistry(metaclass=abc.ABCMeta):\n \n class SimpleRegistry(BaseRegistry, registries.ResourceRegistry[\n AnyCause,\n- Union[callbacks.ResourceWatchingFn, callbacks.ResourceChangingFn]]):\n+ Union[callbacks.ResourceWatchingFn, callbacks.ResourceChangingFn],\n+ handlers.ResourceHandler]):\n \"\"\"\n .. 
deprecated: 1.0\n \n", "new_path": "kopf/toolkits/legacy_registries.py", "old_path": "kopf/toolkits/legacy_registries.py" }, { "change_type": "MODIFY", "diff": "@@ -1,9 +1,9 @@\n import pytest\n \n-from kopf.reactor.handlers import ActivityHandler, ResourceHandler\n+from kopf.reactor.handlers import ActivityHandler, ResourceChangingHandler\n \n \n-@pytest.mark.parametrize('cls', [ActivityHandler, ResourceHandler])\n+@pytest.mark.parametrize('cls', [ActivityHandler, ResourceChangingHandler])\n def test_handler_with_no_args(cls):\n with pytest.raises(TypeError):\n cls()\n@@ -51,7 +51,7 @@ def test_resource_handler_with_all_args(mocker):\n annotations = mocker.Mock()\n when = mocker.Mock()\n requires_finalizer = mocker.Mock()\n- handler = ResourceHandler(\n+ handler = ResourceChangingHandler(\n fn=fn,\n id=id,\n reason=reason,\n", "new_path": "tests/basic-structs/test_handlers.py", "old_path": "tests/basic-structs/test_handlers.py" }, { "change_type": "MODIFY", "diff": "@@ -1,7 +1,7 @@\n # Original test-file: tests/basic-structs/test_handlers.py\n import pytest\n \n-from kopf.reactor.handlers import ActivityHandler, ResourceHandler\n+from kopf.reactor.handlers import ActivityHandler, ResourceChangingHandler\n \n \n def test_activity_handler_with_deprecated_cooldown_instead_of_backoff(mocker):\n@@ -54,7 +54,7 @@ def test_resource_handler_with_deprecated_cooldown_instead_of_backoff(mocker):\n requires_finalizer = mocker.Mock()\n \n with pytest.deprecated_call(match=r\"use backoff=\"):\n- handler = ResourceHandler(\n+ handler = ResourceChangingHandler(\n fn=fn,\n id=id,\n reason=reason,\n", "new_path": "tests/basic-structs/test_handlers_deprecated_cooldown.py", "old_path": "tests/basic-structs/test_handlers_deprecated_cooldown.py" }, { "change_type": "MODIFY", "diff": "@@ -5,7 +5,7 @@ import pytest\n \n import kopf\n from kopf.reactor.causation import HANDLER_REASONS\n-from kopf.reactor.handlers import ResourceHandler\n+from kopf.reactor.handlers import ResourceChangingHandler\n from kopf.reactor.processing import process_resource_event\n from kopf.structs.containers import ResourceMemories\n from kopf.structs.lastseen import LAST_SEEN_ANNOTATION\n@@ -22,7 +22,7 @@ async def test_skipped_with_no_handlers(\n cause_mock.reason = cause_type\n \n assert not registry.resource_changing_handlers[resource] # prerequisite\n- registry.resource_changing_handlers[resource].append(ResourceHandler(\n+ registry.resource_changing_handlers[resource].append(ResourceChangingHandler(\n reason='a-non-existent-cause-type',\n fn=lambda **_: None, id='id',\n errors=None, timeout=None, retries=None, backoff=None, cooldown=None,\n", "new_path": "tests/handling/test_no_handlers.py", "old_path": "tests/handling/test_no_handlers.py" }, { "change_type": "MODIFY", "diff": "@@ -4,7 +4,7 @@ from kopf import ActivityRegistry\n from kopf import OperatorRegistry\n from kopf import ResourceWatchingRegistry, ResourceChangingRegistry\n from kopf import SimpleRegistry, GlobalRegistry # deprecated, but tested\n-from kopf.reactor.handlers import HandlerId, ResourceHandler\n+from kopf.reactor.handlers import HandlerId, ResourceChangingHandler\n \n \n @pytest.fixture(params=[\n@@ -47,7 +47,7 @@ def parent_handler():\n def parent_fn(**_):\n pass\n \n- return ResourceHandler(\n+ return ResourceChangingHandler(\n fn=parent_fn, id=HandlerId('parent_fn'),\n errors=None, retries=None, timeout=None, backoff=None, cooldown=None,\n labels=None, annotations=None, when=None,\n", "new_path": "tests/registries/conftest.py", "old_path": 
"tests/registries/conftest.py" }, { "change_type": "MODIFY", "diff": "@@ -5,7 +5,7 @@ import pytest\n import kopf\n from kopf import OperatorRegistry\n from kopf.reactor.causation import ResourceChangingCause, Reason, ALL_REASONS\n-from kopf.reactor.handlers import ResourceHandler\n+from kopf.reactor.handlers import ResourceChangingHandler\n from kopf.structs.bodies import Body\n from kopf.structs.dicts import parse_field\n from kopf.structs.filters import MetaFilterToken\n@@ -54,7 +54,7 @@ def registry():\n @pytest.fixture()\n def handler_factory(registry, resource):\n def factory(**kwargs):\n- handler = ResourceHandler(**dict(dict(\n+ handler = ResourceChangingHandler(**dict(dict(\n fn=some_fn, id='a',\n errors=None, timeout=None, retries=None, backoff=None, cooldown=None,\n initial=None, deleted=None, requires_finalizer=None,\n", "new_path": "tests/registries/test_handler_matching.py", "old_path": "tests/registries/test_handler_matching.py" } ]
d45a6e995f92155c4d4c12f71a7d8ef78d604f4b
intel/lpot
null
null
Add the performance_only option to the yaml configuration. This option is located under the tuning/exit_policy field. If this option is set to True, LPOT will generate the fully quantized model without criterion checking. The default value of this option is False.
[ { "change_type": "MODIFY", "diff": "@@ -302,7 +302,7 @@ schema = Schema({\n 'strategy': {'name': 'basic'},\n 'accuracy_criterion': {'relative': 0.01},\n 'objective': 'performance',\n- 'exit_policy': {'timeout': 0, 'max_trials': 100},\n+ 'exit_policy': {'timeout': 0, 'max_trials': 100, 'performance_only': False},\n 'random_seed': 1978, 'tensorboard': False,\n 'workspace': {'path': None}}): {\n Optional('strategy', default={'name': 'basic'}): {\n@@ -316,9 +316,12 @@ schema = Schema({\n Optional('absolute'): And(Or(str, float), Use(percent_to_float)),\n },\n Optional('objective', default='performance'): And(str, lambda s: s in OBJECTIVES),\n- Optional('exit_policy', default={'timeout': 0, 'max_trials': 100}): {\n+ Optional('exit_policy', default={'timeout': 0,\n+ 'max_trials': 100,\n+ 'performance_only': False}): {\n Optional('timeout', default=0): int,\n Optional('max_trials', default=100): int,\n+ Optional('performance_only', default=False): bool,\n },\n Optional('random_seed', default=1978): int,\n Optional('tensorboard', default=False): And(bool, lambda s: s in [True, False]),\n", "new_path": "lpot/conf/config.py", "old_path": "lpot/conf/config.py" }, { "change_type": "MODIFY", "diff": "@@ -421,7 +421,8 @@ class TuneStrategy(object):\n \"\"\"\n need_stop = False\n \n- if self.objective.compare(self.best_tune_result, self.baseline):\n+ if self.cfg.tuning.exit_policy.performance_only or \\\n+ self.objective.compare(self.best_tune_result, self.baseline):\n del self.best_tune_result\n del self.best_qmodel\n self.best_tune_result = self.last_tune_result\n@@ -443,7 +444,9 @@ class TuneStrategy(object):\n ('[{:.4f}, {:.4f}]'.format(\n *self.best_tune_result) if self.best_tune_result else 'None'))\n \n- if timeout.seconds != 0 and timeout.timed_out:\n+ if self.cfg.tuning.exit_policy.performance_only:\n+ need_stop = True\n+ elif timeout.seconds != 0 and timeout.timed_out:\n need_stop = True\n elif timeout.seconds == 0 and self.best_tune_result:\n need_stop = True\n", "new_path": "lpot/strategy/strategy.py", "old_path": "lpot/strategy/strategy.py" }, { "change_type": "MODIFY", "diff": "@@ -107,7 +107,7 @@ tuning:\n exit_policy:\n timeout: 0 # optional. tuning timeout (seconds). default value is 0 which means early stop. combine with max_trials field to decide when to exit.\n max_trials: 100 # optional. max tune times. default value is 100. combine with timeout field to decide when to exit.\n-\n+ performance_only: False # optional. max tune times. default value is False which means only generate fully quantized model.\n random_seed: 9527 # optional. random seed for deterministic tuning.\n tensorboard: True # optional. dump tensor distribution in evaluation phase for debug purpose. 
default value is False.\n \n", "new_path": "lpot/template/ptq.yaml", "old_path": "lpot/template/ptq.yaml" }, { "change_type": "MODIFY", "diff": "@@ -35,7 +35,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.05\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_config_regex.py", "old_path": "test/test_config_regex.py" }, { "change_type": "MODIFY", "diff": "@@ -36,7 +36,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_concat.py", "old_path": "test/test_tensorflow_graph_concat.py" }, { "change_type": "MODIFY", "diff": "@@ -40,7 +40,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_conv_fusion.py", "old_path": "test/test_tensorflow_graph_conv_fusion.py" }, { "change_type": "MODIFY", "diff": "@@ -54,12 +54,10 @@ def build_fake_yaml_2():\n tuning:\n strategy:\n name: bayesian\n- exit_policy:\n- max_trials: 1\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_input_output.py", "old_path": "test/test_tensorflow_graph_input_output.py" }, { "change_type": "MODIFY", "diff": "@@ -28,7 +28,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_matmul_fusion.py", "old_path": "test/test_tensorflow_graph_matmul_fusion.py" }, { "change_type": "MODIFY", "diff": "@@ -32,7 +32,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_pad_conv.py", "old_path": "test/test_tensorflow_graph_pad_conv.py" }, { "change_type": "MODIFY", "diff": "@@ -33,7 +33,7 @@ def build_fake_yaml():\n accuracy_criterion:\n relative: 0.9\n exit_policy:\n- max_trials: 1 \n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_graph_post_cse_optimize.py", "old_path": "test/test_tensorflow_graph_post_cse_optimize.py" }, { "change_type": "MODIFY", "diff": "@@ -37,7 +37,7 @@ def build_fake_yaml_disable_first_quantization():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n@@ -74,7 +74,7 @@ def build_fake_yaml_enable_first_quantization():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n@@ -111,7 +111,7 @@ def build_fake_yaml_disable_scale_propagation():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n@@ -148,7 +148,7 @@ def build_fake_yaml_enable_scale_propagation():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n@@ -185,7 +185,7 @@ def build_fake_yaml_enable_scale_unification():\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n@@ -222,7 +222,7 @@ def build_fake_yaml_disable_scale_unification():\n 
accuracy_criterion:\n relative: 0.1\n exit_policy:\n- max_trials: 1\n+ performance_only: True\n workspace:\n path: saved\n '''\n", "new_path": "test/test_tensorflow_recipe.py", "old_path": "test/test_tensorflow_recipe.py" } ]
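A hedged sketch of how the new exit-policy flag changes the tuning loop, with simplified stand-ins for LPOT's strategy internals: performance_only accepts the first (fully quantized) result unconditionally and stops, skipping the accuracy criterion entirely.

def meets_criterion(result, baseline, relative=0.01):
    # Usual relative accuracy-criterion check.
    return result >= baseline * (1 - relative)

def should_stop(exit_policy, best_result, baseline, timed_out, trials):
    if exit_policy.get("performance_only", False):
        return True  # take the fully quantized model, no criterion checking
    if timed_out:
        return True
    if best_result is not None and meets_criterion(best_result, baseline):
        return True
    return trials >= exit_policy.get("max_trials", 100)

policy = {"performance_only": True}
print(should_stop(policy, best_result=None, baseline=0.75,
                  timed_out=False, trials=0))  # True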
179e3989fe9f531ff778815c6449212f390c5082
siliconcompiler/siliconcompiler
null
null
Update tool drivers for the new version check scheme Give tools a >= version specifier. Update parse_version() and implement normalize_version() where needed.
[ { "change_type": "MODIFY", "diff": "@@ -33,7 +33,7 @@ def setup(chip):\n refdir = 'tools/'+tool\n chip.set('eda', tool, 'exe', 'bambu', clobber=False)\n chip.set('eda', tool, 'vswitch', '--version', clobber=False)\n- chip.set('eda', tool, 'version', '0.9.6', clobber=False)\n+ chip.set('eda', tool, 'version', '>=0.9.6', clobber=False)\n chip.set('eda', tool, 'refdir', step, index, refdir, clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n chip.set('eda', tool, 'option', step, index, [])\n", "new_path": "siliconcompiler/tools/bambu/bambu.py", "old_path": "siliconcompiler/tools/bambu/bambu.py" }, { "change_type": "MODIFY", "diff": "@@ -49,7 +49,7 @@ def setup(chip):\n # This is technically the 'verbose' flag, but used alone it happens to give\n # us the version # and exit cleanly, so we'll use it here.\n chip.set('eda', tool, 'vswitch', '-v', clobber=False)\n- chip.set('eda', tool, 'version', '2021.07', clobber=False)\n+ chip.set('eda', tool, 'version', '>=2021.07', clobber=False)\n chip.set('eda', tool, 'copy', False, clobber=False)\n chip.set('eda', tool, 'refdir', step, index, refdir, clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n@@ -62,7 +62,9 @@ def setup(chip):\n chip.add('eda', tool, 'require', step, index, 'source')\n \n def parse_version(stdout):\n+ # Examples:\n # Bluespec Compiler, version 2021.12.1-27-g9a7d5e05 (build 9a7d5e05)\n+ # Bluespec Compiler, version 2021.07 (build 4cac6eba)\n \n long_version = stdout.split()[3]\n return long_version.split('-')[0]\n", "new_path": "siliconcompiler/tools/bluespec/bluespec.py", "old_path": "siliconcompiler/tools/bluespec/bluespec.py" }, { "change_type": "MODIFY", "diff": "@@ -45,7 +45,7 @@ def setup(chip):\n refdir = 'tools/'+tool\n chip.set('eda', tool, 'exe', 'sbt', clobber=False)\n chip.set('eda', tool, 'vswitch', '--version', clobber=False)\n- chip.set('eda', tool, 'version', '1.5.5', clobber=False)\n+ chip.set('eda', tool, 'version', '>=1.5.5', clobber=False)\n chip.set('eda', tool, 'copy', True, clobber=False)\n chip.set('eda', tool, 'refdir', step, index, refdir, clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n", "new_path": "siliconcompiler/tools/chisel/chisel.py", "old_path": "siliconcompiler/tools/chisel/chisel.py" }, { "change_type": "MODIFY", "diff": "@@ -45,7 +45,7 @@ def setup(chip):\n chip.set('eda', tool, 'copy', 'false', clobber=clobber)\n chip.set('eda', tool, 'exe', 'ghdl', clobber=clobber)\n chip.set('eda', tool, 'vswitch', '--version', clobber=clobber)\n- chip.set('eda', tool, 'version', '2.0.0-dev', clobber=clobber)\n+ chip.set('eda', tool, 'version', '>=2.0.0-dev', clobber=clobber)\n chip.set('eda', tool, 'threads', step, index, '4', clobber=clobber)\n chip.set('eda', tool, 'option', step, index, '', clobber=clobber)\n \n@@ -92,6 +92,9 @@ def runtime_options(chip):\n \n def parse_version(stdout):\n # first line: GHDL 2.0.0-dev (1.0.0.r827.ge49cb7b9) [Dunoon edition]\n+\n+ # '*-dev' is interpreted by packaging.version as a \"developmental release\",\n+ # which has the correct semantics. e.g. 
Version('2.0.0') > Version('2.0.0-dev')\n return stdout.split()[1]\n \n ################################\n", "new_path": "siliconcompiler/tools/ghdl/ghdl.py", "old_path": "siliconcompiler/tools/ghdl/ghdl.py" }, { "change_type": "MODIFY", "diff": "@@ -47,7 +47,7 @@ def setup(chip):\n # Standard Setup\n chip.set('eda', tool, 'exe', 'iverilog', clobber=False)\n chip.set('eda', tool, 'vswitch', '-V', clobber=False)\n- chip.set('eda', tool, 'version', '10.3', clobber=False)\n+ chip.set('eda', tool, 'version', '>=10.3', clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n \n if step == 'compile':\n", "new_path": "siliconcompiler/tools/icarus/icarus.py", "old_path": "siliconcompiler/tools/icarus/icarus.py" }, { "change_type": "MODIFY", "diff": "@@ -90,7 +90,8 @@ def setup(chip, mode=\"batch\"):\n \n chip.set('eda', tool, 'exe', klayout_exe, clobber=True)\n chip.set('eda', tool, 'vswitch', ['-zz', '-v'], clobber=clobber)\n- chip.set('eda', tool, 'version', '0.27.8', clobber=clobber)\n+ # Versions < 0.27.6 may be bundled with an incompatible version of Python.\n+ chip.set('eda', tool, 'version', '>=0.27.6', clobber=clobber)\n chip.set('eda', tool, 'format', 'json', clobber=clobber)\n chip.set('eda', tool, 'copy', 'true', clobber=clobber)\n chip.set('eda', tool, 'refdir', step, index, refdir, clobber=clobber)\n", "new_path": "siliconcompiler/tools/klayout/klayout.py", "old_path": "siliconcompiler/tools/klayout/klayout.py" }, { "change_type": "MODIFY", "diff": "@@ -53,7 +53,7 @@ def setup(chip):\n \n chip.set('eda', tool, 'exe', tool)\n chip.set('eda', tool, 'vswitch', '--version')\n- chip.set('eda', tool, 'version', '8.3.274')\n+ chip.set('eda', tool, 'version', '>=8.3.196')\n chip.set('eda', tool, 'format', 'tcl')\n chip.set('eda', tool, 'copy', 'true') # copy in .magicrc file\n chip.set('eda', tool, 'threads', step, index, 4)\n", "new_path": "siliconcompiler/tools/magic/magic.py", "old_path": "siliconcompiler/tools/magic/magic.py" }, { "change_type": "MODIFY", "diff": "@@ -49,7 +49,7 @@ def setup(chip):\n \n chip.set('eda', tool, 'exe', tool)\n chip.set('eda', tool, 'vswitch', '-batch')\n- chip.set('eda', tool, 'version', '1.5.210')\n+ chip.set('eda', tool, 'version', '>=1.5.192')\n chip.set('eda', tool, 'format', 'tcl')\n chip.set('eda', tool, 'copy', 'true')\n chip.set('eda', tool, 'threads', step, index, 4)\n", "new_path": "siliconcompiler/tools/netgen/netgen.py", "old_path": "siliconcompiler/tools/netgen/netgen.py" }, { "change_type": "MODIFY", "diff": "@@ -40,7 +40,7 @@ def setup(chip):\n clobber = False\n chip.set('eda', tool, 'exe', 'nextpnr-ice40', clobber=clobber)\n chip.set('eda', tool, 'vswitch', '--version', clobber=clobber)\n- chip.set('eda', tool, 'version', 'c73d4cf6', clobber=clobber)\n+ chip.set('eda', tool, 'version', '>=0.2', clobber=clobber)\n chip.set('eda', tool, 'option', step, index, \"\", clobber=clobber)\n \n topmodule = chip.get('design')\n@@ -80,8 +80,14 @@ def runtime_options(chip):\n ################################\n \n def parse_version(stdout):\n+ # Examples:\n # nextpnr-ice40 -- Next Generation Place and Route (Version c73d4cf6)\n- return stdout.split()[-1].rstrip(')')\n+ # nextpnr-ice40 -- Next Generation Place and Route (Version nextpnr-0.2)\n+ version = stdout.split()[-1].rstrip(')')\n+ if version.startswith('nextpnr-'):\n+ return version.split('-')[1]\n+ else:\n+ return version\n \n ################################\n # Setup Tool (pre executable)\n", "new_path": "siliconcompiler/tools/nextpnr/nextpnr.py", "old_path": 
"siliconcompiler/tools/nextpnr/nextpnr.py" }, { "change_type": "MODIFY", "diff": "@@ -48,7 +48,6 @@ def setup(chip):\n index = chip.get('arg','index')\n \n \n- chip.set('eda', tool, 'version', '0.0')\n chip.set('eda', tool, 'copy', 'true')\n chip.set('eda', tool, 'refdir', step, index, refdir)\n \n", "new_path": "siliconcompiler/tools/openfpga/openfpga.py", "old_path": "siliconcompiler/tools/openfpga/openfpga.py" }, { "change_type": "MODIFY", "diff": "@@ -50,7 +50,7 @@ def setup(chip):\n # tool setup\n chip.set('eda', tool, 'exe', tool, clobber=False)\n chip.set('eda', tool, 'vswitch', '--Version', clobber=False)\n- chip.set('eda', tool, 'version', 'v0.5.0', clobber=False)\n+ chip.set('eda', tool, 'version', '0.5.0', clobber=False)\n \n options = []\n options.append(\"inputs\" + chip.get('design') + \".bit\")\n", "new_path": "siliconcompiler/tools/openfpgaloader/openfpgaloader.py", "old_path": "siliconcompiler/tools/openfpgaloader/openfpgaloader.py" }, { "change_type": "MODIFY", "diff": "@@ -62,7 +62,7 @@ def setup(chip, mode='batch'):\n \n chip.set('eda', tool, 'exe', tool, clobber=clobber)\n chip.set('eda', tool, 'vswitch', '-version', clobber=clobber)\n- chip.set('eda', tool, 'version', 'v2.0', clobber=clobber)\n+ chip.set('eda', tool, 'version', '>=v2.0-3078', clobber=clobber)\n chip.set('eda', tool, 'format', 'tcl', clobber=clobber)\n chip.set('eda', tool, 'copy', 'true', clobber=clobber)\n chip.set('eda', tool, 'option', step, index, option, clobber=clobber)\n@@ -147,8 +147,18 @@ def parse_version(stdout):\n # strip off the \"1\" prefix if it's there\n version = stdout.split()[-1]\n \n- # strip off extra details in new version styles\n- return version.split('-')[0]\n+ pieces = version.split('-')\n+ if len(pieces) > 1:\n+ # strip off the hash in the new version style\n+ return '-'.join(pieces[:-1])\n+ else:\n+ return pieces[0]\n+\n+def normalize_version(version):\n+ if '.' 
in version:\n+ return version.lstrip('v')\n+ else:\n+ return '0'\n \n def pre_process(chip):\n step = chip.get('arg', 'step')\n", "new_path": "siliconcompiler/tools/openroad/openroad.py", "old_path": "siliconcompiler/tools/openroad/openroad.py" }, { "change_type": "MODIFY", "diff": "@@ -52,7 +52,7 @@ def setup(chip):\n # Standard Setup\n chip.set('eda', tool, 'exe', exe, clobber=False)\n chip.set('eda', tool, 'vswitch', '--version', clobber=False)\n- chip.set('eda', tool, 'version', '>=1.14', clobber=False)\n+ chip.set('eda', tool, 'version', '>=1.13', clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n \n # -parse is slow but ensures the SV code is valid\n@@ -87,9 +87,6 @@ def parse_version(stdout):\n # grab version # by splitting on whitespace\n return stdout.split()[1]\n \n-def normalize_version(version):\n- return tuple(int(v) for v in version.split('.'))\n-\n ################################\n # Custom runtime options\n ################################\n", "new_path": "siliconcompiler/tools/surelog/surelog.py", "old_path": "siliconcompiler/tools/surelog/surelog.py" }, { "change_type": "MODIFY", "diff": "@@ -50,7 +50,7 @@ def setup(chip):\n \n chip.set('eda', tool, 'exe', tool)\n chip.set('eda', tool, 'vswitch', '--numeric-version')\n- chip.set('eda', tool, 'version', '0.0.9')\n+ chip.set('eda', tool, 'version', '>=0.0.9')\n chip.set('eda', tool, 'threads', step, index, 4)\n \n # Since we run sv2v after the import/preprocess step, there should be no\n@@ -72,7 +72,7 @@ def setup(chip):\n \n def parse_version(stdout):\n # 0.0.7-130-g1aa30ea\n- return stdout.split('-')[0]\n+ return '-'.join(stdout.split('-')[:-1])\n \n ################################\n # Post_process (post executable)\n", "new_path": "siliconcompiler/tools/sv2v/sv2v.py", "old_path": "siliconcompiler/tools/sv2v/sv2v.py" }, { "change_type": "MODIFY", "diff": "@@ -55,7 +55,7 @@ def setup(chip):\n # Standard Setup\n chip.set('eda', tool, 'exe', 'verilator', clobber=False)\n chip.set('eda', tool, 'vswitch', '--version', clobber=False)\n- chip.set('eda', tool, 'version', '4.028', clobber=False)\n+ chip.set('eda', tool, 'version', '>=4.028', clobber=False)\n chip.set('eda', tool, 'threads', step, index, os.cpu_count(), clobber=False)\n \n # Options driven on a per step basis (use 'set' on first call!)\n", "new_path": "siliconcompiler/tools/verilator/verilator.py", "old_path": "siliconcompiler/tools/verilator/verilator.py" }, { "change_type": "MODIFY", "diff": "@@ -50,7 +50,7 @@ def setup(chip):\n # Standard Setup\n chip.set('eda', tool, 'exe', 'yosys', clobber=False)\n chip.set('eda', tool, 'vswitch', '--version', clobber=False)\n- chip.set('eda', tool, 'version', '0.13', clobber=False)\n+ chip.set('eda', tool, 'version', '>=0.13', clobber=False)\n chip.set('eda', tool, 'format', 'tcl', clobber=False)\n chip.set('eda', tool, 'copy', 'true', clobber=False)\n chip.set('eda', tool, 'option', step, index, '-c', clobber=False)\n@@ -116,8 +116,12 @@ def pre_process(chip):\n \n def parse_version(stdout):\n # Yosys 0.9+3672 (git sha1 014c7e26, gcc 7.5.0-3ubuntu1~18.04 -fPIC -Os)\n- version = stdout.split()[1]\n- return version.split('+')[0]\n+ return stdout.split()[1]\n+\n+def normalize_version(version):\n+ # Replace '+', which represents a \"local version label\", with '-', which is\n+ # an \"implicit post release number\".\n+ return version.replace('+', '-')\n \n ################################\n # Post_process (post executable)\n", "new_path": "siliconcompiler/tools/yosys/yosys.py", 
"old_path": "siliconcompiler/tools/yosys/yosys.py" } ]
b39500fedcd51797794bec9aed292a647ef5bb70
astropy/astroquery
null
null
Remove deprecated NASA Exoplanet Archive code The removed code was deprecated in v0.4.1 almost two years ago.
[ { "change_type": "MODIFY", "diff": "@@ -17,7 +17,6 @@ from astropy.coordinates import SkyCoord\n from astropy.io import ascii\n from astropy.io.votable import parse_single_table\n from astropy.table import QTable\n-from astropy.utils import deprecated, deprecated_renamed_argument\n from astropy.utils.exceptions import AstropyWarning\n \n # Import astroquery utilities\n@@ -629,25 +628,6 @@ class NasaExoplanetArchiveClass(BaseQuery):\n \n return data\n \n- def _handle_all_columns_argument(self, **kwargs):\n- \"\"\"\n- Deal with the ``all_columns`` argument that was exposed by earlier versions\n-\n- This method will warn users about this deprecated argument and update the query syntax\n- to use ``select='*'``.\n- \"\"\"\n- # We also have to manually pop these arguments from the dict because\n- # `deprecated_renamed_argument` doesn't do that for some reason for all supported astropy\n- # versions (v3.1 was beheaving as expected)\n- kwargs.pop(\"show_progress\", None)\n- kwargs.pop(\"table_path\", None)\n-\n- # Deal with `all_columns` properly\n- if kwargs.pop(\"all_columns\", None):\n- kwargs[\"select\"] = kwargs.get(\"select\", \"*\")\n-\n- return kwargs\n-\n @class_or_instance\n def _request_to_sql(self, request_payload):\n \"\"\"Convert request_payload dict to SQL query string to be parsed by TAP.\"\"\"\n@@ -677,51 +657,5 @@ class NasaExoplanetArchiveClass(BaseQuery):\n \n return tap_query\n \n- @deprecated(since=\"v0.4.1\", alternative=\"query_object\")\n- @deprecated_renamed_argument([\"show_progress\", \"table_path\"],\n- [None, None], \"v0.4.1\", arg_in_kwargs=True)\n- def query_planet(self, planet_name, cache=None, **criteria):\n- \"\"\"\n- Search the ``exoplanets`` table for a confirmed planet\n-\n- Parameters\n- ----------\n- planet_name : str\n- The name of a confirmed planet. If ``regularize`` is ``True``, an attempt will be made\n- to regularize this name using the ``aliastable`` table.\n- cache : bool, optional\n- Should the request result be cached? This can be useful for large repeated queries,\n- but since the data in the archive is updated regularly, this defaults to ``False``.\n- **criteria\n- Any other filtering criteria to apply. Values provided using the ``where`` keyword will\n- be ignored.\n- \"\"\"\n- criteria = self._handle_all_columns_argument(**criteria)\n- criteria[\"where\"] = \"pl_name='{0}'\".format(planet_name.strip())\n- return self.query_criteria(\"exoplanets\", cache=cache, **criteria)\n-\n- @deprecated(since=\"v0.4.1\", alternative=\"query_object\")\n- @deprecated_renamed_argument([\"show_progress\", \"table_path\"],\n- [None, None], \"v0.4.1\", arg_in_kwargs=True)\n- def query_star(self, host_name, cache=None, **criteria):\n- \"\"\"\n- Search the ``exoplanets`` table for a confirmed planet host\n-\n- Parameters\n- ----------\n- host_name : str\n- The name of a confirmed planet host. If ``regularize`` is ``True``, an attempt will be\n- made to regularize this name using the ``aliastable`` table.\n- cache : bool, optional\n- Should the request result be cached? This can be useful for large repeated queries,\n- but since the data in the archive is updated regularly, this defaults to ``False``.\n- **criteria\n- Any other filtering criteria to apply. 
Values provided using the ``where`` keyword will\n- be ignored.\n- \"\"\"\n- criteria = self._handle_all_columns_argument(**criteria)\n- criteria[\"where\"] = \"pl_hostname='{0}'\".format(host_name.strip())\n- return self.query_criteria(\"exoplanets\", cache=cache, **criteria)\n-\n \n NasaExoplanetArchive = NasaExoplanetArchiveClass()\n", "new_path": "astroquery/ipac/nexsci/nasa_exoplanet_archive/core.py", "old_path": "astroquery/ipac/nexsci/nasa_exoplanet_archive/core.py" }, { "change_type": "MODIFY", "diff": "@@ -168,46 +168,6 @@ def test_get_access_url():\n assert get_access_url('aliaslookup') == conf.url_aliaslookup\n \n \n-def test_backwards_compat(patch_get):\n- \"\"\"\n- These are the tests from the previous version of this interface.\n- They query old tables by default and should return InvalidTableError.\n- \"\"\"\n- NasaExoplanetArchiveMock = NasaExoplanetArchiveClass()\n-\n- NasaExoplanetArchiveMock._tap_tables = ['list']\n-\n- # test_hd209458b_exoplanets_archive\n- with pytest.warns(AstropyDeprecationWarning):\n- with pytest.raises(InvalidTableError) as error:\n- NasaExoplanetArchiveMock.query_planet(\"HD 209458 b \")\n- assert \"replaced\" in str(error)\n-\n- # test_hd209458b_exoplanet_archive_coords\n- with pytest.warns(AstropyDeprecationWarning):\n- with pytest.raises(InvalidTableError) as error:\n- NasaExoplanetArchiveMock.query_planet(\"HD 209458 b \")\n- assert \"replaced\" in str(error)\n-\n- # test_hd209458_stellar_exoplanet\n- with pytest.warns(AstropyDeprecationWarning):\n- with pytest.raises(InvalidTableError) as error:\n- NasaExoplanetArchiveMock.query_star(\"HD 209458\")\n- assert \"replaced\" in str(error)\n-\n- # test_hd136352_stellar_exoplanet_archive\n- with pytest.warns(AstropyDeprecationWarning):\n- with pytest.raises(InvalidTableError) as error:\n- NasaExoplanetArchiveMock.query_star(\"HD 136352\")\n- assert \"replaced\" in str(error)\n-\n- # test_exoplanet_archive_query_all_columns\n- with pytest.warns(AstropyDeprecationWarning):\n- with pytest.raises(InvalidTableError) as error:\n- NasaExoplanetArchiveMock.query_planet(\"HD 209458 b \", all_columns=True)\n- assert \"replaced\" in str(error)\n-\n-\n @pytest.mark.parametrize(\"table,query\", API_TABLES)\n def test_api_tables(patch_get, table, query):\n NasaExoplanetArchiveMock = NasaExoplanetArchiveClass()\n", "new_path": "astroquery/ipac/nexsci/nasa_exoplanet_archive/tests/test_nasa_exoplanet_archive.py", "old_path": "astroquery/ipac/nexsci/nasa_exoplanet_archive/tests/test_nasa_exoplanet_archive.py" } ]
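For context, a hedged sketch of the migration path the deprecation pointed to: the removed per-table helpers reduce to the generic query methods kept by this commit. query_criteria() appears in the diff above; the table and column names here follow the archive's current schema and may differ.

from astroquery.ipac.nexsci.nasa_exoplanet_archive import NasaExoplanetArchive

# Previously: NasaExoplanetArchive.query_planet("HD 209458 b")
planet = NasaExoplanetArchive.query_object("HD 209458 b")

# Previously: NasaExoplanetArchive.query_star("HD 209458")
host = NasaExoplanetArchive.query_criteria(
    table="ps", select="*", where="hostname='HD 209458'"
)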
987b9c509085970d86910c48cb85304cef537c40
astropy/astroquery
null
null
Remove get_raw_response optional parameters. This functionality is already available with the *_async() methods.
[ { "change_type": "MODIFY", "diff": "@@ -176,7 +176,7 @@ class HorizonsClass(BaseQuery):\n closest_apparition=False, no_fragments=False,\n quantities=conf.eph_quantities,\n get_query_payload=False,\n- get_raw_response=False, cache=True,\n+ cache=True,\n extra_precision=False):\n \"\"\"\n Query JPL Horizons for ephemerides.\n@@ -480,10 +480,6 @@ class HorizonsClass(BaseQuery):\n When set to `True` the method returns the HTTP request parameters as\n a dict, default: False\n \n- get_raw_response : boolean, optional\n- Return raw data as obtained by JPL Horizons without parsing the data\n- into a table, default: False\n-\n extra_precision : boolean, optional\n Enables extra precision in RA and DEC values; default: False\n \n@@ -614,10 +610,6 @@ class HorizonsClass(BaseQuery):\n if get_query_payload:\n return request_payload\n \n- # set return_raw flag, if raw response desired\n- if get_raw_response:\n- self.return_raw = True\n-\n # query and parse\n response = self._request('GET', self.server_url, params=request_payload,\n timeout=self.TIMEOUT, cache=cache)\n@@ -637,7 +629,7 @@ class HorizonsClass(BaseQuery):\n refplane='ecliptic',\n tp_type='absolute',\n closest_apparition=False, no_fragments=False,\n- get_raw_response=False, cache=True):\n+ cache=True):\n \"\"\"\n Query JPL Horizons for osculating orbital elements.\n \n@@ -728,10 +720,6 @@ class HorizonsClass(BaseQuery):\n When set to ``True`` the method returns the HTTP request parameters\n as a dict, default: False\n \n- get_raw_response: boolean, optional\n- Return raw data as obtained by JPL Horizons without parsing the data\n- into a table, default: False\n-\n \n Returns\n -------\n@@ -830,10 +818,6 @@ class HorizonsClass(BaseQuery):\n if get_query_payload:\n return request_payload\n \n- # set return_raw flag, if raw response desired\n- if get_raw_response:\n- self.return_raw = True\n-\n # query and parse\n response = self._request('GET', self.server_url, params=request_payload,\n timeout=self.TIMEOUT, cache=cache)\n@@ -850,7 +834,7 @@ class HorizonsClass(BaseQuery):\n \n def vectors_async(self, get_query_payload=False,\n closest_apparition=False, no_fragments=False,\n- get_raw_response=False, cache=True,\n+ cache=True,\n refplane='ecliptic', aberrations='geometric',\n delta_T=False,):\n \"\"\"\n@@ -933,10 +917,6 @@ class HorizonsClass(BaseQuery):\n When set to `True` the method returns the HTTP request parameters as\n a dict, default: False\n \n- get_raw_response: boolean, optional\n- Return raw data as obtained by JPL Horizons without parsing the data\n- into a table, default: False\n-\n refplane : string\n Reference plane for all output quantities: ``'ecliptic'`` (ecliptic\n and mean equinox of reference epoch), ``'earth'`` (Earth mean\n@@ -1074,10 +1054,6 @@ class HorizonsClass(BaseQuery):\n if get_query_payload:\n return request_payload\n \n- # set return_raw flag, if raw response desired\n- if get_raw_response:\n- self.return_raw = True\n-\n # query and parse\n response = self._request('GET', self.server_url, params=request_payload,\n timeout=self.TIMEOUT, cache=cache)\n", "new_path": "astroquery/jplhorizons/core.py", "old_path": "astroquery/jplhorizons/core.py" }, { "change_type": "MODIFY", "diff": "@@ -174,13 +174,6 @@ class TestHorizonsClass:\n \n assert len(res) == 32\n \n- def test_ephemerides_query_raw(self):\n- res = (jplhorizons.Horizons(id='Ceres', location='500',\n- id_type='smallbody', epochs=2451544.5).\n- ephemerides(get_raw_response=True))\n-\n- assert len(res) >= 15400\n-\n def test_elements_query(self):\n res = 
jplhorizons.Horizons(id='Ceres', location='500@10',\n id_type='smallbody',\n@@ -226,14 +219,6 @@ class TestHorizonsClass:\n [res['Omega'], res['w'], res['Tp_jd']],\n rtol=1e-3)\n \n- def test_elements_query_raw(self):\n- res = jplhorizons.Horizons(id='Ceres', location='500@10',\n- id_type='smallbody',\n- epochs=2451544.5).elements(\n- get_raw_response=True)\n-\n- assert len(res) >= 6686\n-\n def test_vectors_query(self):\n # check values of Ceres for a given epoch\n # orbital uncertainty of Ceres is basically zero\n@@ -260,14 +245,6 @@ class TestHorizonsClass:\n res['lighttime'], res['range'],\n res['range_rate']], rtol=1e-3)\n \n- def test_vectors_query_raw(self):\n- res = jplhorizons.Horizons(id='Ceres', location='500@10',\n- id_type='smallbody',\n- epochs=2451544.5).vectors(\n- get_raw_response=True)\n-\n- assert len(res) >= 6412\n-\n def test_unknownobject(self):\n with pytest.raises(ValueError):\n jplhorizons.Horizons(id='spamspamspameggsspam', location='500',\n", "new_path": "astroquery/jplhorizons/tests/test_jplhorizons_remote.py", "old_path": "astroquery/jplhorizons/tests/test_jplhorizons_remote.py" }, { "change_type": "MODIFY", "diff": "@@ -189,9 +189,7 @@ limits fragment matching (73P-B would only match 73P-B), respectively. Note\n that these options should only be used for comets and will crash the query for\n other object types. Extra precision in the queried properties can be requested\n using the ``extra_precision`` option. Furthermore, ``get_query_payload=True``\n-skips the query and only returns the query payload, whereas\n-``get_raw_response=True`` returns the raw query response instead of the astropy\n-table.\n+skips the query and only returns the query payload.\n \n :meth:`~astroquery.jplhorizons.HorizonsClass.ephemerides` queries by default all\n available quantities from the JPL Horizons servers. This might take a while. If\n@@ -243,9 +241,8 @@ absolute representation of the time of perihelion passage. For comets, the\n options ``closest_apparition`` and ``no_fragments`` are available, which select\n the closest apparition in time and reject fragments, respectively. Note that\n these options should only be used for comets and will crash the query for other\n-object types. Also available are ``get_query_payload=True``, which skips the\n-query and only returns the query payload, and ``get_raw_response=True``, which\n-returns the raw query response instead of the astropy table.\n+object types. Also available is ``get_query_payload=True``, which skips the\n+query and only returns the query payload.\n \n Vectors\n -------\n@@ -290,16 +287,15 @@ The following fields are queried:\n \n \n Similar to the other :class:`~astroquery.jplhorizons.HorizonsClass` functions,\n-optional parameters of :meth:`~astroquery.jplhorizons.HorizonsClass.vectors` are\n-``get_query_payload=True``, which skips the query and only returns the query\n-payload, and ``get_raw_response=True``, which returns the raw query response\n-instead of the astropy table. For comets, the options ``closest_apparation`` and\n-``no_fragments`` are available, which select the closest apparition in time and\n-reject fragments, respectively. Note that these options should only be used for\n-comets and will crash the query for other object types. 
Options ``aberrations``\n-and ``delta_T`` provide different choices for aberration corrections as well as\n-a measure for time-varying differences between TDB and UT time-scales,\n-respectively.\n+an optional parameter of :meth:`~astroquery.jplhorizons.HorizonsClass.vectors`\n+is ``get_query_payload=True``, which skips the query and only returns the query\n+payload for diagnostic purposes. For comets, the options ``closest_apparation``\n+and ``no_fragments`` are available, which select the closest apparition in time\n+and reject fragments, respectively. Note that these options should only be used\n+for comets and will crash the query for other object types. Options\n+``aberrations`` and ``delta_T`` provide different choices for aberration\n+corrections as well as a measure for time-varying differences between TDB and UT\n+time-scales, respectively.\n \n \n How to Use the Query Tables\n", "new_path": "docs/jplhorizons/jplhorizons.rst", "old_path": "docs/jplhorizons/jplhorizons.rst" } ]
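A short, hedged sketch of the replacement workflow the commit message names: the *_async() methods return the raw HTTP response, so the raw text is still reachable without a flag on the table-returning methods (target and epoch values here are just examples).

from astroquery.jplhorizons import Horizons

obj = Horizons(id="Ceres", location="500@10", epochs=2451544.5)

eph = obj.ephemerides()             # parsed astropy table, as before

response = obj.ephemerides_async()  # previously ephemerides(get_raw_response=True)
raw_text = response.text            # raw Horizons output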
9c974445dc9db303aa9ff2808ca27fbceffdc961
astropy/astroquery
null
null
Remove custom assert messages from `gaia` tests `pytest` produces informative assert failure messages automatically.
[ { "change_type": "MODIFY", "diff": "@@ -131,16 +131,12 @@ class TestTap:\n assert \"Missing required argument: height\" in err.value.args[0]\n \n table = mock_querier.query_object(skycoord, width=width, height=10 * u.deg)\n- assert len(table) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(table))\n+ assert len(table) == 3\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n # by radius\n table = mock_querier.query_object(skycoord, radius=1 * u.deg)\n- assert len(table) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(table))\n+ assert len(table) == 3\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n \n@@ -148,47 +144,35 @@ class TestTap:\n table = mock_querier_async.query_object_async(\n skycoord, width=12 * u.deg, height=10 * u.deg\n )\n- assert len(table) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(table))\n+ assert len(table) == 3\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n # by radius\n table = mock_querier_async.query_object_async(skycoord, radius=1 * u.deg)\n- assert len(table) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(table))\n+ assert len(table) == 3\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n \n def test_cone_search_sync(self, column_attrs, mock_querier):\n job = mock_querier.cone_search(skycoord, 1 * u.deg)\n- assert job.async_ is False, \"Expected a synchronous job\"\n- assert job.get_phase() == 'COMPLETED', \\\n- \"Wrong job phase. Expected: %s, found %s\" % \\\n- ('COMPLETED', job.get_phase())\n- assert job.failed is False, \"Wrong job status (set Failed = True)\"\n+ assert job.async_ is False\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n # results\n results = job.get_results()\n- assert len(results) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(results))\n+ assert len(results) == 3\n for colname, attrs in column_attrs.items():\n assert results[colname].attrs_equal(attrs)\n \n def test_cone_search_async(self, column_attrs, mock_querier_async):\n radius = 1.0 * u.deg\n job = mock_querier_async.cone_search_async(skycoord, radius)\n- assert job.async_ is True, \"Expected an asynchronous job\"\n- assert job.get_phase() == 'COMPLETED', \\\n- \"Wrong job phase. Expected: %s, found %s\" % \\\n- ('COMPLETED', job.get_phase())\n- assert job.failed is False, \"Wrong job status (set Failed = True)\"\n+ assert job.async_ is True\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n # results\n results = job.get_results()\n- assert len(results) == 3, \\\n- \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n- (3, len(results))\n+ assert len(results) == 3\n for colname, attrs in column_attrs.items():\n assert results[colname].attrs_equal(attrs)\n \n@@ -318,22 +302,18 @@ class TestTap:\n full_qualified_table_name_b='schemaB.tableB',\n results_table_name='results',\n )\n- assert job.async_ is True, \"Expected an asynchronous job\"\n- assert job.get_phase() == 'COMPLETED', \\\n- \"Wrong job phase. 
Expected: %s, found %s\" % \\\n- ('COMPLETED', job.get_phase())\n- assert job.failed is False, \"Wrong job status (set Failed = True)\"\n+ assert job.async_ is True\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n job = mock_querier_async.cross_match(\n full_qualified_table_name_a='schemaA.tableA',\n full_qualified_table_name_b='schemaB.tableB',\n results_table_name='results',\n background=True,\n )\n- assert job.async_ is True, \"Expected an asynchronous job\"\n- assert job.get_phase() == 'EXECUTING', \\\n- \"Wrong job phase. Expected: %s, found %s\" % \\\n- ('EXECUTING', job.get_phase())\n- assert job.failed is False, \"Wrong job status (set Failed = True)\"\n+ assert job.async_ is True\n+ assert job.get_phase() == \"EXECUTING\"\n+ assert job.failed is False\n \n @patch.object(TapPlus, 'login')\n def test_login(self, mock_login):\n", "new_path": "astroquery/gaia/tests/test_gaiatap.py", "old_path": "astroquery/gaia/tests/test_gaiatap.py" } ]
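A tiny illustration of why the hand-built messages were redundant: pytest rewrites bare asserts and reports both sides of a failing comparison on its own.

# test_example.py -- run with: pytest test_example.py
def get_phase():
    return "EXECUTING"

def test_phase_completed():
    # On failure pytest prints: AssertionError: assert 'EXECUTING' == 'COMPLETED'
    # so no custom "Wrong job phase..." format string is needed.
    assert get_phase() == "COMPLETED"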
766b512c71e32d908bb84ba1b7b60b2b2f205437
astropy/astroquery
null
null
Remove `utils.tap.conn.TapConn.url_encode()` The removed method simply called the standard library `urllib.parse.urlencode()`.
[ { "change_type": "MODIFY", "diff": "@@ -8,6 +8,7 @@ European Space Astronomy Centre (ESAC)\n European Space Agency (ESA)\n \n \"\"\"\n+from urllib.parse import urlencode\n \n from astropy import units\n from astropy.coordinates import SkyCoord\n@@ -503,7 +504,7 @@ class ESAHubbleClass(BaseQuery):\n \n subContext = conf.EHST_TARGET_ACTION\n connHandler = self._tap._TapPlus__getconnhandler()\n- data = connHandler.url_encode(params)\n+ data = urlencode(params)\n target_response = connHandler.execute_secure(subContext, data, True)\n for line in target_response:\n target_result = json.loads(line.decode(\"utf-8\"))\n", "new_path": "astroquery/esa/hubble/core.py", "old_path": "astroquery/esa/hubble/core.py" }, { "change_type": "MODIFY", "diff": "@@ -16,6 +16,7 @@ import shutil\n import tarfile\n import zipfile\n from datetime import datetime\n+from urllib.parse import urlencode\n \n from astropy import log\n from astropy import units\n@@ -667,9 +668,8 @@ class JwstClass(BaseQuery):\n MAST token to have access to propietary data\n \"\"\"\n subContext = conf.JWST_TOKEN\n- args = {\"token\": token}\n+ data = urlencode({\"token\": token})\n connHandler = self.__jwsttap._TapPlus__getconnhandler()\n- data = connHandler.url_encode(args)\n response = connHandler.execute_secure(subContext, data, True)\n if response.status == 403:\n print(\"ERROR: MAST tokens cannot be assigned or requested by anonymous users\")\n", "new_path": "astroquery/esa/jwst/core.py", "old_path": "astroquery/esa/jwst/core.py" }, { "change_type": "MODIFY", "diff": "@@ -14,8 +14,6 @@ Created on 30 jun. 2016\n \n \n \"\"\"\n-from urllib.parse import urlencode\n-\n CONTENT_TYPE_POST_DEFAULT = \"application/x-www-form-urlencoded\"\n \n \n@@ -320,6 +318,3 @@ class DummyTapHandler:\n self.__invokedMethod = 'is_valid_user'\n self.__parameters['user_id'] = user_id\n self.__parameters['verbose'] = verbose\n-\n- def url_encode(self, data):\n- return urlencode(data)\n", "new_path": "astroquery/gaia/tests/DummyTapHandler.py", "old_path": "astroquery/gaia/tests/DummyTapHandler.py" }, { "change_type": "MODIFY", "diff": "@@ -24,8 +24,6 @@ except ImportError:\n import mimetypes\r\n import time\r\n \r\n-from urllib.parse import urlencode\r\n-\r\n from astroquery.utils.tap.xmlparser import utils\r\n from astroquery.utils.tap import taputils\r\n from astroquery import version\r\n@@ -474,16 +472,6 @@ class TapConn:\n \"\"\"\r\n return self.__currentReason\r\n \r\n- def url_encode(self, data):\r\n- \"\"\"Encodes the provided dictionary\r\n-\r\n- Parameters\r\n- ----------\r\n- data : dictionary, mandatory\r\n- dictionary to be encoded\r\n- \"\"\"\r\n- return urlencode(data)\r\n-\r\n def find_header(self, headers, key):\r\n \"\"\"Searches for the specified keyword\r\n \r\n", "new_path": "astroquery/utils/tap/conn/tapconn.py", "old_path": "astroquery/utils/tap/conn/tapconn.py" }, { "change_type": "MODIFY", "diff": "@@ -16,8 +16,6 @@ Created on 30 jun. 2016\n \"\"\"\n from astroquery.utils.tap import taputils\n \n-from urllib.parse import urlencode\n-\n import requests\n \n \n@@ -147,9 +145,6 @@ class DummyConnHandler:\n else:\n return isError\n \n- def url_encode(self, data):\n- return urlencode(data)\n-\n def get_suitable_extension(self, headers):\n return self.fileExt\n \n", "new_path": "astroquery/utils/tap/conn/tests/DummyConnHandler.py", "old_path": "astroquery/utils/tap/conn/tests/DummyConnHandler.py" }, { "change_type": "MODIFY", "diff": "@@ -13,6 +13,8 @@ European Space Agency (ESA)\n Created on 30 jun. 2016\r\n Modified on 1 jun. 
2021 by mhsarmiento\r\n \"\"\"\r\n+from urllib.parse import urlencode\n+\n from astroquery.utils.tap import taputils\r\n from astroquery.utils.tap.conn.tapconn import TapConn\r\n from astroquery.utils.tap.xmlparser.tableSaxParser import TableSaxParser\r\n@@ -554,7 +556,7 @@ class Tap:\n return jobs\r\n \r\n def __appendData(self, args):\r\n- data = self.__connHandler.url_encode(args)\r\n+ data = urlencode(args)\n result = \"\"\r\n firtsTime = True\r\n for k in data:\r\n@@ -633,9 +635,8 @@ class Tap:\n args['PHASE'] = 'RUN'\r\n if name is not None:\r\n args['jobname'] = name\r\n- data = self.__connHandler.url_encode(args)\r\n response = self.__connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n+ data=urlencode(args),\n verbose=verbose)\r\n if verbose:\r\n print(response.status, response.reason)\r\n@@ -847,7 +848,7 @@ class TapPlus(Tap):\n connHandler = self.__getconnhandler()\r\n if not isinstance(params_dict, dict):\r\n raise ValueError(\"Parameters dictionary expected\")\r\n- data = connHandler.url_encode(params_dict)\r\n+ data = urlencode(params_dict)\n if verbose:\r\n print(f\"Data request: {data}\")\r\n response = connHandler.execute_datapost(data=data, verbose=verbose)\r\n@@ -1371,8 +1372,7 @@ class TapPlus(Tap):\n \"username\": usr,\r\n \"password\": pwd}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_secure(subContext, data, verbose)\r\n+ response = connHandler.execute_secure(subContext, urlencode(args), verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1568,8 +1568,7 @@ class TapPlus(Tap):\n \"DELETE\": \"TRUE\",\r\n \"FORCE_REMOVAL\": \"FALSE\"}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_upload(data, verbose=verbose)\r\n+ response = connHandler.execute_upload(urlencode(args), verbose=verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1620,8 +1619,7 @@ class TapPlus(Tap):\n args = self.get_args_4_rename_table(table_name, table_name, new_column_names_dict)\r\n \r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_tool(data, verbose=verbose)\r\n+ response = connHandler.execute_table_tool(urlencode(args), verbose=verbose)\n \r\n if verbose:\r\n print(response.status, response.reason)\r\n@@ -1724,8 +1722,7 @@ class TapPlus(Tap):\n list_of_changes)\r\n \r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_edit(data, verbose=verbose)\r\n+ response = connHandler.execute_table_edit(urlencode(args), verbose=verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1897,8 +1894,7 @@ class TapPlus(Tap):\n \"DEC\": str(dec_column_name),\r\n }\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_edit(data, verbose=verbose)\r\n+ response = connHandler.execute_table_edit(urlencode(args), verbose=verbose)\n isError = connHandler.check_launch_response_status(response,\r\n verbose,\r\n 200)\r\n@@ -1995,10 +1991,8 @@ class TapPlus(Tap):\n flag to display information about the process\r\n \"\"\"\r\n subContext = \"logout\"\r\n- args = {}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = 
connHandler.execute_secure(subContext, data)\r\n+ response = connHandler.execute_secure(subContext, \"\")\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n", "new_path": "astroquery/utils/tap/core.py", "old_path": "astroquery/utils/tap/core.py" }, { "change_type": "MODIFY", "diff": "@@ -16,6 +16,7 @@ Created on 30 jun. 2016\n \"\"\"\r\n \r\n import time\r\n+from urllib.parse import urlencode\n \r\n from astroquery.utils.tap.model import modelutils\r\n from astroquery.utils.tap.xmlparser import utils\r\n@@ -110,12 +111,9 @@ class Job:\n def __change_phase(self, phase, verbose=False):\r\n if self._phase == 'PENDING':\r\n context = f\"async/{self.jobid}/phase\"\r\n- args = {\r\n- \"PHASE\": str(phase)}\r\n- data = self.connHandler.url_encode(args)\r\n- response = self.connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n- verbose=verbose)\r\n+ response = self.connHandler.execute_tappost(\n+ subcontext=context, data=urlencode({\"PHASE\": phase}), verbose=verbose\n+ )\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -150,11 +148,8 @@ class Job:\n if self._phase == 'PENDING':\r\n # send post parameter/value\r\n context = f\"async/{self.jobid}\"\r\n- args = {\r\n- name: str(value)}\r\n- data = self.connHandler.url_encode(args)\r\n response = self.connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n+ data=urlencode({name: value}),\n verbose=verbose)\r\n if verbose:\r\n print(response.status, response.reason)\r\n", "new_path": "astroquery/utils/tap/model/job.py", "old_path": "astroquery/utils/tap/model/job.py" }, { "change_type": "MODIFY", "diff": "@@ -14,6 +14,7 @@ Created on 30 jun. 2016\n \"\"\"\n import os\n from unittest.mock import patch\n+from urllib.parse import quote_plus, urlencode\n \n import numpy as np\n import pytest\n@@ -156,17 +157,13 @@ def test_launch_sync_job():\n jobData = utils.read_file_content(jobDataFile)\n responseLaunchJob.set_data(method='POST', body=jobData)\n query = 'select top 5 * from table'\n- dTmp = {\"q\": query}\n- dTmpEncoded = connHandler.url_encode(dTmp)\n- p = dTmpEncoded.find(\"=\")\n- q = dTmpEncoded[p + 1:]\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n \"LANG\": \"ADQL\",\n \"FORMAT\": \"votable\",\n \"tapclient\": str(tap.tap_client_id),\n \"PHASE\": \"RUN\",\n- \"QUERY\": str(q)}\n+ \"QUERY\": quote_plus(query)}\n sortedKey = taputils.taputil_create_sorted_dict_key(dictTmp)\n jobRequest = f\"sync?{sortedKey}\"\n connHandler.set_response(jobRequest, responseLaunchJob)\n@@ -220,17 +217,13 @@ def test_launch_sync_job_redirect():\n ]\n responseLaunchJob.set_data(method='POST')\n query = 'select top 5 * from table'\n- dTmp = {\"q\": query}\n- dTmpEncoded = connHandler.url_encode(dTmp)\n- p = dTmpEncoded.find(\"=\")\n- q = dTmpEncoded[p + 1:]\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n \"LANG\": \"ADQL\",\n \"FORMAT\": \"votable\",\n \"tapclient\": str(tap.tap_client_id),\n \"PHASE\": \"RUN\",\n- \"QUERY\": str(q)}\n+ \"QUERY\": quote_plus(query)}\n sortedKey = taputils.taputil_create_sorted_dict_key(dictTmp)\n jobRequest = f\"sync?{sortedKey}\"\n connHandler.set_response(jobRequest, responseLaunchJob)\n@@ -844,9 +837,7 @@ def test_rename_table():\n \"new_table_name\": newTableName,\n \"table_name\": tableName,\n }\n- data = connHandler.url_encode(dictArgs)\n- req = f\"TableTool?{data}\"\n- connHandler.set_response(req, responseRenameTable)\n+ connHandler.set_response(f\"TableTool?{urlencode(dictArgs)}\", responseRenameTable)\n 
tap.rename_table(table_name=tableName, new_table_name=newTableName, new_column_names_dict=newColumnNames)\n \n \n", "new_path": "astroquery/utils/tap/tests/test_tap.py", "old_path": "astroquery/utils/tap/tests/test_tap.py" } ]
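The astroquery change above is a pure delegation removal: `TapConn.url_encode()` added nothing over the standard library, so every call site swaps `connHandler.url_encode(args)` for `urllib.parse.urlencode(args)`. A minimal standalone sketch of the before/after equivalence (the class name is illustrative, not the real `TapConn`):

```python
from urllib.parse import urlencode

# Before: call sites went through a wrapper method on the connection
# handler, which only delegated to the standard library.
class ConnHandler:
    def url_encode(self, data):
        return urlencode(data)

# After: call sites encode the parameters directly.
params = {"token": "abc123", "PHASE": "RUN"}
assert ConnHandler().url_encode(params) == urlencode(params)
print(urlencode(params))  # token=abc123&PHASE=RUN
```

One edge case visible in the diff: the anonymous `logout` call used to encode an empty dict, and `urlencode({})` is just `""`, which is why that call site now passes the empty string directly.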
45708a41dd24384b99cac6af7c7e805613067654
cleverhans-lab/cleverhans
null
null
Support for the tensorflow Dataset API. Using this API, training with data augmentation (shift and mirror) performs at the same speed as training without it. Without this API and the parallelism that comes with it, the runtime degradation is 15%.
[ { "change_type": "MODIFY", "diff": "@@ -16,6 +16,7 @@ import struct\n import tempfile\n import sys\n import numpy as np\n+import tensorflow as tf\n \n from cleverhans import utils\n \n@@ -49,6 +50,22 @@ class Dataset(object):\n return (getattr(self, 'x_' + which_set),\n getattr(self, 'y_' + which_set))\n \n+ def to_tensorflow(self):\n+ raise NotImplementedError()\n+\n+ @classmethod\n+ def in_memory_dataset(cls, x, y, shuffle=None, repeat=True):\n+ assert x.shape[0] == y.shape[0]\n+ d = tf.data.Dataset.range(x.shape[0])\n+ if repeat:\n+ d = d.repeat()\n+ if shuffle:\n+ d = d.shuffle(shuffle)\n+ def lookup(p):\n+ return x[p], y[p]\n+ d = d.map(lambda i: tf.py_func(lookup, [i], [tf.float32] * 2))\n+ return d\n+\n \n class MNIST(Dataset):\n \"\"\"The MNIST dataset\"\"\"\n@@ -72,6 +89,10 @@ class MNIST(Dataset):\n self.x_test = x_test.astype('float32')\n self.y_test = y_test.astype('float32')\n \n+ def to_tensorflow(self, shuffle=4096):\n+ return (self.in_memory_dataset(self.x_train, self.y_train, shuffle),\n+ self.in_memory_dataset(self.x_test, self.y_test, repeat=False))\n+\n \n class CIFAR10(Dataset):\n \"\"\"The CIFAR-10 dataset\"\"\"\n@@ -96,6 +117,11 @@ class CIFAR10(Dataset):\n self.x_test = x_test\n self.y_test = y_test\n \n+ def to_tensorflow(self, shuffle=4096):\n+ # This is much more efficient with data augmentation, see tutorials.\n+ return (self.in_memory_dataset(self.x_train, self.y_train, shuffle),\n+ self.in_memory_dataset(self.x_test, self.y_test, repeat=False))\n+\n \n class Factory(object):\n \"\"\"\n", "new_path": "cleverhans/dataset.py", "old_path": "cleverhans/dataset.py" }, { "change_type": "MODIFY", "diff": "@@ -18,6 +18,7 @@ import logging\n import os\n import time\n \n+import math\n import numpy as np\n from six.moves import xrange\n import tensorflow as tf\n@@ -37,13 +38,13 @@ def train(sess, loss, x_train, y_train,\n rng=None, var_list=None, fprop_args=None, optimizer=None,\n devices=None, x_batch_preprocessor=None, use_ema=False,\n ema_decay=.998, run_canary=True,\n- loss_threshold=1e5):\n+ loss_threshold=1e5, dataset_train=None, dataset_size=None):\n \"\"\"\n Run (optionally multi-replica, synchronous) training to minimize `loss`\n :param sess: TF session to use when training the graph\n :param loss: tensor, the loss to minimize\n- :param x_train: numpy array with training inputs\n- :param y_train: numpy array with training outputs\n+ :param x_train: numpy array with training inputs or tf Dataset\n+ :param y_train: numpy array with training outputs or tf Dataset\n :param init_all: (boolean) If set to true, all TF variables in the session\n are (re)initialized, otherwise only previously\n uninitialized variables are initialized before training.\n@@ -84,6 +85,9 @@ def train(sess, loss, x_train, y_train,\n This is intended to rapidly detect numerical problems.\n Sometimes the loss may legitimately be higher than this value. In\n such cases, raise the value. 
If needed it can be np.inf.\n+ :param dataset_train: tf Dataset instance.\n+ Used as a replacement for x_train, y_train for faster performance.\n+ :param dataset_size: integer, the size of the dataset_train.\n :return: True if model trained\n \"\"\"\n args = _ArgsWrapper(args or {})\n@@ -110,12 +114,18 @@ def train(sess, loss, x_train, y_train,\n xs = []\n preprocessed_xs = []\n ys = []\n+ if dataset_train is not None:\n+ assert x_train is None and y_train is None and x_batch_preprocessor is None\n+ if dataset_size is None:\n+ raise ValueError(\"You must provide a dataset size\")\n+ data_iterator = dataset_train.make_one_shot_iterator().get_next()\n+ x_train, y_train = sess.run(data_iterator)\n \n devices = infer_devices(devices)\n for device in devices:\n with tf.device(device):\n x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])\n- y = tf.placeholder(x_train.dtype, (None,) + y_train.shape[1:])\n+ y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])\n xs.append(x)\n ys.append(y)\n \n@@ -218,37 +228,43 @@ def train(sess, loss, x_train, y_train,\n quit()\n \n for epoch in xrange(args.nb_epochs):\n- # Indices to shuffle training set\n- index_shuf = list(range(len(x_train)))\n- # Randomly repeat a few training examples each epoch to avoid\n- # having a too-small batch\n- while len(index_shuf) % batch_size != 0:\n- index_shuf.append(rng.randint(len(x_train)))\n- nb_batches = len(index_shuf) // batch_size\n- rng.shuffle(index_shuf)\n- # Shuffling here versus inside the loop doesn't seem to affect\n- # timing very much, but shuffling here makes the code slightly\n- # easier to read\n- x_train_shuffled = x_train[index_shuf]\n- y_train_shuffled = y_train[index_shuf]\n+ if dataset_train is not None:\n+ nb_batches = int(math.ceil(float(dataset_size) / batch_size))\n+ else:\n+ # Indices to shuffle training set\n+ index_shuf = list(range(len(x_train)))\n+ # Randomly repeat a few training examples each epoch to avoid\n+ # having a too-small batch\n+ while len(index_shuf) % batch_size != 0:\n+ index_shuf.append(rng.randint(len(x_train)))\n+ nb_batches = len(index_shuf) // batch_size\n+ rng.shuffle(index_shuf)\n+ # Shuffling here versus inside the loop doesn't seem to affect\n+ # timing very much, but shuffling here makes the code slightly\n+ # easier to read\n+ x_train_shuffled = x_train[index_shuf]\n+ y_train_shuffled = y_train[index_shuf]\n \n prev = time.time()\n for batch in range(nb_batches):\n+ if dataset_train is not None:\n+ x_train_shuffled, y_train_shuffled = sess.run(data_iterator)\n+ start, end = 0, batch_size\n+ else:\n+ # Compute batch start and end indices\n+ start = batch * batch_size\n+ end = (batch + 1) * batch_size\n+ # Perform one training step\n+ diff = end - start\n+ assert diff == batch_size\n \n- # Compute batch start and end indices\n- start = batch * batch_size\n- end = (batch + 1) * batch_size\n-\n- # Perform one training step\n feed_dict = {epoch_tf: epoch, batch_tf: batch}\n- diff = end - start\n- assert diff == batch_size\n for dev_idx in xrange(num_devices):\n cur_start = start + dev_idx * device_batch_size\n cur_end = start + (dev_idx + 1) * device_batch_size\n feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]\n feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]\n- if cur_end != end:\n+ if cur_end != end and dataset_train is None:\n msg = (\"batch_size (%d) must be a multiple of num_devices \"\n \"(%d).\\nCUDA_VISIBLE_DEVICES: %s\"\n \"\\ndevices: %s\")\n@@ -266,7 +282,8 @@ def train(sess, loss, x_train, y_train,\n raise 
ValueError(\"Extreme loss during training: \", loss_numpy)\n if np.isnan(loss_numpy) or np.isinf(loss_numpy):\n raise ValueError(\"NaN/Inf loss during training\")\n- assert end == len(index_shuf) # Check that all examples were used\n+ assert (dataset_train is not None or\n+ end == len(index_shuf)) # Check that all examples were used\n cur = time.time()\n _logger.info(\"Epoch \" + str(epoch) + \" took \" +\n str(cur - prev) + \" seconds\")\n", "new_path": "cleverhans/train.py", "old_path": "cleverhans/train.py" }, { "change_type": "MODIFY", "diff": "@@ -1,8 +1,6 @@\n \"\"\"\n This tutorial shows how to generate adversarial examples using FGSM\n and train a model using adversarial training with TensorFlow.\n-It is very similar to mnist_tutorial_keras_tf.py, which does the same\n-thing but with a dependence on keras.\n The original paper can be found at:\n https://arxiv.org/abs/1412.6572\n \"\"\"\n@@ -17,6 +15,7 @@ import tensorflow as tf\n from tensorflow.python.platform import flags\n \n from cleverhans.attacks import FastGradientMethod\n+from cleverhans.augmentation import random_horizontal_flip, random_shift\n from cleverhans.dataset import CIFAR10\n from cleverhans.loss import CrossEntropy\n from cleverhans.model_zoo.all_convolutional import ModelAllConvolutional\n@@ -81,12 +80,18 @@ def cifar10_tutorial(train_start=0, train_end=60000, test_start=0,\n # Get CIFAR10 data\n data = CIFAR10(train_start=train_start, train_end=train_end,\n test_start=test_start, test_end=test_end)\n+ dataset_size = data.x_train.shape[0]\n+ dataset_train = data.to_tensorflow()[0]\n+ dataset_train = dataset_train.map(\n+ lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)\n+ dataset_train = dataset_train.batch(batch_size)\n+ dataset_train = dataset_train.prefetch(16)\n x_train, y_train = data.get_set('train')\n x_test, y_test = data.get_set('test')\n \n # Use Image Parameters\n- img_rows, img_cols, nchannels = x_train.shape[1:4]\n- nb_classes = y_train.shape[1]\n+ img_rows, img_cols, nchannels = x_test.shape[1:4]\n+ nb_classes = y_test.shape[1]\n \n # Define input TF placeholder\n x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,\n@@ -128,8 +133,10 @@ def cifar10_tutorial(train_start=0, train_end=60000, test_start=0,\n def evaluate():\n do_eval(preds, x_test, y_test, 'clean_train_clean_eval', False)\n \n- train(sess, loss, x_train, y_train, evaluate=evaluate,\n- args=train_params, rng=rng, var_list=model.get_params())\n+ train(sess, loss, None, None,\n+ dataset_train=dataset_train, dataset_size=dataset_size,\n+ evaluate=evaluate, args=train_params, rng=rng,\n+ var_list=model.get_params())\n \n # Calculate training error\n if testing:\n@@ -179,8 +186,10 @@ def cifar10_tutorial(train_start=0, train_end=60000, test_start=0,\n do_eval(preds2_adv, x_test, y_test, 'adv_train_adv_eval', True)\n \n # Perform and evaluate adversarial training\n- train(sess, loss2, x_train, y_train, evaluate=evaluate2,\n- args=train_params, rng=rng, var_list=model2.get_params())\n+ train(sess, loss2, None, None,\n+ dataset_train=dataset_train, dataset_size=dataset_size,\n+ evaluate=evaluate2, args=train_params, rng=rng,\n+ var_list=model2.get_params())\n \n # Calculate training errors\n if testing:\n", "new_path": "cleverhans_tutorials/cifar10_tutorial_tf.py", "old_path": "cleverhans_tutorials/cifar10_tutorial_tf.py" } ]
5bc9311a8d92705371f880331ed751b9abb9ee68
marshmallow-code/webargs
null
null
Rename and deprecate argmap2scheme in favor of dict2schema There's nothing webargs-specific about the function, so the more general "dict2schema" is more appropriate
[ { "change_type": "MODIFY", "diff": "@@ -1,6 +1,12 @@\n Changelog\n ---------\n \n+4.4.0 (unreleased)\n+******************\n+\n+* *Deprecation*: ``argmap2schema`` is deprecated in favor of\n+ ``dict2schema``.\n+\n 4.3.1 (2018-12-31)\n ******************\n \n", "new_path": "CHANGELOG.rst", "old_path": "CHANGELOG.rst" }, { "change_type": "MODIFY", "diff": "@@ -14,6 +14,7 @@ from webargs import fields, missing, ValidationError\n from webargs.core import (\n Parser,\n get_value,\n+ dict2schema,\n argmap2schema,\n is_json,\n get_mimetype,\n@@ -885,7 +886,7 @@ def test_use_kwargs_force_all_false(web_request, parser):\n \n def test_delimited_list_default_delimiter(web_request, parser):\n web_request.json = {\"ids\": \"1,2,3\"}\n- schema_cls = argmap2schema({\"ids\": fields.DelimitedList(fields.Int())})\n+ schema_cls = dict2schema({\"ids\": fields.DelimitedList(fields.Int())})\n schema = schema_cls()\n \n parsed = parser.parse(schema, web_request)\n@@ -898,7 +899,7 @@ def test_delimited_list_default_delimiter(web_request, parser):\n \n def test_delimited_list_as_string(web_request, parser):\n web_request.json = {\"ids\": \"1,2,3\"}\n- schema_cls = argmap2schema(\n+ schema_cls = dict2schema(\n {\"ids\": fields.DelimitedList(fields.Int(), as_string=True)}\n )\n schema = schema_cls()\n@@ -913,7 +914,7 @@ def test_delimited_list_as_string(web_request, parser):\n \n def test_delimited_list_as_string_v2(web_request, parser):\n web_request.json = {\"dates\": \"2018-11-01,2018-11-02\"}\n- schema_cls = argmap2schema(\n+ schema_cls = dict2schema(\n {\n \"dates\": fields.DelimitedList(\n fields.DateTime(format=\"%Y-%m-%d\"), as_string=True\n@@ -935,9 +936,7 @@ def test_delimited_list_as_string_v2(web_request, parser):\n \n def test_delimited_list_custom_delimiter(web_request, parser):\n web_request.json = {\"ids\": \"1|2|3\"}\n- schema_cls = argmap2schema(\n- {\"ids\": fields.DelimitedList(fields.Int(), delimiter=\"|\")}\n- )\n+ schema_cls = dict2schema({\"ids\": fields.DelimitedList(fields.Int(), delimiter=\"|\")})\n schema = schema_cls()\n \n parsed = parser.parse(schema, web_request)\n@@ -946,7 +945,7 @@ def test_delimited_list_custom_delimiter(web_request, parser):\n \n def test_delimited_list_load_list(web_request, parser):\n web_request.json = {\"ids\": [1, 2, 3]}\n- schema_cls = argmap2schema({\"ids\": fields.DelimitedList(fields.Int())})\n+ schema_cls = dict2schema({\"ids\": fields.DelimitedList(fields.Int())})\n schema = schema_cls()\n \n parsed = parser.parse(schema, web_request)\n@@ -956,7 +955,7 @@ def test_delimited_list_load_list(web_request, parser):\n # Regresion test for https://github.com/marshmallow-code/webargs/issues/149\n def test_delimited_list_passed_invalid_type(web_request, parser):\n web_request.json = {\"ids\": 1}\n- schema_cls = argmap2schema({\"ids\": fields.DelimitedList(fields.Int())})\n+ schema_cls = dict2schema({\"ids\": fields.DelimitedList(fields.Int())})\n schema = schema_cls()\n \n with pytest.raises(ValidationError) as excinfo:\n@@ -1041,7 +1040,7 @@ def test_parse_raises_validation_error_if_data_invalid(web_request, parser):\n parser.parse(args, web_request)\n \n \n-def test_argmap2schema():\n+def test_dict2schema():\n data_key_kwargs = {\n \"load_from\" if (MARSHMALLOW_VERSION_INFO[0] < 3) else \"data_key\": \"content-type\"\n }\n@@ -1052,7 +1051,7 @@ def test_argmap2schema():\n \"content_type\": fields.Str(**data_key_kwargs),\n }\n \n- schema_cls = argmap2schema(argmap)\n+ schema_cls = dict2schema(argmap)\n assert issubclass(schema_cls, Schema)\n \n schema = 
schema_cls()\n@@ -1065,10 +1064,7 @@ def test_argmap2schema():\n \n \n # Regression test for https://github.com/marshmallow-code/webargs/issues/101\n-@pytest.mark.skipif(\n- MARSHMALLOW_VERSION_INFO < (2, 7, 1), reason=\"will only work on marshmallow>=2.7.1\"\n-)\n-def test_argmap2schema_doesnt_add_to_class_registry():\n+def test_dict2schema_doesnt_add_to_class_registry():\n old_n_entries = len(\n list(\n itertools.chain(\n@@ -1077,8 +1073,8 @@ def test_argmap2schema_doesnt_add_to_class_registry():\n )\n )\n argmap = {\"id\": fields.Field()}\n- argmap2schema(argmap)\n- argmap2schema(argmap)\n+ dict2schema(argmap)\n+ dict2schema(argmap)\n new_n_entries = len(\n list(\n itertools.chain(\n@@ -1089,9 +1085,9 @@ def test_argmap2schema_doesnt_add_to_class_registry():\n assert new_n_entries == old_n_entries\n \n \n-def test_argmap2schema_with_nesting():\n+def test_dict2schema_with_nesting():\n argmap = {\"nest\": fields.Nested({\"foo\": fields.Field()})}\n- schema_cls = argmap2schema(argmap)\n+ schema_cls = dict2schema(argmap)\n assert issubclass(schema_cls, Schema)\n schema = schema_cls()\n assert \"nest\" in schema.fields\n@@ -1099,6 +1095,11 @@ def test_argmap2schema_with_nesting():\n assert \"foo\" in schema.fields[\"nest\"].schema.fields\n \n \n+def test_argmap2schema_is_deprecated():\n+ with pytest.warns(DeprecationWarning):\n+ argmap2schema({\"arg\": fields.Str()})\n+\n+\n def test_is_json():\n assert is_json(None) is False\n assert is_json(\"application/json\") is True\n", "new_path": "tests/test_core.py", "old_path": "tests/test_core.py" }, { "change_type": "MODIFY", "diff": "@@ -4,7 +4,7 @@ from marshmallow.utils import missing\n # Make marshmallow's validation functions importable from webargs\n from marshmallow import validate\n \n-from webargs.core import argmap2schema, WebargsError, ValidationError\n+from webargs.core import argmap2schema, dict2schema, WebargsError, ValidationError\n from webargs import fields\n \n __version__ = \"4.3.1\"\n@@ -13,6 +13,7 @@ __license__ = \"MIT\"\n \n \n __all__ = (\n+ \"dict2schema\",\n \"argmap2schema\",\n \"WebargsError\",\n \"ValidationError\",\n", "new_path": "webargs/__init__.py", "old_path": "webargs/__init__.py" }, { "change_type": "MODIFY", "diff": "@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)\n __all__ = [\n \"WebargsError\",\n \"ValidationError\",\n- \"argmap2schema\",\n+ \"dict2schema\",\n \"is_multiple\",\n \"Parser\",\n \"get_value\",\n@@ -137,11 +137,11 @@ def fill_in_missing_args(ret, argmap):\n return ret\n \n \n-def argmap2schema(argmap):\n- \"\"\"Generate a `marshmallow.Schema` class given a dictionary of argument\n- names to `Fields <marshmallow.fields.Field>`.\n+def dict2schema(dct):\n+ \"\"\"Generate a `marshmallow.Schema` class given a dictionary of\n+ `Fields <marshmallow.fields.Field>`.\n \"\"\"\n- attrs = argmap.copy()\n+ attrs = dct.copy()\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n \n class Meta(object):\n@@ -151,6 +151,14 @@ def argmap2schema(argmap):\n return type(str(\"\"), (ma.Schema,), attrs)\n \n \n+def argmap2schema(argmap):\n+ warnings.warn(\n+ \"argmap2schema is deprecated. 
Use dict2schema instead.\",\n+ RemovedInWebargs5Warning,\n+ )\n+ return dict2schema(argmap)\n+\n+\n def is_multiple(field):\n \"\"\"Return whether or not `field` handles repeated/multi-value arguments.\"\"\"\n return isinstance(field, ma.fields.List) and not hasattr(field, \"delimiter\")\n@@ -423,7 +431,7 @@ class Parser(object):\n elif callable(argmap):\n schema = argmap(req)\n else:\n- schema = argmap2schema(argmap)()\n+ schema = dict2schema(argmap)()\n if MARSHMALLOW_VERSION_INFO[0] < 3 and not schema.strict:\n warnings.warn(\n \"It is highly recommended that you set strict=True on your schema \"\n@@ -555,7 +563,7 @@ class Parser(object):\n # Optimization: If argmap is passed as a dictionary, we only need\n # to generate a Schema once\n if isinstance(argmap, collections.Mapping):\n- argmap = argmap2schema(argmap)()\n+ argmap = dict2schema(argmap)()\n \n def decorator(func):\n req_ = request_obj\n", "new_path": "webargs/core.py", "old_path": "webargs/core.py" } ]
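The webargs commit is a textbook rename-with-deprecation: the implementation moves to the general name and the old name survives one release cycle as a warning wrapper. A self-contained sketch of the pattern, with a plain `DeprecationWarning` and a bare `object` base standing in for the commit's `RemovedInWebargs5Warning` and `ma.Schema`:

```python
import warnings

def dict2schema(dct):
    """Generate a class from a dictionary of attributes (new, general name)."""
    attrs = dct.copy()
    return type(str(""), (object,), attrs)

def argmap2schema(argmap):
    """Old name, kept only as a deprecation shim."""
    warnings.warn(
        "argmap2schema is deprecated. Use dict2schema instead.",
        DeprecationWarning,
    )
    return dict2schema(argmap)

# The commit's regression test follows the same shape: calling the old
# name must emit the warning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    argmap2schema({"arg": object()})
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```

Keeping both names exported from `webargs/__init__.py`, as the diff does, means downstream imports keep working while the warning steers callers toward `dict2schema`.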