Dataset column summary (column, dtype, min, max):
  Unnamed: 0   int64           0     2.93k
  code         stringlengths   101   62.2k
  docs         stringlengths   51    10.7k
  doc_len      int64           4     1.74k
  words        int64           4     4.82k
  lang         stringclasses   1 value
  prompt       stringlengths   320   71.2k
600
def get_model_dir(cfg): for key in cfg.keys(): if type(cfg[key]) == dict and \ ("enable" in cfg[key].keys() and cfg[key]['enable'] or "enable" not in cfg[key].keys()): if "model_dir" in cfg[key].keys(): model_dir = cfg[key]["model_dir"] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key]["model_dir"] = model_dir print(key, " model dir: ", model_dir) elif key == "VEHICLE_PLATE": det_model_dir = cfg[key]["det_model_dir"] downloaded_det_model_dir = auto_download_model(det_model_dir) if downloaded_det_model_dir: det_model_dir = downloaded_det_model_dir cfg[key]["det_model_dir"] = det_model_dir print("det_model_dir model dir: ", det_model_dir) rec_model_dir = cfg[key]["rec_model_dir"] downloaded_rec_model_dir = auto_download_model(rec_model_dir) if downloaded_rec_model_dir: rec_model_dir = downloaded_rec_model_dir cfg[key]["rec_model_dir"] = rec_model_dir print("rec_model_dir model dir: ", rec_model_dir) elif key == "MOT": # for idbased and skeletonbased actions model_dir = cfg[key]["model_dir"] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key]["model_dir"] = model_dir print("mot_model_dir model_dir: ", model_dir)
Auto download inference model if the model_path is a url link. Otherwise it will use the model_path directly.
18
116
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_model_dir(cfg): for key in cfg.keys(): if type(cfg[key]) == dict and \ ("enable" in cfg[key].keys() and cfg[key]['enable'] or "enable" not in cfg[key].keys()): if "model_dir" in cfg[key].keys(): model_dir = cfg[key]["model_dir"] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key]["model_dir"] = model_dir print(key, " model dir: ", model_dir) elif key == "VEHICLE_PLATE": det_model_dir = cfg[key]["det_model_dir"] downloaded_det_model_dir = auto_download_model(det_model_dir) if downloaded_det_model_dir: det_model_dir = downloaded_det_model_dir cfg[key]["det_model_dir"] = det_model_dir print("det_model_dir model dir: ", det_model_dir) rec_model_dir = cfg[key]["rec_model_dir"] downloaded_rec_model_dir = auto_download_model(rec_model_dir) if downloaded_rec_model_dir: rec_model_dir = downloaded_rec_model_dir cfg[key]["rec_model_dir"] = rec_model_dir print("rec_model_dir model dir: ", rec_model_dir) elif key == "MOT": # for idbased and skeletonbased actions model_dir = cfg[key]["model_dir"] downloaded_model_dir = auto_download_model(model_dir) if downloaded_model_dir: model_dir = downloaded_model_dir cfg[key]["model_dir"] = model_dir print("mot_model_dir model_dir: ", model_dir) ``` ###Assistant : Auto download inference model if the model_path is a url link. Otherwise it will use the model_path directly.
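For orientation, a minimal self-contained sketch of the config shape this helper walks. The section names and the `auto_download_model` stub below are hypothetical stand-ins for illustration, not the original PaddleDetection pipeline code.
```Python
def auto_download_model(model_dir):
    # Hypothetical stub: pretend URLs resolve to a local cache path,
    # while existing local paths return None (use as-is).
    if model_dir.startswith("http"):
        return "/tmp/models/" + model_dir.rstrip("/").split("/")[-1]
    return None

cfg = {
    "DET": {"enable": True, "model_dir": "https://example.com/det_model.tar"},
    "REID": {"enable": False, "model_dir": "./local_reid_model"},
}

for key, section in cfg.items():
    if isinstance(section, dict) and section.get("enable", True) and "model_dir" in section:
        downloaded = auto_download_model(section["model_dir"])
        if downloaded:
            section["model_dir"] = downloaded
        print(key, " model dir: ", section["model_dir"])
```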
601
def component(self, x, y): if x >= 0 and x < self.__height and y >= 0 and y < self.__width: return self.__matrix[x][y] else: raise Exception("changeComponent: indices out of bounds")
returns the specified (x,y) component
5
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def component(self, x, y): if x >= 0 and x < self.__height and y >= 0 and y < self.__width: return self.__matrix[x][y] else: raise Exception("changeComponent: indices out of bounds") ``` ###Assistant : returns the specified (x,y) component
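The method refers to private attributes of a matrix class that is not shown; below is a guessed, self-contained sketch of such a class for context (attribute names inferred from the method body, not the original source).
```Python
class Matrix:
    def __init__(self, rows):
        self.__matrix = rows
        self.__height = len(rows)
        self.__width = len(rows[0]) if rows else 0

    def component(self, x, y):
        # Return the (x, y) entry, or raise if the indices fall outside the matrix.
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        raise Exception("changeComponent: indices out of bounds")


m = Matrix([[1, 2], [3, 4]])
print(m.component(1, 0))  # prints 3
```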
602
def toggle(self, all=None, ticks=None, ticklabels=None, label=None): if all: _ticks, _ticklabels, _label = True, True, True elif all is not None: _ticks, _ticklabels, _label = False, False, False else: _ticks, _ticklabels, _label = None, None, None if ticks is not None: _ticks = ticks if ticklabels is not None: _ticklabels = ticklabels if label is not None: _label = label if _ticks is not None: self.major_ticks.set_visible(_ticks) self.minor_ticks.set_visible(_ticks) if _ticklabels is not None: self.major_ticklabels.set_visible(_ticklabels) self.minor_ticklabels.set_visible(_ticklabels) if _label is not None: self.label.set_visible(_label)
Toggle visibility of ticks, ticklabels, and (axis) label. To turn all off, :: axis.toggle(all=False) To turn all off but ticks on :: axis.toggle(all=False, ticks=True) To turn all on but (axis) label off :: axis.toggle(all=True, label=False)
35
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def toggle(self, all=None, ticks=None, ticklabels=None, label=None): if all: _ticks, _ticklabels, _label = True, True, True elif all is not None: _ticks, _ticklabels, _label = False, False, False else: _ticks, _ticklabels, _label = None, None, None if ticks is not None: _ticks = ticks if ticklabels is not None: _ticklabels = ticklabels if label is not None: _label = label if _ticks is not None: self.major_ticks.set_visible(_ticks) self.minor_ticks.set_visible(_ticks) if _ticklabels is not None: self.major_ticklabels.set_visible(_ticklabels) self.minor_ticklabels.set_visible(_ticklabels) if _label is not None: self.label.set_visible(_label) ``` ###Assistant : Toggle visibility of ticks, ticklabels, and (axis) label. To turn all off, :: axis.toggle(all=False) To turn all off but ticks on :: axis.toggle(all=False, ticks=True) To turn all on but (axis) label off :: axis.toggle(all=True, label=False)
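The docstring's `axis.toggle(...)` calls come from matplotlib's axisartist toolkit; a hedged end-to-end example of using them on a real axes (assumes matplotlib with mpl_toolkits.axisartist available):
```Python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt
from mpl_toolkits import axisartist

fig = plt.figure()
ax = fig.add_subplot(axes_class=axisartist.Axes)
ax.axis["top"].toggle(all=False, ticks=True)    # only tick marks on the top spine
ax.axis["left"].toggle(all=True, label=False)   # everything on the left except the axis label
fig.savefig("toggle_demo.png")
```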
603
def mock_json_schema(request, monkeypatch, tmp_path): # Do not patch integration tests if "integration" in request.keywords: return # Mock the subclasses list to make it very small, containing only mock nodes monkeypatch.setattr( haystack.nodes._json_schema, "find_subclasses_in_modules", lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)], ) # Point the JSON schema path to tmp_path monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path) # Generate mock schema in tmp_path filename = f"haystack-pipeline-master.schema.json" test_schema = _json_schema.get_json_schema(filename=filename, version="ignore") with open(tmp_path / filename, "w") as schema_file: json.dump(test_schema, schema_file, indent=4) # # Integration # @pytest.mark.integration @pytest.mark.elasticsearch
JSON schema with the master version and only mocked nodes.
10
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mock_json_schema(request, monkeypatch, tmp_path): # Do not patch integration tests if "integration" in request.keywords: return # Mock the subclasses list to make it very small, containing only mock nodes monkeypatch.setattr( haystack.nodes._json_schema, "find_subclasses_in_modules", lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)], ) # Point the JSON schema path to tmp_path monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path) # Generate mock schema in tmp_path filename = f"haystack-pipeline-master.schema.json" test_schema = _json_schema.get_json_schema(filename=filename, version="ignore") with open(tmp_path / filename, "w") as schema_file: json.dump(test_schema, schema_file, indent=4) # # Integration # @pytest.mark.integration @pytest.mark.elasticsearch ``` ###Assistant : JSON schema with the master version and only mocked nodes.
604
def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device("CPU") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION
Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw
52
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _has_nchw_support(): explicitly_on_cpu = _is_current_explicit_device("CPU") gpus_available = bool(_get_available_gpus()) return not explicitly_on_cpu and gpus_available # VARIABLE MANIPULATION ``` ###Assistant : Check whether the current scope supports NCHW ops. TensorFlow does not support NCHW on CPU. Therefore we check if we are not explicitly put on CPU, and have GPUs available. In this case there will be soft-placing on the GPU device. Returns: bool: if the current scope device placement would support nchw
605
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 2: img_np = tensor.numpy() else: raise TypeError( 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) if out_type == np.uint8: img_np = (img_np * 255.0).round() # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. return img_np.astype(out_type)
Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) # -------------------------------------------- # Augmentation, flipe and/or rotate # -------------------------------------------- # The following two are enough. # (1) augmet_img: numpy image of WxHxC or WxH # (2) augment_img_tensor4: tensor image 1xCxWxH # --------------------------------------------
62
117
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] n_dim = tensor.dim() if n_dim == 4: n_img = len(tensor) img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 3: img_np = tensor.numpy() img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR elif n_dim == 2: img_np = tensor.numpy() else: raise TypeError( 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) if out_type == np.uint8: img_np = (img_np * 255.0).round() # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. return img_np.astype(out_type) ``` ###Assistant : Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) # -------------------------------------------- # Augmentation, flipe and/or rotate # -------------------------------------------- # The following two are enough. # (1) augmet_img: numpy image of WxHxC or WxH # (2) augment_img_tensor4: tensor image 1xCxWxH # --------------------------------------------
606
def get_local_ip_address() -> str: try: ip_address = requests.get( "https://checkip.amazonaws.com/", timeout=3 ).text.strip() except (requests.ConnectionError, requests.exceptions.ReadTimeout): ip_address = "No internet connection" return ip_address
Gets the public IP address or returns the string "No internet connection" if unable to obtain it.
17
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_local_ip_address() -> str: try: ip_address = requests.get( "https://checkip.amazonaws.com/", timeout=3 ).text.strip() except (requests.ConnectionError, requests.exceptions.ReadTimeout): ip_address = "No internet connection" return ip_address ``` ###Assistant : Gets the public IP address or returns the string "No internet connection" if unable to obtain it.
607
def get_tip(self): tips = self.get_tips() if len(tips) == 0: raise Exception("tip not found") else: return tips[0]
Returns the TipableVMobject instance's (first) tip, otherwise throws an exception.
10
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tip(self): tips = self.get_tips() if len(tips) == 0: raise Exception("tip not found") else: return tips[0] ``` ###Assistant : Returns the TipableVMobject instance's (first) tip, otherwise throws an exception.
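This appears to be Manim's TipableVMobject helper; a hedged usage example, assuming the manim package is installed and that Arrow carries a default tip:
```Python
from manim import Arrow, LEFT, RIGHT

arrow = Arrow(LEFT, RIGHT)   # an Arrow is a TipableVMobject with one tip by default
tip = arrow.get_tip()        # first tip, or raises "tip not found" if none exist
print(type(tip).__name__)    # e.g. ArrowTriangleFilledTip
```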
608
def shuffle(self, func, lengths, **kwargs): num_splits = len(lengths) # We add these to kwargs and will pop them off before performing the operation. kwargs["manual_partition"] = True kwargs["_lengths"] = lengths args = [self.axis, func, num_splits, False] args.extend(self.list_of_blocks) return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs))
Shuffle the order of the data in this axis partition based on the `lengths`. Parameters ---------- func : callable The function to apply before splitting. lengths : list The list of partition lengths to split the result into. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- list A list of `PandasDataframePartition` objects split by `lengths`.
60
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shuffle(self, func, lengths, **kwargs): num_splits = len(lengths) # We add these to kwargs and will pop them off before performing the operation. kwargs["manual_partition"] = True kwargs["_lengths"] = lengths args = [self.axis, func, num_splits, False] args.extend(self.list_of_blocks) return self._wrap_partitions(self.deploy_axis_func(*args, **kwargs)) ``` ###Assistant : Shuffle the order of the data in this axis partition based on the `lengths`. Parameters ---------- func : callable The function to apply before splitting. lengths : list The list of partition lengths to split the result into. **kwargs : dict Additional keyword arguments to be passed in `func`. Returns ------- list A list of `PandasDataframePartition` objects split by `lengths`.
609
def test_background_add_room_type_column(self): # Create a room without a type room_id = self._generate_room() # Get event_id of the m.room.create event event_id = self.get_success( self.store.db_pool.simple_select_one_onecol( table="current_state_events", keyvalues={ "room_id": room_id, "type": "m.room.create", }, retcol="event_id", ) ) # Fake a room creation event with a room type event = { "content": { "creator": "@user:server.org", "room_version": "9", "type": RoomTypes.SPACE, }, "type": "m.room.create", } self.get_success( self.store.db_pool.simple_update( table="event_json", keyvalues={"event_id": event_id}, updatevalues={"json": json.dumps(event)}, desc="test", ) ) # Insert and run the background update self.get_success( self.store.db_pool.simple_insert( "background_updates", { "update_name": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN, "progress_json": "{}", }, ) ) # ... and tell the DataStore that it hasn't finished all updates yet self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion self.wait_for_background_updates() # Make sure the background update filled in the room type room_type_after = self.get_success( self.store.db_pool.simple_select_one_onecol( table="room_stats_state", keyvalues={"room_id": room_id}, retcol="room_type", allow_none=True, ) ) self.assertEqual(room_type_after, RoomTypes.SPACE)
Test that the background update to populate the `room_type` column in `room_stats_state` works properly.
14
136
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_background_add_room_type_column(self): # Create a room without a type room_id = self._generate_room() # Get event_id of the m.room.create event event_id = self.get_success( self.store.db_pool.simple_select_one_onecol( table="current_state_events", keyvalues={ "room_id": room_id, "type": "m.room.create", }, retcol="event_id", ) ) # Fake a room creation event with a room type event = { "content": { "creator": "@user:server.org", "room_version": "9", "type": RoomTypes.SPACE, }, "type": "m.room.create", } self.get_success( self.store.db_pool.simple_update( table="event_json", keyvalues={"event_id": event_id}, updatevalues={"json": json.dumps(event)}, desc="test", ) ) # Insert and run the background update self.get_success( self.store.db_pool.simple_insert( "background_updates", { "update_name": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN, "progress_json": "{}", }, ) ) # ... and tell the DataStore that it hasn't finished all updates yet self.store.db_pool.updates._all_done = False # Now let's actually drive the updates to completion self.wait_for_background_updates() # Make sure the background update filled in the room type room_type_after = self.get_success( self.store.db_pool.simple_select_one_onecol( table="room_stats_state", keyvalues={"room_id": room_id}, retcol="room_type", allow_none=True, ) ) self.assertEqual(room_type_after, RoomTypes.SPACE) ``` ###Assistant : Test that the background update to populate the `room_type` column in `room_stats_state` works properly.
610
def get_feedback(): labels = DOCUMENT_STORE.get_all_labels() return labels @router.delete("/feedback")
This endpoint allows the API user to retrieve all the feedback that has been submitted through the `POST /feedback` endpoint.
20
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_feedback(): labels = DOCUMENT_STORE.get_all_labels() return labels @router.delete("/feedback") ``` ###Assistant : This endpoint allows the API user to retrieve all the feedback that has been submitted through the `POST /feedback` endpoint.
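The body reads like a FastAPI route handler with its decorator stripped; below is a hedged, self-contained sketch of how such a feedback route pair might be wired up. All names here are stand-ins, not Haystack's real DOCUMENT_STORE.
```Python
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient

router = APIRouter()
FAKE_LABELS = [{"id": 1, "answer": "Paris"}]  # stand-in for DOCUMENT_STORE.get_all_labels()

@router.get("/feedback")
def get_feedback():
    return FAKE_LABELS

@router.delete("/feedback")
def delete_feedback():
    FAKE_LABELS.clear()
    return {"deleted": True}

app = FastAPI()
app.include_router(router)
client = TestClient(app)
print(client.get("/feedback").json())   # [{'id': 1, 'answer': 'Paris'}]
```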
611
def get_formatter_for_filename(fn, **options): fn = basename(fn) for modname, name, _, filenames, _ in FORMATTERS.values(): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound("no formatter found for file name %r" % fn)
Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found.
13
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_formatter_for_filename(fn, **options): fn = basename(fn) for modname, name, _, filenames, _ in FORMATTERS.values(): for filename in filenames: if _fn_matches(fn, filename): if name not in _formatter_cache: _load_formatters(modname) return _formatter_cache[name](**options) for cls in find_plugin_formatters(): for filename in cls.filenames: if _fn_matches(fn, filename): return cls(**options) raise ClassNotFound("no formatter found for file name %r" % fn) ``` ###Assistant : Lookup and instantiate a formatter by filename pattern. Raises ClassNotFound if not found.
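This matches Pygments' formatter lookup; the same behaviour is reachable through the public API, for example:
```Python
from pygments.formatters import get_formatter_for_filename
from pygments.util import ClassNotFound

try:
    formatter = get_formatter_for_filename("report.html", linenos=True)
    print(formatter.name)                 # "HTML"
except ClassNotFound:
    print("no formatter registered for that filename pattern")
```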
612
def setup_sigterm_on_parent_death(): try: import ctypes import signal libc = ctypes.CDLL("libc.so.6") # Set the parent process death signal of the command process to SIGTERM. libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h except OSError as e: _logger.warning(f"Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}.")
Uses prctl to automatically send SIGTERM to the child process when its parent is dead. The child process itself should handle SIGTERM properly.
23
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_sigterm_on_parent_death(): try: import ctypes import signal libc = ctypes.CDLL("libc.so.6") # Set the parent process death signal of the command process to SIGTERM. libc.prctl(1, signal.SIGTERM) # PR_SET_PDEATHSIG, see prctl.h except OSError as e: _logger.warning(f"Setup libc.prctl PR_SET_PDEATHSIG failed, error {repr(e)}.") ``` ###Assistant : Uses prctl to automatically send SIGTERM to the child process when its parent is dead. The child process itself should handle SIGTERM properly.
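A typical call site is the child side of a subprocess launch on Linux (the prctl call only exists there); a hedged sketch, assuming the function above is importable in the current scope:
```Python
import subprocess

# preexec_fn runs in the child between fork() and exec(), so the child
# registers PR_SET_PDEATHSIG and receives SIGTERM if this parent dies.
proc = subprocess.Popen(
    ["sleep", "3600"],
    preexec_fn=setup_sigterm_on_parent_death,
)
proc.terminate()
proc.wait()
```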
613
def force_reads(self) -> "Dataset[T]": blocks = self.get_internal_block_refs() bar = ProgressBar("Force reads", len(blocks)) bar.block_until_complete(blocks) return self
Force full evaluation of the blocks of this dataset. This can be used to read all blocks into memory. By default, Datasets doesn't read blocks from the datasource until the first transform.
32
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def force_reads(self) -> "Dataset[T]": blocks = self.get_internal_block_refs() bar = ProgressBar("Force reads", len(blocks)) bar.block_until_complete(blocks) return self ``` ###Assistant : Force full evaluation of the blocks of this dataset. This can be used to read all blocks into memory. By default, Datasets doesn't read blocks from the datasource until the first transform.
614
def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]: layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True) intersection = Region.intersection for widget, (region, _order, clip, virtual_size, container_size) in layers: yield ( widget, intersection(region, clip), region, virtual_size, container_size, )
Iterate map with information regarding each widget and its position Yields: Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of Widget, clip region, region, virtual size, and container size.
29
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]: layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True) intersection = Region.intersection for widget, (region, _order, clip, virtual_size, container_size) in layers: yield ( widget, intersection(region, clip), region, virtual_size, container_size, ) ``` ###Assistant : Iterate map with information regarding each widget and its position Yields: Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of Widget, clip region, region, virtual size, and container size.
615
def get_names_flat(adtype): listnames = [] names = adtype.names for name in names: listnames.append(name) current = adtype[name] if current.names is not None: listnames.extend(get_names_flat(current)) return tuple(listnames)
Returns the field names of the input datatype as a tuple. Input datatype has to have fields, otherwise an error is raised. Nested structures are flattened beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb')
72
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_names_flat(adtype): listnames = [] names = adtype.names for name in names: listnames.append(name) current = adtype[name] if current.names is not None: listnames.extend(get_names_flat(current)) return tuple(listnames) ``` ###Assistant : Returns the field names of the input datatype as a tuple. Input datatype has to have fields, otherwise an error is raised. Nested structures are flattened beforehand. Parameters ---------- adtype : dtype Input datatype Examples -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype) ('A', 'B') >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb')
616
def test_memory_leak(self): import gc import weakref results = {} for kind in plotting.PlotAccessor._all_kinds: args = {} if kind in ["hexbin", "scatter", "pie"]: df = DataFrame( { "A": np.random.uniform(size=20), "B": np.random.uniform(size=20), "C": np.arange(20) + np.random.uniform(size=20), } ) args = {"x": "A", "y": "B"} elif kind == "area": df = tm.makeTimeDataFrame().abs() else: df = tm.makeTimeDataFrame() # Use a weakref so we can see if the object gets collected without # also preventing it from being collected results[kind] = weakref.proxy(df.plot(kind=kind, **args)) # have matplotlib delete all the figures tm.close() # force a garbage collection gc.collect() msg = "weakly-referenced object no longer exists" for key in results: # check that every plot was collected with pytest.raises(ReferenceError, match=msg): # need to actually access something to get an error results[key].lines
Check that every plot type gets properly collected.
8
124
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_memory_leak(self): import gc import weakref results = {} for kind in plotting.PlotAccessor._all_kinds: args = {} if kind in ["hexbin", "scatter", "pie"]: df = DataFrame( { "A": np.random.uniform(size=20), "B": np.random.uniform(size=20), "C": np.arange(20) + np.random.uniform(size=20), } ) args = {"x": "A", "y": "B"} elif kind == "area": df = tm.makeTimeDataFrame().abs() else: df = tm.makeTimeDataFrame() # Use a weakref so we can see if the object gets collected without # also preventing it from being collected results[kind] = weakref.proxy(df.plot(kind=kind, **args)) # have matplotlib delete all the figures tm.close() # force a garbage collection gc.collect() msg = "weakly-referenced object no longer exists" for key in results: # check that every plot was collected with pytest.raises(ReferenceError, match=msg): # need to actually access something to get an error results[key].lines ``` ###Assistant : Check that every plot type gets properly collected.
617
def disabled_excepthook() -> Iterator[None]: old_excepthook = sys.excepthook sys.excepthook = sys.__excepthook__ try: yield finally: # If the code we did run did change sys.excepthook, we leave it # unchanged. Otherwise, we reset it. if sys.excepthook is sys.__excepthook__: sys.excepthook = old_excepthook
Run code with the exception hook temporarily disabled.
8
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def disabled_excepthook() -> Iterator[None]: old_excepthook = sys.excepthook sys.excepthook = sys.__excepthook__ try: yield finally: # If the code we did run did change sys.excepthook, we leave it # unchanged. Otherwise, we reset it. if sys.excepthook is sys.__excepthook__: sys.excepthook = old_excepthook ``` ###Assistant : Run code with the exception hook temporarily disabled.
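Because the body yields, the function is presumably decorated with contextlib.contextmanager at its definition site; a self-contained sketch under that assumption:
```Python
import sys
from contextlib import contextmanager
from typing import Iterator

@contextmanager
def disabled_excepthook() -> Iterator[None]:
    """Run code with the exception hook temporarily disabled."""
    old_excepthook = sys.excepthook
    sys.excepthook = sys.__excepthook__
    try:
        yield
    finally:
        # Only restore if the wrapped code did not install its own hook.
        if sys.excepthook is sys.__excepthook__:
            sys.excepthook = old_excepthook

with disabled_excepthook():
    pass  # any custom excepthook is bypassed inside this block
```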
618
def trigger_import(*dfs): if ASV_USE_STORAGE_FORMAT != "hdk" or ASV_USE_IMPL == "pandas": return from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import ( DbWorker, ) for df in dfs: df.shape # to trigger real execution df._query_compiler._modin_frame._partitions[0][ 0 ].frame_id = DbWorker().import_arrow_table( df._query_compiler._modin_frame._partitions[0][0].get() ) # to trigger real execution
Trigger import execution for DataFrames obtained by HDK engine. Parameters ---------- *dfs : iterable DataFrames to trigger import.
18
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def trigger_import(*dfs): if ASV_USE_STORAGE_FORMAT != "hdk" or ASV_USE_IMPL == "pandas": return from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import ( DbWorker, ) for df in dfs: df.shape # to trigger real execution df._query_compiler._modin_frame._partitions[0][ 0 ].frame_id = DbWorker().import_arrow_table( df._query_compiler._modin_frame._partitions[0][0].get() ) # to trigger real execution ``` ###Assistant : Trigger import execution for DataFrames obtained by HDK engine. Parameters ---------- *dfs : iterable DataFrames to trigger import.
619
def _jacfwd(f, primals): jac_flat = [] flat_primals = tf.nest.flatten(primals) tangent_mask = [tf.zeros_like(primal) for primal in flat_primals] for primal_index, primal in enumerate(flat_primals): primal_vector = tf.reshape(primal, [-1]) primal_vector_length = tf.size(primal_vector) jac_columns = [] for element_index in tf.range(primal_vector_length): mask = tf.one_hot(element_index, primal_vector_length) tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal)) jac_columns.append( tf.nest.map_structure( functools.partial(tf.reshape, shape=[-1]), _jvp( f, primals, tf.nest.pack_sequence_as(primals, tangent_mask), )[1], ) ) jac_flat.append(tf.stack(jac_columns, axis=1)) tangent_mask[primal_index] = tf.zeros_like(primal) return tf.nest.pack_sequence_as(primals, jac_flat)
Compute the jacobian of `f` at `primals` using forward-mode autodiff.
10
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _jacfwd(f, primals): jac_flat = [] flat_primals = tf.nest.flatten(primals) tangent_mask = [tf.zeros_like(primal) for primal in flat_primals] for primal_index, primal in enumerate(flat_primals): primal_vector = tf.reshape(primal, [-1]) primal_vector_length = tf.size(primal_vector) jac_columns = [] for element_index in tf.range(primal_vector_length): mask = tf.one_hot(element_index, primal_vector_length) tangent_mask[primal_index] = tf.reshape(mask, tf.shape(primal)) jac_columns.append( tf.nest.map_structure( functools.partial(tf.reshape, shape=[-1]), _jvp( f, primals, tf.nest.pack_sequence_as(primals, tangent_mask), )[1], ) ) jac_flat.append(tf.stack(jac_columns, axis=1)) tangent_mask[primal_index] = tf.zeros_like(primal) return tf.nest.pack_sequence_as(primals, jac_flat) ``` ###Assistant : Compute the jacobian of `f` at `primals` using forward-mode autodiff.
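The helper builds the Jacobian one column at a time from JVPs; TensorFlow's public forward-mode accumulator expresses the same idea. The following is a hedged sketch of that concept, not the Keras-internal `_jvp` helper:
```Python
import tensorflow as tf

def jac_column(f, x, index):
    # A one-hot tangent selects a single input component; the resulting JVP
    # is the matching column of the Jacobian of f at x.
    tangent = tf.reshape(tf.one_hot(index, tf.size(x)), tf.shape(x))
    with tf.autodiff.ForwardAccumulator(primals=x, tangents=tangent) as acc:
        y = f(x)
    return acc.jvp(y)

x = tf.constant([1.0, 2.0, 3.0])
print(jac_column(lambda v: v * v, x, 0).numpy())  # [2. 0. 0.]
```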
620
def record_timing(name, duration=None, description=None): timing_information = getattr(flask.g, "timing_information", {}) if name in timing_information: raise KeyError(f'Duplicate resource name "{name}" found.') timing_information[name] = {"dur": round(duration * 1000), "desc": description} setattr(flask.g, "timing_information", timing_information)
Records timing information for a server resource. :param name: The name of the resource. :type name: string :param duration: The time in seconds to report. Internally, this is rounded to the nearest millisecond. :type duration: float or None :param description: A description of the resource. :type description: string or None
50
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def record_timing(name, duration=None, description=None): timing_information = getattr(flask.g, "timing_information", {}) if name in timing_information: raise KeyError(f'Duplicate resource name "{name}" found.') timing_information[name] = {"dur": round(duration * 1000), "desc": description} setattr(flask.g, "timing_information", timing_information) ``` ###Assistant : Records timing information for a server resource. :param name: The name of the resource. :type name: string :param duration: The time in seconds to report. Internally, this is rounded to the nearest millisecond. :type duration: float or None :param description: A description of the resource. :type description: string or None
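In Dash this helper is surfaced as `dash.callback_context.record_timing`; a hedged example of calling it from inside a callback (the layout and ids are made up):
```Python
import time
import dash
from dash import Dash, Input, Output, html

app = Dash(__name__)
app.layout = html.Div([html.Button("go", id="btn"), html.Div(id="out")])

@app.callback(Output("out", "children"), Input("btn", "n_clicks"))
def update(n_clicks):
    start = time.time()
    time.sleep(0.05)  # stand-in for real work
    # Adds a Server-Timing entry named "work" to the HTTP response.
    dash.callback_context.record_timing("work", time.time() - start, "fake workload")
    return f"clicked {n_clicks or 0} times"
```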
621
def admin_actions(context): context["action_index"] = context.get("action_index", -1) + 1 return context @register.tag(name="admin_actions")
Track the number of times the action field has been rendered on the page, so we know which value to use.
21
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def admin_actions(context): context["action_index"] = context.get("action_index", -1) + 1 return context @register.tag(name="admin_actions") ``` ###Assistant : Track the number of times the action field has been rendered on the page, so we know which value to use.
622
async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, "light") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1") assert light_1 is not None assert ( light_1.attributes["friendly_name"] == "Hue light with color and color temperature 1" ) assert light_1.state == "on" assert light_1.attributes["brightness"] == int(46.85 / 100 * 255) assert light_1.attributes["mode"] == "normal" assert light_1.attributes["color_mode"] == COLOR_MODE_XY assert set(light_1.attributes["supported_color_modes"]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes["xy_color"] == (0.5614, 0.4058) assert light_1.attributes["min_mireds"] == 153 assert light_1.attributes["max_mireds"] == 500 assert light_1.attributes["dynamics"] == "dynamic_palette" assert light_1.attributes["effect_list"] == ["None", "candle", "fire"] assert light_1.attributes["effect"] == "None" # test light which supports color temperature only light_2 = hass.states.get("light.hue_light_with_color_temperature_only") assert light_2 is not None assert ( light_2.attributes["friendly_name"] == "Hue light with color temperature only" ) assert light_2.state == "off" assert light_2.attributes["mode"] == "normal" assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes["min_mireds"] == 153 assert light_2.attributes["max_mireds"] == 454 assert light_2.attributes["dynamics"] == "none" assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"] # test light which supports color only light_3 = hass.states.get("light.hue_light_with_color_only") assert light_3 is not None assert light_3.attributes["friendly_name"] == "Hue light with color only" assert light_3.state == "on" assert light_3.attributes["brightness"] == 128 assert light_3.attributes["mode"] == "normal" assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY] assert light_3.attributes["color_mode"] == COLOR_MODE_XY assert light_3.attributes["dynamics"] == "dynamic_palette" # test light which supports on/off only light_4 = hass.states.get("light.hue_on_off_light") assert light_4 is not None assert light_4.attributes["friendly_name"] == "Hue on/off light" assert light_4.state == "off" assert light_4.attributes["mode"] == "normal" assert light_4.attributes["supported_color_modes"] == []
Test if all v2 lights get created with correct features.
10
264
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_lights(hass, mock_bridge_v2, v2_resources_test_data): await mock_bridge_v2.api.load_test_data(v2_resources_test_data) await setup_platform(hass, mock_bridge_v2, "light") # there shouldn't have been any requests at this point assert len(mock_bridge_v2.mock_requests) == 0 # 6 entities should be created from test data (grouped_lights are disabled by default) assert len(hass.states.async_all()) == 6 # test light which supports color and color temperature light_1 = hass.states.get("light.hue_light_with_color_and_color_temperature_1") assert light_1 is not None assert ( light_1.attributes["friendly_name"] == "Hue light with color and color temperature 1" ) assert light_1.state == "on" assert light_1.attributes["brightness"] == int(46.85 / 100 * 255) assert light_1.attributes["mode"] == "normal" assert light_1.attributes["color_mode"] == COLOR_MODE_XY assert set(light_1.attributes["supported_color_modes"]) == { COLOR_MODE_COLOR_TEMP, COLOR_MODE_XY, } assert light_1.attributes["xy_color"] == (0.5614, 0.4058) assert light_1.attributes["min_mireds"] == 153 assert light_1.attributes["max_mireds"] == 500 assert light_1.attributes["dynamics"] == "dynamic_palette" assert light_1.attributes["effect_list"] == ["None", "candle", "fire"] assert light_1.attributes["effect"] == "None" # test light which supports color temperature only light_2 = hass.states.get("light.hue_light_with_color_temperature_only") assert light_2 is not None assert ( light_2.attributes["friendly_name"] == "Hue light with color temperature only" ) assert light_2.state == "off" assert light_2.attributes["mode"] == "normal" assert light_2.attributes["supported_color_modes"] == [COLOR_MODE_COLOR_TEMP] assert light_2.attributes["min_mireds"] == 153 assert light_2.attributes["max_mireds"] == 454 assert light_2.attributes["dynamics"] == "none" assert light_2.attributes["effect_list"] == ["None", "candle", "sunrise"] # test light which supports color only light_3 = hass.states.get("light.hue_light_with_color_only") assert light_3 is not None assert light_3.attributes["friendly_name"] == "Hue light with color only" assert light_3.state == "on" assert light_3.attributes["brightness"] == 128 assert light_3.attributes["mode"] == "normal" assert light_3.attributes["supported_color_modes"] == [COLOR_MODE_XY] assert light_3.attributes["color_mode"] == COLOR_MODE_XY assert light_3.attributes["dynamics"] == "dynamic_palette" # test light which supports on/off only light_4 = hass.states.get("light.hue_on_off_light") assert light_4 is not None assert light_4.attributes["friendly_name"] == "Hue on/off light" assert light_4.state == "off" assert light_4.attributes["mode"] == "normal" assert light_4.attributes["supported_color_modes"] == [] ``` ###Assistant : Test if all v2 lights get created with correct features.
623
def setup_data(self, path): for message, new_episode in super().setup_data(path): assert ( message['text'] == '__SILENCE__' ), 'The expected original context string is not found!' message['text'] = 'Person 1:' yield message, new_episode
Modify each output message to add in an OPT-compatible context string.
11
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def setup_data(self, path): for message, new_episode in super().setup_data(path): assert ( message['text'] == '__SILENCE__' ), 'The expected original context string is not found!' message['text'] = 'Person 1:' yield message, new_episode ``` ###Assistant : Modify each output message to add in an OPT-compatible context string.
624
def _get_input_locations(self) -> List[str]: if not self._args.batch_mode or os.path.isfile(self._args.input_dir): return [self._args.input_dir] # Not batch mode or a single file retval = [os.path.join(self._args.input_dir, fname) for fname in os.listdir(self._args.input_dir) if os.path.isdir(os.path.join(self._args.input_dir, fname)) or os.path.splitext(fname)[-1].lower() in _video_extensions] logger.debug("Input locations: %s", retval) return retval
Obtain the full path to input locations. Will be a list of locations if batch mode is selected, or a list containing a single location if batch mode is not selected. Returns ------- list: The list of input location paths
39
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_locations(self) -> List[str]: if not self._args.batch_mode or os.path.isfile(self._args.input_dir): return [self._args.input_dir] # Not batch mode or a single file retval = [os.path.join(self._args.input_dir, fname) for fname in os.listdir(self._args.input_dir) if os.path.isdir(os.path.join(self._args.input_dir, fname)) or os.path.splitext(fname)[-1].lower() in _video_extensions] logger.debug("Input locations: %s", retval) return retval ``` ###Assistant : Obtain the full path to input locations. Will be a list of locations if batch mode is selected, or a list containing a single location if batch mode is not selected. Returns ------- list: The list of input location paths
625
def get_party_gle_currency(party_type, party, company): def generator(): existing_gle_currency = frappe.db.sql( , {"company": company, "party_type": party_type, "party": party}, ) return existing_gle_currency[0][0] if existing_gle_currency else None return frappe.local_cache( "party_gle_currency", (party_type, party, company), generator, regenerate_if_none=True )
select account_currency from `tabGL Entry` where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s limit 1
15
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_party_gle_currency(party_type, party, company): def generator(): existing_gle_currency = frappe.db.sql( , {"company": company, "party_type": party_type, "party": party}, ) return existing_gle_currency[0][0] if existing_gle_currency else None return frappe.local_cache( "party_gle_currency", (party_type, party, company), generator, regenerate_if_none=True ) ``` ###Assistant : select account_currency from `tabGL Entry` where docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s limit 1
626
def load_breast_cancer(*, return_X_y=False, as_frame=False): data_file_name = "breast_cancer.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="breast_cancer.rst" ) feature_names = np.array( [ "mean radius", "mean texture", "mean perimeter", "mean area", "mean smoothness", "mean compactness", "mean concavity", "mean concave points", "mean symmetry", "mean fractal dimension", "radius error", "texture error", "perimeter error", "area error", "smoothness error", "compactness error", "concavity error", "concave points error", "symmetry error", "fractal dimension error", "worst radius", "worst texture", "worst perimeter", "worst area", "worst smoothness", "worst compactness", "worst concavity", "worst concave points", "worst symmetry", "worst fractal dimension", ] ) frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_breast_cancer", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, )
Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://goo.gl/U2Uwz2 Read more in the :ref:`User Guide <breast_cancer_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : list The names of the dataset columns. target_names : list The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. >>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) ['malignant', 'benign']
356
125
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_breast_cancer(*, return_X_y=False, as_frame=False): data_file_name = "breast_cancer.csv" data, target, target_names, fdescr = load_csv_data( data_file_name=data_file_name, descr_file_name="breast_cancer.rst" ) feature_names = np.array( [ "mean radius", "mean texture", "mean perimeter", "mean area", "mean smoothness", "mean compactness", "mean concavity", "mean concave points", "mean symmetry", "mean fractal dimension", "radius error", "texture error", "perimeter error", "area error", "smoothness error", "compactness error", "concavity error", "concave points error", "symmetry error", "fractal dimension error", "worst radius", "worst texture", "worst perimeter", "worst area", "worst smoothness", "worst compactness", "worst concavity", "worst concave points", "worst symmetry", "worst fractal dimension", ] ) frame = None target_columns = [ "target", ] if as_frame: frame, data, target = _convert_data_dataframe( "load_breast_cancer", data, target, feature_names, target_columns ) if return_X_y: return data, target return Bunch( data=data, target=target, frame=frame, target_names=target_names, DESCR=fdescr, feature_names=feature_names, filename=data_file_name, data_module=DATA_MODULE, ) ``` ###Assistant : Load and return the breast cancer wisconsin dataset (classification). The breast cancer dataset is a classic and very easy binary classification dataset. ================= ============== Classes 2 Samples per class 212(M),357(B) Samples total 569 Dimensionality 30 Features real, positive ================= ============== The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is downloaded from: https://goo.gl/U2Uwz2 Read more in the :ref:`User Guide <breast_cancer_dataset>`. Parameters ---------- return_X_y : bool, default=False If True, returns ``(data, target)`` instead of a Bunch object. See below for more information about the `data` and `target` object. .. versionadded:: 0.18 as_frame : bool, default=False If True, the data is a pandas DataFrame including columns with appropriate dtypes (numeric). The target is a pandas DataFrame or Series depending on the number of target columns. If `return_X_y` is True, then (`data`, `target`) will be pandas DataFrames or Series as described below. .. versionadded:: 0.23 Returns ------- data : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. data : {ndarray, dataframe} of shape (569, 30) The data matrix. If `as_frame=True`, `data` will be a pandas DataFrame. target : {ndarray, Series} of shape (569,) The classification target. If `as_frame=True`, `target` will be a pandas Series. feature_names : list The names of the dataset columns. target_names : list The names of target classes. frame : DataFrame of shape (569, 31) Only present when `as_frame=True`. DataFrame with `data` and `target`. .. versionadded:: 0.23 DESCR : str The full description of the dataset. filename : str The path to the location of the data. .. versionadded:: 0.20 (data, target) : tuple if ``return_X_y`` is True A tuple of two ndarrays by default. The first contains a 2D ndarray of shape (569, 30) with each row representing one sample and each column representing the features. The second ndarray of shape (569,) contains the target samples. If `as_frame=True`, both arrays are pandas objects, i.e. `X` a dataframe and `y` a series. .. 
versionadded:: 0.18 Examples -------- Let's say you are interested in the samples 10, 50, and 85, and want to know their class name. >>> from sklearn.datasets import load_breast_cancer >>> data = load_breast_cancer() >>> data.target[[10, 50, 85]] array([0, 1, 0]) >>> list(data.target_names) ['malignant', 'benign']
627
def get_filter_by_name(filtername, **options): cls = find_filter_class(filtername) if cls: return cls(**options) else: raise ClassNotFound('filter %r not found' % filtername)
Return an instantiated filter. Options are passed to the filter initializer if wanted. Raise a ClassNotFound if not found.
19
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_filter_by_name(filtername, **options): cls = find_filter_class(filtername) if cls: return cls(**options) else: raise ClassNotFound('filter %r not found' % filtername) ``` ###Assistant : Return an instantiated filter. Options are passed to the filter initializer if wanted. Raise a ClassNotFound if not found.
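As with the formatter lookup earlier in this set, Pygments exposes this through its public filters API; a hedged example:
```Python
from pygments.filters import get_filter_by_name
from pygments.lexers import PythonLexer

lexer = PythonLexer()
# Upper-case every keyword token produced by the lexer.
lexer.add_filter(get_filter_by_name("keywordcase", case="upper"))
tokens = list(lexer.get_tokens("def f(): return None"))
print(tokens[0])  # (Token.Keyword, 'DEF')
```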
628
def wide_resnet50_2(pretrained=False, **kwargs): kwargs['width'] = 64 * 2 return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)
Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet Examples: .. code-block:: python import paddle from paddle.vision.models import wide_resnet50_2 # build model model = wide_resnet50_2() # build model and load imagenet pretrained weight # model = wide_resnet50_2(pretrained=True) x = paddle.rand([1, 3, 224, 224]) out = model(x) print(out.shape)
57
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wide_resnet50_2(pretrained=False, **kwargs): kwargs['width'] = 64 * 2 return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs) ``` ###Assistant : Wide ResNet-50-2 model from `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet Examples: .. code-block:: python import paddle from paddle.vision.models import wide_resnet50_2 # build model model = wide_resnet50_2() # build model and load imagenet pretrained weight # model = wide_resnet50_2(pretrained=True) x = paddle.rand([1, 3, 224, 224]) out = model(x) print(out.shape)
629
def test_username_available(self) -> None: url = "%s?username=%s" % (self.url, "allowed") channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["available"])
The endpoint should return a HTTPStatus.OK response if the username does not exist
13
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_username_available(self) -> None: url = "%s?username=%s" % (self.url, "allowed") channel = self.make_request("GET", url, access_token=self.admin_user_tok) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertTrue(channel.json_body["available"]) ``` ###Assistant : The endpoint should return a HTTPStatus.OK response if the username does not exist
630
def test_with_include_glob_filtering_case4a_include_strong(): incl_dom = {} incl_glob = {"*working"} incl_ent = {"binary_sensor.specificly_included"} excl_dom = {} excl_glob = {"*broken", "*notworking", "binary_sensor.*"} excl_ent = {"light.ignoreme"} testfilter = generate_filter( incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob ) assert testfilter("sensor.working") is True assert testfilter("sensor.notworking") is True # include is stronger assert testfilter("sensor.broken") is False assert testfilter("light.test") is False assert testfilter("light.notworking") is True # include is stronger assert testfilter("light.ignoreme") is False assert testfilter("binary_sensor.not_working") is True # include is stronger assert testfilter("binary_sensor.another") is False assert testfilter("binary_sensor.specificly_included") is True assert testfilter("sun.sun") is False
Test case 4 - include and exclude specified, both have globs, and a specifically included entity.
16
84
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_with_include_glob_filtering_case4a_include_strong(): incl_dom = {} incl_glob = {"*working"} incl_ent = {"binary_sensor.specificly_included"} excl_dom = {} excl_glob = {"*broken", "*notworking", "binary_sensor.*"} excl_ent = {"light.ignoreme"} testfilter = generate_filter( incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob ) assert testfilter("sensor.working") is True assert testfilter("sensor.notworking") is True # include is stronger assert testfilter("sensor.broken") is False assert testfilter("light.test") is False assert testfilter("light.notworking") is True # include is stronger assert testfilter("light.ignoreme") is False assert testfilter("binary_sensor.not_working") is True # include is stronger assert testfilter("binary_sensor.another") is False assert testfilter("binary_sensor.specificly_included") is True assert testfilter("sun.sun") is False ``` ###Assistant : Test case 4 - include and exclude specified, both have globs, and a specifically included entity.
631
def get_sales_orders(self): so_filter = item_filter = "" bom_item = "bom.item = so_item.item_code" date_field_mapper = { "from_date": (">=", "so.transaction_date"), "to_date": ("<=", "so.transaction_date"), "from_delivery_date": (">=", "so_item.delivery_date"), "to_delivery_date": ("<=", "so_item.delivery_date"), } for field, value in date_field_mapper.items(): if self.get(field): so_filter += f" and {value[1]} {value[0]} %({field})s" for field in ["customer", "project", "sales_order_status"]: if self.get(field): so_field = "status" if field == "sales_order_status" else field so_filter += f" and so.{so_field} = %({field})s" if self.item_code and frappe.db.exists("Item", self.item_code): bom_item = self.get_bom_item() or bom_item item_filter += " and so_item.item_code = %(item_code)s" open_so = frappe.db.sql( f, self.as_dict(), as_dict=1, ) return open_so @frappe.whitelist()
select distinct so.name, so.transaction_date, so.customer, so.base_grand_total from `tabSales Order` so, `tabSales Order Item` so_item where so_item.parent = so.name and so.docstatus = 1 and so.status not in ("Stopped", "Closed") and so.company = %(company)s and so_item.qty > so_item.work_order_qty {so_filter} {item_filter} and (exists (select name from `tabBOM` bom where {bom_item} and bom.is_active = 1) or exists (select name from `tabPacked Item` pi where pi.parent = so.name and pi.parent_item = so_item.item_code and exists (select name from `tabBOM` bom where bom.item=pi.item_code and bom.is_active = 1)))
80
93
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_sales_orders(self): so_filter = item_filter = "" bom_item = "bom.item = so_item.item_code" date_field_mapper = { "from_date": (">=", "so.transaction_date"), "to_date": ("<=", "so.transaction_date"), "from_delivery_date": (">=", "so_item.delivery_date"), "to_delivery_date": ("<=", "so_item.delivery_date"), } for field, value in date_field_mapper.items(): if self.get(field): so_filter += f" and {value[1]} {value[0]} %({field})s" for field in ["customer", "project", "sales_order_status"]: if self.get(field): so_field = "status" if field == "sales_order_status" else field so_filter += f" and so.{so_field} = %({field})s" if self.item_code and frappe.db.exists("Item", self.item_code): bom_item = self.get_bom_item() or bom_item item_filter += " and so_item.item_code = %(item_code)s" open_so = frappe.db.sql( f, self.as_dict(), as_dict=1, ) return open_so @frappe.whitelist() ``` ###Assistant : select distinct so.name, so.transaction_date, so.customer, so.base_grand_total from `tabSales Order` so, `tabSales Order Item` so_item where so_item.parent = so.name and so.docstatus = 1 and so.status not in ("Stopped", "Closed") and so.company = %(company)s and so_item.qty > so_item.work_order_qty {so_filter} {item_filter} and (exists (select name from `tabBOM` bom where {bom_item} and bom.is_active = 1) or exists (select name from `tabPacked Item` pi where pi.parent = so.name and pi.parent_item = so_item.item_code and exists (select name from `tabBOM` bom where bom.item=pi.item_code and bom.is_active = 1)))
632
def test_copy_published_emits_signal(self): christmas_page = EventPage.objects.get(url_path="/home/events/christmas/") signal_fired = False signal_page = None
Test that copying of a published page emits a page_published signal.
11
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_copy_published_emits_signal(self): christmas_page = EventPage.objects.get(url_path="/home/events/christmas/") signal_fired = False signal_page = None ``` ###Assistant : Test that copying of a published page emits a page_published signal.
633
def get_all_tests(): test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests") # test folders/files directly under `tests` folder tests = os.listdir(test_root_dir) tests = sorted( list(filter(lambda x: os.path.isdir(x) or x.startswith("tests/test_"), [f"tests/{x}" for x in tests])) ) # model specific test folders model_tests_folders = os.listdir(os.path.join(test_root_dir, "models")) model_test_folders = sorted(list(filter(os.path.isdir, [f"tests/models/{x}" for x in model_tests_folders]))) tests.remove("tests/models") tests = model_test_folders + tests return tests
Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`. - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
46
55
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_tests(): test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests") # test folders/files directly under `tests` folder tests = os.listdir(test_root_dir) tests = sorted( list(filter(lambda x: os.path.isdir(x) or x.startswith("tests/test_"), [f"tests/{x}" for x in tests])) ) # model specific test folders model_tests_folders = os.listdir(os.path.join(test_root_dir, "models")) model_test_folders = sorted(list(filter(os.path.isdir, [f"tests/models/{x}" for x in model_tests_folders]))) tests.remove("tests/models") tests = model_test_folders + tests return tests ``` ###Assistant : Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`. - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded. - folders under `tests/models`: `bert`, `gpt2`, etc. - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.
634
def test_vtrace(self): seq_len = 5 batch_size = 10 # Create log_rhos such that rho will span from near-zero to above the # clipping thresholds. In particular, calculate log_rhos in # [-2.5, 2.5), # so that rho is in approx [0.08, 12.2). space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32) space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32) log_rhos = space_w_time.sample() / (batch_size * seq_len) log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5). values = { "log_rhos": log_rhos, # T, B where B_i: [0.9 / (i+1)] * T "discounts": np.array( [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)] ), "rewards": space_w_time.sample(), "values": space_w_time.sample() / batch_size, "bootstrap_value": space_only_batch.sample() + 1.0, "clip_rho_threshold": 3.7, "clip_pg_rho_threshold": 2.2, } for fw, sess in framework_iterator(frameworks=("torch", "tf"), session=True): vtrace = vtrace_tf if fw != "torch" else vtrace_torch output = vtrace.from_importance_weights(**values) if sess: output = sess.run(output) ground_truth_v = _ground_truth_calculation(vtrace, **values) check(output, ground_truth_v)
Tests V-trace against ground truth data calculated in python.
9
150
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_vtrace(self): seq_len = 5 batch_size = 10 # Create log_rhos such that rho will span from near-zero to above the # clipping thresholds. In particular, calculate log_rhos in # [-2.5, 2.5), # so that rho is in approx [0.08, 12.2). space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32) space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32) log_rhos = space_w_time.sample() / (batch_size * seq_len) log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5). values = { "log_rhos": log_rhos, # T, B where B_i: [0.9 / (i+1)] * T "discounts": np.array( [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)] ), "rewards": space_w_time.sample(), "values": space_w_time.sample() / batch_size, "bootstrap_value": space_only_batch.sample() + 1.0, "clip_rho_threshold": 3.7, "clip_pg_rho_threshold": 2.2, } for fw, sess in framework_iterator(frameworks=("torch", "tf"), session=True): vtrace = vtrace_tf if fw != "torch" else vtrace_torch output = vtrace.from_importance_weights(**values) if sess: output = sess.run(output) ground_truth_v = _ground_truth_calculation(vtrace, **values) check(output, ground_truth_v) ``` ###Assistant : Tests V-trace against ground truth data calculated in python.
635
def pdfdump(self, filename=None, **kargs): # type: (Optional[str], **Any) -> None from scapy.config import conf from scapy.utils import get_temp_file, ContextManagerSubprocess canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=kargs.get("suffix", ".pdf")) canvas.writePDFfile(fname) if WINDOWS and not conf.prog.pdfreader: os.startfile(fname) else: with ContextManagerSubprocess(conf.prog.pdfreader): subprocess.Popen([conf.prog.pdfreader, fname]) else: canvas.writePDFfile(filename) print()
pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. :param filename: the file's filename
29
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def pdfdump(self, filename=None, **kargs): # type: (Optional[str], **Any) -> None from scapy.config import conf from scapy.utils import get_temp_file, ContextManagerSubprocess canvas = self.canvas_dump(**kargs) if filename is None: fname = get_temp_file(autoext=kargs.get("suffix", ".pdf")) canvas.writePDFfile(fname) if WINDOWS and not conf.prog.pdfreader: os.startfile(fname) else: with ContextManagerSubprocess(conf.prog.pdfreader): subprocess.Popen([conf.prog.pdfreader, fname]) else: canvas.writePDFfile(filename) print() ``` ###Assistant : pdfdump(filename=None, layer_shift=0, rebuild=1) Creates a PDF file describing a packet. If filename is not provided a temporary file is created and xpdf is called. :param filename: the file's filename
636
def index_sample(x, index): x_s = x.shape dim = len(index.shape) - 1 assert x_s[:dim] == index.shape[:dim] if len(x_s) == 3 and dim == 1: r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]]) else: r_x = paddle.reshape(x, shape=[-1, x_s[-1]]) index = paddle.reshape(index, shape=[len(r_x), -1, 1]) # Generate arange index, shape like index arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype) arr_index = paddle.unsqueeze(arr_index, axis=[1, 2]) arr_index = paddle.expand(arr_index, index.shape) # Genrate new index new_index = paddle.concat((arr_index, index), -1) new_index = paddle.reshape(new_index, (-1, 2)) # Get output out = paddle.gather_nd(r_x, new_index) if len(x_s) == 3 and dim == 2: out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1]) else: out = paddle.reshape(out, shape=[x_s[0], -1]) return out
Select input value according to index Args: input: input matrix index: index matrix Returns: output >>> input [ [1, 2, 3], [4, 5, 6] ] >>> index [ [1, 2], [0, 1] ] >>> index_sample(input, index) [ [2, 3], [4, 5] ]
42
105
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def index_sample(x, index): x_s = x.shape dim = len(index.shape) - 1 assert x_s[:dim] == index.shape[:dim] if len(x_s) == 3 and dim == 1: r_x = paddle.reshape(x, shape=[-1, x_s[1], x_s[-1]]) else: r_x = paddle.reshape(x, shape=[-1, x_s[-1]]) index = paddle.reshape(index, shape=[len(r_x), -1, 1]) # Generate arange index, shape like index arr_index = paddle.arange(start=0, end=len(index), dtype=index.dtype) arr_index = paddle.unsqueeze(arr_index, axis=[1, 2]) arr_index = paddle.expand(arr_index, index.shape) # Genrate new index new_index = paddle.concat((arr_index, index), -1) new_index = paddle.reshape(new_index, (-1, 2)) # Get output out = paddle.gather_nd(r_x, new_index) if len(x_s) == 3 and dim == 2: out = paddle.reshape(out, shape=[x_s[0], x_s[1], -1]) else: out = paddle.reshape(out, shape=[x_s[0], -1]) return out ``` ###Assistant : Select input value according to index Args: input: input matrix index: index matrix Returns: output >>> input [ [1, 2, 3], [4, 5, 6] ] >>> index [ [1, 2], [0, 1] ] >>> index_sample(input, index) [ [2, 3], [4, 5] ]
637
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): if not isinstance(name, Str): name = Str(name) # canonicallize the symbols if symbols is None: names = kwargs.get('names', None) if names is None: symbols = Tuple( *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)] ) else: sympy_deprecation_warning( f, deprecated_since_version="1.7", active_deprecations_target="deprecated-diffgeom-mutable", ) symbols = Tuple( *[Symbol(n, real=True) for n in names] ) else: syms = [] for s in symbols: if isinstance(s, Symbol): syms.append(Symbol(s.name, **s._assumptions.generator)) elif isinstance(s, str): sympy_deprecation_warning( f, deprecated_since_version="1.7", active_deprecations_target="deprecated-diffgeom-mutable", ) syms.append(Symbol(s, real=True)) symbols = Tuple(*syms) # canonicallize the relations rel_temp = {} for k,v in relations.items(): s1, s2 = k if not isinstance(s1, Str): s1 = Str(s1) if not isinstance(s2, Str): s2 = Str(s2) key = Tuple(s1, s2) # Old version used Lambda as a value. if isinstance(v, Lambda): v = (tuple(v.signature), tuple(v.expr)) else: v = (tuple(v[0]), tuple(v[1])) rel_temp[key] = v relations = Dict(rel_temp) # construct the object obj = super().__new__(cls, name, patch, symbols, relations) # Add deprecated attributes obj.transforms = _deprecated_dict( , {}) obj._names = [str(n) for n in symbols] obj.patch.coord_systems.append(obj) # deprecated obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated obj._dummy = Dummy() return obj
The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That is, replace CoordSystem(..., names={names}) with CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}]) Passing a string as the coordinate symbol name to CoordSystem is deprecated. Pass a Symbol with the appropriate name and assumptions instead. That is, replace {s} with Symbol({s!r}, real=True). CoordSystem.transforms is deprecated. The CoordSystem class is now immutable. Use the 'relations' keyword argument to the CoordSystems() constructor to specify relations.
78
188
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, name, patch, symbols=None, relations={}, **kwargs): if not isinstance(name, Str): name = Str(name) # canonicallize the symbols if symbols is None: names = kwargs.get('names', None) if names is None: symbols = Tuple( *[Symbol('%s_%s' % (name.name, i), real=True) for i in range(patch.dim)] ) else: sympy_deprecation_warning( f, deprecated_since_version="1.7", active_deprecations_target="deprecated-diffgeom-mutable", ) symbols = Tuple( *[Symbol(n, real=True) for n in names] ) else: syms = [] for s in symbols: if isinstance(s, Symbol): syms.append(Symbol(s.name, **s._assumptions.generator)) elif isinstance(s, str): sympy_deprecation_warning( f, deprecated_since_version="1.7", active_deprecations_target="deprecated-diffgeom-mutable", ) syms.append(Symbol(s, real=True)) symbols = Tuple(*syms) # canonicallize the relations rel_temp = {} for k,v in relations.items(): s1, s2 = k if not isinstance(s1, Str): s1 = Str(s1) if not isinstance(s2, Str): s2 = Str(s2) key = Tuple(s1, s2) # Old version used Lambda as a value. if isinstance(v, Lambda): v = (tuple(v.signature), tuple(v.expr)) else: v = (tuple(v[0]), tuple(v[1])) rel_temp[key] = v relations = Dict(rel_temp) # construct the object obj = super().__new__(cls, name, patch, symbols, relations) # Add deprecated attributes obj.transforms = _deprecated_dict( , {}) obj._names = [str(n) for n in symbols] obj.patch.coord_systems.append(obj) # deprecated obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated obj._dummy = Dummy() return obj ``` ###Assistant : The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That is, replace CoordSystem(..., names={names}) with CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}]) Passing a string as the coordinate symbol name to CoordSystem is deprecated. Pass a Symbol with the appropriate name and assumptions instead. That is, replace {s} with Symbol({s!r}, real=True). CoordSystem.transforms is deprecated. The CoordSystem class is now immutable. Use the 'relations' keyword argument to the CoordSystems() constructor to specify relations.
638
def tab_focus(*, info): model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id, add_win_id=False, current_win_id=info.win_id) special = [ ("last", "Focus the last-focused tab"), ("stack-next", "Go forward through a stack of focused tabs"), ("stack-prev", "Go backward through a stack of focused tabs"), ] model.add_category(listcategory.ListCategory("Special", special)) return model
A model to complete on open tabs in the current window.
11
43
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def tab_focus(*, info): model = _tabs(win_id_filter=lambda win_id: win_id == info.win_id, add_win_id=False, current_win_id=info.win_id) special = [ ("last", "Focus the last-focused tab"), ("stack-next", "Go forward through a stack of focused tabs"), ("stack-prev", "Go backward through a stack of focused tabs"), ] model.add_category(listcategory.ListCategory("Special", special)) return model ``` ###Assistant : A model to complete on open tabs in the current window.
639
def get_mode_of_payment_details(filters): mode_of_payment_details = {} invoice_list = get_invoices(filters) invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list) if invoice_list: inv_mop_detail = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) inv_change_amount = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) for d in inv_change_amount: for det in inv_mop_detail: if ( det["owner"] == d["owner"] and det["posting_date"] == d["posting_date"] and det["mode_of_payment"] == d["mode_of_payment"] ): paid_amount = det["paid_amount"] - d["change_amount"] det["paid_amount"] = paid_amount for d in inv_mop_detail: mode_of_payment_details.setdefault(d["owner"] + cstr(d["posting_date"]), []).append( (d.mode_of_payment, d.paid_amount) ) return mode_of_payment_details
select t.owner, t.posting_date, t.mode_of_payment, sum(t.paid_amount) as paid_amount from ( select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount from `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c where a.name = c.reference_name and b.name = c.parent and b.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner, a.posting_date, ifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit) from `tabJournal Entry` a, `tabJournal Entry Account` b where a.name = b.parent and a.docstatus = 1 and b.reference_type = 'Sales Invoice' and b.reference_name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment ) t group by t.owner, t.posting_date, t.mode_of_payment select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.name in ({invoice_list_names}) and b.type = 'Cash' and a.base_change_amount > 0 group by a.owner, a.posting_date, mode_of_payment
169
80
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_mode_of_payment_details(filters): mode_of_payment_details = {} invoice_list = get_invoices(filters) invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list) if invoice_list: inv_mop_detail = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) inv_change_amount = frappe.db.sql( .format( invoice_list_names=invoice_list_names ), as_dict=1, ) for d in inv_change_amount: for det in inv_mop_detail: if ( det["owner"] == d["owner"] and det["posting_date"] == d["posting_date"] and det["mode_of_payment"] == d["mode_of_payment"] ): paid_amount = det["paid_amount"] - d["change_amount"] det["paid_amount"] = paid_amount for d in inv_mop_detail: mode_of_payment_details.setdefault(d["owner"] + cstr(d["posting_date"]), []).append( (d.mode_of_payment, d.paid_amount) ) return mode_of_payment_details ``` ###Assistant : select t.owner, t.posting_date, t.mode_of_payment, sum(t.paid_amount) as paid_amount from ( select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount from `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c where a.name = c.reference_name and b.name = c.parent and b.docstatus = 1 and a.name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment union select a.owner, a.posting_date, ifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit) from `tabJournal Entry` a, `tabJournal Entry Account` b where a.name = b.parent and a.docstatus = 1 and b.reference_type = 'Sales Invoice' and b.reference_name in ({invoice_list_names}) group by a.owner, a.posting_date, mode_of_payment ) t group by t.owner, t.posting_date, t.mode_of_payment select a.owner, a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount from `tabSales Invoice` a, `tabSales Invoice Payment` b where a.name = b.parent and a.name in ({invoice_list_names}) and b.type = 'Cash' and a.base_change_amount > 0 group by a.owner, a.posting_date, mode_of_payment
640
def get_config_context(self): data = {} if not hasattr(self, 'config_context_data'): # The annotation is not available, so we fall back to manually querying for the config context objects config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True) else: # The attribute may exist, but the annotated value could be None if there is no config context data config_context_data = self.config_context_data or [] for context in config_context_data: data = deepmerge(data, context) # If the object has local config context data defined, merge it last if self.local_context_data: data = deepmerge(data, self.local_context_data) return data
Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs. Return the rendered configuration context for a device or VM.
24
85
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_config_context(self): data = {} if not hasattr(self, 'config_context_data'): # The annotation is not available, so we fall back to manually querying for the config context objects config_context_data = ConfigContext.objects.get_for_object(self, aggregate_data=True) else: # The attribute may exist, but the annotated value could be None if there is no config context data config_context_data = self.config_context_data or [] for context in config_context_data: data = deepmerge(data, context) # If the object has local config context data defined, merge it last if self.local_context_data: data = deepmerge(data, self.local_context_data) return data ``` ###Assistant : Compile all config data, overwriting lower-weight values with higher-weight values where a collision occurs. Return the rendered configuration context for a device or VM.
641
def test_displayname_is_set_avatar_is_none(self) -> None: channel = self.make_request( "POST", self.url, access_token=self.admin_user_tok, content={ "user_id": self.other_user, "content": {"msgtype": "m.text", "body": "test msg"}, }, ) self.assertEqual(200, channel.code, msg=channel.json_body) # user has one invite self._check_invite_and_join_status(self.other_user, 1, 0)
Tests that sending a server notice is successful if a display_name is set, avatar_url is `None`, and "check avatar size and mime type" is set.
25
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_displayname_is_set_avatar_is_none(self) -> None: channel = self.make_request( "POST", self.url, access_token=self.admin_user_tok, content={ "user_id": self.other_user, "content": {"msgtype": "m.text", "body": "test msg"}, }, ) self.assertEqual(200, channel.code, msg=channel.json_body) # user has one invite self._check_invite_and_join_status(self.other_user, 1, 0) ``` ###Assistant : Tests that sending a server notice is successful if a display_name is set, avatar_url is `None`, and "check avatar size and mime type" is set.
642
def cur_iter_done(self) -> bool: return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values() )
Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r`
13
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cur_iter_done(self) -> bool: return all( self._get_result_time(result) >= self._cumul_r for result in self._live_trials.values() ) ``` ###Assistant : Checks if all iterations have completed. TODO(rliaw): also check that `t.iterations == self._r`
643
def get_attendance_years(): year_list = frappe.db.sql_list( ) if not year_list: year_list = [getdate().year] return "\n".join(str(year) for year in year_list)
select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC
10
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_attendance_years(): year_list = frappe.db.sql_list( ) if not year_list: year_list = [getdate().year] return "\n".join(str(year) for year in year_list) ``` ###Assistant : select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC
644
async def test_discovery_notification(hass): mock_integration(hass, MockModule("test")) mock_entity_platform(hass, "config_flow.test", None) with patch.dict(config_entries.HANDLERS):
Test that we create/dismiss a notification when source is discovery.
10
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_discovery_notification(hass): mock_integration(hass, MockModule("test")) mock_entity_platform(hass, "config_flow.test", None) with patch.dict(config_entries.HANDLERS): ``` ###Assistant : Test that we create/dismiss a notification when source is discovery.
645
def get_preference(self, identifier, resolutions, candidates, information): raise NotImplementedError
Produce a sort key for given requirement based on preference. The preference is defined as "I think this requirement should be resolved first". The lower the return value is, the more preferred this group of arguments is. :param identifier: An identifier as returned by ``identify()``. This identifies the dependency matches of which should be returned. :param resolutions: Mapping of candidates currently pinned by the resolver. Each key is an identifier, and the value a candidate. The candidate may conflict with requirements from ``information``. :param candidates: Mapping of each dependency's possible candidates. Each value is an iterator of candidates. :param information: Mapping of requirement information of each package. Each value is an iterator of *requirement information*. A *requirement information* instance is a named tuple with two members: * ``requirement`` specifies a requirement contributing to the current list of candidates. * ``parent`` specifies the candidate that provides (depends on) the requirement, or ``None`` to indicate a root requirement. The preference could depend on a variety of issues, including (not necessarily in this order): * Is this package pinned in the current resolution result? * How relaxed is the requirement? Stricter ones should probably be worked on first? (I don't know, actually.) * How many possibilities are there to satisfy this requirement? Those with few left should likely be worked on first, I guess? * Are there any known conflicts for this requirement? We should probably work on those with the most known conflicts. A sortable value should be returned (this will be used as the ``key`` parameter of the built-in sorting function). The smaller the value is, the more preferred this requirement is (i.e. the sorting function is called with ``reverse=False``).
279
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_preference(self, identifier, resolutions, candidates, information): raise NotImplementedError ``` ###Assistant : Produce a sort key for given requirement based on preference. The preference is defined as "I think this requirement should be resolved first". The lower the return value is, the more preferred this group of arguments is. :param identifier: An identifier as returned by ``identify()``. This identifies the dependency matches of which should be returned. :param resolutions: Mapping of candidates currently pinned by the resolver. Each key is an identifier, and the value a candidate. The candidate may conflict with requirements from ``information``. :param candidates: Mapping of each dependency's possible candidates. Each value is an iterator of candidates. :param information: Mapping of requirement information of each package. Each value is an iterator of *requirement information*. A *requirement information* instance is a named tuple with two members: * ``requirement`` specifies a requirement contributing to the current list of candidates. * ``parent`` specifies the candidate that provides (depends on) the requirement, or ``None`` to indicate a root requirement. The preference could depend on a variety of issues, including (not necessarily in this order): * Is this package pinned in the current resolution result? * How relaxed is the requirement? Stricter ones should probably be worked on first? (I don't know, actually.) * How many possibilities are there to satisfy this requirement? Those with few left should likely be worked on first, I guess? * Are there any known conflicts for this requirement? We should probably work on those with the most known conflicts. A sortable value should be returned (this will be used as the ``key`` parameter of the built-in sorting function). The smaller the value is, the more preferred this requirement is (i.e. the sorting function is called with ``reverse=False``).
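A minimal sketch of how a provider might satisfy the `get_preference` contract described above; the class name and tie-breaking rule are illustrative assumptions, not any library's actual implementation.
```Python
class ExampleProvider:
    """Illustrative only: resolve already-pinned identifiers first,
    breaking ties alphabetically, as one possible sortable preference key."""

    def get_preference(self, identifier, resolutions, candidates, information):
        # Smaller keys are resolved first; a pinned identifier sorts ahead
        # of an unpinned one because False < True.
        already_pinned = identifier in resolutions
        return (not already_pinned, identifier)
```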
646
def prepare_metadata(self) -> None: assert self.source_dir details = self.name or f"from {self.link}" if self.use_pep517: assert self.pep517_backend is not None if ( self.editable and self.permit_editable_wheels and self.supports_pyproject_editable() ): self.metadata_directory = generate_editable_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata_legacy( build_env=self.build_env, setup_py_path=self.setup_py_path, source_dir=self.unpacked_source_directory, isolated=self.isolated, details=details, ) # Act on the newly generated metadata, based on the name and version. if not self.name: self._set_requirement() else: self.warn_on_mismatching_name() self.assert_source_matches_version()
Ensure that project metadata is available. Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. Under legacy processing, call setup.py egg-info.
26
72
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def prepare_metadata(self) -> None: assert self.source_dir details = self.name or f"from {self.link}" if self.use_pep517: assert self.pep517_backend is not None if ( self.editable and self.permit_editable_wheels and self.supports_pyproject_editable() ): self.metadata_directory = generate_editable_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata( build_env=self.build_env, backend=self.pep517_backend, details=details, ) else: self.metadata_directory = generate_metadata_legacy( build_env=self.build_env, setup_py_path=self.setup_py_path, source_dir=self.unpacked_source_directory, isolated=self.isolated, details=details, ) # Act on the newly generated metadata, based on the name and version. if not self.name: self._set_requirement() else: self.warn_on_mismatching_name() self.assert_source_matches_version() ``` ###Assistant : Ensure that project metadata is available. Under PEP 517 and PEP 660, call the backend hook to prepare the metadata. Under legacy processing, call setup.py egg-info.
647
def get_image_filename(self, image, filterspec): name, ext = os.path.splitext(os.path.basename(image.file.name)) return "{}images/{}.{}{}".format(settings.MEDIA_URL, name, filterspec, ext)
Get the generated filename for a resized image
8
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_image_filename(self, image, filterspec): name, ext = os.path.splitext(os.path.basename(image.file.name)) return "{}images/{}.{}{}".format(settings.MEDIA_URL, name, filterspec, ext) ``` ###Assistant : Get the generated filename for a resized image
648
def min(self, other, context=None): other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: c = self.compare_total(other) if c == -1: ans = self else: ans = other return ans._fix(context)
Returns the smaller value. Like min(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds.
24
95
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def min(self, other, context=None): other = _convert_other(other, raiseit=True) if context is None: context = getcontext() if self._is_special or other._is_special: # If one operand is a quiet NaN and the other is number, then the # number is always returned sn = self._isnan() on = other._isnan() if sn or on: if on == 1 and sn == 0: return self._fix(context) if sn == 1 and on == 0: return other._fix(context) return self._check_nans(other, context) c = self._cmp(other) if c == 0: c = self.compare_total(other) if c == -1: ans = self else: ans = other return ans._fix(context) ``` ###Assistant : Returns the smaller value. Like min(self, other) except if one is not a number, returns NaN (and signals if one is sNaN). Also rounds.
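A short worked example of the quiet-NaN handling described above, assuming the method mirrors the standard-library `decimal` behaviour:
```Python
from decimal import Decimal

print(Decimal("2").min(Decimal("3")))      # 2   -> ordinary comparison
print(Decimal("2").min(Decimal("NaN")))    # 2   -> quiet NaN is ignored, the number is returned
print(Decimal("NaN").min(Decimal("NaN")))  # NaN -> no numeric operand remains
```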
649
def test_simplelistfilter_with_none_returning_lookups(self): modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site) request = self.request_factory.get("/", {}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) filterspec = changelist.get_filters(request)[0] self.assertEqual(len(filterspec), 0)
A SimpleListFilter lookups method can return None but disables the filter completely.
12
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simplelistfilter_with_none_returning_lookups(self): modeladmin = DecadeFilterBookAdminWithNoneReturningLookups(Book, site) request = self.request_factory.get("/", {}) request.user = self.alfred changelist = modeladmin.get_changelist_instance(request) filterspec = changelist.get_filters(request)[0] self.assertEqual(len(filterspec), 0) ``` ###Assistant : A SimpleListFilter lookups method can return None but disables the filter completely.
650
def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend): missing_value_strategy = preprocessing_parameters["missing_value_strategy"] if missing_value_strategy == FILL_WITH_CONST: return preprocessing_parameters["fill_value"] elif missing_value_strategy == FILL_WITH_MODE: return dataset_cols[feature[COLUMN]].value_counts().index[0] elif missing_value_strategy == FILL_WITH_MEAN: if feature[TYPE] != NUMBER: raise ValueError( f"Filling missing values with mean is supported " f"only for number types, not for type {feature[TYPE]}.", ) return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean()) elif missing_value_strategy == FILL_WITH_FALSE: distinct_values = backend.df_engine.compute( dataset_cols[feature[COLUMN]].drop_duplicates().dropna() ).values.tolist() if len(distinct_values) > 2: raise ValueError( f"Missing value strategy `fill_with_false` " f"for column {feature[COLUMN]} expects 2 distinct values, " f"found: {len(distinct_values)} (ex: {distinct_values[:10]})" ) # Determine the False label. # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, "human"/"bot". for v in sorted(distinct_values, reverse=True): fallback_true_label = preprocessing_parameters.get("fallback_true_label", "true") if strings_utils.str2bool(v, fallback_true_label) is False: return v raise ValueError( f"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}." ) # Otherwise, we cannot precompute the fill value for this dataset return None
Precomputes the fill value for a feature. NOTE: this is called before NaNs are removed from the dataset. Modifications here must handle NaNs gracefully.
24
157
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def precompute_fill_value(dataset_cols, feature, preprocessing_parameters, backend): missing_value_strategy = preprocessing_parameters["missing_value_strategy"] if missing_value_strategy == FILL_WITH_CONST: return preprocessing_parameters["fill_value"] elif missing_value_strategy == FILL_WITH_MODE: return dataset_cols[feature[COLUMN]].value_counts().index[0] elif missing_value_strategy == FILL_WITH_MEAN: if feature[TYPE] != NUMBER: raise ValueError( f"Filling missing values with mean is supported " f"only for number types, not for type {feature[TYPE]}.", ) return backend.df_engine.compute(dataset_cols[feature[COLUMN]].mean()) elif missing_value_strategy == FILL_WITH_FALSE: distinct_values = backend.df_engine.compute( dataset_cols[feature[COLUMN]].drop_duplicates().dropna() ).values.tolist() if len(distinct_values) > 2: raise ValueError( f"Missing value strategy `fill_with_false` " f"for column {feature[COLUMN]} expects 2 distinct values, " f"found: {len(distinct_values)} (ex: {distinct_values[:10]})" ) # Determine the False label. # Distinct values are sorted in reverse to mirror the selection of the default fallback_true_label (in # binary_feature.get_feature_meta) for binary columns with unconventional boolean values, "human"/"bot". for v in sorted(distinct_values, reverse=True): fallback_true_label = preprocessing_parameters.get("fallback_true_label", "true") if strings_utils.str2bool(v, fallback_true_label) is False: return v raise ValueError( f"Unable to determine False value for column {feature[COLUMN]} with distinct values: {distinct_values}." ) # Otherwise, we cannot precompute the fill value for this dataset return None ``` ###Assistant : Precomputes the fill value for a feature. NOTE: this is called before NaNs are removed from the dataset. Modifications here must handle NaNs gracefully.
651
def loss_labels(self, outputs, targets, indices, num_boxes, log=True): if "logits" not in outputs: raise ValueError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses
Classification loss (NLL) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
16
84
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def loss_labels(self, outputs, targets, indices, num_boxes, log=True): if "logits" not in outputs: raise ValueError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses ``` ###Assistant : Classification loss (NLL) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
652
def test_stacking_classifier_multilabel_predict_proba(estimator): X_train, X_test, y_train, y_test = train_test_split( X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 ) n_outputs = 3 estimators = [("est", estimator)] stacker = StackingClassifier( estimators=estimators, final_estimator=KNeighborsClassifier(), stack_method="predict_proba", ).fit(X_train, y_train) X_trans = stacker.transform(X_test) assert X_trans.shape == (X_test.shape[0], n_outputs) # we should not have any collinear classes and thus nothing should sum to 1 assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) y_pred = stacker.predict(X_test) assert y_pred.shape == y_test.shape
Check the behaviour for the multilabel classification case and the `predict_proba` stacking method. Estimators are not consistent with the output arrays and we need to ensure that we handle all cases.
31
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stacking_classifier_multilabel_predict_proba(estimator): X_train, X_test, y_train, y_test = train_test_split( X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 ) n_outputs = 3 estimators = [("est", estimator)] stacker = StackingClassifier( estimators=estimators, final_estimator=KNeighborsClassifier(), stack_method="predict_proba", ).fit(X_train, y_train) X_trans = stacker.transform(X_test) assert X_trans.shape == (X_test.shape[0], n_outputs) # we should not have any collinear classes and thus nothing should sum to 1 assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) y_pred = stacker.predict(X_test) assert y_pred.shape == y_test.shape ``` ###Assistant : Check the behaviour for the multilabel classification case and the `predict_proba` stacking method. Estimators are not consistent with the output arrays and we need to ensure that we handle all cases.
653
def get_page(self, url): # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator).
41
199
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_page(self, url): # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: # pragma: no cover data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile('<a href=[^>]*>([^<]+)<') ``` ###Assistant : Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator).
654
def enable_all_warnings() -> None: __diag__.enable_all_warnings() # hide abstract class del __config_flags
Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
8
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def enable_all_warnings() -> None: __diag__.enable_all_warnings() # hide abstract class del __config_flags ``` ###Assistant : Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
655
def export_yaml(self): yaml_data = [obj.to_yaml() for obj in self.queryset] return '---\n'.join(yaml_data)
Export the queryset of objects as concatenated YAML documents.
9
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_yaml(self): yaml_data = [obj.to_yaml() for obj in self.queryset] return '---\n'.join(yaml_data) ``` ###Assistant : Export the queryset of objects as concatenated YAML documents.
656
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): if url is not None: url = url.strip() if not url: return False if allowed_hosts is None: allowed_hosts = set() elif isinstance(allowed_hosts, str): allowed_hosts = {allowed_hosts} # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. return _url_has_allowed_host_and_scheme( url, allowed_hosts, require_https=require_https ) and _url_has_allowed_host_and_scheme( url.replace("\\", "/"), allowed_hosts, require_https=require_https ) # Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
Return ``True`` if the url uses an allowed host and a safe scheme. Always return ``False`` on an empty url. If ``require_https`` is ``True``, only 'https' will be considered a valid scheme, as opposed to 'http' and 'https' with the default, ``False``. Note: "True" doesn't entail that a URL is "safe". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs.
70
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): if url is not None: url = url.strip() if not url: return False if allowed_hosts is None: allowed_hosts = set() elif isinstance(allowed_hosts, str): allowed_hosts = {allowed_hosts} # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. return _url_has_allowed_host_and_scheme( url, allowed_hosts, require_https=require_https ) and _url_has_allowed_host_and_scheme( url.replace("\\", "/"), allowed_hosts, require_https=require_https ) # Copied from urllib.parse.urlparse() but uses fixed urlsplit() function. ``` ###Assistant : Return ``True`` if the url uses an allowed host and a safe scheme. Always return ``False`` on an empty url. If ``require_https`` is ``True``, only 'https' will be considered a valid scheme, as opposed to 'http' and 'https' with the default, ``False``. Note: "True" doesn't entail that a URL is "safe". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs.
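A hypothetical redirect-safety check built on the helper documented above; the host and URLs are made up, and the import path assumes the Django utility of the same name:
```Python
from django.utils.http import url_has_allowed_host_and_scheme

allowed = {"example.com"}

url_has_allowed_host_and_scheme("https://example.com/next", allowed)   # True
url_has_allowed_host_and_scheme("https://evil.com/next", allowed)      # False (host not allowed)
url_has_allowed_host_and_scheme("http://example.com/next", allowed,
                                require_https=True)                    # False (scheme not https)
url_has_allowed_host_and_scheme("", allowed)                           # False (empty url)
```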
657
def get_ps(module, pattern): found = False if platform.system() == 'SunOS': flags = '-ef' else: flags = 'auxww' psbin = module.get_bin_path('ps', True) (rc, psout, pserr) = module.run_command([psbin, flags]) if rc == 0: for line in psout.splitlines(): if pattern in line: # FIXME: should add logic to prevent matching 'self', though that should be extremely rare found = True break return found
Last resort to find a service by trying to match pattern to programs in memory
15
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_ps(module, pattern): found = False if platform.system() == 'SunOS': flags = '-ef' else: flags = 'auxww' psbin = module.get_bin_path('ps', True) (rc, psout, pserr) = module.run_command([psbin, flags]) if rc == 0: for line in psout.splitlines(): if pattern in line: # FIXME: should add logic to prevent matching 'self', though that should be extremely rare found = True break return found ``` ###Assistant : Last resort to find a service by trying to match pattern to programs in memory
658
def apply(self, sample, context=None): im = sample['image'] im = im.astype(np.float32, copy=False) if self.is_scale: scale = 1.0 / 255.0 im *= scale if self.norm_type == 'mean_std': mean = np.array(self.mean)[np.newaxis, np.newaxis, :] std = np.array(self.std)[np.newaxis, np.newaxis, :] im -= mean im /= std sample['image'] = im return sample @register_op
Normalize the image. Operators: 1.(optional) Scale the pixel to [0,1] 2.(optional) Subtract mean from each pixel and divide by std
20
47
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply(self, sample, context=None): im = sample['image'] im = im.astype(np.float32, copy=False) if self.is_scale: scale = 1.0 / 255.0 im *= scale if self.norm_type == 'mean_std': mean = np.array(self.mean)[np.newaxis, np.newaxis, :] std = np.array(self.std)[np.newaxis, np.newaxis, :] im -= mean im /= std sample['image'] = im return sample @register_op ``` ###Assistant : Normalize the image. Operators: 1.(optional) Scale the pixel to [0,1] 2.(optional) Subtract mean from each pixel and divide by std
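A standalone NumPy sketch of the same arithmetic the operator performs when `is_scale` is true and `norm_type` is `'mean_std'`; the mean/std values and image shape are illustrative assumptions:
```Python
import numpy as np

im = np.random.randint(0, 256, size=(4, 4, 3)).astype(np.float32)
mean = np.array([0.485, 0.456, 0.406])[np.newaxis, np.newaxis, :]
std = np.array([0.229, 0.224, 0.225])[np.newaxis, np.newaxis, :]

im *= 1.0 / 255.0        # optional scaling of pixels to [0, 1]
im = (im - mean) / std   # per-channel mean subtraction and std division
```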
659
def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors
Check if no relationship model is used by more than one m2m field.
13
88
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _check_m2m_through_same_relationship(cls): errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors ``` ###Assistant : Check if no relationship model is used by more than one m2m field.
660
def image(self) -> "np.ndarray": assert self._image is not None return self._image
:class:`numpy.ndarray`: The source frame for this object.
7
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def image(self) -> "np.ndarray": assert self._image is not None return self._image ``` ###Assistant : :class:`numpy.ndarray`: The source frame for this object.
661
def test_sends_deployment_notification(self, record_analytics): release = self.create_release() version_parsed = self.version_parsed = parse_release(release.version)["description"] url = f"/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/" with self.tasks(): response = self.client.post( url, format="json", data={"environment": self.environment.name} ) assert response.status_code == 201, response.content msg = mail.outbox[0] # check the txt version assert f"Version {version_parsed} was deployed to {self.environment.name} on" in msg.body # check the html version assert ( f"Version {version_parsed} was deployed to {self.environment.name}\n </h2>\n" in msg.alternatives[0][0] ) attachment, text = get_attachment() assert ( text == f"Release {version_parsed} was deployed to {self.environment.name} for this project" ) assert ( attachment["actions"][0]["url"] == f"http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/" ) assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/deploy/?referrer=release_activity-slack-user|Notification Settings>" ) assert analytics_called_with_args( record_analytics, "integrations.email.notification_sent", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) assert analytics_called_with_args( record_analytics, "integrations.slack.notification_sent", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, )
Test that an email AND Slack notification are sent with the expected values when a release is deployed.
18
113
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_sends_deployment_notification(self, record_analytics): release = self.create_release() version_parsed = self.version_parsed = parse_release(release.version)["description"] url = f"/api/0/organizations/{self.organization.slug}/releases/{release.version}/deploys/" with self.tasks(): response = self.client.post( url, format="json", data={"environment": self.environment.name} ) assert response.status_code == 201, response.content msg = mail.outbox[0] # check the txt version assert f"Version {version_parsed} was deployed to {self.environment.name} on" in msg.body # check the html version assert ( f"Version {version_parsed} was deployed to {self.environment.name}\n </h2>\n" in msg.alternatives[0][0] ) attachment, text = get_attachment() assert ( text == f"Release {version_parsed} was deployed to {self.environment.name} for this project" ) assert ( attachment["actions"][0]["url"] == f"http://testserver/organizations/{self.organization.slug}/releases/{release.version}/?project={self.project.id}&unselectedSeries=Healthy/" ) assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/deploy/?referrer=release_activity-slack-user|Notification Settings>" ) assert analytics_called_with_args( record_analytics, "integrations.email.notification_sent", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) assert analytics_called_with_args( record_analytics, "integrations.slack.notification_sent", user_id=self.user.id, actor_id=self.user.actor_id, organization_id=self.organization.id, ) ``` ###Assistant : Test that an email AND Slack notification are sent with the expected values when a release is deployed.
662
def get_all_mode_of_payments(doc): return frappe.db.sql( , {"company": doc.company}, as_dict=1, )
select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,`tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1
27
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_all_mode_of_payments(doc): return frappe.db.sql( , {"company": doc.company}, as_dict=1, ) ``` ###Assistant : select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,`tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s and mp.enabled = 1
663
def _on_move(self, event): if not self.button_pressed: return if self.get_navigate_mode() is not None: # we don't want to rotate if we are zooming/panning # from the toolbar return if self.M is None: return x, y = event.xdata, event.ydata # In case the mouse is out of bounds. if x is None or event.inaxes != self: return dx, dy = x - self._sx, y - self._sy w = self._pseudo_w h = self._pseudo_h # Rotation if self.button_pressed in self._rotate_btn: # rotate viewing point # get the x and y pixel coords if dx == 0 and dy == 0: return roll = np.deg2rad(self.roll) delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll) dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll) self.elev = self.elev + delev self.azim = self.azim + dazim self.stale = True elif self.button_pressed in self._pan_btn: # Start the pan event with pixel coordinates px, py = self.transData.transform([self._sx, self._sy]) self.start_pan(px, py, 2) # pan view (takes pixel coordinate input) self.drag_pan(2, None, event.x, event.y) self.end_pan() # Zoom elif self.button_pressed in self._zoom_btn: # zoom view (dragging down zooms in) scale = h/(h - dy) self._scale_axis_limits(scale, scale, scale) # Store the event coordinates for the next time through. self._sx, self._sy = x, y # Always request a draw update at the end of interaction self.figure.canvas.draw_idle()
Mouse moving. By default, button-1 rotates, button-2 pans, and button-3 zooms; these buttons can be modified via `mouse_init`.
18
203
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _on_move(self, event): if not self.button_pressed: return if self.get_navigate_mode() is not None: # we don't want to rotate if we are zooming/panning # from the toolbar return if self.M is None: return x, y = event.xdata, event.ydata # In case the mouse is out of bounds. if x is None or event.inaxes != self: return dx, dy = x - self._sx, y - self._sy w = self._pseudo_w h = self._pseudo_h # Rotation if self.button_pressed in self._rotate_btn: # rotate viewing point # get the x and y pixel coords if dx == 0 and dy == 0: return roll = np.deg2rad(self.roll) delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll) dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll) self.elev = self.elev + delev self.azim = self.azim + dazim self.stale = True elif self.button_pressed in self._pan_btn: # Start the pan event with pixel coordinates px, py = self.transData.transform([self._sx, self._sy]) self.start_pan(px, py, 2) # pan view (takes pixel coordinate input) self.drag_pan(2, None, event.x, event.y) self.end_pan() # Zoom elif self.button_pressed in self._zoom_btn: # zoom view (dragging down zooms in) scale = h/(h - dy) self._scale_axis_limits(scale, scale, scale) # Store the event coordinates for the next time through. self._sx, self._sy = x, y # Always request a draw update at the end of interaction self.figure.canvas.draw_idle() ``` ###Assistant : Mouse moving. By default, button-1 rotates, button-2 pans, and button-3 zooms; these buttons can be modified via `mouse_init`.
664
def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Install the fileset (normally ends with bff or rte) or rpm package(s) errors = [] for target in targets: filename = os.path.basename(target) if filename.endswith(".bff") or filename.endswith(".rte"): if _is_installed(target): continue cmd = "/usr/sbin/installp -acYXg" if test: cmd += "p" cmd += " -d " dirpath = os.path.dirname(target) cmd += dirpath + " " + filename out = __salt__["cmd.run_all"](cmd, python_shell=False) else: if _is_installed_rpm(filename.split(".aix")[0]): continue # assume use dnf or yum cmdflags = " install --allowerasing " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" if refresh: cmdflags += " --refresh" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" if refresh: cmdflags += " --refresh" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" else: cmdexe = "/usr/bin/rpm" cmdflags = " -Uivh " if test: cmdflags += " --test" cmd = [cmdexe, cmdflags, target] out = __salt__["cmd.run_all"](cmd, python_shell=False) if 0 != out["retcode"]: errors.append(out["stderr"]) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered installing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) # No error occurred if test: return "Test succeeded." return ret
Install the named fileset(s)/rpm package(s). .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Note: use of rpm to install implies that rpm's dependencies must have been previously installed. dnf and yum automatically install rpm's dependencies as part of the install process name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='["foo", "bar"]'
172
248
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs): targets = salt.utils.args.split_input(pkgs) if pkgs else [name] if not targets: return {} if pkgs: log.debug("Removing these fileset(s)/rpm package(s) %s: %s", name, targets) # Get a list of the currently installed pkgs. old = list_pkgs() # Install the fileset (normally ends with bff or rte) or rpm package(s) errors = [] for target in targets: filename = os.path.basename(target) if filename.endswith(".bff") or filename.endswith(".rte"): if _is_installed(target): continue cmd = "/usr/sbin/installp -acYXg" if test: cmd += "p" cmd += " -d " dirpath = os.path.dirname(target) cmd += dirpath + " " + filename out = __salt__["cmd.run_all"](cmd, python_shell=False) else: if _is_installed_rpm(filename.split(".aix")[0]): continue # assume use dnf or yum cmdflags = " install --allowerasing " if pathlib.Path("/opt/freeware/bin/dnf").is_file(): cmdexe = "/opt/freeware/bin/dnf" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" if refresh: cmdflags += " --refresh" elif pathlib.Path("/opt/freeware/bin/yum").is_file(): cmdexe = "/opt/freeware/bin/yum" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" if refresh: cmdflags += " --refresh" elif pathlib.Path("/usr/bin/yum").is_file(): cmdexe = "/usr/bin/yum" if test: cmdflags += " --assumeno" else: cmdflags += " --assumeyes" else: cmdexe = "/usr/bin/rpm" cmdflags = " -Uivh " if test: cmdflags += " --test" cmd = [cmdexe, cmdflags, target] out = __salt__["cmd.run_all"](cmd, python_shell=False) if 0 != out["retcode"]: errors.append(out["stderr"]) # Get a list of the packages after the uninstall __context__.pop("pkg.list_pkgs", None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if errors: raise CommandExecutionError( "Problems encountered installing filesets(s)/package(s)", info={"changes": ret, "errors": errors}, ) # No error occurred if test: return "Test succeeded." return ret ``` ###Assistant : Install the named fileset(s)/rpm package(s). .. versionadded:: 3005 preference to install rpm packages are to use in the following order: /opt/freeware/bin/dnf /opt/freeware/bin/yum /usr/bin/yum /usr/bin/rpm Note: use of rpm to install implies that rpm's dependencies must have been previously installed. dnf and yum automatically install rpm's dependencies as part of the install process name The name of the fileset or rpm package to be installed. refresh Whether or not to update the yum database before executing. Multiple Package Installation Options: pkgs A list of filesets and/or rpm packages to install. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. version Install a specific version of a fileset/rpm package. (Unused at present). test Verify that command functions correctly: Returns a dict containing the new fileset(s)/rpm package(s) names and versions: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base salt '*' pkg.install pkgs='["foo", "bar"]'
665
def is_false(self, ds_key_long): value = self.get_value(ds_key_long) return False if value is None else not bool(value)
Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True` or isn't set).
38
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_false(self, ds_key_long): value = self.get_value(ds_key_long) return False if value is None else not bool(value) ``` ###Assistant : Returns `True`/`False` only if the value is set, always `False` otherwise. So use this method to ask the very specific question of whether the value is set to `False` (and it's not set to `True` or isn't set).
666
def extract_pytorch_structures(): for opt in lmo.optimizer_registry: # Get the torch class: optimizer_class = lmo.optimizer_registry[opt][0] # Parse and clean the class structure: path = get_fully_qualified_class_name(optimizer_class) opt_struct = get_pytkdocs_structure_for_path(path, "google")["objects"][0] prune_pytorch_structures(opt_struct) # Write it to a file: parent_dir = str(Path(__file__).parent.parent) filename = os.path.join(parent_dir, "ludwig/validation/generated/torch/", optimizer_class.__name__) + ".json" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, "w") as outfile: json.dump( opt_struct, outfile, indent=4, sort_keys=True, separators=(",", ": "), ) outfile.write("\n")
Extracts and saves the parsed structure of all pytorch classes referenced in `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under `ludwig/validation/generated/torch/`.
18
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_pytorch_structures(): for opt in lmo.optimizer_registry: # Get the torch class: optimizer_class = lmo.optimizer_registry[opt][0] # Parse and clean the class structure: path = get_fully_qualified_class_name(optimizer_class) opt_struct = get_pytkdocs_structure_for_path(path, "google")["objects"][0] prune_pytorch_structures(opt_struct) # Write it to a file: parent_dir = str(Path(__file__).parent.parent) filename = os.path.join(parent_dir, "ludwig/validation/generated/torch/", optimizer_class.__name__) + ".json" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, "w") as outfile: json.dump( opt_struct, outfile, indent=4, sort_keys=True, separators=(",", ": "), ) outfile.write("\n") ``` ###Assistant : Extracts and saves the parsed structure of all pytorch classes referenced in `ludwig.modules.optimization_modules.optimizer_registry` as JSON files under `ludwig/validation/generated/torch/`.
667
def test_hf_classification_bin(self, mock_handler): # create predictor create_sql = model_name = 'spam_classifier' predict_sql = self.hf_test_run(mock_handler, model_name, create_sql, predict_sql)
CREATE PREDICTOR huggingface.spam_classifier predict PRED USING task='text-classification', model_name= "mrm8488/bert-tiny-finetuned-sms-spam-detection", input_column = 'text_spammy', labels=['ham','spam'] SELECT h.* FROM pg.df as t JOIN huggingface.spam_classifier as h
23
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_hf_classification_bin(self, mock_handler): # create predictor create_sql = model_name = 'spam_classifier' predict_sql = self.hf_test_run(mock_handler, model_name, create_sql, predict_sql) ``` ###Assistant : CREATE PREDICTOR huggingface.spam_classifier predict PRED USING task='text-classification', model_name= "mrm8488/bert-tiny-finetuned-sms-spam-detection", input_column = 'text_spammy', labels=['ham','spam'] SELECT h.* FROM pg.df as t JOIN huggingface.spam_classifier as h
668
def match_submerged_margins(layoutgrids, fig): for sfig in fig.subfigs: match_submerged_margins(layoutgrids, sfig) axs = [a for a in fig.get_axes() if a.get_subplotspec() is not None and a.get_in_layout()] for ax1 in axs: ss1 = ax1.get_subplotspec() if ss1.get_gridspec() not in layoutgrids: axs.remove(ax1) continue lg1 = layoutgrids[ss1.get_gridspec()] # interior columns: if len(ss1.colspan) > 1: maxsubl = np.max( lg1.margin_vals['left'][ss1.colspan[1:]] + lg1.margin_vals['leftcb'][ss1.colspan[1:]] ) maxsubr = np.max( lg1.margin_vals['right'][ss1.colspan[:-1]] + lg1.margin_vals['rightcb'][ss1.colspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None and len(ss2.colspan) > 1: maxsubl2 = np.max( lg2.margin_vals['left'][ss2.colspan[1:]] + lg2.margin_vals['leftcb'][ss2.colspan[1:]]) if maxsubl2 > maxsubl: maxsubl = maxsubl2 maxsubr2 = np.max( lg2.margin_vals['right'][ss2.colspan[:-1]] + lg2.margin_vals['rightcb'][ss2.colspan[:-1]]) if maxsubr2 > maxsubr: maxsubr = maxsubr2 for i in ss1.colspan[1:]: lg1.edit_margin_min('left', maxsubl, cell=i) for i in ss1.colspan[:-1]: lg1.edit_margin_min('right', maxsubr, cell=i) # interior rows: if len(ss1.rowspan) > 1: maxsubt = np.max( lg1.margin_vals['top'][ss1.rowspan[1:]] + lg1.margin_vals['topcb'][ss1.rowspan[1:]] ) maxsubb = np.max( lg1.margin_vals['bottom'][ss1.rowspan[:-1]] + lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None: if len(ss2.rowspan) > 1: maxsubt = np.max([np.max( lg2.margin_vals['top'][ss2.rowspan[1:]] + lg2.margin_vals['topcb'][ss2.rowspan[1:]] ), maxsubt]) maxsubb = np.max([np.max( lg2.margin_vals['bottom'][ss2.rowspan[:-1]] + lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]] ), maxsubb]) for i in ss1.rowspan[1:]: lg1.edit_margin_min('top', maxsubt, cell=i) for i in ss1.rowspan[:-1]: lg1.edit_margin_min('bottom', maxsubb, cell=i)
Make the margins that are submerged inside an Axes the same size. This allows axes that span two columns (or rows) that are offset from one another to have the same size. This gives the proper layout for something like:: fig = plt.figure(constrained_layout=True) axs = fig.subplot_mosaic("AAAB\nCCDD") Without this routine, the axes D will be wider than C, because the margin width between the two columns in C has no width by default, whereas the margins between the two columns of D are set by the width of the margin between A and B. However, obviously the user would like C and D to be the same size, so we need to add constraints to these "submerged" margins. This routine makes all the interior margins the same, and the spacing between the three columns in A and the two column in C are all set to the margins between the two columns of D. See test_constrained_layout::test_constrained_layout12 for an example.
158
190
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_submerged_margins(layoutgrids, fig): for sfig in fig.subfigs: match_submerged_margins(layoutgrids, sfig) axs = [a for a in fig.get_axes() if a.get_subplotspec() is not None and a.get_in_layout()] for ax1 in axs: ss1 = ax1.get_subplotspec() if ss1.get_gridspec() not in layoutgrids: axs.remove(ax1) continue lg1 = layoutgrids[ss1.get_gridspec()] # interior columns: if len(ss1.colspan) > 1: maxsubl = np.max( lg1.margin_vals['left'][ss1.colspan[1:]] + lg1.margin_vals['leftcb'][ss1.colspan[1:]] ) maxsubr = np.max( lg1.margin_vals['right'][ss1.colspan[:-1]] + lg1.margin_vals['rightcb'][ss1.colspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None and len(ss2.colspan) > 1: maxsubl2 = np.max( lg2.margin_vals['left'][ss2.colspan[1:]] + lg2.margin_vals['leftcb'][ss2.colspan[1:]]) if maxsubl2 > maxsubl: maxsubl = maxsubl2 maxsubr2 = np.max( lg2.margin_vals['right'][ss2.colspan[:-1]] + lg2.margin_vals['rightcb'][ss2.colspan[:-1]]) if maxsubr2 > maxsubr: maxsubr = maxsubr2 for i in ss1.colspan[1:]: lg1.edit_margin_min('left', maxsubl, cell=i) for i in ss1.colspan[:-1]: lg1.edit_margin_min('right', maxsubr, cell=i) # interior rows: if len(ss1.rowspan) > 1: maxsubt = np.max( lg1.margin_vals['top'][ss1.rowspan[1:]] + lg1.margin_vals['topcb'][ss1.rowspan[1:]] ) maxsubb = np.max( lg1.margin_vals['bottom'][ss1.rowspan[:-1]] + lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]] ) for ax2 in axs: ss2 = ax2.get_subplotspec() lg2 = layoutgrids[ss2.get_gridspec()] if lg2 is not None: if len(ss2.rowspan) > 1: maxsubt = np.max([np.max( lg2.margin_vals['top'][ss2.rowspan[1:]] + lg2.margin_vals['topcb'][ss2.rowspan[1:]] ), maxsubt]) maxsubb = np.max([np.max( lg2.margin_vals['bottom'][ss2.rowspan[:-1]] + lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]] ), maxsubb]) for i in ss1.rowspan[1:]: lg1.edit_margin_min('top', maxsubt, cell=i) for i in ss1.rowspan[:-1]: lg1.edit_margin_min('bottom', maxsubb, cell=i) ``` ###Assistant : Make the margins that are submerged inside an Axes the same size. This allows axes that span two columns (or rows) that are offset from one another to have the same size. This gives the proper layout for something like:: fig = plt.figure(constrained_layout=True) axs = fig.subplot_mosaic("AAAB\nCCDD") Without this routine, the axes D will be wider than C, because the margin width between the two columns in C has no width by default, whereas the margins between the two columns of D are set by the width of the margin between A and B. However, obviously the user would like C and D to be the same size, so we need to add constraints to these "submerged" margins. This routine makes all the interior margins the same, and the spacing between the three columns in A and the two column in C are all set to the margins between the two columns of D. See test_constrained_layout::test_constrained_layout12 for an example.
669
def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__)
Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
37
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__) ``` ###Assistant : Parse a version string into a tuple. Args: version_str (str): The version string. Returns: tuple[int | str]: The version info, e.g., "1.3.0" is parsed into (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
670
def adjust_settings_for_relay_tests(settings): settings.ALLOWED_HOSTS = [ "localhost", "testserver", "host.docker.internal", "0.0.0.0", "127.0.0.1", ] settings.KAFKA_CLUSTERS = { "default": { "common": {"bootstrap.servers": "127.0.0.1:9092"}, "producers": { "compression.type": "lz4", "message.max.bytes": 50000000, # 50MB, default is 1MB }, } } settings.SENTRY_RELAY_WHITELIST_PK = ["SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8"] settings.SENTRY_USE_RELAY = True @pytest.fixture
Adjusts the application settings to accept calls from a Relay instance running inside a docker container. :param settings: the app settings
21
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def adjust_settings_for_relay_tests(settings): settings.ALLOWED_HOSTS = [ "localhost", "testserver", "host.docker.internal", "0.0.0.0", "127.0.0.1", ] settings.KAFKA_CLUSTERS = { "default": { "common": {"bootstrap.servers": "127.0.0.1:9092"}, "producers": { "compression.type": "lz4", "message.max.bytes": 50000000, # 50MB, default is 1MB }, } } settings.SENTRY_RELAY_WHITELIST_PK = ["SMSesqan65THCV6M4qs4kBzPai60LzuDn-xNsvYpuP8"] settings.SENTRY_USE_RELAY = True @pytest.fixture ``` ###Assistant : Adjusts the application settings to accept calls from a Relay instance running inside a docker container. :param settings: the app settings
671
def clear_backends(): if xc._version < 79: raise RuntimeError("clear_backends is not supported in the jaxlib used." "Please update your jaxlib package.") xb._clear_backends() jax.lib.xla_bridge._backends = {} dispatch.xla_callable.cache_clear() # type: ignore dispatch.xla_primitive_callable.cache_clear() _cpp_jit_cache.clear() jax_jit.CompiledFunctionCache.clear_all()
Clear all backend clients so that new backend clients can be created later.
13
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def clear_backends(): if xc._version < 79: raise RuntimeError("clear_backends is not supported in the jaxlib used." "Please update your jaxlib package.") xb._clear_backends() jax.lib.xla_bridge._backends = {} dispatch.xla_callable.cache_clear() # type: ignore dispatch.xla_primitive_callable.cache_clear() _cpp_jit_cache.clear() jax_jit.CompiledFunctionCache.clear_all() ``` ###Assistant : Clear all backend clients so that new backend clients can be created later.
672
def get_power_utilization(self): powerfeeds = PowerFeed.objects.filter(rack=self) available_power_total = sum(pf.available_power for pf in powerfeeds) print(f'available_power_total: {available_power_total}') if not available_power_total: return 0 powerports = [] for powerfeed in powerfeeds: powerports.extend([ peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort) ]) allocated_draw = 0 for powerport in powerports: allocated_draw += powerport.get_power_draw()['allocated'] print(f'allocated_draw: {allocated_draw}') return int(allocated_draw / available_power_total * 100)
Determine the utilization rate of power in the rack and return it as a percentage.
15
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_power_utilization(self): powerfeeds = PowerFeed.objects.filter(rack=self) available_power_total = sum(pf.available_power for pf in powerfeeds) print(f'available_power_total: {available_power_total}') if not available_power_total: return 0 powerports = [] for powerfeed in powerfeeds: powerports.extend([ peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort) ]) allocated_draw = 0 for powerport in powerports: allocated_draw += powerport.get_power_draw()['allocated'] print(f'allocated_draw: {allocated_draw}') return int(allocated_draw / available_power_total * 100) ``` ###Assistant : Determine the utilization rate of power in the rack and return it as a percentage.
673
def cleanup(self): orphaned = [] for w in self.workers[::]: if not w.alive: # the worker process has exited # 1. take the task it was running and enqueue the error # callbacks # 2. take any pending tasks delivered to its queue and # send them to another worker logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode)) if w.current_task: if w.current_task != 'QUIT': try: for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']): reaper.reap_job(j, 'failed') except Exception: logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) orphaned.extend(w.orphaned_tasks) self.workers.remove(w) elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down: # the process has an empty queue (it's idle) and we have # more processes in the pool than we need (> min) # send this process a message so it will exit gracefully # at the next opportunity logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}') w.quit() self.workers.remove(w) if w.alive: # if we discover a task manager invocation that's been running # too long, reap it (because otherwise it'll just hold the postgres # advisory lock forever); the goal of this code is to discover # deadlocks or other serious issues in the task manager that cause # the task manager to never do more work current_task = w.current_task if current_task and isinstance(current_task, dict): endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager'] current_task_name = current_task.get('task', '') if any(current_task_name.endswith(e) for e in endings): if 'started' not in current_task: w.managed_tasks[current_task['uuid']]['started'] = time.time() age = time.time() - current_task['started'] w.managed_tasks[current_task['uuid']]['age'] = age if age > self.task_manager_timeout: logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}') os.kill(w.pid, signal.SIGTERM) for m in orphaned: # if all the workers are dead, spawn at least one if not len(self.workers): self.up() idx = random.choice(range(len(self.workers))) self.write(idx, m)
Perform some internal account and cleanup. This is run on every cluster node heartbeat: 1. Discover worker processes that exited, and recover messages they were handling. 2. Clean up unnecessary, idle workers. IMPORTANT: this function is one of the few places in the dispatcher (aside from setting lookups) where we talk to the database. As such, if there's an outage, this method _can_ throw various django.db.utils.Error exceptions. Act accordingly.
69
270
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cleanup(self): orphaned = [] for w in self.workers[::]: if not w.alive: # the worker process has exited # 1. take the task it was running and enqueue the error # callbacks # 2. take any pending tasks delivered to its queue and # send them to another worker logger.error('worker pid:{} is gone (exit={})'.format(w.pid, w.exitcode)) if w.current_task: if w.current_task != 'QUIT': try: for j in UnifiedJob.objects.filter(celery_task_id=w.current_task['uuid']): reaper.reap_job(j, 'failed') except Exception: logger.exception('failed to reap job UUID {}'.format(w.current_task['uuid'])) orphaned.extend(w.orphaned_tasks) self.workers.remove(w) elif (len(self.workers) > self.min_workers) and w.ready_to_scale_down: # the process has an empty queue (it's idle) and we have # more processes in the pool than we need (> min) # send this process a message so it will exit gracefully # at the next opportunity logger.info(f'scaling down worker pid:{w.pid} prior total:{len(self.workers)}') w.quit() self.workers.remove(w) if w.alive: # if we discover a task manager invocation that's been running # too long, reap it (because otherwise it'll just hold the postgres # advisory lock forever); the goal of this code is to discover # deadlocks or other serious issues in the task manager that cause # the task manager to never do more work current_task = w.current_task if current_task and isinstance(current_task, dict): endings = ['tasks.task_manager', 'tasks.dependency_manager', 'tasks.workflow_manager'] current_task_name = current_task.get('task', '') if any(current_task_name.endswith(e) for e in endings): if 'started' not in current_task: w.managed_tasks[current_task['uuid']]['started'] = time.time() age = time.time() - current_task['started'] w.managed_tasks[current_task['uuid']]['age'] = age if age > self.task_manager_timeout: logger.error(f'{current_task_name} has held the advisory lock for {age}, sending SIGTERM to {w.pid}') os.kill(w.pid, signal.SIGTERM) for m in orphaned: # if all the workers are dead, spawn at least one if not len(self.workers): self.up() idx = random.choice(range(len(self.workers))) self.write(idx, m) ``` ###Assistant : Perform some internal account and cleanup. This is run on every cluster node heartbeat: 1. Discover worker processes that exited, and recover messages they were handling. 2. Clean up unnecessary, idle workers. IMPORTANT: this function is one of the few places in the dispatcher (aside from setting lookups) where we talk to the database. As such, if there's an outage, this method _can_ throw various django.db.utils.Error exceptions. Act accordingly.
674
def test_token_node_empty_csrf_cookie(self): req = self._get_request(cookie="") mw = CsrfViewMiddleware(token_view) mw.process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) csrf_secret = _unmask_cipher_token(token) self._check_token_present(resp, csrf_secret)
A new token is sent if the csrf_cookie is the empty string.
12
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_token_node_empty_csrf_cookie(self): req = self._get_request(cookie="") mw = CsrfViewMiddleware(token_view) mw.process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) csrf_secret = _unmask_cipher_token(token) self._check_token_present(resp, csrf_secret) ``` ###Assistant : A new token is sent if the csrf_cookie is the empty string.
675
def save(self, envs): assert isinstance(envs, list), "envs should be a list" if len(envs) > 0: for env in envs: assert isstr(env), "env should be a string" return self._send( { "data": envs, }, "save", )
This function allows the user to save envs that are alive on the Tornado server. The envs can be specified as a list of env ids.
26
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, envs): assert isinstance(envs, list), "envs should be a list" if len(envs) > 0: for env in envs: assert isstr(env), "env should be a string" return self._send( { "data": envs, }, "save", ) ``` ###Assistant : This function allows the user to save envs that are alive on the Tornado server. The envs can be specified as a list of env ids.
676
def test_has_related_field_in_list_display_o2o(self): media = Media.objects.create(name="Foo") Vodcast.objects.create(media=media) response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {}) response.context["cl"].list_display = ["media"] self.assertIs(response.context["cl"].has_related_field_in_list_display(), True) response.context["cl"].list_display = ["media_id"] self.assertIs(response.context["cl"].has_related_field_in_list_display(), False)
Joins shouldn't be performed for <O2O>_id fields in list display.
10
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_has_related_field_in_list_display_o2o(self): media = Media.objects.create(name="Foo") Vodcast.objects.create(media=media) response = self.client.get(reverse("admin:admin_views_vodcast_changelist"), {}) response.context["cl"].list_display = ["media"] self.assertIs(response.context["cl"].has_related_field_in_list_display(), True) response.context["cl"].list_display = ["media_id"] self.assertIs(response.context["cl"].has_related_field_in_list_display(), False) ``` ###Assistant : Joins shouldn't be performed for <O2O>_id fields in list display.
677
def binary_crossentropy(target, output, from_logits=False): target = tf.convert_to_tensor(target) output = tf.convert_to_tensor(output) # Use logits whenever they are available. `softmax` and `sigmoid` # activations cache logits on the `output` Tensor. if hasattr(output, "_keras_logits"): output = output._keras_logits # pylint: disable=protected-access if from_logits: warnings.warn( '"`binary_crossentropy` received `from_logits=True`, but the `output`' " argument was produced by a sigmoid or softmax activation and thus " 'does not represent logits. Was this intended?"', stacklevel=2, ) from_logits = True if from_logits: return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) if ( not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and output.op.type == "Sigmoid" ) and not hasattr(output, "_keras_history"): # When sigmoid activation function is used for output operation, we # use logits from the sigmoid function directly to compute loss in order # to prevent collapsing zero when training. assert len(output.op.inputs) == 1 output = output.op.inputs[0] return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) # Compute cross entropy from probabilities. bce = target * tf.math.log(output + epsilon()) bce += (1 - target) * tf.math.log(1 - output + epsilon()) return -bce @keras_export("keras.backend.binary_focal_crossentropy") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs
Binary crossentropy between an output tensor and a target tensor. Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor.
46
176
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def binary_crossentropy(target, output, from_logits=False): target = tf.convert_to_tensor(target) output = tf.convert_to_tensor(output) # Use logits whenever they are available. `softmax` and `sigmoid` # activations cache logits on the `output` Tensor. if hasattr(output, "_keras_logits"): output = output._keras_logits # pylint: disable=protected-access if from_logits: warnings.warn( '"`binary_crossentropy` received `from_logits=True`, but the `output`' " argument was produced by a sigmoid or softmax activation and thus " 'does not represent logits. Was this intended?"', stacklevel=2, ) from_logits = True if from_logits: return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) if ( not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and output.op.type == "Sigmoid" ) and not hasattr(output, "_keras_history"): # When sigmoid activation function is used for output operation, we # use logits from the sigmoid function directly to compute loss in order # to prevent collapsing zero when training. assert len(output.op.inputs) == 1 output = output.op.inputs[0] return tf.nn.sigmoid_cross_entropy_with_logits( labels=target, logits=output ) epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype) output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_) # Compute cross entropy from probabilities. bce = target * tf.math.log(output + epsilon()) bce += (1 - target) * tf.math.log(1 - output + epsilon()) return -bce @keras_export("keras.backend.binary_focal_crossentropy") @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs ``` ###Assistant : Binary crossentropy between an output tensor and a target tensor. Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor.
678
def test_process_pulled_event_with_missing_state(self) -> None: return self._test_process_pulled_event_with_missing_state(False)
Ensure that we correctly handle pulled events with lots of missing state In this test, we pretend we are processing a "pulled" event (eg, via backfill or get_missing_events). The pulled event has a prev_event we haven't previously seen, so the server requests the state at that prev_event. There is a lot of state we don't have, so we expect the server to make a /state request. We check that the pulled event is correctly persisted, and that the state is as we expect.
83
6
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_process_pulled_event_with_missing_state(self) -> None: return self._test_process_pulled_event_with_missing_state(False) ``` ###Assistant : Ensure that we correctly handle pulled events with lots of missing state In this test, we pretend we are processing a "pulled" event (eg, via backfill or get_missing_events). The pulled event has a prev_event we haven't previously seen, so the server requests the state at that prev_event. There is a lot of state we don't have, so we expect the server to make a /state request. We check that the pulled event is correctly persisted, and that the state is as we expect.
679
def test_resolved_in_release(self, mock_func): notification = ResolvedInReleaseActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.SET_RESOLVED_IN_RELEASE, data={"version": "meow"}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() release_name = notification.activity.data["version"] assert text == f"Issue marked as resolved in {release_name} by {self.name}" assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user|Notification Settings>" )
Test that a Slack message is sent with the expected payload when an issue is resolved in a release
19
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_resolved_in_release(self, mock_func): notification = ResolvedInReleaseActivityNotification( Activity( project=self.project, group=self.group, user=self.user, type=ActivityType.SET_RESOLVED_IN_RELEASE, data={"version": "meow"}, ) ) with self.tasks(): notification.send() attachment, text = get_attachment() release_name = notification.activity.data["version"] assert text == f"Issue marked as resolved in {release_name} by {self.name}" assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_in_release_activity-slack-user|Notification Settings>" ) ``` ###Assistant : Test that a Slack message is sent with the expected payload when an issue is resolved in a release
680
def get_vocabulary(self, include_special_tokens=True): # The lookup table data will not be sorted, so we will create a inverted # lookup here, and use that to lookup a range of indices [0, # vocab_size). if self.lookup_table.size() == 0: vocab, indices = [], [] else: keys, values = self.lookup_table.export() vocab, indices = (values, keys) if self.invert else (keys, values) vocab, indices = ( self._tensor_vocab_to_numpy(vocab), indices.numpy(), ) lookup = collections.defaultdict( lambda: self.oov_token, zip(indices, vocab) ) vocab = [lookup[x] for x in range(self.vocabulary_size())] if self.mask_token is not None and self.output_mode == INT: vocab[0] = self.mask_token if not include_special_tokens: vocab = vocab[self._token_start_index() :] return vocab
Returns the current vocabulary of the layer. Args: include_special_tokens: If True, the returned vocabulary will include mask and OOV tokens, and a term's index in the vocabulary will equal the term's index when calling the layer. If False, the returned vocabulary will not include any mask or OOV tokens.
49
100
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_vocabulary(self, include_special_tokens=True): # The lookup table data will not be sorted, so we will create a inverted # lookup here, and use that to lookup a range of indices [0, # vocab_size). if self.lookup_table.size() == 0: vocab, indices = [], [] else: keys, values = self.lookup_table.export() vocab, indices = (values, keys) if self.invert else (keys, values) vocab, indices = ( self._tensor_vocab_to_numpy(vocab), indices.numpy(), ) lookup = collections.defaultdict( lambda: self.oov_token, zip(indices, vocab) ) vocab = [lookup[x] for x in range(self.vocabulary_size())] if self.mask_token is not None and self.output_mode == INT: vocab[0] = self.mask_token if not include_special_tokens: vocab = vocab[self._token_start_index() :] return vocab ``` ###Assistant : Returns the current vocabulary of the layer. Args: include_special_tokens: If True, the returned vocabulary will include mask and OOV tokens, and a term's index in the vocabulary will equal the term's index when calling the layer. If False, the returned vocabulary will not include any mask or OOV tokens.
681
def test_error_raised_with_float_limited_eval_batches(): model = BoringModel() dl_size = len(model.val_dataloader()) limit_val_batches = 1 / (dl_size + 2) trainer = Trainer(limit_val_batches=limit_val_batches) trainer._data_connector.attach_data(model) with pytest.raises( MisconfigurationException, match=fr"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`", ): trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model) @pytest.mark.parametrize( "val_dl", [ DataLoader(dataset=RandomDataset(32, 64), shuffle=True), CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)), CombinedLoader( [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)] ), CombinedLoader( { "dl1": DataLoader(dataset=RandomDataset(32, 64)), "dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True), } ), ], )
Test that an error is raised if there are not enough batches when passed with float value of limit_eval_batches.
19
63
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_error_raised_with_float_limited_eval_batches(): model = BoringModel() dl_size = len(model.val_dataloader()) limit_val_batches = 1 / (dl_size + 2) trainer = Trainer(limit_val_batches=limit_val_batches) trainer._data_connector.attach_data(model) with pytest.raises( MisconfigurationException, match=fr"{limit_val_batches} \* {dl_size} < 1. Please increase the `limit_val_batches`", ): trainer._data_connector._reset_eval_dataloader(RunningStage.VALIDATING, model) @pytest.mark.parametrize( "val_dl", [ DataLoader(dataset=RandomDataset(32, 64), shuffle=True), CombinedLoader(DataLoader(dataset=RandomDataset(32, 64), shuffle=True)), CombinedLoader( [DataLoader(dataset=RandomDataset(32, 64)), DataLoader(dataset=RandomDataset(32, 64), shuffle=True)] ), CombinedLoader( { "dl1": DataLoader(dataset=RandomDataset(32, 64)), "dl2": DataLoader(dataset=RandomDataset(32, 64), shuffle=True), } ), ], ) ``` ###Assistant : Test that an error is raised if there are not enough batches when passed with float value of limit_eval_batches.
682
def not_none_device_or_backend_on_jit(backend, device, num_ins): # TODO(yashkatariya): Remove this entire function when backend and device are # removed as arguments on jit. from jax.experimental import sharding if device is not None and backend is not None: raise ValueError("can't specify both a device and a backend for jit, " "got device={} and backend={}".format(device, backend)) if backend is not None: da = [xb.get_backend(backend).get_default_device_assignment(1)[0]] else: assert device is not None da = [device] assert len(da) == 1 # Set committed to True for this path because it simulates a device_put on # behalf of a user. committed = True # in_shardings will be marked as replicated regardless of whatever the input # had. Given that only a single device is allowed above, this is correct. in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins return committed, da, in_shardings
This is to support the backend and device argument on jit. It's a feature that's deprecated but needs to be supported for feature parity and so that we can delete the non-Array paths when Array is switched on.
38
130
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def not_none_device_or_backend_on_jit(backend, device, num_ins): # TODO(yashkatariya): Remove this entire function when backend and device are # removed as arguments on jit. from jax.experimental import sharding if device is not None and backend is not None: raise ValueError("can't specify both a device and a backend for jit, " "got device={} and backend={}".format(device, backend)) if backend is not None: da = [xb.get_backend(backend).get_default_device_assignment(1)[0]] else: assert device is not None da = [device] assert len(da) == 1 # Set committed to True for this path because it simulates a device_put on # behalf of a user. committed = True # in_shardings will be marked as replicated regardless of whatever the input # had. Given that only a single device is allowed above, this is correct. in_shardings = [sharding.OpShardingSharding.get_replicated(da)] * num_ins return committed, da, in_shardings ``` ###Assistant : This is to support the backend and device argument on jit. It's a feature that's deprecated but needs to be supported for feature parity and so that we can delete the non-Array paths when Array is switched on.
683
def is_mouse_scrolling(self, *args): return 'button' in self.profile and 'scroll' in self.button
Returns True if the touch event is a mousewheel scrolling .. versionadded:: 1.6.0
13
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_mouse_scrolling(self, *args): return 'button' in self.profile and 'scroll' in self.button ``` ###Assistant : Returns True if the touch event is a mousewheel scrolling .. versionadded:: 1.6.0
684
def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False, no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None, erase_all=False, resizable=True, blocking=None): if _DebugWin.debug_window is None: _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar, no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking) txt_color, bg_color = _parse_colors_parm(c or colors) _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color, erase_all=erase_all, font=font, blocking=blocking)
Works like a "print" statement but with windowing options. Routes output to the "Debug Window" In addition to the normal text and background colors, you can use a "colors" tuple/string The "colors" or "c" parameter defines both the text and background in a single parm. It can be a tuple or a single single. Both text and background colors need to be specified colors -(str, str) or str. A combined text/background color definition in a single parameter c - (str, str) - Colors tuple has format (foreground, backgrouned) c - str - can also be a string of the format "foreground on background" ("white on red") :param *args: stuff to output :type *args: (Any) :param size: (w,h) w=characters-wide, h=rows-high :type size: (int, int) :param end: end character :type end: (str) :param sep: separator character :type sep: (str) :param location: Location of upper left corner of the window :type location: (int, int) :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative. :type relative_location: (int, int) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param no_titlebar: If True no titlebar will be shown :type no_titlebar: (bool) :param no_button: don't show button :type no_button: (bool) :param grab_anywhere: If True: can grab anywhere to move the window (Default = False) :type grab_anywhere: (bool) :param background_color: color of background :type background_color: (str) :param text_color: color of the text :type text_color: (str) :param keep_on_top: If True the window will remain above all current windows :type keep_on_top: (bool) :param location: Location of upper left corner of the window :type location: (int, int) :param do_not_reroute_stdout: do not reroute stdout and stderr. If False, both stdout and stderr will reroute to here :type do_not_reroute_stdout: (bool) :param echo_stdout: If True stdout is sent to both the console and the debug window :type echo_stdout: (bool) :param colors: Either a tuple or a string that has both the text and background colors :type colors: (str) or (str, str) :param c: Either a tuple or a string that has both the text and background colors :type c: (str) or (str, str) :param resizable: if True, the user can resize the debug window. Default is True :type resizable: (bool) :param erase_all: If True when erase the output before printing :type erase_all: (bool) :param blocking: if True, makes the window block instead of returning immediately. The "Quit" button changers to "More" :type blocking: (bool | None) :return: :rtype:
444
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def easy_print(*args, size=(None, None), end=None, sep=None, location=(None, None), relative_location=(None, None), font=None, no_titlebar=False, no_button=False, grab_anywhere=False, keep_on_top=None, do_not_reroute_stdout=True, echo_stdout=False, text_color=None, background_color=None, colors=None, c=None, erase_all=False, resizable=True, blocking=None): if _DebugWin.debug_window is None: _DebugWin.debug_window = _DebugWin(size=size, location=location, relative_location=relative_location, font=font, no_titlebar=no_titlebar, no_button=no_button, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, do_not_reroute_stdout=do_not_reroute_stdout, echo_stdout=echo_stdout, resizable=resizable, blocking=blocking) txt_color, bg_color = _parse_colors_parm(c or colors) _DebugWin.debug_window.Print(*args, end=end, sep=sep, text_color=text_color or txt_color, background_color=background_color or bg_color, erase_all=erase_all, font=font, blocking=blocking) ``` ###Assistant : Works like a "print" statement but with windowing options. Routes output to the "Debug Window" In addition to the normal text and background colors, you can use a "colors" tuple/string The "colors" or "c" parameter defines both the text and background in a single parm. It can be a tuple or a single single. Both text and background colors need to be specified colors -(str, str) or str. A combined text/background color definition in a single parameter c - (str, str) - Colors tuple has format (foreground, backgrouned) c - str - can also be a string of the format "foreground on background" ("white on red") :param *args: stuff to output :type *args: (Any) :param size: (w,h) w=characters-wide, h=rows-high :type size: (int, int) :param end: end character :type end: (str) :param sep: separator character :type sep: (str) :param location: Location of upper left corner of the window :type location: (int, int) :param relative_location: (x,y) location relative to the default location of the window, in pixels. Normally the window centers. This location is relative to the location the window would be created. Note they can be negative. :type relative_location: (int, int) :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike :type font: (str or (str, int[, str]) or None) :param no_titlebar: If True no titlebar will be shown :type no_titlebar: (bool) :param no_button: don't show button :type no_button: (bool) :param grab_anywhere: If True: can grab anywhere to move the window (Default = False) :type grab_anywhere: (bool) :param background_color: color of background :type background_color: (str) :param text_color: color of the text :type text_color: (str) :param keep_on_top: If True the window will remain above all current windows :type keep_on_top: (bool) :param location: Location of upper left corner of the window :type location: (int, int) :param do_not_reroute_stdout: do not reroute stdout and stderr. 
If False, both stdout and stderr will reroute to here :type do_not_reroute_stdout: (bool) :param echo_stdout: If True stdout is sent to both the console and the debug window :type echo_stdout: (bool) :param colors: Either a tuple or a string that has both the text and background colors :type colors: (str) or (str, str) :param c: Either a tuple or a string that has both the text and background colors :type c: (str) or (str, str) :param resizable: if True, the user can resize the debug window. Default is True :type resizable: (bool) :param erase_all: If True when erase the output before printing :type erase_all: (bool) :param blocking: if True, makes the window block instead of returning immediately. The "Quit" button changers to "More" :type blocking: (bool | None) :return: :rtype:
685
def wait_start_success(self): _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout)
Block until all pods start successfully. If not successful, it will raise an error, hoping the outer function will catch it
21
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wait_start_success(self): _timeout = self.args.timeout_ready if _timeout <= 0: _timeout = None else: _timeout /= 1e3 if self._wait_for_ready_or_shutdown(_timeout): self._check_failed_to_start() self.logger.debug(__ready_msg__) else: self._fail_start_timeout(_timeout) ``` ###Assistant : Block until all pods start successfully. If not successful, it will raise an error, hoping the outer function will catch it
686
def _rank_decomposition(M, iszerofunc=_iszero, simplify=False): r F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc, pivots=True) rank = len(pivot_cols) C = M.extract(range(M.rows), pivot_cols) F = F[:rank, :] return C, F
Returns a pair of matrices (`C`, `F`) with matching rank such that `A = C F`. Parameters ========== iszerofunc : Function, optional A function used for detecting whether an element can act as a pivot. ``lambda x: x.is_zero`` is used by default. simplify : Bool or Function, optional A function used to simplify elements when looking for a pivot. By default SymPy's ``simplify`` is used. Returns ======= (C, F) : Matrices `C` and `F` are full-rank matrices with rank as same as `A`, whose product gives `A`. See Notes for additional mathematical details. Examples ======== >>> from sympy import Matrix >>> A = Matrix([ ... [1, 3, 1, 4], ... [2, 7, 3, 9], ... [1, 5, 3, 1], ... [1, 2, 0, 8] ... ]) >>> C, F = A.rank_decomposition() >>> C Matrix([ [1, 3, 4], [2, 7, 9], [1, 5, 1], [1, 2, 8]]) >>> F Matrix([ [1, 0, -2, 0], [0, 1, 1, 0], [0, 0, 0, 1]]) >>> C * F == A True Notes ===== Obtaining `F`, an RREF of `A`, is equivalent to creating a product .. math:: E_n E_{n-1} ... E_1 A = F where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or permutation matrices equivalent to each row-reduction step. The inverse of the same product of elimination matrices gives `C`: .. math:: C = \left(E_n E_{n-1} \dots E_1\right)^{-1} It is not necessary, however, to actually compute the inverse: the columns of `C` are those from the original matrix with the same column indices as the indices of the pivot columns of `F`. References ========== .. [1] https://en.wikipedia.org/wiki/Rank_factorization .. [2] Piziak, R.; Odell, P. L. (1 June 1999). "Full Rank Factorization of Matrices". Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882 See Also ======== sympy.matrices.matrices.MatrixReductions.rref
291
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _rank_decomposition(M, iszerofunc=_iszero, simplify=False): r F, pivot_cols = M.rref(simplify=simplify, iszerofunc=iszerofunc, pivots=True) rank = len(pivot_cols) C = M.extract(range(M.rows), pivot_cols) F = F[:rank, :] return C, F ``` ###Assistant : Returns a pair of matrices (`C`, `F`) with matching rank such that `A = C F`. Parameters ========== iszerofunc : Function, optional A function used for detecting whether an element can act as a pivot. ``lambda x: x.is_zero`` is used by default. simplify : Bool or Function, optional A function used to simplify elements when looking for a pivot. By default SymPy's ``simplify`` is used. Returns ======= (C, F) : Matrices `C` and `F` are full-rank matrices with rank as same as `A`, whose product gives `A`. See Notes for additional mathematical details. Examples ======== >>> from sympy import Matrix >>> A = Matrix([ ... [1, 3, 1, 4], ... [2, 7, 3, 9], ... [1, 5, 3, 1], ... [1, 2, 0, 8] ... ]) >>> C, F = A.rank_decomposition() >>> C Matrix([ [1, 3, 4], [2, 7, 9], [1, 5, 1], [1, 2, 8]]) >>> F Matrix([ [1, 0, -2, 0], [0, 1, 1, 0], [0, 0, 0, 1]]) >>> C * F == A True Notes ===== Obtaining `F`, an RREF of `A`, is equivalent to creating a product .. math:: E_n E_{n-1} ... E_1 A = F where `E_n, E_{n-1}, \dots, E_1` are the elimination matrices or permutation matrices equivalent to each row-reduction step. The inverse of the same product of elimination matrices gives `C`: .. math:: C = \left(E_n E_{n-1} \dots E_1\right)^{-1} It is not necessary, however, to actually compute the inverse: the columns of `C` are those from the original matrix with the same column indices as the indices of the pivot columns of `F`. References ========== .. [1] https://en.wikipedia.org/wiki/Rank_factorization .. [2] Piziak, R.; Odell, P. L. (1 June 1999). "Full Rank Factorization of Matrices". Mathematics Magazine. 72 (3): 193. doi:10.2307/2690882 See Also ======== sympy.matrices.matrices.MatrixReductions.rref
687
def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False): if norm not in ("l1", "l2", "max"): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = "csc" elif axis == 1: sparse_format = "csr" else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array( X, accept_sparse=sparse_format, copy=copy, estimator="the normalize function", dtype=FLOAT_DTYPES, ) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in ("l1", "l2"): raise NotImplementedError( "return_norm=True is not implemented " "for sparse matrices with norm 'l1' " "or norm 'l2'" ) if norm == "l1": inplace_csr_row_normalize_l1(X) elif norm == "l2": inplace_csr_row_normalize_l2(X) elif norm == "max": mins, maxes = min_max_axis(X, 1) norms = np.maximum(abs(mins), maxes) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == "l1": norms = np.abs(X).sum(axis=1) elif norm == "l2": norms = row_norms(X) elif norm == "max": norms = np.max(abs(X), axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X
Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
220
172
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def normalize(X, norm="l2", *, axis=1, copy=True, return_norm=False): if norm not in ("l1", "l2", "max"): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = "csc" elif axis == 1: sparse_format = "csr" else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array( X, accept_sparse=sparse_format, copy=copy, estimator="the normalize function", dtype=FLOAT_DTYPES, ) if axis == 0: X = X.T if sparse.issparse(X): if return_norm and norm in ("l1", "l2"): raise NotImplementedError( "return_norm=True is not implemented " "for sparse matrices with norm 'l1' " "or norm 'l2'" ) if norm == "l1": inplace_csr_row_normalize_l1(X) elif norm == "l2": inplace_csr_row_normalize_l2(X) elif norm == "max": mins, maxes = min_max_axis(X, 1) norms = np.maximum(abs(mins), maxes) norms_elementwise = norms.repeat(np.diff(X.indptr)) mask = norms_elementwise != 0 X.data[mask] /= norms_elementwise[mask] else: if norm == "l1": norms = np.abs(X).sum(axis=1) elif norm == "l2": norms = row_norms(X) elif norm == "max": norms = np.max(abs(X), axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X ``` ###Assistant : Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : {'l1', 'l2', 'max'}, default='l2' The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : {0, 1}, default=1 axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. copy : bool, default=True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : bool, default=False Whether to return the computed norms. Returns ------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Normalized input X. norms : ndarray of shape (n_samples, ) if axis=1 else (n_features, ) An array of norms along given axis for X. When X is sparse, a NotImplementedError will be raised for norm 'l1' or 'l2'. See Also -------- Normalizer : Performs normalization using the Transformer API (e.g. as part of a preprocessing :class:`~sklearn.pipeline.Pipeline`). Notes ----- For a comparison of the different scalers, transformers, and normalizers, see :ref:`examples/preprocessing/plot_all_scaling.py <sphx_glr_auto_examples_preprocessing_plot_all_scaling.py>`.
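The normalize entry above documents the API in detail but gives no call-site example. The following is a small usage sketch (not part of the dataset row), assuming scikit-learn and NumPy are installed and that `normalize` is imported from `sklearn.preprocessing`.

```Python
# Hedged usage sketch for the documented normalize() function.
import numpy as np
from sklearn.preprocessing import normalize

X = np.array([[4.0, 1.0, 2.0, 2.0],
              [1.0, 3.0, 9.0, 3.0],
              [5.0, 7.0, 5.0, 1.0]])

# Row-wise L2 normalization (the default): every row ends up with unit length.
X_l2 = normalize(X, norm="l2", axis=1)
print(np.linalg.norm(X_l2, axis=1))  # -> [1. 1. 1.]

# Row-wise L1 normalization, also returning the per-row norms that were divided out.
X_l1, norms = normalize(X, norm="l1", axis=1, return_norm=True)
print(norms)  # sums of absolute values per row: [ 9. 16. 18.]
```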
688
def find_version_to_install(self, name): version = Version.parse(name) if version.patch is not None: return name try: best_match = max( ( inst_version for inst_version in self.iter_installable_versions() if inst_version.matches_minor(version) ), key=operator.attrgetter("cmpkey"), ) except ValueError: raise ValueError( f"no installable version found for {name!r}", ) return best_match
Find a version in the installer from the version supplied. A ValueError is raised if a matching version cannot be found.
21
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def find_version_to_install(self, name): version = Version.parse(name) if version.patch is not None: return name try: best_match = max( ( inst_version for inst_version in self.iter_installable_versions() if inst_version.matches_minor(version) ), key=operator.attrgetter("cmpkey"), ) except ValueError: raise ValueError( f"no installable version found for {name!r}", ) return best_match ``` ###Assistant : Find a version in the installer from the version supplied. A ValueError is raised if a matching version cannot be found.
689
def copy_safe_request(request): meta = { k: request.META[k] for k in HTTP_REQUEST_META_SAFE_COPY if k in request.META and isinstance(request.META[k], str) } return NetBoxFakeRequest({ 'META': meta, 'COOKIES': request.COOKIES, 'POST': request.POST, 'GET': request.GET, 'FILES': request.FILES, 'user': request.user, 'path': request.path, 'id': getattr(request, 'id', None), # UUID assigned by middleware })
Copy selected attributes from a request object into a new fake request object. This is needed in places where thread safe pickling of the useful request data is needed.
29
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def copy_safe_request(request): meta = { k: request.META[k] for k in HTTP_REQUEST_META_SAFE_COPY if k in request.META and isinstance(request.META[k], str) } return NetBoxFakeRequest({ 'META': meta, 'COOKIES': request.COOKIES, 'POST': request.POST, 'GET': request.GET, 'FILES': request.FILES, 'user': request.user, 'path': request.path, 'id': getattr(request, 'id', None), # UUID assigned by middleware }) ``` ###Assistant : Copy selected attributes from a request object into a new fake request object. This is needed in places where thread safe pickling of the useful request data is needed.
690
def batchify(self, obs_batch, sort=False): batch = super().batchify(obs_batch, sort=sort) if batch.valid_indices is None: return batch batch.classifier_label = torch.tensor( [ [obs_batch[i].get('classifier_label_idx', -1)] for i in batch.valid_indices ] ) batch.is_ltr = torch.tensor( [[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices] ) return batch
This method calls the parent class's batchify method and then adds the classifier_label and is_ltr properties to the batch.
19
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batchify(self, obs_batch, sort=False): batch = super().batchify(obs_batch, sort=sort) if batch.valid_indices is None: return batch batch.classifier_label = torch.tensor( [ [obs_batch[i].get('classifier_label_idx', -1)] for i in batch.valid_indices ] ) batch.is_ltr = torch.tensor( [[obs_batch[i].get('is_ltr', False)] for i in batch.valid_indices] ) return batch ``` ###Assistant : This method calls the parent class's batchify method and then adds the classifier_label and is_ltr properties to the batch.
691
def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs): for _ in range(n_convs): height = (height - kernel_size + 2 * pad) // stride + 1 return height
Height of spec after n convolutions with fixed kernel/stride/pad.
9
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs): for _ in range(n_convs): height = (height - kernel_size + 2 * pad) // stride + 1 return height ``` ###Assistant : Height of spec after n convolutions with fixed kernel/stride/pad.
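Since calculate_post_conv_height is a pure helper, a worked example makes the formula concrete. The snippet below redefines the function so it runs standalone; the chosen kernel/stride/pad values are illustrative, not taken from the original project.

```Python
# Self-contained worked example for the helper documented above.
def calculate_post_conv_height(height, kernel_size, stride, pad, n_convs):
    """Height of spec after n convolutions with fixed kernel/stride/pad."""
    for _ in range(n_convs):
        height = (height - kernel_size + 2 * pad) // stride + 1
    return height

# An 80-bin input passed through 3 convolutions with kernel 3, stride 2, pad 1
# shrinks 80 -> 40 -> 20 -> 10.
print(calculate_post_conv_height(80, kernel_size=3, stride=2, pad=1, n_convs=3))  # 10
```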
692
async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync): stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings()) stream_worker_sync.pause() hls = stream.add_provider(HLS_PROVIDER) for i in range(2): segment = Segment(sequence=i, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() hls_client = await hls_stream(stream) resp = await hls_client.get("/playlist.m3u8") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1)] ) segment = Segment(sequence=2, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() resp = await hls_client.get("/playlist.m3u8") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)] ) stream_worker_sync.resume() await stream.stop()
Test rendering the hls playlist with 1 and 2 output segments.
11
76
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_hls_playlist_view(hass, setup_component, hls_stream, stream_worker_sync): stream = create_stream(hass, STREAM_SOURCE, {}, dynamic_stream_settings()) stream_worker_sync.pause() hls = stream.add_provider(HLS_PROVIDER) for i in range(2): segment = Segment(sequence=i, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() hls_client = await hls_stream(stream) resp = await hls_client.get("/playlist.m3u8") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1)] ) segment = Segment(sequence=2, duration=SEGMENT_DURATION) hls.put(segment) await hass.async_block_till_done() resp = await hls_client.get("/playlist.m3u8") assert resp.status == HTTPStatus.OK assert await resp.text() == make_playlist( sequence=0, segments=[make_segment(0), make_segment(1), make_segment(2)] ) stream_worker_sync.resume() await stream.stop() ``` ###Assistant : Test rendering the hls playlist with 1 and 2 output segments.
693
def _create_local_rank_map(self) -> Dict: rank_mapping = {} ip_dict = defaultdict(int) for world_rank in range(len(self.worker_group)): worker = self.worker_group.workers[world_rank] node_ip = worker.metadata.node_ip rank_mapping[world_rank] = ip_dict[node_ip] ip_dict[node_ip] += 1 return rank_mapping
Create mapping from worker world_rank to local_rank. Example: Worker 0: 0.0.0.0 Worker 1: 0.0.0.0 Worker 2: 0.0.0.1 Worker 3: 0.0.0.0 Worker 4: 0.0.0.1 Workers 0, 1, 3 are on 0.0.0.0. Workers 2, 4 are on 0.0.0.1. Expected Output: { 0 -> 0, 1 -> 1, 2 -> 0, 3 -> 2, 4 -> 1 }
55
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_local_rank_map(self) -> Dict: rank_mapping = {} ip_dict = defaultdict(int) for world_rank in range(len(self.worker_group)): worker = self.worker_group.workers[world_rank] node_ip = worker.metadata.node_ip rank_mapping[world_rank] = ip_dict[node_ip] ip_dict[node_ip] += 1 return rank_mapping ``` ###Assistant : Create mapping from worker world_rank to local_rank. Example: Worker 0: 0.0.0.0 Worker 1: 0.0.0.0 Worker 2: 0.0.0.1 Worker 3: 0.0.0.0 Worker 4: 0.0.0.1 Workers 0, 1, 3 are on 0.0.0.0. Workers 2, 4 are on 0.0.0.1. Expected Output: { 0 -> 0, 1 -> 1, 2 -> 0, 3 -> 2, 4 -> 1 }
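The _create_local_rank_map method depends on a worker group object, so it cannot run on its own. The sketch below extracts just the local-rank bookkeeping into a hypothetical standalone function (`local_ranks_from_ips` is an illustrative name, not part of the original code) and checks it against the mapping given in the docstring.

```Python
# Standalone sketch of the local-rank assignment logic described above.
from collections import defaultdict
from typing import Dict, List


def local_ranks_from_ips(node_ips: List[str]) -> Dict[int, int]:
    rank_mapping: Dict[int, int] = {}
    ip_dict: Dict[str, int] = defaultdict(int)
    for world_rank, node_ip in enumerate(node_ips):
        # The local rank is the number of workers already seen on this node.
        rank_mapping[world_rank] = ip_dict[node_ip]
        ip_dict[node_ip] += 1
    return rank_mapping


# Reproduces the example from the docstring: workers 0, 1, 3 share one node,
# workers 2 and 4 share another.
ips = ["0.0.0.0", "0.0.0.0", "0.0.0.1", "0.0.0.0", "0.0.0.1"]
assert local_ranks_from_ips(ips) == {0: 0, 1: 1, 2: 0, 3: 2, 4: 1}
```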
694
def _output_groups(self) -> None: is_rename = self._args.sort_method != "none" logger.info("Creating %s group folders in '%s'.", len(self._sorter.binned), self._args.output_dir) bin_names = [f"_{b}" for b in self._sorter.bin_names] if is_rename: bin_names = [f"{name}_by_{self._args.sort_method}" for name in bin_names] for name in bin_names: folder = os.path.join(self._args.output_dir, name) if os.path.exists(folder): rmtree(folder) os.makedirs(folder) description = f"{'Copying' if self._args.keep_original else 'Moving'} into groups" description += " and renaming" if is_rename else "" pbar = tqdm(range(len(self._sorter.sorted_filelist)), desc=description, file=sys.stdout, leave=False) idx = 0 for bin_id, bin_ in enumerate(self._sorter.binned): pbar.set_description(f"{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}") output_path = os.path.join(self._args.output_dir, bin_names[bin_id]) if not bin_: logger.debug("Removing empty bin: %s", output_path) os.rmdir(output_path) for source in bin_: basename = os.path.basename(source) dst_name = f"{idx:06d}_{basename}" if is_rename else basename dest = os.path.join(output_path, dst_name) self._sort_file(source, dest) idx += 1 pbar.update(1) # Output methods
Move the files to folders. Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate bins in the output location
22
124
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _output_groups(self) -> None: is_rename = self._args.sort_method != "none" logger.info("Creating %s group folders in '%s'.", len(self._sorter.binned), self._args.output_dir) bin_names = [f"_{b}" for b in self._sorter.bin_names] if is_rename: bin_names = [f"{name}_by_{self._args.sort_method}" for name in bin_names] for name in bin_names: folder = os.path.join(self._args.output_dir, name) if os.path.exists(folder): rmtree(folder) os.makedirs(folder) description = f"{'Copying' if self._args.keep_original else 'Moving'} into groups" description += " and renaming" if is_rename else "" pbar = tqdm(range(len(self._sorter.sorted_filelist)), desc=description, file=sys.stdout, leave=False) idx = 0 for bin_id, bin_ in enumerate(self._sorter.binned): pbar.set_description(f"{description}: Bin {bin_id + 1} of {len(self._sorter.binned)}") output_path = os.path.join(self._args.output_dir, bin_names[bin_id]) if not bin_: logger.debug("Removing empty bin: %s", output_path) os.rmdir(output_path) for source in bin_: basename = os.path.basename(source) dst_name = f"{idx:06d}_{basename}" if is_rename else basename dest = os.path.join(output_path, dst_name) self._sort_file(source, dest) idx += 1 pbar.update(1) # Output methods ``` ###Assistant : Move the files to folders. Obtains the bins and original filenames from :attr:`_sorter` and outputs into appropriate bins in the output location
695
def _setSharedLibraryRPATHElf(filename, rpath): # TODO: Might write something that makes a shell script replacement # in case no rpath is present, or use patchelf, for now our use # case seems to use rpaths for executables. # patchelf --set-rpath "$ORIGIN/path/to/library" <executable> with withEnvironmentVarOverriden("LANG", "C"): executeToolChecked( logger=postprocessing_logger, command=("patchelf", "--set-rpath", rpath, filename), stderr_filter=_filterPatchelfErrorOutput, absence_message=, )
\ Error, needs 'patchelf' on your system, due to 'RPATH' settings that need to be set.
16
53
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _setSharedLibraryRPATHElf(filename, rpath): # TODO: Might write something that makes a shell script replacement # in case no rpath is present, or use patchelf, for now our use # case seems to use rpaths for executables. # patchelf --set-rpath "$ORIGIN/path/to/library" <executable> with withEnvironmentVarOverriden("LANG", "C"): executeToolChecked( logger=postprocessing_logger, command=("patchelf", "--set-rpath", rpath, filename), stderr_filter=_filterPatchelfErrorOutput, absence_message=, ) ``` ###Assistant : \ Error, needs 'patchelf' on your system, due to 'RPATH' settings that need to be set.
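The RPATH helper above goes through project-internal wrappers (executeToolChecked, withEnvironmentVarOverriden), so it is not runnable by itself. As a rough standard-library-only sketch of the same patchelf invocation, one might write something like the following; it assumes the `patchelf` binary is installed and on PATH, and `set_rpath` is an illustrative name.

```Python
# Hedged sketch: same "patchelf --set-rpath <rpath> <file>" call, via subprocess.
import os
import subprocess


def set_rpath(filename: str, rpath: str) -> None:
    """For example: set_rpath("dist/app.bin", "$ORIGIN/lib")."""
    subprocess.run(
        ["patchelf", "--set-rpath", rpath, filename],
        check=True,                        # raise CalledProcessError if patchelf fails
        env={**os.environ, "LANG": "C"},   # mirror the LANG=C override in the original
        capture_output=True,
    )
```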
696
def _get_columns(self): if self._columns_cache is None: self._columns_cache, column_widths = self._compute_axis_labels_and_lengths( 1 ) if self._column_widths_cache is None: self._column_widths_cache = column_widths return self._columns_cache
Get the columns from the cache object. Returns ------- pandas.Index An index object containing the column labels.
17
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_columns(self): if self._columns_cache is None: self._columns_cache, column_widths = self._compute_axis_labels_and_lengths( 1 ) if self._column_widths_cache is None: self._column_widths_cache = column_widths return self._columns_cache ``` ###Assistant : Get the columns from the cache object. Returns ------- pandas.Index An index object containing the column labels.
697
async def async_update(self) -> None: # Update values from controller's device dictionary self._connected = self._controller.is_connected self._current_temp = self._controller.get_temperature(self._device_id) self._fan_speed = self._controller.get_fan_speed(self._device_id) self._power = self._controller.is_on(self._device_id) self._min_temp = self._controller.get_min_setpoint(self._device_id) self._max_temp = self._controller.get_max_setpoint(self._device_id) self._rssi = self._controller.get_rssi(self._device_id) self._run_hours = self._controller.get_run_hours(self._device_id) self._target_temp = self._controller.get_setpoint(self._device_id) self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id) # Operation mode mode = self._controller.get_mode(self._device_id) self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode) # Preset mode preset = self._controller.get_preset_mode(self._device_id) self._preset = MAP_IH_TO_PRESET_MODE.get(preset) # Swing mode # Climate module only supports one swing setting. self._vvane = self._controller.get_vertical_swing(self._device_id) self._hvane = self._controller.get_horizontal_swing(self._device_id) # Power usage self._power_consumption_heat = self._controller.get_heat_power_consumption( self._device_id ) self._power_consumption_cool = self._controller.get_cool_power_consumption( self._device_id )
Copy values from controller dictionary to climate device.
8
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_update(self) -> None: # Update values from controller's device dictionary self._connected = self._controller.is_connected self._current_temp = self._controller.get_temperature(self._device_id) self._fan_speed = self._controller.get_fan_speed(self._device_id) self._power = self._controller.is_on(self._device_id) self._min_temp = self._controller.get_min_setpoint(self._device_id) self._max_temp = self._controller.get_max_setpoint(self._device_id) self._rssi = self._controller.get_rssi(self._device_id) self._run_hours = self._controller.get_run_hours(self._device_id) self._target_temp = self._controller.get_setpoint(self._device_id) self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id) # Operation mode mode = self._controller.get_mode(self._device_id) self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode) # Preset mode preset = self._controller.get_preset_mode(self._device_id) self._preset = MAP_IH_TO_PRESET_MODE.get(preset) # Swing mode # Climate module only supports one swing setting. self._vvane = self._controller.get_vertical_swing(self._device_id) self._hvane = self._controller.get_horizontal_swing(self._device_id) # Power usage self._power_consumption_heat = self._controller.get_heat_power_consumption( self._device_id ) self._power_consumption_cool = self._controller.get_cool_power_consumption( self._device_id ) ``` ###Assistant : Copy values from controller dictionary to climate device.
698
def test_kb_valid_entities(nlp): mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3]) mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0]) mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5]) # adding aliases mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2]) mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9]) # test the size of the corresponding KB assert mykb.get_size_entities() == 3 assert mykb.get_size_aliases() == 2 # test retrieval of the entity vectors assert mykb.get_vector("Q1") == [8, 4, 3] assert mykb.get_vector("Q2") == [2, 1, 0] assert mykb.get_vector("Q3") == [-1, -6, 5] # test retrieval of prior probabilities assert_almost_equal(mykb.get_prior_prob(entity="Q2", alias="douglas"), 0.8) assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglas"), 0.2) assert_almost_equal(mykb.get_prior_prob(entity="Q342", alias="douglas"), 0.0) assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglassssss"), 0.0)
Test the valid construction of a KB with 3 entities and two aliases
13
94
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_kb_valid_entities(nlp): mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3) # adding entities mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3]) mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0]) mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5]) # adding aliases mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2]) mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9]) # test the size of the corresponding KB assert mykb.get_size_entities() == 3 assert mykb.get_size_aliases() == 2 # test retrieval of the entity vectors assert mykb.get_vector("Q1") == [8, 4, 3] assert mykb.get_vector("Q2") == [2, 1, 0] assert mykb.get_vector("Q3") == [-1, -6, 5] # test retrieval of prior probabilities assert_almost_equal(mykb.get_prior_prob(entity="Q2", alias="douglas"), 0.8) assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglas"), 0.2) assert_almost_equal(mykb.get_prior_prob(entity="Q342", alias="douglas"), 0.0) assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglassssss"), 0.0) ``` ###Assistant : Test the valid construction of a KB with 3 entities and two aliases
699
def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): if like is not None: return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like) if M is None: M = N m = zeros((N, M), dtype=dtype, order=order) if k >= M: return m # Ensure M and k are integers, so we don't get any surprise casting # results in the expressions `M-k` and `M+1` used below. This avoids # a problem with inputs with type (for example) np.uint64. M = operator.index(M) k = operator.index(k) if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m _eye_with_like = array_function_dispatch( _eye_dispatcher )(eye)
Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. .. versionadded:: 1.14.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]])
176
104
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def eye(N, M=None, k=0, dtype=float, order='C', *, like=None): if like is not None: return _eye_with_like(N, M=M, k=k, dtype=dtype, order=order, like=like) if M is None: M = N m = zeros((N, M), dtype=dtype, order=order) if k >= M: return m # Ensure M and k are integers, so we don't get any surprise casting # results in the expressions `M-k` and `M+1` used below. This avoids # a problem with inputs with type (for example) np.uint64. M = operator.index(M) k = operator.index(k) if k >= 0: i = k else: i = (-k) * M m[:M-k].flat[i::M+1] = 1 return m _eye_with_like = array_function_dispatch( _eye_dispatcher )(eye) ``` ###Assistant : Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to `N`. k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. order : {'C', 'F'}, optional Whether the output should be stored in row-major (C-style) or column-major (Fortran-style) order in memory. .. versionadded:: 1.14.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the `k`-th diagonal, whose values are equal to one. See Also -------- identity : (almost) equivalent function diag : diagonal 2-D array from a 1-D array specified by the user. Examples -------- >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) array([[0., 1., 0.], [0., 0., 1.], [0., 0., 0.]])
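The eye docstring above only demonstrates square outputs and a positive offset. As a small complement, the sketch below (assuming NumPy is installed) shows a rectangular shape together with a negative `k`.

```Python
# Rectangular output with the ones pushed onto the first sub-diagonal (k=-1).
import numpy as np

print(np.eye(4, 3, k=-1))
# [[0. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]]
```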